diff --git a/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c4700f975c9f76ccf9dec953157a92c549f450cc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7354b5a5f91fbb92cf6a126745f51252bae9f0ce
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/METADATA
@@ -0,0 +1,93 @@
+Metadata-Version: 2.1
+Name: MarkupSafe
+Version: 2.1.5
+Summary: Safely add untrusted strings to HTML/XML markup.
+Home-page: https://palletsprojects.com/p/markupsafe/
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/markupsafe/
+Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
+Project-URL: Chat, https://discord.gg/pallets
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.7
+Description-Content-Type: text/x-rst
+License-File: LICENSE.rst
+
+MarkupSafe
+==========
+
+MarkupSafe implements a text object that escapes characters so it is
+safe to use in HTML and XML. Characters that have special meanings are
+replaced so that they display as the actual characters. This mitigates
+injection attacks, meaning untrusted user input can safely be displayed
+on a page.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ pip install -U MarkupSafe
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+Examples
+--------
+
+.. code-block:: pycon
+
+ >>> from markupsafe import Markup, escape
+
+ >>> # escape replaces special characters and wraps in Markup
+ >>> escape("")
+ Markup('<script>alert(document.cookie);</script>')
+
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
+ >>> Markup("Hello")
+ Markup('hello')
+
+ >>> escape(Markup("Hello"))
+ Markup('hello')
+
+ >>> # Markup is a str subclass
+ >>> # methods and operators escape their arguments
+ >>> template = Markup("Hello {name}")
+ >>> template.format(name='"World"')
+ Markup('Hello "World"')
+
+
+Donate
+------
+
+The Pallets organization develops and supports MarkupSafe and other
+popular packages. In order to grow the community of contributors and
+users, and allow the maintainers to devote more time to the projects,
+`please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://markupsafe.palletsprojects.com/
+- Changes: https://markupsafe.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/MarkupSafe/
+- Source Code: https://github.com/pallets/markupsafe/
+- Issue Tracker: https://github.com/pallets/markupsafe/issues/
+- Chat: https://discord.gg/pallets
diff --git a/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..9a585b08c68964c9e8e4e462950eddf60c000eaf
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/RECORD
@@ -0,0 +1,14 @@
+MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503
+MarkupSafe-2.1.5.dist-info/METADATA,sha256=icNlaniV7YIQZ1BScCVqNaRtm7MAgfw8d3OBmoSVyAY,3096
+MarkupSafe-2.1.5.dist-info/RECORD,,
+MarkupSafe-2.1.5.dist-info/WHEEL,sha256=5JPYeYl5ZdvdSkrGS4u21mmpPzpFx42qrXOSIgWf4pg,102
+MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=m1ysNeqf55zbEoJtaovca40ivrkEFolPlw5bGoC5Gi4,11290
+markupsafe/__pycache__/__init__.cpython-310.pyc,,
+markupsafe/__pycache__/_native.cpython-310.pyc,,
+markupsafe/_native.py,sha256=_Q7UsXCOvgdonCgqG3l5asANI6eo50EKnDM-mlwEC5M,1776
+markupsafe/_speedups.c,sha256=n3jzzaJwXcoN8nTFyA53f3vSqsWK2vujI-v6QYifjhQ,7403
+markupsafe/_speedups.cp310-win_amd64.pyd,sha256=f7I42eA3ZdDubzr--gvmEu2tzH32l9kfbhyWzWLfI7o,15872
+markupsafe/_speedups.pyi,sha256=f5QtwIOP0eLrxh2v5p6SmaYmlcHIGIfmz0DovaqL0OU,238
+markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..bdd62dbce6fdfdd827fe67f6ed4363837abad475
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.42.0)
+Root-Is-Purelib: false
+Tag: cp310-cp310-win_amd64
+
diff --git a/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/MarkupSafe-2.1.5.dist-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/__init__.py b/pythonProject/.venv/Lib/site-packages/markupsafe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b35509bdafcc643456f09635313cc16d5616042
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/markupsafe/__init__.py
@@ -0,0 +1,332 @@
+import functools
+import string
+import sys
+import typing as t
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ class HasHTML(te.Protocol):
+ def __html__(self) -> str:
+ pass
+
+ _P = te.ParamSpec("_P")
+
+
+__version__ = "2.1.5"
+
+
+def _simple_escaping_wrapper(func: "t.Callable[_P, str]") -> "t.Callable[_P, Markup]":
+ @functools.wraps(func)
+ def wrapped(self: "Markup", *args: "_P.args", **kwargs: "_P.kwargs") -> "Markup":
+ arg_list = _escape_argspec(list(args), enumerate(args), self.escape)
+ _escape_argspec(kwargs, kwargs.items(), self.escape)
+ return self.__class__(func(self, *arg_list, **kwargs)) # type: ignore[arg-type]
+
+ return wrapped # type: ignore[return-value]
+
+
+class Markup(str):
+ """A string that is ready to be safely inserted into an HTML or XML
+ document, either because it was escaped or because it was marked
+ safe.
+
+ Passing an object to the constructor converts it to text and wraps
+ it to mark it safe without escaping. To escape the text, use the
+ :meth:`escape` class method instead.
+
+ >>> Markup("Hello, World!")
+ Markup('Hello, World!')
+ >>> Markup(42)
+ Markup('42')
+ >>> Markup.escape("Hello, World!")
+ Markup('Hello <em>World</em>!')
+
+ This implements the ``__html__()`` interface that some frameworks
+ use. Passing an object that implements ``__html__()`` will wrap the
+ output of that method, marking it safe.
+
+ >>> class Foo:
+ ... def __html__(self):
+ ... return 'foo'
+ ...
+ >>> Markup(Foo())
+ Markup('foo')
+
+ This is a subclass of :class:`str`. It has the same methods, but
+ escapes their arguments and returns a ``Markup`` instance.
+
+ >>> Markup("%s") % ("foo & bar",)
+ Markup('foo & bar')
+ >>> Markup("Hello ") + ""
+ Markup('Hello <foo>')
+ """
+
+ __slots__ = ()
+
+ def __new__(
+ cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
+ ) -> "te.Self":
+ if hasattr(base, "__html__"):
+ base = base.__html__()
+
+ if encoding is None:
+ return super().__new__(cls, base)
+
+ return super().__new__(cls, base, encoding, errors)
+
+ def __html__(self) -> "te.Self":
+ return self
+
+ def __add__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
+ if isinstance(other, str) or hasattr(other, "__html__"):
+ return self.__class__(super().__add__(self.escape(other)))
+
+ return NotImplemented
+
+ def __radd__(self, other: t.Union[str, "HasHTML"]) -> "te.Self":
+ if isinstance(other, str) or hasattr(other, "__html__"):
+ return self.escape(other).__add__(self)
+
+ return NotImplemented
+
+ def __mul__(self, num: "te.SupportsIndex") -> "te.Self":
+ if isinstance(num, int):
+ return self.__class__(super().__mul__(num))
+
+ return NotImplemented
+
+ __rmul__ = __mul__
+
+ def __mod__(self, arg: t.Any) -> "te.Self":
+ if isinstance(arg, tuple):
+ # a tuple of arguments, each wrapped
+ arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
+ elif hasattr(type(arg), "__getitem__") and not isinstance(arg, str):
+ # a mapping of arguments, wrapped
+ arg = _MarkupEscapeHelper(arg, self.escape)
+ else:
+ # a single argument, wrapped with the helper and a tuple
+ arg = (_MarkupEscapeHelper(arg, self.escape),)
+
+ return self.__class__(super().__mod__(arg))
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({super().__repr__()})"
+
+ def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "te.Self":
+ return self.__class__(super().join(map(self.escape, seq)))
+
+ join.__doc__ = str.join.__doc__
+
+ def split( # type: ignore[override]
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
+ ) -> t.List["te.Self"]:
+ return [self.__class__(v) for v in super().split(sep, maxsplit)]
+
+ split.__doc__ = str.split.__doc__
+
+ def rsplit( # type: ignore[override]
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
+ ) -> t.List["te.Self"]:
+ return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
+
+ rsplit.__doc__ = str.rsplit.__doc__
+
+ def splitlines( # type: ignore[override]
+ self, keepends: bool = False
+ ) -> t.List["te.Self"]:
+ return [self.__class__(v) for v in super().splitlines(keepends)]
+
+ splitlines.__doc__ = str.splitlines.__doc__
+
+ def unescape(self) -> str:
+ """Convert escaped markup back into a text string. This replaces
+ HTML entities with the characters they represent.
+
+ >>> Markup("Main » About").unescape()
+ 'Main » About'
+ """
+ from html import unescape
+
+ return unescape(str(self))
+
+ def striptags(self) -> str:
+ """:meth:`unescape` the markup, remove tags, and normalize
+ whitespace to single spaces.
+
+ >>> Markup("Main »\tAbout").striptags()
+ 'Main » About'
+ """
+ value = str(self)
+
+ # Look for comments then tags separately. Otherwise, a comment that
+ # contains a tag would end early, leaving some of the comment behind.
+
+ while True:
+ # keep finding comment start marks
+ start = value.find("", start)
+
+ if end == -1:
+ break
+
+ value = f"{value[:start]}{value[end + 3:]}"
+
+ # remove tags using the same method
+ while True:
+ start = value.find("<")
+
+ if start == -1:
+ break
+
+ end = value.find(">", start)
+
+ if end == -1:
+ break
+
+ value = f"{value[:start]}{value[end + 1:]}"
+
+ # collapse spaces
+ value = " ".join(value.split())
+ return self.__class__(value).unescape()
+
+ @classmethod
+ def escape(cls, s: t.Any) -> "te.Self":
+ """Escape a string. Calls :func:`escape` and ensures that for
+ subclasses the correct type is returned.
+ """
+ rv = escape(s)
+
+ if rv.__class__ is not cls:
+ return cls(rv)
+
+ return rv # type: ignore[return-value]
+
+ __getitem__ = _simple_escaping_wrapper(str.__getitem__)
+ capitalize = _simple_escaping_wrapper(str.capitalize)
+ title = _simple_escaping_wrapper(str.title)
+ lower = _simple_escaping_wrapper(str.lower)
+ upper = _simple_escaping_wrapper(str.upper)
+ replace = _simple_escaping_wrapper(str.replace)
+ ljust = _simple_escaping_wrapper(str.ljust)
+ rjust = _simple_escaping_wrapper(str.rjust)
+ lstrip = _simple_escaping_wrapper(str.lstrip)
+ rstrip = _simple_escaping_wrapper(str.rstrip)
+ center = _simple_escaping_wrapper(str.center)
+ strip = _simple_escaping_wrapper(str.strip)
+ translate = _simple_escaping_wrapper(str.translate)
+ expandtabs = _simple_escaping_wrapper(str.expandtabs)
+ swapcase = _simple_escaping_wrapper(str.swapcase)
+ zfill = _simple_escaping_wrapper(str.zfill)
+ casefold = _simple_escaping_wrapper(str.casefold)
+
+ if sys.version_info >= (3, 9):
+ removeprefix = _simple_escaping_wrapper(str.removeprefix)
+ removesuffix = _simple_escaping_wrapper(str.removesuffix)
+
+ def partition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
+ l, s, r = super().partition(self.escape(sep))
+ cls = self.__class__
+ return cls(l), cls(s), cls(r)
+
+ def rpartition(self, sep: str) -> t.Tuple["te.Self", "te.Self", "te.Self"]:
+ l, s, r = super().rpartition(self.escape(sep))
+ cls = self.__class__
+ return cls(l), cls(s), cls(r)
+
+ def format(self, *args: t.Any, **kwargs: t.Any) -> "te.Self":
+ formatter = EscapeFormatter(self.escape)
+ return self.__class__(formatter.vformat(self, args, kwargs))
+
+ def format_map( # type: ignore[override]
+ self, map: t.Mapping[str, t.Any]
+ ) -> "te.Self":
+ formatter = EscapeFormatter(self.escape)
+ return self.__class__(formatter.vformat(self, (), map))
+
+ def __html_format__(self, format_spec: str) -> "te.Self":
+ if format_spec:
+ raise ValueError("Unsupported format specification for Markup.")
+
+ return self
+
+
+class EscapeFormatter(string.Formatter):
+ __slots__ = ("escape",)
+
+ def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
+ self.escape = escape
+ super().__init__()
+
+ def format_field(self, value: t.Any, format_spec: str) -> str:
+ if hasattr(value, "__html_format__"):
+ rv = value.__html_format__(format_spec)
+ elif hasattr(value, "__html__"):
+ if format_spec:
+ raise ValueError(
+ f"Format specifier {format_spec} given, but {type(value)} does not"
+ " define __html_format__. A class that defines __html__ must define"
+ " __html_format__ to work with format specifiers."
+ )
+ rv = value.__html__()
+ else:
+ # We need to make sure the format spec is str here as
+ # otherwise the wrong callback methods are invoked.
+ rv = string.Formatter.format_field(self, value, str(format_spec))
+ return str(self.escape(rv))
+
+
+_ListOrDict = t.TypeVar("_ListOrDict", list, dict)
+
+
+def _escape_argspec(
+ obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
+) -> _ListOrDict:
+ """Helper for various string-wrapped functions."""
+ for key, value in iterable:
+ if isinstance(value, str) or hasattr(value, "__html__"):
+ obj[key] = escape(value)
+
+ return obj
+
+
+class _MarkupEscapeHelper:
+ """Helper for :meth:`Markup.__mod__`."""
+
+ __slots__ = ("obj", "escape")
+
+ def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
+ self.obj = obj
+ self.escape = escape
+
+ def __getitem__(self, item: t.Any) -> "te.Self":
+ return self.__class__(self.obj[item], self.escape)
+
+ def __str__(self) -> str:
+ return str(self.escape(self.obj))
+
+ def __repr__(self) -> str:
+ return str(self.escape(repr(self.obj)))
+
+ def __int__(self) -> int:
+ return int(self.obj)
+
+ def __float__(self) -> float:
+ return float(self.obj)
+
+
+# circular import
+try:
+ from ._speedups import escape as escape
+ from ._speedups import escape_silent as escape_silent
+ from ._speedups import soft_str as soft_str
+except ImportError:
+ from ._native import escape as escape
+ from ._native import escape_silent as escape_silent # noqa: F401
+ from ._native import soft_str as soft_str # noqa: F401
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f1a75aa2eb27a67de9329187ad2f201dd394c52
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/markupsafe/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8185d616f7082f1e1e521252f3977257dc97ca6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/markupsafe/__pycache__/_native.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/_native.py b/pythonProject/.venv/Lib/site-packages/markupsafe/_native.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fcdb454987b1c01ddc117a8c8b1f996fcc9fc98
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/markupsafe/_native.py
@@ -0,0 +1,63 @@
+import typing as t
+
+from . import Markup
+
+
+def escape(s: t.Any) -> Markup:
+ """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
+ the string with HTML-safe sequences. Use this if you need to display
+ text that might contain such characters in HTML.
+
+ If the object has an ``__html__`` method, it is called and the
+ return value is assumed to already be safe for HTML.
+
+ :param s: An object to be converted to a string and escaped.
+ :return: A :class:`Markup` string with the escaped text.
+ """
+ if hasattr(s, "__html__"):
+ return Markup(s.__html__())
+
+ return Markup(
+ str(s)
+ .replace("&", "&")
+ .replace(">", ">")
+ .replace("<", "<")
+ .replace("'", "'")
+ .replace('"', """)
+ )
+
+
+def escape_silent(s: t.Optional[t.Any]) -> Markup:
+ """Like :func:`escape` but treats ``None`` as the empty string.
+ Useful with optional values, as otherwise you get the string
+ ``'None'`` when the value is ``None``.
+
+ >>> escape(None)
+ Markup('None')
+ >>> escape_silent(None)
+ Markup('')
+ """
+ if s is None:
+ return Markup()
+
+ return escape(s)
+
+
+def soft_str(s: t.Any) -> str:
+ """Convert an object to a string if it isn't already. This preserves
+ a :class:`Markup` string rather than converting it back to a basic
+ string, so it will still be marked as safe and won't be escaped
+ again.
+
+ >>> value = escape("")
+ >>> value
+ Markup('<User 1>')
+ >>> escape(str(value))
+ Markup('<User 1>')
+ >>> escape(soft_str(value))
+ Markup('<User 1>')
+ """
+ if not isinstance(s, str):
+ return str(s)
+
+ return s
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.c b/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.c
new file mode 100644
index 0000000000000000000000000000000000000000..a3922347a556c8be67c09d4e96eeef69f22832bc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.c
@@ -0,0 +1,320 @@
+#include
+
+static PyObject* markup;
+
+static int
+init_constants(void)
+{
+ PyObject *module;
+
+ /* import markup type so that we can mark the return value */
+ module = PyImport_ImportModule("markupsafe");
+ if (!module)
+ return 0;
+ markup = PyObject_GetAttrString(module, "Markup");
+ Py_DECREF(module);
+
+ return 1;
+}
+
+#define GET_DELTA(inp, inp_end, delta) \
+ while (inp < inp_end) { \
+ switch (*inp++) { \
+ case '"': \
+ case '\'': \
+ case '&': \
+ delta += 4; \
+ break; \
+ case '<': \
+ case '>': \
+ delta += 3; \
+ break; \
+ } \
+ }
+
+#define DO_ESCAPE(inp, inp_end, outp) \
+ { \
+ Py_ssize_t ncopy = 0; \
+ while (inp < inp_end) { \
+ switch (*inp) { \
+ case '"': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '4'; \
+ *outp++ = ';'; \
+ break; \
+ case '\'': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '9'; \
+ *outp++ = ';'; \
+ break; \
+ case '&': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'a'; \
+ *outp++ = 'm'; \
+ *outp++ = 'p'; \
+ *outp++ = ';'; \
+ break; \
+ case '<': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'l'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ case '>': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'g'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ default: \
+ ncopy++; \
+ } \
+ inp++; \
+ } \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ }
+
+static PyObject*
+escape_unicode_kind1(PyUnicodeObject *in)
+{
+ Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
+ Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS1 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
+ PyUnicode_IS_ASCII(in) ? 127 : 255);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_1BYTE_DATA(in);
+ outp = PyUnicode_1BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+static PyObject*
+escape_unicode_kind2(PyUnicodeObject *in)
+{
+ Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
+ Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS2 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_2BYTE_DATA(in);
+ outp = PyUnicode_2BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+
+static PyObject*
+escape_unicode_kind4(PyUnicodeObject *in)
+{
+ Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
+ Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS4 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_4BYTE_DATA(in);
+ outp = PyUnicode_4BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+ if (PyUnicode_READY(in))
+ return NULL;
+
+ switch (PyUnicode_KIND(in)) {
+ case PyUnicode_1BYTE_KIND:
+ return escape_unicode_kind1(in);
+ case PyUnicode_2BYTE_KIND:
+ return escape_unicode_kind2(in);
+ case PyUnicode_4BYTE_KIND:
+ return escape_unicode_kind4(in);
+ }
+ assert(0); /* shouldn't happen */
+ return NULL;
+}
+
+static PyObject*
+escape(PyObject *self, PyObject *text)
+{
+ static PyObject *id_html;
+ PyObject *s = NULL, *rv = NULL, *html;
+
+ if (id_html == NULL) {
+ id_html = PyUnicode_InternFromString("__html__");
+ if (id_html == NULL) {
+ return NULL;
+ }
+ }
+
+ /* we don't have to escape integers, bools or floats */
+ if (PyLong_CheckExact(text) ||
+ PyFloat_CheckExact(text) || PyBool_Check(text) ||
+ text == Py_None)
+ return PyObject_CallFunctionObjArgs(markup, text, NULL);
+
+ /* if the object has an __html__ method that performs the escaping */
+ html = PyObject_GetAttr(text ,id_html);
+ if (html) {
+ s = PyObject_CallObject(html, NULL);
+ Py_DECREF(html);
+ if (s == NULL) {
+ return NULL;
+ }
+ /* Convert to Markup object */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+ }
+
+ /* otherwise make the object unicode if it isn't, then escape */
+ PyErr_Clear();
+ if (!PyUnicode_Check(text)) {
+ PyObject *unicode = PyObject_Str(text);
+ if (!unicode)
+ return NULL;
+ s = escape_unicode((PyUnicodeObject*)unicode);
+ Py_DECREF(unicode);
+ }
+ else
+ s = escape_unicode((PyUnicodeObject*)text);
+
+ /* convert the unicode string into a markup object. */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+}
+
+
+static PyObject*
+escape_silent(PyObject *self, PyObject *text)
+{
+ if (text != Py_None)
+ return escape(self, text);
+ return PyObject_CallFunctionObjArgs(markup, NULL);
+}
+
+
+static PyObject*
+soft_str(PyObject *self, PyObject *s)
+{
+ if (!PyUnicode_Check(s))
+ return PyObject_Str(s);
+ Py_INCREF(s);
+ return s;
+}
+
+
+static PyMethodDef module_methods[] = {
+ {
+ "escape",
+ (PyCFunction)escape,
+ METH_O,
+ "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in"
+ " the string with HTML-safe sequences. Use this if you need to display"
+ " text that might contain such characters in HTML.\n\n"
+ "If the object has an ``__html__`` method, it is called and the"
+ " return value is assumed to already be safe for HTML.\n\n"
+ ":param s: An object to be converted to a string and escaped.\n"
+ ":return: A :class:`Markup` string with the escaped text.\n"
+ },
+ {
+ "escape_silent",
+ (PyCFunction)escape_silent,
+ METH_O,
+ "Like :func:`escape` but treats ``None`` as the empty string."
+ " Useful with optional values, as otherwise you get the string"
+ " ``'None'`` when the value is ``None``.\n\n"
+ ">>> escape(None)\n"
+ "Markup('None')\n"
+ ">>> escape_silent(None)\n"
+ "Markup('')\n"
+ },
+ {
+ "soft_str",
+ (PyCFunction)soft_str,
+ METH_O,
+ "Convert an object to a string if it isn't already. This preserves"
+ " a :class:`Markup` string rather than converting it back to a basic"
+ " string, so it will still be marked as safe and won't be escaped"
+ " again.\n\n"
+ ">>> value = escape(\"\")\n"
+ ">>> value\n"
+ "Markup('<User 1>')\n"
+ ">>> escape(str(value))\n"
+ "Markup('<User 1>')\n"
+ ">>> escape(soft_str(value))\n"
+ "Markup('<User 1>')\n"
+ },
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+static struct PyModuleDef module_definition = {
+ PyModuleDef_HEAD_INIT,
+ "markupsafe._speedups",
+ NULL,
+ -1,
+ module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyMODINIT_FUNC
+PyInit__speedups(void)
+{
+ if (!init_constants())
+ return NULL;
+
+ return PyModule_Create(&module_definition);
+}
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.cp310-win_amd64.pyd b/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.cp310-win_amd64.pyd
new file mode 100644
index 0000000000000000000000000000000000000000..36c1ba697394714e3f82ea4ff29932e64813c6b4
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.cp310-win_amd64.pyd differ
diff --git a/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.pyi b/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ca41f9cbcf775bcf6432850f5a81740a7c15c8d3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/markupsafe/_speedups.pyi
@@ -0,0 +1,9 @@
+from typing import Any
+from typing import Optional
+
+from . import Markup
+
+def escape(s: Any) -> Markup: ...
+def escape_silent(s: Optional[Any]) -> Markup: ...
+def soft_str(s: Any) -> str: ...
+def soft_unicode(s: Any) -> str: ...
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/INSTALLER b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/METADATA b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..edd892795ab4cefb6b4a66030a03aa96c9215eca
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/METADATA
@@ -0,0 +1,268 @@
+Metadata-Version: 2.4
+Name: ml_dtypes
+Version: 0.5.3
+Summary: ml_dtypes is a stand-alone implementation of several NumPy dtype extensions used in machine learning.
+Author-email: ml_dtypes authors
+License-Expression: Apache-2.0
+Project-URL: homepage, https://github.com/jax-ml/ml_dtypes
+Project-URL: repository, https://github.com/jax-ml/ml_dtypes
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Intended Audience :: Science/Research
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+License-File: LICENSE
+License-File: LICENSE.eigen
+Requires-Dist: numpy>=1.21
+Requires-Dist: numpy>=1.21.2; python_version >= "3.10"
+Requires-Dist: numpy>=1.23.3; python_version >= "3.11"
+Requires-Dist: numpy>=1.26.0; python_version >= "3.12"
+Requires-Dist: numpy>=2.1.0; python_version >= "3.13"
+Provides-Extra: dev
+Requires-Dist: absl-py; extra == "dev"
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pytest-xdist; extra == "dev"
+Requires-Dist: pylint>=2.6.0; extra == "dev"
+Requires-Dist: pyink; extra == "dev"
+Dynamic: license-file
+
+# ml_dtypes
+
+[](https://github.com/jax-ml/ml_dtypes/actions/workflows/test.yml)
+[](https://github.com/jax-ml/ml_dtypes/actions/workflows/wheels.yml)
+[](https://badge.fury.io/py/ml_dtypes)
+
+`ml_dtypes` is a stand-alone implementation of several NumPy dtype extensions used in machine learning libraries, including:
+
+- [`bfloat16`](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format):
+ an alternative to the standard [`float16`](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) format
+- 8-bit floating point representations, parameterized by number of exponent and
+ mantissa bits, as well as the bias (if any) and representability of infinity,
+ NaN, and signed zero.
+ * `float8_e3m4`
+ * `float8_e4m3`
+ * `float8_e4m3b11fnuz`
+ * `float8_e4m3fn`
+ * `float8_e4m3fnuz`
+ * `float8_e5m2`
+ * `float8_e5m2fnuz`
+ * `float8_e8m0fnu`
+- Microscaling (MX) sub-byte floating point representations:
+ * `float4_e2m1fn`
+ * `float6_e2m3fn`
+ * `float6_e3m2fn`
+- Narrow integer encodings:
+ * `int2`
+ * `int4`
+ * `uint2`
+ * `uint4`
+
+See below for specifications of these number formats.
+
+## Installation
+
+The `ml_dtypes` package is tested with Python versions 3.9-3.12, and can be installed
+with the following command:
+```
+pip install ml_dtypes
+```
+To test your installation, you can run the following:
+```
+pip install absl-py pytest
+pytest --pyargs ml_dtypes
+```
+To build from source, clone the repository and run:
+```
+git submodule init
+git submodule update
+pip install .
+```
+
+## Example Usage
+
+```python
+>>> from ml_dtypes import bfloat16
+>>> import numpy as np
+>>> np.zeros(4, dtype=bfloat16)
+array([0, 0, 0, 0], dtype=bfloat16)
+```
+Importing `ml_dtypes` also registers the data types with numpy, so that they may
+be referred to by their string name:
+
+```python
+>>> np.dtype('bfloat16')
+dtype(bfloat16)
+>>> np.dtype('float8_e5m2')
+dtype(float8_e5m2)
+```
+
+## Specifications of implemented floating point formats
+
+### `bfloat16`
+
+A `bfloat16` number is a single-precision float truncated at 16 bits.
+
+Exponent: 8, Mantissa: 7, exponent bias: 127. IEEE 754, with NaN and inf.
+
+### `float4_e2m1fn`
+
+Exponent: 2, Mantissa: 1, bias: 1.
+
+Extended range: no inf, no NaN.
+
+Microscaling format, 4 bits (encoding: `0bSEEM`) using byte storage (higher 4
+bits are unused). NaN representation is undefined.
+
+Possible absolute values: [`0`, `0.5`, `1`, `1.5`, `2`, `3`, `4`, `6`]
+
+### `float6_e2m3fn`
+
+Exponent: 2, Mantissa: 3, bias: 1.
+
+Extended range: no inf, no NaN.
+
+Microscaling format, 6 bits (encoding: `0bSEEMMM`) using byte storage (higher 2
+bits are unused). NaN representation is undefined.
+
+Possible values range: [`-7.5`; `7.5`]
+
+### `float6_e3m2fn`
+
+Exponent: 3, Mantissa: 2, bias: 3.
+
+Extended range: no inf, no NaN.
+
+Microscaling format, 6 bits (encoding: `0bSEEEMM`) using byte storage (higher 2
+bits are unused). NaN representation is undefined.
+
+Possible values range: [`-28`; `28`]
+
+### `float8_e3m4`
+
+Exponent: 3, Mantissa: 4, bias: 3. IEEE 754, with NaN and inf.
+
+### `float8_e4m3`
+
+Exponent: 4, Mantissa: 3, bias: 7. IEEE 754, with NaN and inf.
+
+### `float8_e4m3b11fnuz`
+
+Exponent: 4, Mantissa: 3, bias: 11.
+
+Extended range: no inf, NaN represented by 0b1000'0000.
+
+### `float8_e4m3fn`
+
+Exponent: 4, Mantissa: 3, bias: 7.
+
+Extended range: no inf, NaN represented by 0bS111'1111.
+
+The `fn` suffix is for consistency with the corresponding LLVM/MLIR type, signaling this type is not consistent with IEEE-754. The `f` indicates it is finite values only. The `n` indicates it includes NaNs, but only at the outer range.
+
+### `float8_e4m3fnuz`
+
+8-bit floating point with 3 bit mantissa.
+
+An 8-bit floating point type with 1 sign bit, 4 bits exponent and 3 bits mantissa. The suffix `fnuz` is consistent with LLVM/MLIR naming and is derived from the differences to IEEE floating point conventions. `F` is for "finite" (no infinities), `N` for with special NaN encoding, `UZ` for unsigned zero.
+
+This type has the following characteristics:
+ * bit encoding: S1E4M3 - `0bSEEEEMMM`
+ * exponent bias: 8
+ * infinities: Not supported
+ * NaNs: Supported with sign bit set to 1, exponent bits and mantissa bits set to all 0s - `0b10000000`
+ * denormals when exponent is 0
+
+### `float8_e5m2`
+
+Exponent: 5, Mantissa: 2, bias: 15. IEEE 754, with NaN and inf.
+
+### `float8_e5m2fnuz`
+
+8-bit floating point with 2 bit mantissa.
+
+An 8-bit floating point type with 1 sign bit, 5 bits exponent and 2 bits mantissa. The suffix `fnuz` is consistent with LLVM/MLIR naming and is derived from the differences to IEEE floating point conventions. `F` is for "finite" (no infinities), `N` for with special NaN encoding, `UZ` for unsigned zero.
+
+This type has the following characteristics:
+ * bit encoding: S1E5M2 - `0bSEEEEEMM`
+ * exponent bias: 16
+ * infinities: Not supported
+ * NaNs: Supported with sign bit set to 1, exponent bits and mantissa bits set to all 0s - `0b10000000`
+ * denormals when exponent is 0
+
+### `float8_e8m0fnu`
+
+[OpenCompute MX](https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf)
+scale format E8M0, which has the following properties:
+ * Unsigned format
+ * 8 exponent bits
+ * Exponent range from -127 to 127
+ * No zero and infinity
+ * Single NaN value (0xFF).
+
+## `int2`, `int4`, `uint2` and `uint4`
+
+2 and 4-bit integer types, where each element is represented unpacked (i.e.,
+padded up to a byte in memory).
+
+NumPy does not support types smaller than a single byte: for example, the
+distance between adjacent elements in an array (`.strides`) is expressed as
+an integer number of bytes. Relaxing this restriction would be a considerable
+engineering project. These types therefore use an unpacked representation, where
+each element of the array is padded up to a byte in memory. The lower two or four
+bits of each byte contain the representation of the number, whereas the remaining
+upper bits are ignored.
+
+## Quirks of low-precision Arithmetic
+
+If you're exploring the use of low-precision dtypes in your code, you should be
+careful to anticipate when the precision loss might lead to surprising results.
+One example is the behavior of aggregations like `sum`; consider this `bfloat16`
+summation in NumPy (run with version 1.24.2):
+
+```python
+>>> from ml_dtypes import bfloat16
+>>> import numpy as np
+>>> rng = np.random.default_rng(seed=0)
+>>> vals = rng.uniform(size=10000).astype(bfloat16)
+>>> vals.sum()
+256
+```
+The true sum should be close to 5000, but numpy returns exactly 256: this is
+because `bfloat16` does not have the precision to increment `256` by values less than
+`1`:
+
+```python
+>>> bfloat16(256) + bfloat16(1)
+256
+```
+After 256, the next representable value in bfloat16 is 258:
+
+```python
+>>> np.nextafter(bfloat16(256), bfloat16(np.inf))
+258
+```
+For better results you can specify that the accumulation should happen in a
+higher-precision type like `float32`:
+
+```python
+>>> vals.sum(dtype='float32').astype(bfloat16)
+4992
+```
+In contrast to NumPy, projects like [JAX](http://jax.readthedocs.io/) which support
+low-precision arithmetic more natively will often do these kinds of higher-precision
+accumulations automatically:
+
+```python
+>>> import jax.numpy as jnp
+>>> jnp.array(vals).sum()
+Array(4992, dtype=bfloat16)
+```
+
+## License
+
+*This is not an officially supported Google product.*
+
+The `ml_dtypes` source code is licensed under the Apache 2.0 license
+(see [LICENSE](LICENSE)). Pre-compiled wheels are built with the
+[EIGEN](https://eigen.tuxfamily.org/) project, which is released under the
+MPL 2.0 license (see [LICENSE.eigen](LICENSE.eigen)).
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/RECORD b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..9b5e71bcc7ff8f4821d7efcb22c370e0bf766a2b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/RECORD
@@ -0,0 +1,15 @@
+ml_dtypes-0.5.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ml_dtypes-0.5.3.dist-info/METADATA,sha256=vTuE8TEQUarkGkqN9k74SDyAJwKu6NuyseNbQuXFnUg,9176
+ml_dtypes-0.5.3.dist-info/RECORD,,
+ml_dtypes-0.5.3.dist-info/WHEEL,sha256=d0clRNJVaR7HXdCKNsk2VLvFV9HQ7R7Q1JcMhuI_WV0,101
+ml_dtypes-0.5.3.dist-info/licenses/LICENSE,sha256=Pd-b5cKP4n2tFDpdx27qJSIq0d1ok0oEcGTlbtL6QMU,11560
+ml_dtypes-0.5.3.dist-info/licenses/LICENSE.eigen,sha256=kiHC-TYVm4RG0ykkn7TA8lvlEPRHODoPEzNqx5hWaKM,17099
+ml_dtypes-0.5.3.dist-info/top_level.txt,sha256=meeeNkM1LLmTU5q_0ssFs21A_42VAoES24ntCrPqASw,10
+ml_dtypes/__init__.py,sha256=qgl4mRKusn8HKVoy50flhBUujpXRWet7a3DxdfpZ7VQ,2434
+ml_dtypes/__pycache__/__init__.cpython-310.pyc,,
+ml_dtypes/__pycache__/_finfo.cpython-310.pyc,,
+ml_dtypes/__pycache__/_iinfo.cpython-310.pyc,,
+ml_dtypes/_finfo.py,sha256=JWqJABIHDtgVFaxoy5dkmCRmwbQYdahH-CqA-xH1kos,23612
+ml_dtypes/_iinfo.py,sha256=il9ONlgDbzJeV2vVlnSRhwut4WQ7LxXmsYWNTgiL4-o,2100
+ml_dtypes/_ml_dtypes_ext.cp310-win_amd64.pyd,sha256=MxvH6qwQg2btrVngK7FwEaZ9SGCThAzKQrLub7dOSRU,790016
+ml_dtypes/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/WHEEL b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..0a81e14e8cd991f8fdaa894179c3fecc4331f673
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (80.8.0)
+Root-Is-Purelib: false
+Tag: cp310-cp310-win_amd64
+
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/licenses/LICENSE b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/licenses/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..75b52484ea471f882c29e02693b4f02dba175b5e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/licenses/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/licenses/LICENSE.eigen b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/licenses/LICENSE.eigen
new file mode 100644
index 0000000000000000000000000000000000000000..3d73aee29999ccd34b9495745d08be6c4b613712
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/licenses/LICENSE.eigen
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/top_level.txt b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a65a36928cdc346035db028b8162cb4d637c763f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes-0.5.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+ml_dtypes
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/__init__.py b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c9c255b68d99add60ea403ed0c024bb79defb84
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__init__.py
@@ -0,0 +1,77 @@
+# Copyright 2022 The ml_dtypes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "0.5.3"
+__all__ = [
+ "__version__",
+ "bfloat16",
+ "finfo",
+ "float4_e2m1fn",
+ "float6_e2m3fn",
+ "float6_e3m2fn",
+ "float8_e3m4",
+ "float8_e4m3",
+ "float8_e4m3b11fnuz",
+ "float8_e4m3fn",
+ "float8_e4m3fnuz",
+ "float8_e5m2",
+ "float8_e5m2fnuz",
+ "float8_e8m0fnu",
+ "iinfo",
+ "int2",
+ "int4",
+ "uint2",
+ "uint4",
+]
+
+from typing import Type
+
+from ml_dtypes._finfo import finfo
+from ml_dtypes._iinfo import iinfo
+from ml_dtypes._ml_dtypes_ext import bfloat16
+from ml_dtypes._ml_dtypes_ext import float4_e2m1fn
+from ml_dtypes._ml_dtypes_ext import float6_e2m3fn
+from ml_dtypes._ml_dtypes_ext import float6_e3m2fn
+from ml_dtypes._ml_dtypes_ext import float8_e3m4
+from ml_dtypes._ml_dtypes_ext import float8_e4m3
+from ml_dtypes._ml_dtypes_ext import float8_e4m3b11fnuz
+from ml_dtypes._ml_dtypes_ext import float8_e4m3fn
+from ml_dtypes._ml_dtypes_ext import float8_e4m3fnuz
+from ml_dtypes._ml_dtypes_ext import float8_e5m2
+from ml_dtypes._ml_dtypes_ext import float8_e5m2fnuz
+from ml_dtypes._ml_dtypes_ext import float8_e8m0fnu
+from ml_dtypes._ml_dtypes_ext import int2
+from ml_dtypes._ml_dtypes_ext import int4
+from ml_dtypes._ml_dtypes_ext import uint2
+from ml_dtypes._ml_dtypes_ext import uint4
+import numpy as np
+
+bfloat16: Type[np.generic]
+float4_e2m1fn: Type[np.generic]
+float6_e2m3fn: Type[np.generic]
+float6_e3m2fn: Type[np.generic]
+float8_e3m4: Type[np.generic]
+float8_e4m3: Type[np.generic]
+float8_e4m3b11fnuz: Type[np.generic]
+float8_e4m3fn: Type[np.generic]
+float8_e4m3fnuz: Type[np.generic]
+float8_e5m2: Type[np.generic]
+float8_e5m2fnuz: Type[np.generic]
+float8_e8m0fnu: Type[np.generic]
+int2: Type[np.generic]
+int4: Type[np.generic]
+uint2: Type[np.generic]
+uint4: Type[np.generic]
+
+del np, Type
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab97a3e533425c8b8261531316bbfdb4ac8ce554
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/_finfo.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/_finfo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2deb48fd45177449b3c83f39e3fab127ef45b24
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/_finfo.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/_iinfo.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/_iinfo.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6a79c3236bb3b2fb14b1213a8112870ef0cb610
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/ml_dtypes/__pycache__/_iinfo.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/_finfo.py b/pythonProject/.venv/Lib/site-packages/ml_dtypes/_finfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..93f02ded816b54a8f0652916e09c5e403c0b460e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes/_finfo.py
@@ -0,0 +1,713 @@
+# Copyright 2023 The ml_dtypes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Overload of numpy.finfo to handle dtypes defined in ml_dtypes."""
+
+from ml_dtypes._ml_dtypes_ext import bfloat16
+from ml_dtypes._ml_dtypes_ext import float4_e2m1fn
+from ml_dtypes._ml_dtypes_ext import float6_e2m3fn
+from ml_dtypes._ml_dtypes_ext import float6_e3m2fn
+from ml_dtypes._ml_dtypes_ext import float8_e3m4
+from ml_dtypes._ml_dtypes_ext import float8_e4m3
+from ml_dtypes._ml_dtypes_ext import float8_e4m3b11fnuz
+from ml_dtypes._ml_dtypes_ext import float8_e4m3fn
+from ml_dtypes._ml_dtypes_ext import float8_e4m3fnuz
+from ml_dtypes._ml_dtypes_ext import float8_e5m2
+from ml_dtypes._ml_dtypes_ext import float8_e5m2fnuz
+from ml_dtypes._ml_dtypes_ext import float8_e8m0fnu
+import numpy as np
+
+_bfloat16_dtype = np.dtype(bfloat16)
+_float4_e2m1fn_dtype = np.dtype(float4_e2m1fn)
+_float6_e2m3fn_dtype = np.dtype(float6_e2m3fn)
+_float6_e3m2fn_dtype = np.dtype(float6_e3m2fn)
+_float8_e3m4_dtype = np.dtype(float8_e3m4)
+_float8_e4m3_dtype = np.dtype(float8_e4m3)
+_float8_e4m3b11fnuz_dtype = np.dtype(float8_e4m3b11fnuz)
+_float8_e4m3fn_dtype = np.dtype(float8_e4m3fn)
+_float8_e4m3fnuz_dtype = np.dtype(float8_e4m3fnuz)
+_float8_e5m2_dtype = np.dtype(float8_e5m2)
+_float8_e5m2fnuz_dtype = np.dtype(float8_e5m2fnuz)
+_float8_e8m0fnu_dtype = np.dtype(float8_e8m0fnu)
+
+
+class _Bfloat16MachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-126")
+ self.smallest_normal = bfloat16(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-133")
+ self.smallest_subnormal = bfloat16(smallest_subnormal)
+
+
+class _Float4E2m1fnMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p0")
+ self.smallest_normal = float4_e2m1fn(smallest_normal)
+ smallest_subnormal = float.fromhex("0x0.8p0")
+ self.smallest_subnormal = float4_e2m1fn(smallest_subnormal)
+
+
+class _Float6E2m3fnMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p0")
+ self.smallest_normal = float6_e2m3fn(smallest_normal)
+ smallest_subnormal = float.fromhex("0x0.2p0")
+ self.smallest_subnormal = float6_e2m3fn(smallest_subnormal)
+
+
+class _Float6E3m2fnMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-2")
+ self.smallest_normal = float6_e3m2fn(smallest_normal)
+ smallest_subnormal = float.fromhex("0x0.4p-2")
+ self.smallest_subnormal = float6_e3m2fn(smallest_subnormal)
+
+
+class _Float8E3m4MachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-2")
+ self.smallest_normal = float8_e3m4(smallest_normal)
+ smallest_subnormal = float.fromhex("0x0.1p-2")
+ self.smallest_subnormal = float8_e3m4(smallest_subnormal)
+
+
+class _Float8E4m3MachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-6")
+ self.smallest_normal = float8_e4m3(smallest_normal)
+ smallest_subnormal = float.fromhex("0x0.2p-6")
+ self.smallest_subnormal = float8_e4m3(smallest_subnormal)
+
+
+class _Float8E4m3b11fnuzMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-10")
+ self.smallest_normal = float8_e4m3b11fnuz(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-13")
+ self.smallest_subnormal = float8_e4m3b11fnuz(smallest_subnormal)
+
+
+class _Float8E4m3fnMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-6")
+ self.smallest_normal = float8_e4m3fn(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-9")
+ self.smallest_subnormal = float8_e4m3fn(smallest_subnormal)
+
+
+class _Float8E4m3fnuzMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-7")
+ self.smallest_normal = float8_e4m3fnuz(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-10")
+ self.smallest_subnormal = float8_e4m3fnuz(smallest_subnormal)
+
+
+class _Float8E5m2MachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-14")
+ self.smallest_normal = float8_e5m2(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-16")
+ self.smallest_subnormal = float8_e5m2(smallest_subnormal)
+
+
+class _Float8E5m2fnuzMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-15")
+ self.smallest_normal = float8_e5m2fnuz(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-17")
+ self.smallest_subnormal = float8_e5m2fnuz(smallest_subnormal)
+
+
+class _Float8E8m0fnuMachArLike:
+
+ def __init__(self):
+ smallest_normal = float.fromhex("0x1p-127")
+ self.smallest_normal = float8_e8m0fnu(smallest_normal)
+ smallest_subnormal = float.fromhex("0x1p-127")
+ self.smallest_subnormal = float8_e8m0fnu(smallest_subnormal)
+
+
+class finfo(np.finfo): # pylint: disable=invalid-name,missing-class-docstring
+ __doc__ = np.finfo.__doc__
+
+ @staticmethod
+ def _bfloat16_finfo():
+ def float_to_str(f):
+ return "%12.4e" % float(f)
+
+ tiny = float.fromhex("0x1p-126")
+ resolution = 0.01
+ eps = float.fromhex("0x1p-7")
+ epsneg = float.fromhex("0x1p-8")
+ max_ = float.fromhex("0x1.FEp127")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _bfloat16_dtype
+ obj.bits = 16
+ obj.eps = bfloat16(eps)
+ obj.epsneg = bfloat16(epsneg)
+ obj.machep = -7
+ obj.negep = -8
+ obj.max = bfloat16(max_)
+ obj.min = bfloat16(-max_)
+ obj.nexp = 8
+ obj.nmant = 7
+ obj.iexp = obj.nexp
+ obj.maxexp = 128
+ obj.minexp = -126
+ obj.precision = 2
+ obj.resolution = bfloat16(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Bfloat16MachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = bfloat16(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float4_e2m1fn_finfo():
+ eps = float.fromhex("0x0.8p0") # 0.5
+ max_ = float.fromhex("0x1.8p2") # 6.0
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float4_e2m1fn_dtype
+ obj.bits = 4
+ obj.eps = eps
+ obj.epsneg = eps
+ obj.machep = -1
+ obj.negep = -1
+ obj.max = float4_e2m1fn(max_)
+ obj.min = float4_e2m1fn(-max_)
+ obj.nexp = 2
+ obj.nmant = 1
+ obj.iexp = obj.nexp
+ obj.maxexp = 3
+ obj.minexp = 0
+ obj.precision = 0
+ obj.resolution = float4_e2m1fn(1.0)
+ # pylint: disable=protected-access
+ obj._machar = _Float4E2m1fnMachArLike()
+ tiny = obj._machar.smallest_normal
+ if not hasattr(obj, "tiny"):
+ obj.tiny = tiny
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = tiny
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ float_to_str = str
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(obj.max)
+ obj._str_epsneg = float_to_str(obj.epsneg)
+ obj._str_eps = float_to_str(obj.eps)
+ obj._str_resolution = float_to_str(obj.resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float6_e2m3fn_finfo():
+ eps = float.fromhex("0x0.2p0") # 0.125
+ max_ = float.fromhex("0x1.Ep2") # 7.5
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float6_e2m3fn_dtype
+ obj.bits = 6
+ obj.eps = eps
+ obj.epsneg = eps
+ obj.machep = -3
+ obj.negep = -3
+ obj.max = float6_e2m3fn(max_)
+ obj.min = float6_e2m3fn(-max_)
+ obj.nexp = 2
+ obj.nmant = 3
+ obj.iexp = obj.nexp
+ obj.maxexp = 3
+ obj.minexp = 0
+ obj.precision = 0
+ obj.resolution = float6_e2m3fn(1.0)
+ # pylint: disable=protected-access
+ obj._machar = _Float6E2m3fnMachArLike()
+ tiny = obj._machar.smallest_normal
+ if not hasattr(obj, "tiny"):
+ obj.tiny = tiny
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = tiny
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ float_to_str = str
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(obj.max)
+ obj._str_epsneg = float_to_str(obj.epsneg)
+ obj._str_eps = float_to_str(obj.eps)
+ obj._str_resolution = float_to_str(obj.resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float6_e3m2fn_finfo():
+ eps = float.fromhex("0x1p-2") # 0.25
+ max_ = float.fromhex("0x1.Cp4") # 28
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float6_e3m2fn_dtype
+ obj.bits = 6
+ obj.eps = eps
+ obj.epsneg = eps / 2
+ obj.machep = -2
+ obj.negep = -3
+ obj.max = float6_e3m2fn(max_)
+ obj.min = float6_e3m2fn(-max_)
+ obj.nexp = 3
+ obj.nmant = 2
+ obj.iexp = obj.nexp
+ obj.maxexp = 5
+ obj.minexp = -2
+ obj.precision = 0
+ obj.resolution = float6_e3m2fn(1.0)
+ # pylint: disable=protected-access
+ obj._machar = _Float6E3m2fnMachArLike()
+ tiny = obj._machar.smallest_normal
+ if not hasattr(obj, "tiny"):
+ obj.tiny = tiny
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = tiny
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ float_to_str = str
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(obj.max)
+ obj._str_epsneg = float_to_str(obj.epsneg)
+ obj._str_eps = float_to_str(obj.eps)
+ obj._str_resolution = float_to_str(obj.resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e3m4_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-2") # 1/4 min normal
+ resolution = 0.1
+ eps = float.fromhex("0x1p-4") # 1/16
+ epsneg = float.fromhex("0x1p-5") # 1/32
+ max_ = float.fromhex("0x1.Fp3") # 15.5 max normal
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e3m4_dtype
+ obj.bits = 8
+ obj.eps = float8_e3m4(eps)
+ obj.epsneg = float8_e3m4(epsneg)
+ obj.machep = -4
+ obj.negep = -5
+ obj.max = float8_e3m4(max_)
+ obj.min = float8_e3m4(-max_)
+ obj.nexp = 3
+ obj.nmant = 4
+ obj.iexp = obj.nexp
+ obj.maxexp = 4
+ obj.minexp = -2
+ obj.precision = 1
+ obj.resolution = float8_e3m4(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E3m4MachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e3m4(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e4m3_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-6") # 1/64 min normal
+ resolution = 0.1
+ eps = float.fromhex("0x1p-3") # 1/8
+ epsneg = float.fromhex("0x1p-4") # 1/16
+ max_ = float.fromhex("0x1.Ep7") # 240 max normal
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e4m3_dtype
+ obj.bits = 8
+ obj.eps = float8_e4m3(eps)
+ obj.epsneg = float8_e4m3(epsneg)
+ obj.machep = -3
+ obj.negep = -4
+ obj.max = float8_e4m3(max_)
+ obj.min = float8_e4m3(-max_)
+ obj.nexp = 4
+ obj.nmant = 3
+ obj.iexp = obj.nexp
+ obj.maxexp = 8
+ obj.minexp = -6
+ obj.precision = 1
+ obj.resolution = float8_e4m3(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E4m3MachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e4m3(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e4m3b11fnuz_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-10")
+ resolution = 0.1
+ eps = float.fromhex("0x1p-3")
+ epsneg = float.fromhex("0x1p-4")
+ max_ = float.fromhex("0x1.Ep4")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e4m3b11fnuz_dtype
+ obj.bits = 8
+ obj.eps = float8_e4m3b11fnuz(eps)
+ obj.epsneg = float8_e4m3b11fnuz(epsneg)
+ obj.machep = -3
+ obj.negep = -4
+ obj.max = float8_e4m3b11fnuz(max_)
+ obj.min = float8_e4m3b11fnuz(-max_)
+ obj.nexp = 4
+ obj.nmant = 3
+ obj.iexp = obj.nexp
+ obj.maxexp = 5
+ obj.minexp = -10
+ obj.precision = 1
+ obj.resolution = float8_e4m3b11fnuz(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E4m3b11fnuzMachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e4m3b11fnuz(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e4m3fn_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-6")
+ resolution = 0.1
+ eps = float.fromhex("0x1p-3")
+ epsneg = float.fromhex("0x1p-4")
+ max_ = float.fromhex("0x1.Cp8")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e4m3fn_dtype
+ obj.bits = 8
+ obj.eps = float8_e4m3fn(eps)
+ obj.epsneg = float8_e4m3fn(epsneg)
+ obj.machep = -3
+ obj.negep = -4
+ obj.max = float8_e4m3fn(max_)
+ obj.min = float8_e4m3fn(-max_)
+ obj.nexp = 4
+ obj.nmant = 3
+ obj.iexp = obj.nexp
+ obj.maxexp = 9
+ obj.minexp = -6
+ obj.precision = 1
+ obj.resolution = float8_e4m3fn(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E4m3fnMachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e4m3fn(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e4m3fnuz_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-7")
+ resolution = 0.1
+ eps = float.fromhex("0x1p-3")
+ epsneg = float.fromhex("0x1p-4")
+ max_ = float.fromhex("0x1.Ep7")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e4m3fnuz_dtype
+ obj.bits = 8
+ obj.eps = float8_e4m3fnuz(eps)
+ obj.epsneg = float8_e4m3fnuz(epsneg)
+ obj.machep = -3
+ obj.negep = -4
+ obj.max = float8_e4m3fnuz(max_)
+ obj.min = float8_e4m3fnuz(-max_)
+ obj.nexp = 4
+ obj.nmant = 3
+ obj.iexp = obj.nexp
+ obj.maxexp = 8
+ obj.minexp = -7
+ obj.precision = 1
+ obj.resolution = float8_e4m3fnuz(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E4m3fnuzMachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e4m3fnuz(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e5m2_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-14")
+ resolution = 0.1
+ eps = float.fromhex("0x1p-2")
+ epsneg = float.fromhex("0x1p-3")
+ max_ = float.fromhex("0x1.Cp15")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e5m2_dtype
+ obj.bits = 8
+ obj.eps = float8_e5m2(eps)
+ obj.epsneg = float8_e5m2(epsneg)
+ obj.machep = -2
+ obj.negep = -3
+ obj.max = float8_e5m2(max_)
+ obj.min = float8_e5m2(-max_)
+ obj.nexp = 5
+ obj.nmant = 2
+ obj.iexp = obj.nexp
+ obj.maxexp = 16
+ obj.minexp = -14
+ obj.precision = 1
+ obj.resolution = float8_e5m2(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E5m2MachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e5m2(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e5m2fnuz_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-15")
+ resolution = 0.1
+ eps = float.fromhex("0x1p-2")
+ epsneg = float.fromhex("0x1p-3")
+ max_ = float.fromhex("0x1.Cp15")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e5m2fnuz_dtype
+ obj.bits = 8
+ obj.eps = float8_e5m2fnuz(eps)
+ obj.epsneg = float8_e5m2fnuz(epsneg)
+ obj.machep = -2
+ obj.negep = -3
+ obj.max = float8_e5m2fnuz(max_)
+ obj.min = float8_e5m2fnuz(-max_)
+ obj.nexp = 5
+ obj.nmant = 2
+ obj.iexp = obj.nexp
+ obj.maxexp = 16
+ obj.minexp = -15
+ obj.precision = 1
+ obj.resolution = float8_e5m2fnuz(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E5m2fnuzMachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e5m2fnuz(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ @staticmethod
+ def _float8_e8m0fnu_finfo():
+ def float_to_str(f):
+ return "%6.2e" % float(f)
+
+ tiny = float.fromhex("0x1p-127")
+ resolution = 0.1
+ eps = float.fromhex("0x1p+0")
+ epsneg = float.fromhex("0x1p-1")
+ max_ = float.fromhex("0x1p+127")
+
+ obj = object.__new__(np.finfo)
+ obj.dtype = _float8_e8m0fnu_dtype
+ obj.bits = 8
+ obj.eps = float8_e8m0fnu(eps)
+ obj.epsneg = float8_e8m0fnu(epsneg)
+ obj.machep = 0
+ obj.negep = -1
+ obj.max = float8_e8m0fnu(max_)
+ obj.min = float8_e8m0fnu(tiny)
+ obj.nexp = 8
+ obj.nmant = 0
+ obj.iexp = obj.nexp
+ obj.maxexp = 128
+ obj.minexp = -127
+ obj.precision = 1
+ obj.resolution = float8_e8m0fnu(resolution)
+ # pylint: disable=protected-access
+ obj._machar = _Float8E8m0fnuMachArLike()
+ if not hasattr(obj, "tiny"):
+ obj.tiny = float8_e8m0fnu(tiny)
+ if not hasattr(obj, "smallest_normal"):
+ obj.smallest_normal = obj._machar.smallest_normal
+ obj.smallest_subnormal = obj._machar.smallest_subnormal
+
+ obj._str_tiny = float_to_str(tiny)
+ obj._str_smallest_normal = float_to_str(tiny)
+ obj._str_smallest_subnormal = float_to_str(obj.smallest_subnormal)
+ obj._str_max = float_to_str(max_)
+ obj._str_epsneg = float_to_str(epsneg)
+ obj._str_eps = float_to_str(eps)
+ obj._str_resolution = float_to_str(resolution)
+ # pylint: enable=protected-access
+ return obj
+
+ _finfo_type_map = {
+ _bfloat16_dtype: _bfloat16_finfo,
+ _float4_e2m1fn_dtype: _float4_e2m1fn_finfo,
+ _float6_e2m3fn_dtype: _float6_e2m3fn_finfo,
+ _float6_e3m2fn_dtype: _float6_e3m2fn_finfo,
+ _float8_e3m4_dtype: _float8_e3m4_finfo,
+ _float8_e4m3_dtype: _float8_e4m3_finfo,
+ _float8_e4m3fn_dtype: _float8_e4m3fn_finfo,
+ _float8_e4m3fnuz_dtype: _float8_e4m3fnuz_finfo,
+ _float8_e4m3b11fnuz_dtype: _float8_e4m3b11fnuz_finfo,
+ _float8_e5m2_dtype: _float8_e5m2_finfo,
+ _float8_e5m2fnuz_dtype: _float8_e5m2fnuz_finfo,
+ _float8_e8m0fnu_dtype: _float8_e8m0fnu_finfo,
+ }
+ _finfo_name_map = {t.name: t for t in _finfo_type_map}
+ _finfo_cache = {
+ t: init_fn.__func__() for t, init_fn in _finfo_type_map.items() # pytype: disable=attribute-error
+ }
+
+ def __new__(cls, dtype):
+ if isinstance(dtype, str):
+ key = cls._finfo_name_map.get(dtype)
+ elif isinstance(dtype, np.dtype):
+ key = dtype
+ else:
+ key = np.dtype(dtype)
+ i = cls._finfo_cache.get(key)
+ if i is not None:
+ return i
+ return super().__new__(cls, dtype)
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/_iinfo.py b/pythonProject/.venv/Lib/site-packages/ml_dtypes/_iinfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..5705639180e96bd41cc68344e4604275d389dd21
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/ml_dtypes/_iinfo.py
@@ -0,0 +1,73 @@
+# Copyright 2023 The ml_dtypes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Overload of numpy.iinfo to handle dtypes defined in ml_dtypes."""
+
+from ml_dtypes._ml_dtypes_ext import int2
+from ml_dtypes._ml_dtypes_ext import int4
+from ml_dtypes._ml_dtypes_ext import uint2
+from ml_dtypes._ml_dtypes_ext import uint4
+import numpy as np
+
+_int2_dtype = np.dtype(int2)
+_uint2_dtype = np.dtype(uint2)
+_int4_dtype = np.dtype(int4)
+_uint4_dtype = np.dtype(uint4)
+
+
+class iinfo: # pylint: disable=invalid-name,missing-class-docstring
+ kind: str
+ bits: int
+ min: int
+ max: int
+ dtype: np.dtype
+
+ def __init__(self, int_type):
+ if int_type == _int2_dtype:
+ self.dtype = _int2_dtype
+ self.kind = "i"
+ self.bits = 2
+ self.min = -2
+ self.max = 1
+ elif int_type == _uint2_dtype:
+ self.dtype = _uint2_dtype
+ self.kind = "u"
+ self.bits = 2
+ self.min = 0
+ self.max = 3
+ elif int_type == _int4_dtype:
+ self.dtype = _int4_dtype
+ self.kind = "i"
+ self.bits = 4
+ self.min = -8
+ self.max = 7
+ elif int_type == _uint4_dtype:
+ self.dtype = _uint4_dtype
+ self.kind = "u"
+ self.bits = 4
+ self.min = 0
+ self.max = 15
+ else:
+ ii = np.iinfo(int_type)
+ self.dtype = ii.dtype
+ self.kind = ii.kind
+ self.bits = ii.bits
+ self.min = ii.min
+ self.max = ii.max
+
+ def __repr__(self):
+ return f"iinfo(min={self.min}, max={self.max}, dtype={self.dtype})"
+
+ def __str__(self):
+ return repr(self)
diff --git a/pythonProject/.venv/Lib/site-packages/ml_dtypes/py.typed b/pythonProject/.venv/Lib/site-packages/ml_dtypes/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9ecdc7586d08805bc984539f6672476e86e538b6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..994b48acdba5cd0fdfb28cd1fbb0a84ebf81cba5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/METADATA
@@ -0,0 +1,233 @@
+Metadata-Version: 2.1
+Name: mpmath
+Version: 1.3.0
+Summary: Python library for arbitrary-precision floating-point arithmetic
+Home-page: http://mpmath.org/
+Author: Fredrik Johansson
+Author-email: fredrik.johansson@gmail.com
+License: BSD
+Project-URL: Source, https://github.com/fredrik-johansson/mpmath
+Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues
+Project-URL: Documentation, http://mpmath.org/doc/current/
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+License-File: LICENSE
+Provides-Extra: develop
+Requires-Dist: pytest (>=4.6) ; extra == 'develop'
+Requires-Dist: pycodestyle ; extra == 'develop'
+Requires-Dist: pytest-cov ; extra == 'develop'
+Requires-Dist: codecov ; extra == 'develop'
+Requires-Dist: wheel ; extra == 'develop'
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Provides-Extra: gmpy
+Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy'
+Provides-Extra: tests
+Requires-Dist: pytest (>=4.6) ; extra == 'tests'
+
+mpmath
+======
+
+|pypi version| |Build status| |Code coverage status| |Zenodo Badge|
+
+.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg
+ :target: https://pypi.python.org/pypi/mpmath
+.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg
+ :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test
+.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/fredrik-johansson/mpmath
+.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg
+ :target: https://zenodo.org/badge/latestdoi/2934512
+
+A Python library for arbitrary-precision floating-point arithmetic.
+
+Website: http://mpmath.org/
+Main author: Fredrik Johansson
+
+Mpmath is free software released under the New BSD License (see the
+LICENSE file for details)
+
+0. History and credits
+----------------------
+
+The following people (among others) have contributed major patches
+or new features to mpmath:
+
+* Pearu Peterson
+* Mario Pernici
+* Ondrej Certik
+* Vinzent Steinberg
+* Nimish Telang
+* Mike Taschuk
+* Case Van Horsen
+* Jorn Baayen
+* Chris Smith
+* Juan Arias de Reyna
+* Ioannis Tziakos
+* Aaron Meurer
+* Stefan Krastanov
+* Ken Allen
+* Timo Hartmann
+* Sergey B Kirpichev
+* Kris Kuhlman
+* Paul Masson
+* Michael Kagalenko
+* Jonathan Warner
+* Max Gaukler
+* Guillermo Navas-Palencia
+* Nike Dattani
+
+Numerous other people have contributed by reporting bugs,
+requesting new features, or suggesting improvements to the
+documentation.
+
+For a detailed changelog, including individual contributions,
+see the CHANGES file.
+
+Fredrik's work on mpmath during summer 2008 was sponsored by Google
+as part of the Google Summer of Code program.
+
+Fredrik's work on mpmath during summer 2009 was sponsored by the
+American Institute of Mathematics under the support of the National Science
+Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms).
+
+Any opinions, findings, and conclusions or recommendations expressed in this
+material are those of the author(s) and do not necessarily reflect the
+views of the sponsors.
+
+Credit also goes to:
+
+* The authors of the GMP library and the Python wrapper
+ gmpy, enabling mpmath to become much faster at
+ high precision
+* The authors of MPFR, pari/gp, MPFUN, and other arbitrary-
+ precision libraries, whose documentation has been helpful
+ for implementing many of the algorithms in mpmath
+* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik;
+ Wolfram Research for MathWorld and the Wolfram Functions site.
+ These are the main references used for special functions
+ implementations.
+* George Brandl for developing the Sphinx documentation tool
+ used to build mpmath's documentation
+
+Release history:
+
+* Version 1.3.0 released on March 7, 2023
+* Version 1.2.0 released on February 1, 2021
+* Version 1.1.0 released on December 11, 2018
+* Version 1.0.0 released on September 27, 2017
+* Version 0.19 released on June 10, 2014
+* Version 0.18 released on December 31, 2013
+* Version 0.17 released on February 1, 2011
+* Version 0.16 released on September 24, 2010
+* Version 0.15 released on June 6, 2010
+* Version 0.14 released on February 5, 2010
+* Version 0.13 released on August 13, 2009
+* Version 0.12 released on June 9, 2009
+* Version 0.11 released on January 26, 2009
+* Version 0.10 released on October 15, 2008
+* Version 0.9 released on August 23, 2008
+* Version 0.8 released on April 20, 2008
+* Version 0.7 released on March 12, 2008
+* Version 0.6 released on January 13, 2008
+* Version 0.5 released on November 24, 2007
+* Version 0.4 released on November 3, 2007
+* Version 0.3 released on October 5, 2007
+* Version 0.2 released on October 2, 2007
+* Version 0.1 released on September 27, 2007
+
+1. Download & installation
+--------------------------
+
+Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested
+with CPython 2.7, 3.5 through 3.7 and for PyPy.
+
+The latest release of mpmath can be downloaded from the mpmath
+website and from https://github.com/fredrik-johansson/mpmath/releases
+
+It should also be available in the Python Package Index at
+https://pypi.python.org/pypi/mpmath
+
+To install latest release of Mpmath with pip, simply run
+
+``pip install mpmath``
+
+Or unpack the mpmath archive and run
+
+``python setup.py install``
+
+Mpmath can also be installed using
+
+``python -m easy_install mpmath``
+
+The latest development code is available from
+https://github.com/fredrik-johansson/mpmath
+
+See the main documentation for more detailed instructions.
+
+2. Running tests
+----------------
+
+The unit tests in mpmath/tests/ can be run via the script
+runtests.py, but it is recommended to run them with py.test
+(https://pytest.org/), especially
+to generate more useful reports in case there are failures.
+
+You may also want to check out the demo scripts in the demo
+directory.
+
+The master branch is automatically tested by Travis CI.
+
+3. Documentation
+----------------
+
+Documentation in reStructuredText format is available in the
+doc directory included with the source package. These files
+are human-readable, but can be compiled to prettier HTML using
+the build.py script (requires Sphinx, http://sphinx.pocoo.org/).
+
+See setup.txt in the documentation for more information.
+
+The most recent documentation is also available in HTML format:
+
+http://mpmath.org/doc/current/
+
+4. Known problems
+-----------------
+
+Mpmath is a work in progress. Major issues include:
+
+* Some functions may return incorrect values when given extremely
+ large arguments or arguments very close to singularities.
+
+* Directed rounding works for arithmetic operations. It is implemented
+ heuristically for other operations, and their results may be off by one
+ or two units in the last place (even if otherwise accurate).
+
+* Some IEEE 754 features are not available. Inifinities and NaN are
+ partially supported; denormal rounding is currently not available
+ at all.
+
+* The interface for switching precision and rounding is not finalized.
+ The current method is not threadsafe.
+
+5. Help and bug reports
+-----------------------
+
+General questions and comments can be sent to the mpmath mailinglist,
+mpmath@googlegroups.com
+
+You can also report bugs and send patches to the mpmath issue tracker,
+https://github.com/fredrik-johansson/mpmath/issues
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..21bdf458683495b2e0d7f52464d33fdc500333e7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/RECORD
@@ -0,0 +1,180 @@
+mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537
+mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630
+mpmath-1.3.0.dist-info/RECORD,,
+mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7
+mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765
+mpmath/__pycache__/__init__.cpython-310.pyc,,
+mpmath/__pycache__/ctx_base.cpython-310.pyc,,
+mpmath/__pycache__/ctx_fp.cpython-310.pyc,,
+mpmath/__pycache__/ctx_iv.cpython-310.pyc,,
+mpmath/__pycache__/ctx_mp.cpython-310.pyc,,
+mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,,
+mpmath/__pycache__/function_docs.cpython-310.pyc,,
+mpmath/__pycache__/identification.cpython-310.pyc,,
+mpmath/__pycache__/math2.cpython-310.pyc,,
+mpmath/__pycache__/rational.cpython-310.pyc,,
+mpmath/__pycache__/usertools.cpython-310.pyc,,
+mpmath/__pycache__/visualization.cpython-310.pyc,,
+mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162
+mpmath/calculus/__pycache__/__init__.cpython-310.pyc,,
+mpmath/calculus/__pycache__/approximation.cpython-310.pyc,,
+mpmath/calculus/__pycache__/calculus.cpython-310.pyc,,
+mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,,
+mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,,
+mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,,
+mpmath/calculus/__pycache__/odes.cpython-310.pyc,,
+mpmath/calculus/__pycache__/optimization.cpython-310.pyc,,
+mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,,
+mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,,
+mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817
+mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112
+mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226
+mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306
+mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056
+mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908
+mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856
+mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877
+mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432
+mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985
+mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572
+mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211
+mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452
+mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815
+mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512
+mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330
+mpmath/functions/__pycache__/__init__.cpython-310.pyc,,
+mpmath/functions/__pycache__/bessel.cpython-310.pyc,,
+mpmath/functions/__pycache__/elliptic.cpython-310.pyc,,
+mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,,
+mpmath/functions/__pycache__/factorials.cpython-310.pyc,,
+mpmath/functions/__pycache__/functions.cpython-310.pyc,,
+mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,,
+mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,,
+mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,,
+mpmath/functions/__pycache__/rszeta.cpython-310.pyc,,
+mpmath/functions/__pycache__/signals.cpython-310.pyc,,
+mpmath/functions/__pycache__/theta.cpython-310.pyc,,
+mpmath/functions/__pycache__/zeta.cpython-310.pyc,,
+mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,,
+mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938
+mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237
+mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644
+mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273
+mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100
+mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570
+mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097
+mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633
+mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184
+mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703
+mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320
+mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410
+mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858
+mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253
+mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790
+mpmath/libmp/__pycache__/__init__.cpython-310.pyc,,
+mpmath/libmp/__pycache__/backend.cpython-310.pyc,,
+mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,,
+mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,,
+mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,,
+mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,,
+mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,,
+mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,,
+mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,,
+mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360
+mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469
+mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861
+mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624
+mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688
+mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875
+mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021
+mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622
+mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561
+mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94
+mpmath/matrices/__pycache__/__init__.cpython-310.pyc,,
+mpmath/matrices/__pycache__/calculus.cpython-310.pyc,,
+mpmath/matrices/__pycache__/eigen.cpython-310.pyc,,
+mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,,
+mpmath/matrices/__pycache__/linalg.cpython-310.pyc,,
+mpmath/matrices/__pycache__/matrices.cpython-310.pyc,,
+mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609
+mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394
+mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534
+mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958
+mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331
+mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976
+mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mpmath/tests/__pycache__/__init__.cpython-310.pyc,,
+mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,,
+mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,,
+mpmath/tests/__pycache__/runtests.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_convert.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_diff.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_division.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_fp.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_functions.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_hp.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_identify.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_interval.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_levin.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_ode.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_power.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_quad.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_special.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_str.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_summation.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_trig.cpython-310.pyc,,
+mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,,
+mpmath/tests/__pycache__/torture.cpython-310.pyc,,
+mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228
+mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003
+mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189
+mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348
+mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686
+mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187
+mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306
+mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834
+mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466
+mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340
+mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905
+mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778
+mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225
+mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997
+mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955
+mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990
+mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917
+mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461
+mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692
+mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527
+mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090
+mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440
+mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944
+mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196
+mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822
+mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401
+mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227
+mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893
+mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132
+mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848
+mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544
+mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035
+mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799
+mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944
+mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868
+mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029
+mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dda7c273a8dd1c6adffa9d2d9901e0ce6876f4ac
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath-1.3.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+mpmath
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..561a4260d4197b6a2e2dff54638d047608131748
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c61c75614a6829d0d79e8a953c037c257498302b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2d729775b7504f14fe468d1b9ae3146d87b16b1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2731b7d955b89b13f08740aae175a5127542f135
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c1ad20a2d564ad801578150802d72e7d5f0c01f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3eeb64230813975cf3783a10745caa0e82e25d1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/identification.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/identification.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a44686b41bc383a2c23c3e4ef1eeaa9fcc93cdab
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/identification.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/math2.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/math2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38999266e03ac970c5bede311fa191f752205791
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/math2.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/rational.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/rational.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a41c4ca8fafbcd059d461fd273505833122dab9
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/rational.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6485fef30dada4b7c537ff91dd8bcb72cd487eab
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..606d58667a44ca6f6b617b9b85066fb468b8ac67
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c310c8cb6211e55c88184c0cbccf633837d58ac3
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c10f1ef7140006ecd76e21f5e3b39bf42ae592ed
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0982165ca0e025eae4a2a2b4a6148363beebd390
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a17c7f2e7bfbf3566d73453979a7a39bbc7fbabd
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19dbd618f90b9aafb854eb52663078b14a06fcad
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/ctx_base.py b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..1946f8daf4dbe165b3943be09af361812828aab1
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_base.py
@@ -0,0 +1,494 @@
+from operator import gt, lt
+
+from .libmp.backend import xrange
+
+from .functions.functions import SpecialFunctions
+from .functions.rszeta import RSCache
+from .calculus.quadrature import QuadratureMethods
+from .calculus.inverselaplace import LaplaceTransformInversionMethods
+from .calculus.calculus import CalculusMethods
+from .calculus.optimization import OptimizationMethods
+from .calculus.odes import ODEMethods
+from .matrices.matrices import MatrixMethods
+from .matrices.calculus import MatrixCalculusMethods
+from .matrices.linalg import LinearAlgebraMethods
+from .matrices.eigen import Eigen
+from .identification import IdentificationMethods
+from .visualization import VisualizationMethods
+
+from . import libmp
+
+class Context(object):
+ pass
+
+class StandardBaseContext(Context,
+ SpecialFunctions,
+ RSCache,
+ QuadratureMethods,
+ LaplaceTransformInversionMethods,
+ CalculusMethods,
+ MatrixMethods,
+ MatrixCalculusMethods,
+ LinearAlgebraMethods,
+ Eigen,
+ IdentificationMethods,
+ OptimizationMethods,
+ ODEMethods,
+ VisualizationMethods):
+
+ NoConvergence = libmp.NoConvergence
+ ComplexResult = libmp.ComplexResult
+
+ def __init__(ctx):
+ ctx._aliases = {}
+ # Call those that need preinitialization (e.g. for wrappers)
+ SpecialFunctions.__init__(ctx)
+ RSCache.__init__(ctx)
+ QuadratureMethods.__init__(ctx)
+ LaplaceTransformInversionMethods.__init__(ctx)
+ CalculusMethods.__init__(ctx)
+ MatrixMethods.__init__(ctx)
+
+ def _init_aliases(ctx):
+ for alias, value in ctx._aliases.items():
+ try:
+ setattr(ctx, alias, getattr(ctx, value))
+ except AttributeError:
+ pass
+
+ _fixed_precision = False
+
+ # XXX
+ verbose = False
+
+ def warn(ctx, msg):
+ print("Warning:", msg)
+
+ def bad_domain(ctx, msg):
+ raise ValueError(msg)
+
+ def _re(ctx, x):
+ if hasattr(x, "real"):
+ return x.real
+ return x
+
+ def _im(ctx, x):
+ if hasattr(x, "imag"):
+ return x.imag
+ return ctx.zero
+
+ def _as_points(ctx, x):
+ return x
+
+ def fneg(ctx, x, **kwargs):
+ return -ctx.convert(x)
+
+ def fadd(ctx, x, y, **kwargs):
+ return ctx.convert(x)+ctx.convert(y)
+
+ def fsub(ctx, x, y, **kwargs):
+ return ctx.convert(x)-ctx.convert(y)
+
+ def fmul(ctx, x, y, **kwargs):
+ return ctx.convert(x)*ctx.convert(y)
+
+ def fdiv(ctx, x, y, **kwargs):
+ return ctx.convert(x)/ctx.convert(y)
+
+ def fsum(ctx, args, absolute=False, squared=False):
+ if absolute:
+ if squared:
+ return sum((abs(x)**2 for x in args), ctx.zero)
+ return sum((abs(x) for x in args), ctx.zero)
+ if squared:
+ return sum((x**2 for x in args), ctx.zero)
+ return sum(args, ctx.zero)
+
+ def fdot(ctx, xs, ys=None, conjugate=False):
+ if ys is not None:
+ xs = zip(xs, ys)
+ if conjugate:
+ cf = ctx.conj
+ return sum((x*cf(y) for (x,y) in xs), ctx.zero)
+ else:
+ return sum((x*y for (x,y) in xs), ctx.zero)
+
+ def fprod(ctx, args):
+ prod = ctx.one
+ for arg in args:
+ prod *= arg
+ return prod
+
+ def nprint(ctx, x, n=6, **kwargs):
+ """
+ Equivalent to ``print(nstr(x, n))``.
+ """
+ print(ctx.nstr(x, n, **kwargs))
+
+ def chop(ctx, x, tol=None):
+ """
+ Chops off small real or imaginary parts, or converts
+ numbers close to zero to exact zeros. The input can be a
+ single number or an iterable::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> chop(5+1e-10j, tol=1e-9)
+ mpf('5.0')
+ >>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2]))
+ [1.0, 0.0, 3.0, -4.0, 2.0]
+
+ The tolerance defaults to ``100*eps``.
+ """
+ if tol is None:
+ tol = 100*ctx.eps
+ try:
+ x = ctx.convert(x)
+ absx = abs(x)
+ if abs(x) < tol:
+ return ctx.zero
+ if ctx._is_complex_type(x):
+ #part_tol = min(tol, absx*tol)
+ part_tol = max(tol, absx*tol)
+ if abs(x.imag) < part_tol:
+ return x.real
+ if abs(x.real) < part_tol:
+ return ctx.mpc(0, x.imag)
+ except TypeError:
+ if isinstance(x, ctx.matrix):
+ return x.apply(lambda a: ctx.chop(a, tol))
+ if hasattr(x, "__iter__"):
+ return [ctx.chop(a, tol) for a in x]
+ return x
+
+ def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
+ r"""
+ Determine whether the difference between `s` and `t` is smaller
+ than a given epsilon, either relatively or absolutely.
+
+ Both a maximum relative difference and a maximum difference
+ ('epsilons') may be specified. The absolute difference is
+ defined as `|s-t|` and the relative difference is defined
+ as `|s-t|/\max(|s|, |t|)`.
+
+ If only one epsilon is given, both are set to the same value.
+ If none is given, both epsilons are set to `2^{-p+m}` where
+ `p` is the current working precision and `m` is a small
+ integer. The default setting typically allows :func:`~mpmath.almosteq`
+ to be used to check for mathematical equality
+ in the presence of small rounding errors.
+
+ **Examples**
+
+ >>> from mpmath import *
+ >>> mp.dps = 15
+ >>> almosteq(3.141592653589793, 3.141592653589790)
+ True
+ >>> almosteq(3.141592653589793, 3.141592653589700)
+ False
+ >>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
+ True
+ >>> almosteq(1e-20, 2e-20)
+ True
+ >>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
+ False
+
+ """
+ t = ctx.convert(t)
+ if abs_eps is None and rel_eps is None:
+ rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4)
+ if abs_eps is None:
+ abs_eps = rel_eps
+ elif rel_eps is None:
+ rel_eps = abs_eps
+ diff = abs(s-t)
+ if diff <= abs_eps:
+ return True
+ abss = abs(s)
+ abst = abs(t)
+ if abss < abst:
+ err = diff/abst
+ else:
+ err = diff/abss
+ return err <= rel_eps
+
+ def arange(ctx, *args):
+ r"""
+ This is a generalized version of Python's :func:`~mpmath.range` function
+ that accepts fractional endpoints and step sizes and
+ returns a list of ``mpf`` instances. Like :func:`~mpmath.range`,
+ :func:`~mpmath.arange` can be called with 1, 2 or 3 arguments:
+
+ ``arange(b)``
+ `[0, 1, 2, \ldots, x]`
+ ``arange(a, b)``
+ `[a, a+1, a+2, \ldots, x]`
+ ``arange(a, b, h)``
+ `[a, a+h, a+h, \ldots, x]`
+
+ where `b-1 \le x < b` (in the third case, `b-h \le x < b`).
+
+ Like Python's :func:`~mpmath.range`, the endpoint is not included. To
+ produce ranges where the endpoint is included, :func:`~mpmath.linspace`
+ is more convenient.
+
+ **Examples**
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> arange(4)
+ [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')]
+ >>> arange(1, 2, 0.25)
+ [mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')]
+ >>> arange(1, -1, -0.75)
+ [mpf('1.0'), mpf('0.25'), mpf('-0.5')]
+
+ """
+ if not len(args) <= 3:
+ raise TypeError('arange expected at most 3 arguments, got %i'
+ % len(args))
+ if not len(args) >= 1:
+ raise TypeError('arange expected at least 1 argument, got %i'
+ % len(args))
+ # set default
+ a = 0
+ dt = 1
+ # interpret arguments
+ if len(args) == 1:
+ b = args[0]
+ elif len(args) >= 2:
+ a = args[0]
+ b = args[1]
+ if len(args) == 3:
+ dt = args[2]
+ a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt)
+ assert a + dt != a, 'dt is too small and would cause an infinite loop'
+ # adapt code for sign of dt
+ if a > b:
+ if dt > 0:
+ return []
+ op = gt
+ else:
+ if dt < 0:
+ return []
+ op = lt
+ # create list
+ result = []
+ i = 0
+ t = a
+ while 1:
+ t = a + dt*i
+ i += 1
+ if op(t, b):
+ result.append(t)
+ else:
+ break
+ return result
+
+ def linspace(ctx, *args, **kwargs):
+ """
+ ``linspace(a, b, n)`` returns a list of `n` evenly spaced
+ samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)``
+ is also valid.
+
+ This function is often more convenient than :func:`~mpmath.arange`
+ for partitioning an interval into subintervals, since
+ the endpoint is included::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> linspace(1, 4, 4)
+ [mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')]
+
+ You may also provide the keyword argument ``endpoint=False``::
+
+ >>> linspace(1, 4, 4, endpoint=False)
+ [mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')]
+
+ """
+ if len(args) == 3:
+ a = ctx.mpf(args[0])
+ b = ctx.mpf(args[1])
+ n = int(args[2])
+ elif len(args) == 2:
+ assert hasattr(args[0], '_mpi_')
+ a = args[0].a
+ b = args[0].b
+ n = int(args[1])
+ else:
+ raise TypeError('linspace expected 2 or 3 arguments, got %i' \
+ % len(args))
+ if n < 1:
+ raise ValueError('n must be greater than 0')
+ if not 'endpoint' in kwargs or kwargs['endpoint']:
+ if n == 1:
+ return [ctx.mpf(a)]
+ step = (b - a) / ctx.mpf(n - 1)
+ y = [i*step + a for i in xrange(n)]
+ y[-1] = b
+ else:
+ step = (b - a) / ctx.mpf(n)
+ y = [i*step + a for i in xrange(n)]
+ return y
+
+ def cos_sin(ctx, z, **kwargs):
+ return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs)
+
+ def cospi_sinpi(ctx, z, **kwargs):
+ return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs)
+
+ def _default_hyper_maxprec(ctx, p):
+ return int(1000 * p**0.25 + 4*p)
+
+ _gcd = staticmethod(libmp.gcd)
+ list_primes = staticmethod(libmp.list_primes)
+ isprime = staticmethod(libmp.isprime)
+ bernfrac = staticmethod(libmp.bernfrac)
+ moebius = staticmethod(libmp.moebius)
+ _ifac = staticmethod(libmp.ifac)
+ _eulernum = staticmethod(libmp.eulernum)
+ _stirling1 = staticmethod(libmp.stirling1)
+ _stirling2 = staticmethod(libmp.stirling2)
+
+ def sum_accurately(ctx, terms, check_step=1):
+ prec = ctx.prec
+ try:
+ extraprec = 10
+ while 1:
+ ctx.prec = prec + extraprec + 5
+ max_mag = ctx.ninf
+ s = ctx.zero
+ k = 0
+ for term in terms():
+ s += term
+ if (not k % check_step) and term:
+ term_mag = ctx.mag(term)
+ max_mag = max(max_mag, term_mag)
+ sum_mag = ctx.mag(s)
+ if sum_mag - term_mag > ctx.prec:
+ break
+ k += 1
+ cancellation = max_mag - sum_mag
+ if cancellation != cancellation:
+ break
+ if cancellation < extraprec or ctx._fixed_precision:
+ break
+ extraprec += min(ctx.prec, cancellation)
+ return s
+ finally:
+ ctx.prec = prec
+
+ def mul_accurately(ctx, factors, check_step=1):
+ prec = ctx.prec
+ try:
+ extraprec = 10
+ while 1:
+ ctx.prec = prec + extraprec + 5
+ max_mag = ctx.ninf
+ one = ctx.one
+ s = one
+ k = 0
+ for factor in factors():
+ s *= factor
+ term = factor - one
+ if (not k % check_step):
+ term_mag = ctx.mag(term)
+ max_mag = max(max_mag, term_mag)
+ sum_mag = ctx.mag(s-one)
+ #if sum_mag - term_mag > ctx.prec:
+ # break
+ if -term_mag > ctx.prec:
+ break
+ k += 1
+ cancellation = max_mag - sum_mag
+ if cancellation != cancellation:
+ break
+ if cancellation < extraprec or ctx._fixed_precision:
+ break
+ extraprec += min(ctx.prec, cancellation)
+ return s
+ finally:
+ ctx.prec = prec
+
+ def power(ctx, x, y):
+ r"""Converts `x` and `y` to mpmath numbers and evaluates
+ `x^y = \exp(y \log(x))`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 30; mp.pretty = True
+ >>> power(2, 0.5)
+ 1.41421356237309504880168872421
+
+ This shows the leading few digits of a large Mersenne prime
+ (performing the exact calculation ``2**43112609-1`` and
+ displaying the result in Python would be very slow)::
+
+ >>> power(2, 43112609)-1
+ 3.16470269330255923143453723949e+12978188
+ """
+ return ctx.convert(x) ** ctx.convert(y)
+
+ def _zeta_int(ctx, n):
+ return ctx.zeta(n)
+
+ def maxcalls(ctx, f, N):
+ """
+ Return a wrapped copy of *f* that raises ``NoConvergence`` when *f*
+ has been called more than *N* times::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15
+ >>> f = maxcalls(sin, 10)
+ >>> print(sum(f(n) for n in range(10)))
+ 1.95520948210738
+ >>> f(10) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: maxcalls: function evaluated 10 times
+
+ """
+ counter = [0]
+ def f_maxcalls_wrapped(*args, **kwargs):
+ counter[0] += 1
+ if counter[0] > N:
+ raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N)
+ return f(*args, **kwargs)
+ return f_maxcalls_wrapped
+
+ def memoize(ctx, f):
+ """
+ Return a wrapped copy of *f* that caches computed values, i.e.
+ a memoized copy of *f*. Values are only reused if the cached precision
+ is equal to or higher than the working precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> f = memoize(maxcalls(sin, 1))
+ >>> f(2)
+ 0.909297426825682
+ >>> f(2)
+ 0.909297426825682
+ >>> mp.dps = 25
+ >>> f(2) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: maxcalls: function evaluated 1 times
+
+ """
+ f_cache = {}
+ def f_cached(*args, **kwargs):
+ if kwargs:
+ key = args, tuple(kwargs.items())
+ else:
+ key = args
+ prec = ctx.prec
+ if key in f_cache:
+ cprec, cvalue = f_cache[key]
+ if cprec >= prec:
+ return +cvalue
+ value = f(*args, **kwargs)
+ f_cache[key] = (prec, value)
+ return value
+ f_cached.__name__ = f.__name__
+ f_cached.__doc__ = f.__doc__
+ return f_cached
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/ctx_fp.py b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_fp.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa72ea5b03fde4da66b0d8fbf8ffa4012e3f6178
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_fp.py
@@ -0,0 +1,253 @@
+from .ctx_base import StandardBaseContext
+
+import math
+import cmath
+from . import math2
+
+from . import function_docs
+
+from .libmp import mpf_bernoulli, to_float, int_types
+from . import libmp
+
+class FPContext(StandardBaseContext):
+ """
+ Context for fast low-precision arithmetic (53-bit precision, giving at most
+ about 15-digit accuracy), using Python's builtin float and complex.
+ """
+
+ def __init__(ctx):
+ StandardBaseContext.__init__(ctx)
+
+ # Override SpecialFunctions implementation
+ ctx.loggamma = math2.loggamma
+ ctx._bernoulli_cache = {}
+ ctx.pretty = False
+
+ ctx._init_aliases()
+
+ _mpq = lambda cls, x: float(x[0])/x[1]
+
+ NoConvergence = libmp.NoConvergence
+
+ def _get_prec(ctx): return 53
+ def _set_prec(ctx, p): return
+ def _get_dps(ctx): return 15
+ def _set_dps(ctx, p): return
+
+ _fixed_precision = True
+
+ prec = property(_get_prec, _set_prec)
+ dps = property(_get_dps, _set_dps)
+
+ zero = 0.0
+ one = 1.0
+ eps = math2.EPS
+ inf = math2.INF
+ ninf = math2.NINF
+ nan = math2.NAN
+ j = 1j
+
+ # Called by SpecialFunctions.__init__()
+ @classmethod
+ def _wrap_specfun(cls, name, f, wrap):
+ if wrap:
+ def f_wrapped(ctx, *args, **kwargs):
+ convert = ctx.convert
+ args = [convert(a) for a in args]
+ return f(ctx, *args, **kwargs)
+ else:
+ f_wrapped = f
+ f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
+ setattr(cls, name, f_wrapped)
+
+ def bernoulli(ctx, n):
+ cache = ctx._bernoulli_cache
+ if n in cache:
+ return cache[n]
+ cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True)
+ return cache[n]
+
+ pi = math2.pi
+ e = math2.e
+ euler = math2.euler
+ sqrt2 = 1.4142135623730950488
+ sqrt5 = 2.2360679774997896964
+ phi = 1.6180339887498948482
+ ln2 = 0.69314718055994530942
+ ln10 = 2.302585092994045684
+ euler = 0.57721566490153286061
+ catalan = 0.91596559417721901505
+ khinchin = 2.6854520010653064453
+ apery = 1.2020569031595942854
+ glaisher = 1.2824271291006226369
+
+ absmin = absmax = abs
+
+ def is_special(ctx, x):
+ return x - x != 0.0
+
+ def isnan(ctx, x):
+ return x != x
+
+ def isinf(ctx, x):
+ return abs(x) == math2.INF
+
+ def isnormal(ctx, x):
+ if x:
+ return x - x == 0.0
+ return False
+
+ def isnpint(ctx, x):
+ if type(x) is complex:
+ if x.imag:
+ return False
+ x = x.real
+ return x <= 0.0 and round(x) == x
+
+ mpf = float
+ mpc = complex
+
+ def convert(ctx, x):
+ try:
+ return float(x)
+ except:
+ return complex(x)
+
+ power = staticmethod(math2.pow)
+ sqrt = staticmethod(math2.sqrt)
+ exp = staticmethod(math2.exp)
+ ln = log = staticmethod(math2.log)
+ cos = staticmethod(math2.cos)
+ sin = staticmethod(math2.sin)
+ tan = staticmethod(math2.tan)
+ cos_sin = staticmethod(math2.cos_sin)
+ acos = staticmethod(math2.acos)
+ asin = staticmethod(math2.asin)
+ atan = staticmethod(math2.atan)
+ cosh = staticmethod(math2.cosh)
+ sinh = staticmethod(math2.sinh)
+ tanh = staticmethod(math2.tanh)
+ gamma = staticmethod(math2.gamma)
+ rgamma = staticmethod(math2.rgamma)
+ fac = factorial = staticmethod(math2.factorial)
+ floor = staticmethod(math2.floor)
+ ceil = staticmethod(math2.ceil)
+ cospi = staticmethod(math2.cospi)
+ sinpi = staticmethod(math2.sinpi)
+ cbrt = staticmethod(math2.cbrt)
+ _nthroot = staticmethod(math2.nthroot)
+ _ei = staticmethod(math2.ei)
+ _e1 = staticmethod(math2.e1)
+ _zeta = _zeta_int = staticmethod(math2.zeta)
+
+ # XXX: math2
+ def arg(ctx, z):
+ z = complex(z)
+ return math.atan2(z.imag, z.real)
+
+ def expj(ctx, x):
+ return ctx.exp(ctx.j*x)
+
+ def expjpi(ctx, x):
+ return ctx.exp(ctx.j*ctx.pi*x)
+
+ ldexp = math.ldexp
+ frexp = math.frexp
+
+ def mag(ctx, z):
+ if z:
+ return ctx.frexp(abs(z))[1]
+ return ctx.ninf
+
+ def isint(ctx, z):
+ if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
+ if z.imag:
+ return False
+ z = z.real
+ try:
+ return z == int(z)
+ except:
+ return False
+
+ def nint_distance(ctx, z):
+ if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
+ n = round(z.real)
+ else:
+ n = round(z)
+ if n == z:
+ return n, ctx.ninf
+ return n, ctx.mag(abs(z-n))
+
+ def _convert_param(ctx, z):
+ if type(z) is tuple:
+ p, q = z
+ return ctx.mpf(p) / q, 'R'
+ if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
+ intz = int(z.real)
+ else:
+ intz = int(z)
+ if z == intz:
+ return intz, 'Z'
+ return z, 'R'
+
+ def _is_real_type(ctx, z):
+ return isinstance(z, float) or isinstance(z, int_types)
+
+ def _is_complex_type(ctx, z):
+ return isinstance(z, complex)
+
+ def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
+ coeffs = list(coeffs)
+ num = range(p)
+ den = range(p,p+q)
+ tol = ctx.eps
+ s = t = 1.0
+ k = 0
+ while 1:
+ for i in num: t *= (coeffs[i]+k)
+ for i in den: t /= (coeffs[i]+k)
+ k += 1; t /= k; t *= z; s += t
+ if abs(t) < tol:
+ return s
+ if k > maxterms:
+ raise ctx.NoConvergence
+
+ def atan2(ctx, x, y):
+ return math.atan2(x, y)
+
+ def psi(ctx, m, z):
+ m = int(m)
+ if m == 0:
+ return ctx.digamma(z)
+ return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z)
+
+ digamma = staticmethod(math2.digamma)
+
+ def harmonic(ctx, x):
+ x = ctx.convert(x)
+ if x == 0 or x == 1:
+ return x
+ return ctx.digamma(x+1) + ctx.euler
+
+ nstr = str
+
+ def to_fixed(ctx, x, prec):
+ return int(math.ldexp(x, prec))
+
+ def rand(ctx):
+ import random
+ return random.random()
+
+ _erf = staticmethod(math2.erf)
+ _erfc = staticmethod(math2.erfc)
+
+ def sum_accurately(ctx, terms, check_step=1):
+ s = ctx.zero
+ k = 0
+ for term in terms():
+ s += term
+ if (not k % check_step) and term:
+ if abs(term) <= 1e-18*abs(s):
+ break
+ k += 1
+ return s
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/ctx_iv.py b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_iv.py
new file mode 100644
index 0000000000000000000000000000000000000000..c038e00a5677e318d222b63c22d225e3045e1c2b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_iv.py
@@ -0,0 +1,551 @@
+import operator
+
+from . import libmp
+
+from .libmp.backend import basestring
+
+from .libmp import (
+ int_types, MPZ_ONE,
+ prec_to_dps, dps_to_prec, repr_dps,
+ round_floor, round_ceiling,
+ fzero, finf, fninf, fnan,
+ mpf_le, mpf_neg,
+ from_int, from_float, from_str, from_rational,
+ mpi_mid, mpi_delta, mpi_str,
+ mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub,
+ mpi_mul, mpi_div, mpi_pow_int, mpi_pow,
+ mpi_from_str,
+ mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
+ mpci_abs, mpci_pow, mpci_exp, mpci_log,
+ ComplexResult,
+ mpf_hash, mpc_hash)
+from .matrices.matrices import _matrix
+
+mpi_zero = (fzero, fzero)
+
+from .ctx_base import StandardBaseContext
+
+new = object.__new__
+
+def convert_mpf_(x, prec, rounding):
+ if hasattr(x, "_mpf_"): return x._mpf_
+ if isinstance(x, int_types): return from_int(x, prec, rounding)
+ if isinstance(x, float): return from_float(x, prec, rounding)
+ if isinstance(x, basestring): return from_str(x, prec, rounding)
+ raise NotImplementedError
+
+
+class ivmpf(object):
+ """
+ Interval arithmetic class. Precision is controlled by iv.prec.
+ """
+
+ def __new__(cls, x=0):
+ return cls.ctx.convert(x)
+
+ def cast(self, cls, f_convert):
+ a, b = self._mpi_
+ if a == b:
+ return cls(f_convert(a))
+ raise ValueError
+
+ def __int__(self):
+ return self.cast(int, libmp.to_int)
+
+ def __float__(self):
+ return self.cast(float, libmp.to_float)
+
+ def __complex__(self):
+ return self.cast(complex, libmp.to_float)
+
+ def __hash__(self):
+ a, b = self._mpi_
+ if a == b:
+ return mpf_hash(a)
+ else:
+ return hash(self._mpi_)
+
+ @property
+ def real(self): return self
+
+ @property
+ def imag(self): return self.ctx.zero
+
+ def conjugate(self): return self
+
+ @property
+ def a(self):
+ a, b = self._mpi_
+ return self.ctx.make_mpf((a, a))
+
+ @property
+ def b(self):
+ a, b = self._mpi_
+ return self.ctx.make_mpf((b, b))
+
+ @property
+ def mid(self):
+ ctx = self.ctx
+ v = mpi_mid(self._mpi_, ctx.prec)
+ return ctx.make_mpf((v, v))
+
+ @property
+ def delta(self):
+ ctx = self.ctx
+ v = mpi_delta(self._mpi_, ctx.prec)
+ return ctx.make_mpf((v,v))
+
+ @property
+ def _mpci_(self):
+ return self._mpi_, mpi_zero
+
+ def _compare(*args):
+ raise TypeError("no ordering relation is defined for intervals")
+
+ __gt__ = _compare
+ __le__ = _compare
+ __gt__ = _compare
+ __ge__ = _compare
+
+ def __contains__(self, t):
+ t = self.ctx.mpf(t)
+ return (self.a <= t.a) and (t.b <= self.b)
+
+ def __str__(self):
+ return mpi_str(self._mpi_, self.ctx.prec)
+
+ def __repr__(self):
+ if self.ctx.pretty:
+ return str(self)
+ a, b = self._mpi_
+ n = repr_dps(self.ctx.prec)
+ a = libmp.to_str(a, n)
+ b = libmp.to_str(b, n)
+ return "mpi(%r, %r)" % (a, b)
+
+ def _compare(s, t, cmpfun):
+ if not hasattr(t, "_mpi_"):
+ try:
+ t = s.ctx.convert(t)
+ except:
+ return NotImplemented
+ return cmpfun(s._mpi_, t._mpi_)
+
+ def __eq__(s, t): return s._compare(t, libmp.mpi_eq)
+ def __ne__(s, t): return s._compare(t, libmp.mpi_ne)
+ def __lt__(s, t): return s._compare(t, libmp.mpi_lt)
+ def __le__(s, t): return s._compare(t, libmp.mpi_le)
+ def __gt__(s, t): return s._compare(t, libmp.mpi_gt)
+ def __ge__(s, t): return s._compare(t, libmp.mpi_ge)
+
+ def __abs__(self):
+ return self.ctx.make_mpf(mpi_abs(self._mpi_, self.ctx.prec))
+ def __pos__(self):
+ return self.ctx.make_mpf(mpi_pos(self._mpi_, self.ctx.prec))
+ def __neg__(self):
+ return self.ctx.make_mpf(mpi_neg(self._mpi_, self.ctx.prec))
+
+ def ae(s, t, rel_eps=None, abs_eps=None):
+ return s.ctx.almosteq(s, t, rel_eps, abs_eps)
+
+class ivmpc(object):
+
+ def __new__(cls, re=0, im=0):
+ re = cls.ctx.convert(re)
+ im = cls.ctx.convert(im)
+ y = new(cls)
+ y._mpci_ = re._mpi_, im._mpi_
+ return y
+
+ def __hash__(self):
+ (a, b), (c,d) = self._mpci_
+ if a == b and c == d:
+ return mpc_hash((a, c))
+ else:
+ return hash(self._mpci_)
+
+ def __repr__(s):
+ if s.ctx.pretty:
+ return str(s)
+ return "iv.mpc(%s, %s)" % (repr(s.real), repr(s.imag))
+
+ def __str__(s):
+ return "(%s + %s*j)" % (str(s.real), str(s.imag))
+
+ @property
+ def a(self):
+ (a, b), (c,d) = self._mpci_
+ return self.ctx.make_mpf((a, a))
+
+ @property
+ def b(self):
+ (a, b), (c,d) = self._mpci_
+ return self.ctx.make_mpf((b, b))
+
+ @property
+ def c(self):
+ (a, b), (c,d) = self._mpci_
+ return self.ctx.make_mpf((c, c))
+
+ @property
+ def d(self):
+ (a, b), (c,d) = self._mpci_
+ return self.ctx.make_mpf((d, d))
+
+ @property
+ def real(s):
+ return s.ctx.make_mpf(s._mpci_[0])
+
+ @property
+ def imag(s):
+ return s.ctx.make_mpf(s._mpci_[1])
+
+ def conjugate(s):
+ a, b = s._mpci_
+ return s.ctx.make_mpc((a, mpf_neg(b)))
+
+ def overlap(s, t):
+ t = s.ctx.convert(t)
+ real_overlap = (s.a <= t.a <= s.b) or (s.a <= t.b <= s.b) or (t.a <= s.a <= t.b) or (t.a <= s.b <= t.b)
+ imag_overlap = (s.c <= t.c <= s.d) or (s.c <= t.d <= s.d) or (t.c <= s.c <= t.d) or (t.c <= s.d <= t.d)
+ return real_overlap and imag_overlap
+
+ def __contains__(s, t):
+ t = s.ctx.convert(t)
+ return t.real in s.real and t.imag in s.imag
+
+ def _compare(s, t, ne=False):
+ if not isinstance(t, s.ctx._types):
+ try:
+ t = s.ctx.convert(t)
+ except:
+ return NotImplemented
+ if hasattr(t, '_mpi_'):
+ tval = t._mpi_, mpi_zero
+ elif hasattr(t, '_mpci_'):
+ tval = t._mpci_
+ if ne:
+ return s._mpci_ != tval
+ return s._mpci_ == tval
+
+ def __eq__(s, t): return s._compare(t)
+ def __ne__(s, t): return s._compare(t, True)
+
+ def __lt__(s, t): raise TypeError("complex intervals cannot be ordered")
+ __le__ = __gt__ = __ge__ = __lt__
+
+ def __neg__(s): return s.ctx.make_mpc(mpci_neg(s._mpci_, s.ctx.prec))
+ def __pos__(s): return s.ctx.make_mpc(mpci_pos(s._mpci_, s.ctx.prec))
+ def __abs__(s): return s.ctx.make_mpf(mpci_abs(s._mpci_, s.ctx.prec))
+
+ def ae(s, t, rel_eps=None, abs_eps=None):
+ return s.ctx.almosteq(s, t, rel_eps, abs_eps)
+
+def _binary_op(f_real, f_complex):
+ def g_complex(ctx, sval, tval):
+ return ctx.make_mpc(f_complex(sval, tval, ctx.prec))
+ def g_real(ctx, sval, tval):
+ try:
+ return ctx.make_mpf(f_real(sval, tval, ctx.prec))
+ except ComplexResult:
+ sval = (sval, mpi_zero)
+ tval = (tval, mpi_zero)
+ return g_complex(ctx, sval, tval)
+ def lop_real(s, t):
+ if isinstance(t, _matrix): return NotImplemented
+ ctx = s.ctx
+ if not isinstance(t, ctx._types): t = ctx.convert(t)
+ if hasattr(t, "_mpi_"): return g_real(ctx, s._mpi_, t._mpi_)
+ if hasattr(t, "_mpci_"): return g_complex(ctx, (s._mpi_, mpi_zero), t._mpci_)
+ return NotImplemented
+ def rop_real(s, t):
+ ctx = s.ctx
+ if not isinstance(t, ctx._types): t = ctx.convert(t)
+ if hasattr(t, "_mpi_"): return g_real(ctx, t._mpi_, s._mpi_)
+ if hasattr(t, "_mpci_"): return g_complex(ctx, t._mpci_, (s._mpi_, mpi_zero))
+ return NotImplemented
+ def lop_complex(s, t):
+ if isinstance(t, _matrix): return NotImplemented
+ ctx = s.ctx
+ if not isinstance(t, s.ctx._types):
+ try:
+ t = s.ctx.convert(t)
+ except (ValueError, TypeError):
+ return NotImplemented
+ return g_complex(ctx, s._mpci_, t._mpci_)
+ def rop_complex(s, t):
+ ctx = s.ctx
+ if not isinstance(t, s.ctx._types):
+ t = s.ctx.convert(t)
+ return g_complex(ctx, t._mpci_, s._mpci_)
+ return lop_real, rop_real, lop_complex, rop_complex
+
+ivmpf.__add__, ivmpf.__radd__, ivmpc.__add__, ivmpc.__radd__ = _binary_op(mpi_add, mpci_add)
+ivmpf.__sub__, ivmpf.__rsub__, ivmpc.__sub__, ivmpc.__rsub__ = _binary_op(mpi_sub, mpci_sub)
+ivmpf.__mul__, ivmpf.__rmul__, ivmpc.__mul__, ivmpc.__rmul__ = _binary_op(mpi_mul, mpci_mul)
+ivmpf.__div__, ivmpf.__rdiv__, ivmpc.__div__, ivmpc.__rdiv__ = _binary_op(mpi_div, mpci_div)
+ivmpf.__pow__, ivmpf.__rpow__, ivmpc.__pow__, ivmpc.__rpow__ = _binary_op(mpi_pow, mpci_pow)
+
+ivmpf.__truediv__ = ivmpf.__div__; ivmpf.__rtruediv__ = ivmpf.__rdiv__
+ivmpc.__truediv__ = ivmpc.__div__; ivmpc.__rtruediv__ = ivmpc.__rdiv__
+
+class ivmpf_constant(ivmpf):
+ def __new__(cls, f):
+ self = new(cls)
+ self._f = f
+ return self
+ def _get_mpi_(self):
+ prec = self.ctx._prec[0]
+ a = self._f(prec, round_floor)
+ b = self._f(prec, round_ceiling)
+ return a, b
+ _mpi_ = property(_get_mpi_)
+
+class MPIntervalContext(StandardBaseContext):
+
+ def __init__(ctx):
+ ctx.mpf = type('ivmpf', (ivmpf,), {})
+ ctx.mpc = type('ivmpc', (ivmpc,), {})
+ ctx._types = (ctx.mpf, ctx.mpc)
+ ctx._constant = type('ivmpf_constant', (ivmpf_constant,), {})
+ ctx._prec = [53]
+ ctx._set_prec(53)
+ ctx._constant._ctxdata = ctx.mpf._ctxdata = ctx.mpc._ctxdata = [ctx.mpf, new, ctx._prec]
+ ctx._constant.ctx = ctx.mpf.ctx = ctx.mpc.ctx = ctx
+ ctx.pretty = False
+ StandardBaseContext.__init__(ctx)
+ ctx._init_builtins()
+
+ def _mpi(ctx, a, b=None):
+ if b is None:
+ return ctx.mpf(a)
+ return ctx.mpf((a,b))
+
+ def _init_builtins(ctx):
+ ctx.one = ctx.mpf(1)
+ ctx.zero = ctx.mpf(0)
+ ctx.inf = ctx.mpf('inf')
+ ctx.ninf = -ctx.inf
+ ctx.nan = ctx.mpf('nan')
+ ctx.j = ctx.mpc(0,1)
+ ctx.exp = ctx._wrap_mpi_function(libmp.mpi_exp, libmp.mpci_exp)
+ ctx.sqrt = ctx._wrap_mpi_function(libmp.mpi_sqrt)
+ ctx.ln = ctx._wrap_mpi_function(libmp.mpi_log, libmp.mpci_log)
+ ctx.cos = ctx._wrap_mpi_function(libmp.mpi_cos, libmp.mpci_cos)
+ ctx.sin = ctx._wrap_mpi_function(libmp.mpi_sin, libmp.mpci_sin)
+ ctx.tan = ctx._wrap_mpi_function(libmp.mpi_tan)
+ ctx.gamma = ctx._wrap_mpi_function(libmp.mpi_gamma, libmp.mpci_gamma)
+ ctx.loggamma = ctx._wrap_mpi_function(libmp.mpi_loggamma, libmp.mpci_loggamma)
+ ctx.rgamma = ctx._wrap_mpi_function(libmp.mpi_rgamma, libmp.mpci_rgamma)
+ ctx.factorial = ctx._wrap_mpi_function(libmp.mpi_factorial, libmp.mpci_factorial)
+ ctx.fac = ctx.factorial
+
+ ctx.eps = ctx._constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1))
+ ctx.pi = ctx._constant(libmp.mpf_pi)
+ ctx.e = ctx._constant(libmp.mpf_e)
+ ctx.ln2 = ctx._constant(libmp.mpf_ln2)
+ ctx.ln10 = ctx._constant(libmp.mpf_ln10)
+ ctx.phi = ctx._constant(libmp.mpf_phi)
+ ctx.euler = ctx._constant(libmp.mpf_euler)
+ ctx.catalan = ctx._constant(libmp.mpf_catalan)
+ ctx.glaisher = ctx._constant(libmp.mpf_glaisher)
+ ctx.khinchin = ctx._constant(libmp.mpf_khinchin)
+ ctx.twinprime = ctx._constant(libmp.mpf_twinprime)
+
+ def _wrap_mpi_function(ctx, f_real, f_complex=None):
+ def g(x, **kwargs):
+ if kwargs:
+ prec = kwargs.get('prec', ctx._prec[0])
+ else:
+ prec = ctx._prec[0]
+ x = ctx.convert(x)
+ if hasattr(x, "_mpi_"):
+ return ctx.make_mpf(f_real(x._mpi_, prec))
+ if hasattr(x, "_mpci_"):
+ return ctx.make_mpc(f_complex(x._mpci_, prec))
+ raise ValueError
+ return g
+
+ @classmethod
+ def _wrap_specfun(cls, name, f, wrap):
+ if wrap:
+ def f_wrapped(ctx, *args, **kwargs):
+ convert = ctx.convert
+ args = [convert(a) for a in args]
+ prec = ctx.prec
+ try:
+ ctx.prec += 10
+ retval = f(ctx, *args, **kwargs)
+ finally:
+ ctx.prec = prec
+ return +retval
+ else:
+ f_wrapped = f
+ setattr(cls, name, f_wrapped)
+
+ def _set_prec(ctx, n):
+ ctx._prec[0] = max(1, int(n))
+ ctx._dps = prec_to_dps(n)
+
+ def _set_dps(ctx, n):
+ ctx._prec[0] = dps_to_prec(n)
+ ctx._dps = max(1, int(n))
+
+ prec = property(lambda ctx: ctx._prec[0], _set_prec)
+ dps = property(lambda ctx: ctx._dps, _set_dps)
+
+ def make_mpf(ctx, v):
+ a = new(ctx.mpf)
+ a._mpi_ = v
+ return a
+
+ def make_mpc(ctx, v):
+ a = new(ctx.mpc)
+ a._mpci_ = v
+ return a
+
+ def _mpq(ctx, pq):
+ p, q = pq
+ a = libmp.from_rational(p, q, ctx.prec, round_floor)
+ b = libmp.from_rational(p, q, ctx.prec, round_ceiling)
+ return ctx.make_mpf((a, b))
+
+ def convert(ctx, x):
+ if isinstance(x, (ctx.mpf, ctx.mpc)):
+ return x
+ if isinstance(x, ctx._constant):
+ return +x
+ if isinstance(x, complex) or hasattr(x, "_mpc_"):
+ re = ctx.convert(x.real)
+ im = ctx.convert(x.imag)
+ return ctx.mpc(re,im)
+ if isinstance(x, basestring):
+ v = mpi_from_str(x, ctx.prec)
+ return ctx.make_mpf(v)
+ if hasattr(x, "_mpi_"):
+ a, b = x._mpi_
+ else:
+ try:
+ a, b = x
+ except (TypeError, ValueError):
+ a = b = x
+ if hasattr(a, "_mpi_"):
+ a = a._mpi_[0]
+ else:
+ a = convert_mpf_(a, ctx.prec, round_floor)
+ if hasattr(b, "_mpi_"):
+ b = b._mpi_[1]
+ else:
+ b = convert_mpf_(b, ctx.prec, round_ceiling)
+ if a == fnan or b == fnan:
+ a = fninf
+ b = finf
+ assert mpf_le(a, b), "endpoints must be properly ordered"
+ return ctx.make_mpf((a, b))
+
+ def nstr(ctx, x, n=5, **kwargs):
+ x = ctx.convert(x)
+ if hasattr(x, "_mpi_"):
+ return libmp.mpi_to_str(x._mpi_, n, **kwargs)
+ if hasattr(x, "_mpci_"):
+ re = libmp.mpi_to_str(x._mpci_[0], n, **kwargs)
+ im = libmp.mpi_to_str(x._mpci_[1], n, **kwargs)
+ return "(%s + %s*j)" % (re, im)
+
+ def mag(ctx, x):
+ x = ctx.convert(x)
+ if isinstance(x, ctx.mpc):
+ return max(ctx.mag(x.real), ctx.mag(x.imag)) + 1
+ a, b = libmp.mpi_abs(x._mpi_)
+ sign, man, exp, bc = b
+ if man:
+ return exp+bc
+ if b == fzero:
+ return ctx.ninf
+ if b == fnan:
+ return ctx.nan
+ return ctx.inf
+
+ def isnan(ctx, x):
+ return False
+
+ def isinf(ctx, x):
+ return x == ctx.inf
+
+ def isint(ctx, x):
+ x = ctx.convert(x)
+ a, b = x._mpi_
+ if a == b:
+ sign, man, exp, bc = a
+ if man:
+ return exp >= 0
+ return a == fzero
+ return None
+
+ def ldexp(ctx, x, n):
+ a, b = ctx.convert(x)._mpi_
+ a = libmp.mpf_shift(a, n)
+ b = libmp.mpf_shift(b, n)
+ return ctx.make_mpf((a,b))
+
+ def absmin(ctx, x):
+ return abs(ctx.convert(x)).a
+
+ def absmax(ctx, x):
+ return abs(ctx.convert(x)).b
+
+ def atan2(ctx, y, x):
+ y = ctx.convert(y)._mpi_
+ x = ctx.convert(x)._mpi_
+ return ctx.make_mpf(libmp.mpi_atan2(y,x,ctx.prec))
+
+ def _convert_param(ctx, x):
+ if isinstance(x, libmp.int_types):
+ return x, 'Z'
+ if isinstance(x, tuple):
+ p, q = x
+ return (ctx.mpf(p) / ctx.mpf(q), 'R')
+ x = ctx.convert(x)
+ if isinstance(x, ctx.mpf):
+ return x, 'R'
+ if isinstance(x, ctx.mpc):
+ return x, 'C'
+ raise ValueError
+
+ def _is_real_type(ctx, z):
+ return isinstance(z, ctx.mpf) or isinstance(z, int_types)
+
+ def _is_complex_type(ctx, z):
+ return isinstance(z, ctx.mpc)
+
+ def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
+ coeffs = list(coeffs)
+ num = range(p)
+ den = range(p,p+q)
+ #tol = ctx.eps
+ s = t = ctx.one
+ k = 0
+ while 1:
+ for i in num: t *= (coeffs[i]+k)
+ for i in den: t /= (coeffs[i]+k)
+ k += 1; t /= k; t *= z; s += t
+ if t == 0:
+ return s
+ #if abs(t) < tol:
+ # return s
+ if k > maxterms:
+ raise ctx.NoConvergence
+
+
+# Register with "numbers" ABC
+# We do not subclass, hence we do not use the @abstractmethod checks. While
+# this is less invasive it may turn out that we do not actually support
+# parts of the expected interfaces. See
+# http://docs.python.org/2/library/numbers.html for list of abstract
+# methods.
+try:
+ import numbers
+ numbers.Complex.register(ivmpc)
+ numbers.Real.register(ivmpf)
+except ImportError:
+ pass
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/ctx_mp.py b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_mp.py
new file mode 100644
index 0000000000000000000000000000000000000000..93594dd44474a415c74e4b0beb83bd7012666c9d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_mp.py
@@ -0,0 +1,1339 @@
+"""
+This module defines the mpf, mpc classes, and standard functions for
+operating with them.
+"""
+__docformat__ = 'plaintext'
+
+import functools
+
+import re
+
+from .ctx_base import StandardBaseContext
+
+from .libmp.backend import basestring, BACKEND
+
+from . import libmp
+
+from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
+ round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
+ ComplexResult, to_pickable, from_pickable, normalize,
+ from_int, from_float, from_str, to_int, to_float, to_str,
+ from_rational, from_man_exp,
+ fone, fzero, finf, fninf, fnan,
+ mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
+ mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
+ mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
+ mpf_hash, mpf_rand,
+ mpf_sum,
+ bitcount, to_fixed,
+ mpc_to_str,
+ mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
+ mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
+ mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
+ mpc_mpf_div,
+ mpf_pow,
+ mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
+ mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
+ mpf_glaisher, mpf_twinprime, mpf_mertens,
+ int_types)
+
+from . import function_docs
+from . import rational
+
+new = object.__new__
+
+get_complex = re.compile(r'^\(?(?P[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?)??'
+ r'(?P[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?j)?\)?$')
+
+if BACKEND == 'sage':
+ from sage.libs.mpmath.ext_main import Context as BaseMPContext
+ # pickle hack
+ import sage.libs.mpmath.ext_main as _mpf_module
+else:
+ from .ctx_mp_python import PythonMPContext as BaseMPContext
+ from . import ctx_mp_python as _mpf_module
+
+from .ctx_mp_python import _mpf, _mpc, mpnumeric
+
+class MPContext(BaseMPContext, StandardBaseContext):
+ """
+ Context for multiprecision arithmetic with a global precision.
+ """
+
+ def __init__(ctx):
+ """Initialize the context: default precision, builtin constants and
+ functions, hypergeometric-summator cache, and function docstrings."""
+ BaseMPContext.__init__(ctx)
+ ctx.trap_complex = False
+ ctx.pretty = False
+ ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
+ ctx._mpq = rational.mpq
+ ctx.default()
+ StandardBaseContext.__init__(ctx)
+
+ ctx.mpq = rational.mpq
+ ctx.init_builtins()
+
+ # Cache of compiled hypergeometric summators, keyed by (p, q, flags, R/C).
+ ctx.hyp_summators = {}
+
+ ctx._init_aliases()
+
+ # XXX: automate
+ # Attach docstrings to bound methods: Python 2 exposes them via
+ # im_func, Python 3 via __func__ (hence the AttributeError fallback).
+ try:
+ ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
+ ctx.primepi.im_func.func_doc = function_docs.primepi
+ ctx.psi.im_func.func_doc = function_docs.psi
+ ctx.atan2.im_func.func_doc = function_docs.atan2
+ except AttributeError:
+ # python 3
+ ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
+ ctx.primepi.__func__.func_doc = function_docs.primepi
+ ctx.psi.__func__.func_doc = function_docs.psi
+ ctx.atan2.__func__.func_doc = function_docs.atan2
+
+ ctx.digamma.func_doc = function_docs.digamma
+ ctx.cospi.func_doc = function_docs.cospi
+ ctx.sinpi.func_doc = function_docs.sinpi
+
+ def init_builtins(ctx):
+ """Install exact/approximate constants and libmp-wrapped standard
+ functions as attributes of the context."""
+
+ mpf = ctx.mpf
+ mpc = ctx.mpc
+
+ # Exact constants
+ ctx.one = ctx.make_mpf(fone)
+ ctx.zero = ctx.make_mpf(fzero)
+ ctx.j = ctx.make_mpc((fzero,fone))
+ ctx.inf = ctx.make_mpf(finf)
+ ctx.ninf = ctx.make_mpf(fninf)
+ ctx.nan = ctx.make_mpf(fnan)
+
+ # eps = 2**(1-prec), recomputed lazily at the current precision.
+ eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
+ "epsilon of working precision", "eps")
+ ctx.eps = eps
+
+ # Approximate constants
+ ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
+ ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
+ ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
+ ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
+ ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
+ ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
+ ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
+ ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
+ ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
+ ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
+ ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
+ ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
+ ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
+
+ # Standard functions (each wraps a real and a complex libmp kernel)
+ ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
+ ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
+ ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
+ ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
+ ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
+ ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
+ ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
+ ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
+ ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
+ ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
+ ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
+ ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
+ ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
+ ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
+ ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
+ # NOTE(review): ctx.atan is assigned a second time here (also set
+ # above); redundant but harmless.
+ ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
+ ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
+ ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
+ ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
+ ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
+ ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
+ ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
+ ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
+ ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
+ ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
+ ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
+
+ ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
+ ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
+ ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
+ ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
+
+ ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
+ ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
+ ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
+ ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
+ ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
+ ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
+ ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
+ ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
+ ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
+ ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
+ ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
+ ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
+ ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
+
+ # Faster versions (present only when running on the Sage backend)
+ ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt)
+ ctx.exp = getattr(ctx, "_sage_exp", ctx.exp)
+ ctx.ln = getattr(ctx, "_sage_ln", ctx.ln)
+ ctx.cos = getattr(ctx, "_sage_cos", ctx.cos)
+ ctx.sin = getattr(ctx, "_sage_sin", ctx.sin)
+
+ def to_fixed(ctx, x, prec):
+ # Delegate to the number's own fixed-point conversion at prec bits.
+ return x.to_fixed(prec)
+
+ def hypot(ctx, x, y):
+ r"""
+ Computes the Euclidean norm of the vector `(x, y)`, equal
+ to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ # _prec_rounding unpacks to (prec, rounding) for the libmp call.
+ return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding))
+
+ def _gamma_upper_int(ctx, n, z):
+ # Upper incomplete gamma for integer order n via mpf_expint with
+ # gamma=True; n == 0 reduces to the exponential integral E1.
+ n = int(ctx._re(n))
+ if n == 0:
+ return ctx.e1(z)
+ # Only real z is supported by the libmp kernel here.
+ if not hasattr(z, '_mpf_'):
+ raise NotImplementedError
+ prec, rounding = ctx._prec_rounding
+ real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True)
+ # imag is None for a purely real result.
+ if imag is None:
+ return ctx.make_mpf(real)
+ else:
+ return ctx.make_mpc((real, imag))
+
+ def _expint_int(ctx, n, z):
+ # Generalized exponential integral E_n(z) for integer n;
+ # n == 1 is handled by the dedicated e1 function.
+ n = int(n)
+ if n == 1:
+ return ctx.e1(z)
+ if not hasattr(z, '_mpf_'):
+ raise NotImplementedError
+ prec, rounding = ctx._prec_rounding
+ real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding)
+ # imag is None for a purely real result.
+ if imag is None:
+ return ctx.make_mpf(real)
+ else:
+ return ctx.make_mpc((real, imag))
+
+ def _nthroot(ctx, x, n):
+ # Principal n-th root. Real inputs try the real kernel first; if the
+ # result is complex (negative x, even n), fall through to the complex
+ # kernel unless the context traps complex results.
+ if hasattr(x, '_mpf_'):
+ try:
+ return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
+ except ComplexResult:
+ if ctx.trap_complex:
+ raise
+ # Promote the real value to a complex pair (x, 0).
+ x = (x._mpf_, libmp.fzero)
+ else:
+ x = x._mpc_
+ return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
+
+ def _besselj(ctx, n, z):
+ # Bessel function J_n(z) via libmp kernels.
+ # NOTE(review): falls off the end (returns None) if z is neither
+ # mpf nor mpc; callers presumably convert first — confirm.
+ prec, rounding = ctx._prec_rounding
+ if hasattr(z, '_mpf_'):
+ return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
+ elif hasattr(z, '_mpc_'):
+ return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
+
+ def _agm(ctx, a, b=1):
+ # Arithmetic-geometric mean AGM(a, b). Real fast path first; a
+ # ComplexResult (e.g. negative arguments) drops through to the
+ # complex kernel with real values promoted to (x, 0) pairs.
+ prec, rounding = ctx._prec_rounding
+ if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
+ try:
+ v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
+ return ctx.make_mpf(v)
+ except ComplexResult:
+ pass
+ if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
+ else: a = a._mpc_
+ if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
+ else: b = b._mpc_
+ return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
+
+ def bernoulli(ctx, n):
+ # Bernoulli number B_n at the working precision.
+ return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding))
+
+ def _zeta_int(ctx, n):
+ # Riemann zeta at an integer argument (specialized fast kernel).
+ return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding))
+
+ def atan2(ctx, y, x):
+ # Two-argument arctangent; note the (y, x) argument order,
+ # matching math.atan2.
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding))
+
+ def psi(ctx, m, z):
+ # Polygamma function psi^(m)(z), dispatching on real vs complex z.
+ z = ctx.convert(z)
+ m = int(m)
+ if ctx._is_real_type(z):
+ return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding))
+ else:
+ return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding))
+
+ def cos_sin(ctx, x, **kwargs):
+ # Compute (cos x, sin x) in one pass; cheaper than two separate
+ # calls since the libmp kernel shares work between them.
+ if type(x) not in ctx.types:
+ x = ctx.convert(x)
+ prec, rounding = ctx._parse_prec(kwargs)
+ if hasattr(x, '_mpf_'):
+ c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
+ return ctx.make_mpf(c), ctx.make_mpf(s)
+ elif hasattr(x, '_mpc_'):
+ c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
+ return ctx.make_mpc(c), ctx.make_mpc(s)
+ else:
+ # Fallback for exotic types: two separate calls.
+ return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
+
+ def cospi_sinpi(ctx, x, **kwargs):
+ # Compute (cos(pi*x), sin(pi*x)) in one pass.
+ if type(x) not in ctx.types:
+ x = ctx.convert(x)
+ prec, rounding = ctx._parse_prec(kwargs)
+ if hasattr(x, '_mpf_'):
+ c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
+ return ctx.make_mpf(c), ctx.make_mpf(s)
+ elif hasattr(x, '_mpc_'):
+ c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
+ return ctx.make_mpc(c), ctx.make_mpc(s)
+ else:
+ # NOTE(review): fallback calls cos/sin rather than cospi/sinpi;
+ # this branch is rarely reachable (convert yields mpf/mpc) but
+ # looks inconsistent — confirm intended.
+ return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
+
+ def clone(ctx):
+ """
+ Create a copy of the context, with the same working precision.
+ """
+ # Only the precision is copied; other settings (pretty,
+ # trap_complex) take their defaults in the new context.
+ a = ctx.__class__()
+ a.prec = ctx.prec
+ return a
+
+ # Several helper methods
+ # TODO: add more of these, make consistent, write docstrings, ...
+
+ def _is_real_type(ctx, x):
+ # "Real" here means: anything that is not complex-flavored
+ # (no _mpc_ payload and not a Python complex).
+ if hasattr(x, '_mpc_') or type(x) is complex:
+ return False
+ return True
+
+ def _is_complex_type(ctx, x):
+ # Complement of _is_real_type: mpc payload or Python complex.
+ if hasattr(x, '_mpc_') or type(x) is complex:
+ return True
+ return False
+
+ def isnan(ctx, x):
+ """
+ Return *True* if *x* is a NaN (not-a-number), or for a complex
+ number, whether either the real or complex part is NaN;
+ otherwise return *False*::
+
+ >>> from mpmath import *
+ >>> isnan(3.14)
+ False
+ >>> isnan(nan)
+ True
+ >>> isnan(mpc(3.14,2.72))
+ False
+ >>> isnan(mpc(3.14,nan))
+ True
+
+ """
+ if hasattr(x, "_mpf_"):
+ return x._mpf_ == fnan
+ if hasattr(x, "_mpc_"):
+ # True if either component equals the NaN sentinel.
+ return fnan in x._mpc_
+ if isinstance(x, int_types) or isinstance(x, rational.mpq):
+ return False
+ # Unknown types: convert once, then re-dispatch through isnan.
+ x = ctx.convert(x)
+ if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
+ return ctx.isnan(x)
+ raise TypeError("isnan() needs a number as input")
+
+ def isfinite(ctx, x):
+ """
+ Return *True* if *x* is a finite number, i.e. neither
+ an infinity or a NaN.
+
+ >>> from mpmath import *
+ >>> isfinite(inf)
+ False
+ >>> isfinite(-inf)
+ False
+ >>> isfinite(3)
+ True
+ >>> isfinite(nan)
+ False
+ >>> isfinite(3+4j)
+ True
+ >>> isfinite(mpc(3,inf))
+ False
+ >>> isfinite(mpc(nan,3))
+ False
+
+ """
+ # Defined as the complement of isinf/isnan.
+ if ctx.isinf(x) or ctx.isnan(x):
+ return False
+ return True
+
+ def isnpint(ctx, x):
+ """
+ Determine if *x* is a nonpositive integer.
+ """
+ # Zero (of any supported type) is falsy and counts as nonpositive.
+ if not x:
+ return True
+ if hasattr(x, '_mpf_'):
+ # Negative sign with nonnegative exponent means a negative
+ # integer (zero was handled above).
+ sign, man, exp, bc = x._mpf_
+ return sign and exp >= 0
+ if hasattr(x, '_mpc_'):
+ return not x.imag and ctx.isnpint(x.real)
+ if type(x) in int_types:
+ return x <= 0
+ if isinstance(x, ctx.mpq):
+ p, q = x._mpq_
+ if not p:
+ return True
+ return q == 1 and p <= 0
+ # Unknown types: convert and re-dispatch.
+ return ctx.isnpint(ctx.convert(x))
+
+ def __str__(ctx):
+ # Human-readable summary of the context's adjustable settings.
+ lines = ["Mpmath settings:",
+ (" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
+ (" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
+ (" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
+ ]
+ return "\n".join(lines)
+
+ @property
+ def _repr_digits(ctx):
+ # Digits used by repr(): enough to round-trip the binary value.
+ return repr_dps(ctx._prec)
+
+ @property
+ def _str_digits(ctx):
+ # Digits used by str(): the context's decimal precision.
+ return ctx._dps
+
+ def extraprec(ctx, n, normalize_output=False):
+ """
+ The block
+
+ with extraprec(n):
+ <code>
+
+ increases the precision n bits, executes <code>, and then
+ restores the precision.
+
+ extraprec(n)(f) returns a decorated version of the function f
+ that increases the working precision by n bits before execution,
+ and restores the parent precision afterwards. With
+ normalize_output=True, it rounds the return value to the parent
+ precision.
+ """
+ return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
+
+ def extradps(ctx, n, normalize_output=False):
+ """
+ This function is analogous to extraprec (see documentation)
+ but changes the decimal precision instead of the number of bits.
+ """
+ # Second callback adjusts dps rather than prec.
+ return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
+
+ def workprec(ctx, n, normalize_output=False):
+ """
+ The block
+
+ with workprec(n):
+ <code>
+
+ sets the precision to n bits, executes <code>, and then restores
+ the precision.
+
+ workprec(n)(f) returns a decorated version of the function f
+ that sets the precision to n bits before execution,
+ and restores the precision afterwards. With normalize_output=True,
+ it rounds the return value to the parent precision.
+ """
+ return PrecisionManager(ctx, lambda p: n, None, normalize_output)
+
+ def workdps(ctx, n, normalize_output=False):
+ """
+ This function is analogous to workprec (see documentation)
+ but changes the decimal precision instead of the number of bits.
+ """
+ # Second callback sets dps absolutely rather than adjusting prec.
+ return PrecisionManager(ctx, None, lambda d: n, normalize_output)
+
+ def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
+ r"""
+ Return a wrapped copy of *f* that repeatedly evaluates *f*
+ with increasing precision until the result converges to the
+ full precision used at the point of the call.
+
+ This heuristically protects against rounding errors, at the cost of
+ roughly a 2x slowdown compared to manually setting the optimal
+ precision. This method can, however, easily be fooled if the results
+ from *f* depend "discontinuously" on the precision, for instance
+ if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
+ should be used judiciously.
+
+ **Examples**
+
+ Many functions are sensitive to perturbations of the input arguments.
+ If the arguments are decimal numbers, they may have to be converted
+ to binary at a much higher precision. If the amount of required
+ extra precision is unknown, :func:`~mpmath.autoprec` is convenient::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15
+ >>> mp.pretty = True
+ >>> besselj(5, 125 * 10**28) # Exact input
+ -8.03284785591801e-17
+ >>> besselj(5, '1.25e30') # Bad
+ 7.12954868316652e-16
+ >>> autoprec(besselj)(5, '1.25e30') # Good
+ -8.03284785591801e-17
+
+ The following fails to converge because `\sin(\pi) = 0` whereas all
+ finite-precision approximations of `\pi` give nonzero values::
+
+ >>> autoprec(sin)(pi) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: autoprec: prec increased to 2910 without convergence
+
+ As the following example shows, :func:`~mpmath.autoprec` can protect against
+ cancellation, but is fooled by too severe cancellation::
+
+ >>> x = 1e-10
+ >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
+ 1.00000008274037e-10
+ 1.00000000005e-10
+ 1.00000000005e-10
+ >>> x = 1e-50
+ >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
+ 0.0
+ 1.0e-50
+ 0.0
+
+ With *catch*, an exception or list of exceptions to intercept
+ may be specified. The raised exception is interpreted
+ as signaling insufficient precision. This permits, for example,
+ evaluating a function where a too low precision results in a
+ division by zero::
+
+ >>> f = lambda x: 1/(exp(x)-1)
+ >>> f(1e-30)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError
+ >>> autoprec(f, catch=ZeroDivisionError)(1e-30)
+ 1.0e+30
+
+
+ """
+ def f_autoprec_wrapped(*args, **kwargs):
+ prec = ctx.prec
+ if maxprec is None:
+ maxprec2 = ctx._default_hyper_maxprec(prec)
+ else:
+ maxprec2 = maxprec
+ try:
+ # First evaluation at slightly raised precision; caught
+ # exceptions stand in for "not enough precision" (NaN).
+ ctx.prec = prec + 10
+ try:
+ v1 = f(*args, **kwargs)
+ except catch:
+ v1 = ctx.nan
+ prec2 = prec + 20
+ while 1:
+ ctx.prec = prec2
+ try:
+ v2 = f(*args, **kwargs)
+ except catch:
+ v2 = ctx.nan
+ if v1 == v2:
+ break
+ # Relative error estimate in bits between iterations.
+ err = ctx.mag(v2-v1) - ctx.mag(v2)
+ if err < (-prec):
+ break
+ if verbose:
+ print("autoprec: target=%s, prec=%s, accuracy=%s" \
+ % (prec, prec2, -err))
+ v1 = v2
+ if prec2 >= maxprec2:
+ raise ctx.NoConvergence(\
+ "autoprec: prec increased to %i without convergence"\
+ % prec2)
+ # Grow precision geometrically (x3), capped at the maximum.
+ prec2 += int(prec2*2)
+ prec2 = min(prec2, maxprec2)
+ finally:
+ # Always restore the caller's precision.
+ ctx.prec = prec
+ # Unary plus rounds the result to the restored precision.
+ return +v2
+ return f_autoprec_wrapped
+
+ def nstr(ctx, x, n=6, **kwargs):
+ """
+ Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
+ significant digits. The small default value for *n* is chosen to
+ make this function useful for printing collections of numbers
+ (lists, matrices, etc).
+
+ If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
+ to each element. For unrecognized classes, :func:`~mpmath.nstr`
+ simply returns ``str(x)``.
+
+ The companion function :func:`~mpmath.nprint` prints the result
+ instead of returning it.
+
+ The keyword arguments *strip_zeros*, *min_fixed*, *max_fixed*
+ and *show_zero_exponent* are forwarded to :func:`~mpmath.libmp.to_str`.
+
+ The number will be printed in fixed-point format if the position
+ of the leading digit is strictly between min_fixed
+ (default = min(-dps/3,-5)) and max_fixed (default = dps).
+
+ To force fixed-point format always, set min_fixed = -inf,
+ max_fixed = +inf. To force floating-point format, set
+ min_fixed >= max_fixed.
+
+ >>> from mpmath import *
+ >>> nstr([+pi, ldexp(1,-500)])
+ '[3.14159, 3.05494e-151]'
+ >>> nprint([+pi, ldexp(1,-500)])
+ [3.14159, 3.05494e-151]
+ >>> nstr(mpf("5e-10"), 5)
+ '5.0e-10'
+ >>> nstr(mpf("5e-10"), 5, strip_zeros=False)
+ '5.0000e-10'
+ >>> nstr(mpf("5e-10"), 5, strip_zeros=False, min_fixed=-11)
+ '0.00000000050000'
+ >>> nstr(mpf(0), 5, show_zero_exponent=True)
+ '0.0e+0'
+
+ """
+ # Containers recurse; scalars dispatch on their payload attribute.
+ if isinstance(x, list):
+ return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
+ if isinstance(x, tuple):
+ return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
+ if hasattr(x, '_mpf_'):
+ return to_str(x._mpf_, n, **kwargs)
+ if hasattr(x, '_mpc_'):
+ return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
+ if isinstance(x, basestring):
+ return repr(x)
+ if isinstance(x, ctx.matrix):
+ return x.__nstr__(n, **kwargs)
+ return str(x)
+
+ def _convert_fallback(ctx, x, strings):
+ # Last-resort conversion: complex string literals (containing 'j')
+ # and zero-width intervals; everything else is a TypeError.
+ if strings and isinstance(x, basestring):
+ if 'j' in x.lower():
+ x = x.lower().replace(' ', '')
+ # get_complex provides the named groups 're' and 'im'.
+ match = get_complex.match(x)
+ # Local name 're' shadows the re module within this scope only.
+ re = match.group('re')
+ if not re:
+ re = 0
+ im = match.group('im').rstrip('j')
+ return ctx.mpc(ctx.convert(re), ctx.convert(im))
+ if hasattr(x, "_mpi_"):
+ a, b = x._mpi_
+ if a == b:
+ return ctx.make_mpf(a)
+ else:
+ raise ValueError("can only create mpf from zero-width interval")
+ raise TypeError("cannot create mpf from " + repr(x))
+
+ def mpmathify(ctx, *args, **kwargs):
+ # Public alias for convert().
+ return ctx.convert(*args, **kwargs)
+
+ def _parse_prec(ctx, kwargs):
+ # Resolve (prec, rounding) from per-call keyword arguments,
+ # falling back to the context defaults. The pair (0, 'f') is the
+ # sentinel for exact (unrounded) arithmetic.
+ if kwargs:
+ if kwargs.get('exact'):
+ return 0, 'f'
+ prec, rounding = ctx._prec_rounding
+ if 'rounding' in kwargs:
+ rounding = kwargs['rounding']
+ if 'prec' in kwargs:
+ prec = kwargs['prec']
+ if prec == ctx.inf:
+ return 0, 'f'
+ else:
+ prec = int(prec)
+ elif 'dps' in kwargs:
+ dps = kwargs['dps']
+ if dps == ctx.inf:
+ return 0, 'f'
+ prec = dps_to_prec(dps)
+ return prec, rounding
+ return ctx._prec_rounding
+
+ # Error-message templates shared by the f* arithmetic wrappers and hypsum.
+ _exact_overflow_msg = "the exact result does not fit in memory"
+
+ _hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
+using a working precision of %i bits. Try with a higher maxprec,
+maxterms, or set zeroprec."""
+
+ def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
+ # Evaluate a pFq hypergeometric sum using a cached, generated
+ # fixed-point summator, retrying with more working precision until
+ # cancellation and parameter-induced magnitude jumps are resolved.
+ if hasattr(z, "_mpf_"):
+ key = p, q, flags, 'R'
+ v = z._mpf_
+ elif hasattr(z, "_mpc_"):
+ key = p, q, flags, 'C'
+ v = z._mpc_
+ # NOTE(review): if z is neither mpf nor mpc, key/v are unbound and
+ # the next line raises NameError; callers presumably convert first.
+ if key not in ctx.hyp_summators:
+ ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
+ summator = ctx.hyp_summators[key]
+ prec = ctx.prec
+ maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
+ extraprec = 50
+ epsshift = 25
+ # Jumps in magnitude occur when parameters are close to negative
+ # integers. We must ensure that these terms are included in
+ # the sum and added accurately
+ magnitude_check = {}
+ max_total_jump = 0
+ for i, c in enumerate(coeffs):
+ if flags[i] == 'Z':
+ if i >= p and c <= 0:
+ # A nonpositive-integer denominator parameter is a pole
+ # unless a numerator parameter terminates the series first.
+ ok = False
+ for ii, cc in enumerate(coeffs[:p]):
+ # Note: c <= cc or c < cc, depending on convention
+ if flags[ii] == 'Z' and cc <= 0 and c <= cc:
+ ok = True
+ if not ok:
+ raise ZeroDivisionError("pole in hypergeometric series")
+ continue
+ n, d = ctx.nint_distance(c)
+ n = -int(n)
+ d = -d
+ if i >= p and n >= 0 and d > 4:
+ if n in magnitude_check:
+ magnitude_check[n] += d
+ else:
+ magnitude_check[n] = d
+ extraprec = max(extraprec, d - prec + 60)
+ max_total_jump += abs(d)
+ while 1:
+ if extraprec > maxprec:
+ raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
+ wp = prec + extraprec
+ if magnitude_check:
+ mag_dict = dict((n,None) for n in magnitude_check)
+ else:
+ mag_dict = {}
+ zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
+ epsshift, mag_dict, **kwargs)
+ cancel = -magnitude
+ jumps_resolved = True
+ if extraprec < max_total_jump:
+ for n in mag_dict.values():
+ if (n is None) or (n < prec):
+ jumps_resolved = False
+ break
+ accurate = (cancel < extraprec-25-5 or not accurate_small)
+ if jumps_resolved:
+ if accurate:
+ break
+ # zero?
+ zeroprec = kwargs.get('zeroprec')
+ if zeroprec is not None:
+ if cancel > zeroprec:
+ if have_complex:
+ return ctx.mpc(0)
+ else:
+ return ctx.zero
+
+ # Some near-singularities were not included, so increase
+ # precision and repeat until they are
+ extraprec *= 2
+ # Possible workaround for bad roundoff in fixed-point arithmetic
+ epsshift += 5
+ extraprec += 5
+
+ # Summators return either a raw mpf/mpc tuple or an already-wrapped
+ # value; wrap only in the former case.
+ if type(zv) is tuple:
+ if have_complex:
+ return ctx.make_mpc(zv)
+ else:
+ return ctx.make_mpf(zv)
+ else:
+ return zv
+
+ def ldexp(ctx, x, n):
+ r"""
+ Computes `x 2^n` efficiently. No rounding is performed.
+ The argument `x` must be a real floating-point number (or
+ possible to convert into one) and `n` must be a Python ``int``.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> ldexp(1, 10)
+ mpf('1024.0')
+ >>> ldexp(1, -3)
+ mpf('0.125')
+
+ """
+ x = ctx.convert(x)
+ # Pure exponent shift on the mpf tuple: exact by construction.
+ return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
+
+ def frexp(ctx, x):
+ r"""
+ Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
+ `n` a Python integer, and such that `x = y 2^n`. No rounding is
+ performed.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> frexp(7.5)
+ (mpf('0.9375'), 3)
+
+ """
+ x = ctx.convert(x)
+ # libmp returns the normalized mantissa tuple and the exponent.
+ y, n = libmp.mpf_frexp(x._mpf_)
+ return ctx.make_mpf(y), n
+
+ def fneg(ctx, x, **kwargs):
+ """
+ Negates the number *x*, giving a floating-point result, optionally
+ using a custom precision and rounding mode.
+
+ See the documentation of :func:`~mpmath.fadd` for a detailed description
+ of how to specify precision and rounding.
+
+ **Examples**
+
+ An mpmath number is returned::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> fneg(2.5)
+ mpf('-2.5')
+ >>> fneg(-5+2j)
+ mpc(real='5.0', imag='-2.0')
+
+ Precise control over rounding is possible::
+
+ >>> x = fadd(2, 1e-100, exact=True)
+ >>> fneg(x)
+ mpf('-2.0')
+ >>> fneg(x, rounding='f')
+ mpf('-2.0000000000000004')
+
+ Negating with and without roundoff::
+
+ >>> n = 200000000000000000000001
+ >>> print(int(-mpf(n)))
+ -200000000000000016777216
+ >>> print(int(fneg(n)))
+ -200000000000000016777216
+ >>> print(int(fneg(n, prec=log(n,2)+1)))
+ -200000000000000000000001
+ >>> print(int(fneg(n, dps=log(n,10)+1)))
+ -200000000000000000000001
+ >>> print(int(fneg(n, prec=inf)))
+ -200000000000000000000001
+ >>> print(int(fneg(n, dps=inf)))
+ -200000000000000000000001
+ >>> print(int(fneg(n, exact=True)))
+ -200000000000000000000001
+
+ """
+ # (0, 'f') from _parse_prec signals exact (unrounded) negation.
+ prec, rounding = ctx._parse_prec(kwargs)
+ x = ctx.convert(x)
+ if hasattr(x, '_mpf_'):
+ return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
+ if hasattr(x, '_mpc_'):
+ return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
+
+ def fadd(ctx, x, y, **kwargs):
+ """
+ Adds the numbers *x* and *y*, giving a floating-point result,
+ optionally using a custom precision and rounding mode.
+
+ The default precision is the working precision of the context.
+ You can specify a custom precision in bits by passing the *prec* keyword
+ argument, or by providing an equivalent decimal precision with the *dps*
+ keyword argument. If the precision is set to ``+inf``, or if the flag
+ *exact=True* is passed, an exact addition with no rounding is performed.
+
+ When the precision is finite, the optional *rounding* keyword argument
+ specifies the direction of rounding. Valid options are ``'n'`` for
+ nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
+ for down, ``'u'`` for up.
+
+ **Examples**
+
+ Using :func:`~mpmath.fadd` with precision and rounding control::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> fadd(2, 1e-20)
+ mpf('2.0')
+ >>> fadd(2, 1e-20, rounding='u')
+ mpf('2.0000000000000004')
+ >>> nprint(fadd(2, 1e-20, prec=100), 25)
+ 2.00000000000000000001
+ >>> nprint(fadd(2, 1e-20, dps=15), 25)
+ 2.0
+ >>> nprint(fadd(2, 1e-20, dps=25), 25)
+ 2.00000000000000000001
+ >>> nprint(fadd(2, 1e-20, exact=True), 25)
+ 2.00000000000000000001
+
+ Exact addition avoids cancellation errors, enforcing familiar laws
+ of numbers such as `x+y-x = y`, which don't hold in floating-point
+ arithmetic with finite precision::
+
+ >>> x, y = mpf(2), mpf('1e-1000')
+ >>> print(x + y - x)
+ 0.0
+ >>> print(fadd(x, y, prec=inf) - x)
+ 1.0e-1000
+ >>> print(fadd(x, y, exact=True) - x)
+ 1.0e-1000
+
+ Exact addition can be inefficient and may be impossible to perform
+ with large magnitude differences::
+
+ >>> fadd(1, '1e-100000000000000000000', prec=inf)
+ Traceback (most recent call last):
+ ...
+ OverflowError: the exact result does not fit in memory
+
+ """
+ prec, rounding = ctx._parse_prec(kwargs)
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ try:
+ # Dispatch on the real/complex payloads of both operands;
+ # addition is commutative, so mpf+mpc reuses mpc_add_mpf.
+ if hasattr(x, '_mpf_'):
+ if hasattr(y, '_mpf_'):
+ return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
+ if hasattr(y, '_mpc_'):
+ return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
+ if hasattr(x, '_mpc_'):
+ if hasattr(y, '_mpf_'):
+ return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
+ if hasattr(y, '_mpc_'):
+ return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
+ except (ValueError, OverflowError):
+ # Exact arithmetic can exhaust memory; report uniformly.
+ raise OverflowError(ctx._exact_overflow_msg)
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
+
+ def fsub(ctx, x, y, **kwargs):
+ """
+ Subtracts the numbers *x* and *y*, giving a floating-point result,
+ optionally using a custom precision and rounding mode.
+
+ See the documentation of :func:`~mpmath.fadd` for a detailed description
+ of how to specify precision and rounding.
+
+ **Examples**
+
+ Using :func:`~mpmath.fsub` with precision and rounding control::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> fsub(2, 1e-20)
+ mpf('2.0')
+ >>> fsub(2, 1e-20, rounding='d')
+ mpf('1.9999999999999998')
+ >>> nprint(fsub(2, 1e-20, prec=100), 25)
+ 1.99999999999999999999
+ >>> nprint(fsub(2, 1e-20, dps=15), 25)
+ 2.0
+ >>> nprint(fsub(2, 1e-20, dps=25), 25)
+ 1.99999999999999999999
+ >>> nprint(fsub(2, 1e-20, exact=True), 25)
+ 1.99999999999999999999
+
+ Exact subtraction avoids cancellation errors, enforcing familiar laws
+ of numbers such as `x-y+y = x`, which don't hold in floating-point
+ arithmetic with finite precision::
+
+ >>> x, y = mpf(2), mpf('1e1000')
+ >>> print(x - y + y)
+ 0.0
+ >>> print(fsub(x, y, prec=inf) + y)
+ 2.0
+ >>> print(fsub(x, y, exact=True) + y)
+ 2.0
+
+ Exact addition can be inefficient and may be impossible to perform
+ with large magnitude differences::
+
+ >>> fsub(1, '1e-100000000000000000000', prec=inf)
+ Traceback (most recent call last):
+ ...
+ OverflowError: the exact result does not fit in memory
+
+ """
+ prec, rounding = ctx._parse_prec(kwargs)
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ try:
+ if hasattr(x, '_mpf_'):
+ if hasattr(y, '_mpf_'):
+ return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
+ if hasattr(y, '_mpc_'):
+ # Subtraction is not commutative (unlike fadd), so the real
+ # operand is promoted to a complex pair for full mpc_sub.
+ return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
+ if hasattr(x, '_mpc_'):
+ if hasattr(y, '_mpf_'):
+ return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
+ if hasattr(y, '_mpc_'):
+ return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
+ except (ValueError, OverflowError):
+ # Exact arithmetic can exhaust memory; report uniformly.
+ raise OverflowError(ctx._exact_overflow_msg)
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
+
+ def fmul(ctx, x, y, **kwargs):
+ """
+ Multiplies the numbers *x* and *y*, giving a floating-point result,
+ optionally using a custom precision and rounding mode.
+
+ See the documentation of :func:`~mpmath.fadd` for a detailed description
+ of how to specify precision and rounding.
+
+ **Examples**
+
+ The result is an mpmath number::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> fmul(2, 5.0)
+ mpf('10.0')
+ >>> fmul(0.5j, 0.5)
+ mpc(real='0.0', imag='0.25')
+
+ Avoiding roundoff::
+
+ >>> x, y = 10**10+1, 10**15+1
+ >>> print(x*y)
+ 10000000001000010000000001
+ >>> print(mpf(x) * mpf(y))
+ 1.0000000001e+25
+ >>> print(int(mpf(x) * mpf(y)))
+ 10000000001000011026399232
+ >>> print(int(fmul(x, y)))
+ 10000000001000011026399232
+ >>> print(int(fmul(x, y, dps=25)))
+ 10000000001000010000000001
+ >>> print(int(fmul(x, y, exact=True)))
+ 10000000001000010000000001
+
+ Exact multiplication with complex numbers can be inefficient and may
+ be impossible to perform with large magnitude differences between
+ real and imaginary parts::
+
+ >>> x = 1+2j
+ >>> y = mpc(2, '1e-100000000000000000000')
+ >>> fmul(x, y)
+ mpc(real='2.0', imag='4.0')
+ >>> fmul(x, y, rounding='u')
+ mpc(real='2.0', imag='4.0000000000000009')
+ >>> fmul(x, y, exact=True)
+ Traceback (most recent call last):
+ ...
+ OverflowError: the exact result does not fit in memory
+
+ """
+ prec, rounding = ctx._parse_prec(kwargs)
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ try:
+ # Multiplication is commutative, so mpf*mpc reuses mpc_mul_mpf.
+ if hasattr(x, '_mpf_'):
+ if hasattr(y, '_mpf_'):
+ return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
+ if hasattr(y, '_mpc_'):
+ return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
+ if hasattr(x, '_mpc_'):
+ if hasattr(y, '_mpf_'):
+ return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
+ if hasattr(y, '_mpc_'):
+ return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
+ except (ValueError, OverflowError):
+ # Exact arithmetic can exhaust memory; report uniformly.
+ raise OverflowError(ctx._exact_overflow_msg)
+ raise ValueError("Arguments need to be mpf or mpc compatible numbers")
+
    def fdiv(ctx, x, y, **kwargs):
        """
        Divides the numbers *x* and *y*, giving a floating-point result,
        optionally using a custom precision and rounding mode.

        See the documentation of :func:`~mpmath.fadd` for a detailed description
        of how to specify precision and rounding.

        **Examples**

        The result is an mpmath number::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> fdiv(3, 2)
            mpf('1.5')
            >>> fdiv(2, 3)
            mpf('0.66666666666666663')
            >>> fdiv(2+4j, 0.5)
            mpc(real='4.0', imag='8.0')

        The rounding direction and precision can be controlled::

            >>> fdiv(2, 3, dps=3)    # Should be accurate to at least 3 digits
            mpf('0.6666259765625')
            >>> fdiv(2, 3, rounding='d')
            mpf('0.66666666666666663')
            >>> fdiv(2, 3, prec=60)
            mpf('0.66666666666666667')
            >>> fdiv(2, 3, rounding='u')
            mpf('0.66666666666666674')

        Checking the error of a division by performing it at higher precision::

            >>> fdiv(2, 3) - fdiv(2, 3, prec=100)
            mpf('-3.7007434154172148e-17')

        Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
        allowed since the quotient of two floating-point numbers generally
        does not have an exact floating-point representation. (In the
        future this might be changed to allow the case where the division
        is actually exact.)

            >>> fdiv(2, 3, exact=True)
            Traceback (most recent call last):
              ...
            ValueError: division is not an exact operation

        """
        prec, rounding = ctx._parse_prec(kwargs)
        # prec == 0 signals exact mode, which division cannot support.
        if not prec:
            raise ValueError("division is not an exact operation")
        x = ctx.convert(x)
        y = ctx.convert(y)
        # Dispatch on the four real/complex operand combinations.
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
        raise ValueError("Arguments need to be mpf or mpc compatible numbers")
+
+ def nint_distance(ctx, x):
+ r"""
+ Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
+ an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
+ (measured in bits) lost to cancellation when computing `x-n`.
+
+ >>> from mpmath import *
+ >>> n, d = nint_distance(5)
+ >>> print(n); print(d)
+ 5
+ -inf
+ >>> n, d = nint_distance(mpf(5))
+ >>> print(n); print(d)
+ 5
+ -inf
+ >>> n, d = nint_distance(mpf(5.00000001))
+ >>> print(n); print(d)
+ 5
+ -26
+ >>> n, d = nint_distance(mpf(4.99999999))
+ >>> print(n); print(d)
+ 5
+ -26
+ >>> n, d = nint_distance(mpc(5,10))
+ >>> print(n); print(d)
+ 5
+ 4
+ >>> n, d = nint_distance(mpc(5,0.000001))
+ >>> print(n); print(d)
+ 5
+ -19
+
+ """
+ typx = type(x)
+ if typx in int_types:
+ return int(x), ctx.ninf
+ elif typx is rational.mpq:
+ p, q = x._mpq_
+ n, r = divmod(p, q)
+ if 2*r >= q:
+ n += 1
+ elif not r:
+ return n, ctx.ninf
+ # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
+ d = bitcount(abs(p-n*q)) - bitcount(q)
+ return n, d
+ if hasattr(x, "_mpf_"):
+ re = x._mpf_
+ im_dist = ctx.ninf
+ elif hasattr(x, "_mpc_"):
+ re, im = x._mpc_
+ isign, iman, iexp, ibc = im
+ if iman:
+ im_dist = iexp + ibc
+ elif im == fzero:
+ im_dist = ctx.ninf
+ else:
+ raise ValueError("requires a finite number")
+ else:
+ x = ctx.convert(x)
+ if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
+ return ctx.nint_distance(x)
+ else:
+ raise TypeError("requires an mpf/mpc")
+ sign, man, exp, bc = re
+ mag = exp+bc
+ # |x| < 0.5
+ if mag < 0:
+ n = 0
+ re_dist = mag
+ elif man:
+ # exact integer
+ if exp >= 0:
+ n = man << exp
+ re_dist = ctx.ninf
+ # exact half-integer
+ elif exp == -1:
+ n = (man>>1)+1
+ re_dist = 0
+ else:
+ d = (-exp-1)
+ t = man >> d
+ if t & 1:
+ t += 1
+ man = (t<>1 # int(t)>>1
+ re_dist = exp+bitcount(man)
+ if sign:
+ n = -n
+ elif re == fzero:
+ re_dist = ctx.ninf
+ n = 0
+ else:
+ raise ValueError("requires a finite number")
+ return n, max(re_dist, im_dist)
+
+ def fprod(ctx, factors):
+ r"""
+ Calculates a product containing a finite number of factors (for
+ infinite products, see :func:`~mpmath.nprod`). The factors will be
+ converted to mpmath numbers.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> fprod([1, 2, 0.5, 7])
+ mpf('7.0')
+
+ """
+ orig = ctx.prec
+ try:
+ v = ctx.one
+ for p in factors:
+ v *= p
+ finally:
+ ctx.prec = orig
+ return +v
+
    def rand(ctx):
        """
        Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
        The number of randomly generated bits in the mantissa is equal
        to the working precision.
        """
        # Delegates to libmp; uses the raw binary precision (ctx._prec).
        return ctx.make_mpf(mpf_rand(ctx._prec))
+
    def fraction(ctx, p, q):
        """
        Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
        the fraction `p/q`. The value is updated with the precision.

            >>> from mpmath import *
            >>> mp.dps = 15
            >>> a = fraction(1,100)
            >>> b = mpf(1)/100
            >>> print(a); print(b)
            0.01
            0.01
            >>> mp.dps = 30
            >>> print(a); print(b)      # a will be accurate
            0.01
            0.0100000000000000002081668171172
            >>> mp.dps = 15
        """
        # A 'constant' re-evaluates its defining function at the current
        # working precision each time it is used, hence the laziness.
        return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
            '%s/%s' % (p, q))
+
+ def absmin(ctx, x):
+ return abs(ctx.convert(x))
+
+ def absmax(ctx, x):
+ return abs(ctx.convert(x))
+
+ def _as_points(ctx, x):
+ # XXX: remove this?
+ if hasattr(x, '_mpi_'):
+ a, b = x._mpi_
+ return [ctx.make_mpf(a), ctx.make_mpf(b)]
+ return x
+
+ '''
+ def _zetasum(ctx, s, a, b):
+ """
+ Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
+ integers.
+ """
+ a = int(a)
+ b = int(b)
+ s = ctx.convert(s)
+ prec, rounding = ctx._prec_rounding
+ if hasattr(s, '_mpf_'):
+ v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
+ elif hasattr(s, '_mpc_'):
+ v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
+ return v
+ '''
+
    def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
        # Internal helper: zeta-type power sums via libmp, returning two
        # lists of mpc values (the sums and, with reflect=True, the
        # reflected sums).  Only integer *a* and complex *s* are supported.
        # NOTE(review): the mutable default `derivatives=[0]` is shared
        # across calls; it appears to be passed through read-only here.
        if not (ctx.isint(a) and hasattr(s, "_mpc_")):
            raise NotImplementedError
        a = int(a)
        prec = ctx._prec
        xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
        xs = [ctx.make_mpc(x) for x in xs]
        ys = [ctx.make_mpc(y) for y in ys]
        return xs, ys
+
class PrecisionManager:
    """Temporarily change a context's working precision.

    Usable both as a decorator factory (``__call__``) and as a context
    manager.  Exactly one of *precfun* / *dpsfun* should be given; it maps
    the current prec (or dps) to the value to use inside.  With
    *normalize_output* set, decorated return values are re-rounded to the
    restored precision via unary plus.
    """
    def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
        self.ctx = ctx
        self.precfun = precfun
        self.dpsfun = dpsfun
        self.normalize_output = normalize_output
    def __call__(self, f):
        # Decorator: run f at the adjusted precision, then restore.
        @functools.wraps(f)
        def g(*args, **kwargs):
            orig = self.ctx.prec
            try:
                if self.precfun:
                    self.ctx.prec = self.precfun(self.ctx.prec)
                else:
                    self.ctx.dps = self.dpsfun(self.ctx.dps)
                if self.normalize_output:
                    v = f(*args, **kwargs)
                    # Tuples are normalized element-wise.
                    if type(v) is tuple:
                        return tuple([+a for a in v])
                    return +v
                else:
                    return f(*args, **kwargs)
            finally:
                # Always restore the caller's precision.
                self.ctx.prec = orig
        return g
    def __enter__(self):
        self.origp = self.ctx.prec
        if self.precfun:
            self.ctx.prec = self.precfun(self.ctx.prec)
        else:
            self.ctx.dps = self.dpsfun(self.ctx.dps)
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.ctx.prec = self.origp
        # Never suppress exceptions.
        return False
+
+
if __name__ == '__main__':
    # Self-test: run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/ctx_mp_python.py b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_mp_python.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfbd72fb8300bf840069c38529b7b41418d26eeb
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/ctx_mp_python.py
@@ -0,0 +1,1149 @@
+#from ctx_base import StandardBaseContext
+
import numbers

from .libmp.backend import basestring, exec_

from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
    round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
    ComplexResult, to_pickable, from_pickable, normalize,
    from_int, from_float, from_npfloat, from_Decimal, from_str, to_int, to_float, to_str,
    from_rational, from_man_exp,
    fone, fzero, finf, fninf, fnan,
    mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
    mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
    mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
    mpf_hash, mpf_rand,
    mpf_sum,
    bitcount, to_fixed,
    mpc_to_str,
    mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
    mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
    mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
    mpc_mpf_div,
    mpf_pow,
    mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
    mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
    mpf_glaisher, mpf_twinprime, mpf_mertens,
    int_types)

from . import rational
from . import function_docs
+
+new = object.__new__
+
class mpnumeric(object):
    """Base class for mpf and mpc."""
    # No per-instance dict; concrete subclasses declare their own slots.
    __slots__ = []
    def __new__(cls, val):
        # Abstract: only the concrete subclasses may be instantiated.
        raise NotImplementedError
+
class _mpf(mpnumeric):
    """
    An mpf instance holds a real-valued floating-point number. mpf:s
    work analogously to Python floats, but support arbitrary-precision
    arithmetic.
    """
    # Only the raw value tuple (sign, man, exp, bc) is stored.
    __slots__ = ['_mpf_']

    def __new__(cls, val=fzero, **kwargs):
        """A new mpf can be created from a Python float, an int, an mpf,
        or a decimal string representing a number in floating-point
        format.  Precision/rounding may be overridden per call via the
        ``prec``/``dps``/``rounding`` keyword arguments."""
        prec, rounding = cls.context._prec_rounding
        if kwargs:
            prec = kwargs.get('prec', prec)
            # dps takes precedence over an explicit prec.
            if 'dps' in kwargs:
                prec = dps_to_prec(kwargs['dps'])
            rounding = kwargs.get('rounding', rounding)
        if type(val) is cls:
            sign, man, exp, bc = val._mpf_
            # Special values (man == 0, exp != 0: inf/-inf/nan) pass
            # through unchanged; finite values are re-rounded.
            if (not man) and exp:
                return val
            v = new(cls)
            v._mpf_ = normalize(sign, man, exp, bc, prec, rounding)
            return v
        elif type(val) is tuple:
            # A (man, exp) pair or a raw (sign, man, exp, bc) tuple.
            if len(val) == 2:
                v = new(cls)
                v._mpf_ = from_man_exp(val[0], val[1], prec, rounding)
                return v
            if len(val) == 4:
                if val not in (finf, fninf, fnan):
                    sign, man, exp, bc = val
                    val = normalize(sign, MPZ(man), exp, bc, prec, rounding)
                v = new(cls)
                v._mpf_ = val
                return v
            raise ValueError
        else:
            v = new(cls)
            v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding)
            return v

    @classmethod
    def mpf_convert_arg(cls, x, prec, rounding):
        # Convert an arbitrary constructor argument to a raw mpf tuple.
        if isinstance(x, int_types): return from_int(x)
        if isinstance(x, float): return from_float(x)
        if isinstance(x, basestring): return from_str(x, prec, rounding)
        if isinstance(x, cls.context.constant): return x.func(prec, rounding)
        if hasattr(x, '_mpf_'): return x._mpf_
        if hasattr(x, '_mpmath_'):
            t = cls.context.convert(x._mpmath_(prec, rounding))
            if hasattr(t, '_mpf_'):
                return t._mpf_
        if hasattr(x, '_mpi_'):
            a, b = x._mpi_
            if a == b:
                return a
            raise ValueError("can only create mpf from zero-width interval")
        raise TypeError("cannot create mpf from " + repr(x))

    @classmethod
    def mpf_convert_rhs(cls, x):
        # Convert the right-hand operand of a binary operation; may
        # return a raw tuple, an mpc, or NotImplemented.
        if isinstance(x, int_types): return from_int(x)
        if isinstance(x, float): return from_float(x)
        if isinstance(x, complex_types): return cls.context.mpc(x)
        if isinstance(x, rational.mpq):
            p, q = x._mpq_
            return from_rational(p, q, cls.context.prec)
        if hasattr(x, '_mpf_'): return x._mpf_
        if hasattr(x, '_mpmath_'):
            t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding))
            if hasattr(t, '_mpf_'):
                return t._mpf_
            return t
        return NotImplemented

    @classmethod
    def mpf_convert_lhs(cls, x):
        # Like mpf_convert_rhs, but raw tuples are wrapped into mpf.
        x = cls.mpf_convert_rhs(x)
        if type(x) is tuple:
            return cls.context.make_mpf(x)
        return x

    # Read-only views of the raw value components.
    man_exp = property(lambda self: self._mpf_[1:3])
    man = property(lambda self: self._mpf_[1])
    exp = property(lambda self: self._mpf_[2])
    bc = property(lambda self: self._mpf_[3])

    # Real numbers are their own real part; imaginary part is zero.
    real = property(lambda self: self)
    imag = property(lambda self: self.context.zero)

    conjugate = lambda self: self

    def __getstate__(self): return to_pickable(self._mpf_)
    def __setstate__(self, val): self._mpf_ = from_pickable(val)

    def __repr__(s):
        if s.context.pretty:
            return str(s)
        return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits)

    def __str__(s): return to_str(s._mpf_, s.context._str_digits)
    def __hash__(s): return mpf_hash(s._mpf_)
    def __int__(s): return int(to_int(s._mpf_))
    # NOTE(review): Python 2 remnant -- `long` is undefined on Python 3,
    # but __long__ is never invoked there either.
    def __long__(s): return long(to_int(s._mpf_))
    def __float__(s): return to_float(s._mpf_, rnd=s.context._prec_rounding[1])
    def __complex__(s): return complex(float(s))
    def __nonzero__(s): return s._mpf_ != fzero

    __bool__ = __nonzero__

    def __abs__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_abs(s._mpf_, prec, rounding)
        return v

    def __pos__(s):
        # Unary plus re-rounds to the current working precision.
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_pos(s._mpf_, prec, rounding)
        return v

    def __neg__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_neg(s._mpf_, prec, rounding)
        return v

    def _cmp(s, t, func):
        # Shared implementation for the ordering comparisons.
        if hasattr(t, '_mpf_'):
            t = t._mpf_
        else:
            t = s.mpf_convert_rhs(t)
            if t is NotImplemented:
                return t
        return func(s._mpf_, t)

    def __cmp__(s, t): return s._cmp(t, mpf_cmp)
    def __lt__(s, t): return s._cmp(t, mpf_lt)
    def __gt__(s, t): return s._cmp(t, mpf_gt)
    def __le__(s, t): return s._cmp(t, mpf_le)
    def __ge__(s, t): return s._cmp(t, mpf_ge)

    def __ne__(s, t):
        # Defined in terms of __eq__ (installed later via binary_op).
        v = s.__eq__(t)
        if v is NotImplemented:
            return v
        return not v

    def __rsub__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        # Fast path for int - mpf.
        if type(t) in int_types:
            v = new(cls)
            v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding)
            return v
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t - s

    def __rdiv__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        # Fast path for int / mpf.
        if isinstance(t, int_types):
            v = new(cls)
            v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding)
            return v
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t / s

    def __rpow__(s, t):
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t ** s

    def __rmod__(s, t):
        t = s.mpf_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t % s

    def sqrt(s):
        return s.context.sqrt(s)

    def ae(s, t, rel_eps=None, abs_eps=None):
        # Approximate equality; see context.almosteq for semantics.
        return s.context.almosteq(s, t, rel_eps, abs_eps)

    def to_fixed(self, prec):
        return to_fixed(self._mpf_, prec)

    def __round__(self, *args):
        return round(float(self), *args)
+
# Code template for _mpf's binary operator methods.  The %NAME% and
# %WITH_*% placeholders are filled in by binary_op() below and the result
# is exec'd; this avoids repeating the operand-type dispatch by hand for
# every operator.
mpf_binary_op = """
def %NAME%(self, other):
    mpf, new, (prec, rounding) = self._ctxdata
    sval = self._mpf_
    if hasattr(other, '_mpf_'):
        tval = other._mpf_
        %WITH_MPF%
    ttype = type(other)
    if ttype in int_types:
        %WITH_INT%
    elif ttype is float:
        tval = from_float(other)
        %WITH_MPF%
    elif hasattr(other, '_mpc_'):
        tval = other._mpc_
        mpc = type(other)
        %WITH_MPC%
    elif ttype is complex:
        tval = from_float(other.real), from_float(other.imag)
        mpc = self.context.mpc
        %WITH_MPC%
    if isinstance(other, mpnumeric):
        return NotImplemented
    try:
        other = mpf.context.convert(other, strings=False)
    except TypeError:
        return NotImplemented
    return self.%NAME%(other)
"""

# Result-wrapping suffixes appended to the %WITH_*% snippets.
return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj"
return_mpc = "; obj = new(mpc); obj._mpc_ = val; return obj"

# mpf ** mpf may yield a complex result; fall back to mpc_pow unless the
# context traps complex results.  Indented to slot into the template.
mpf_pow_same = """
        try:
            val = mpf_pow(sval, tval, prec, rounding) %s
        except ComplexResult:
            if mpf.context.trap_complex:
                raise
            mpc = mpf.context.mpc
            val = mpc_pow((sval, fzero), (tval, fzero), prec, rounding) %s
""" % (return_mpf, return_mpc)
+
def binary_op(name, with_mpf='', with_int='', with_mpc=''):
    """Build one binary operator method for _mpf by substituting the
    given code snippets into the mpf_binary_op template and exec'ing the
    result; returns the generated function."""
    substitutions = {
        "%WITH_INT%": with_int,
        "%WITH_MPC%": with_mpc,
        "%WITH_MPF%": with_mpf,
        "%NAME%": name,
    }
    code = mpf_binary_op
    for placeholder, snippet in substitutions.items():
        code = code.replace(placeholder, snippet)
    namespace = {}
    exec_(code, globals(), namespace)
    return namespace[name]
+
# Instantiate the shared template for each arithmetic special method.
# The three snippets handle, in order: mpf/mpf, mpf/int, and mpf/mpc
# operand combinations.
_mpf.__eq__ = binary_op('__eq__',
    'return mpf_eq(sval, tval)',
    'return mpf_eq(sval, from_int(other))',
    'return (tval[1] == fzero) and mpf_eq(tval[0], sval)')

_mpf.__add__ = binary_op('__add__',
    'val = mpf_add(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf,
    'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc)

_mpf.__sub__ = binary_op('__sub__',
    'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf,
    'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc)

_mpf.__mul__ = binary_op('__mul__',
    'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf,
    'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc)

_mpf.__div__ = binary_op('__div__',
    'val = mpf_div(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf,
    'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc)

_mpf.__mod__ = binary_op('__mod__',
    'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf,
    'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf,
    'raise NotImplementedError("complex modulo")')

_mpf.__pow__ = binary_op('__pow__',
    mpf_pow_same,
    'val = mpf_pow_int(sval, other, prec, rounding)' + return_mpf,
    'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc)

# Addition and multiplication are commutative; Python 3 division uses
# __truediv__/__rtruediv__ (the __div__ names are Python 2 remnants).
_mpf.__radd__ = _mpf.__add__
_mpf.__rmul__ = _mpf.__mul__
_mpf.__truediv__ = _mpf.__div__
_mpf.__rtruediv__ = _mpf.__rdiv__
+
+
class _constant(_mpf):
    """Represents a mathematical constant with dynamic precision.
    When printed or used in an arithmetic operation, a constant
    is converted to a regular mpf at the working precision. A
    regular mpf can also be obtained using the operation +x."""

    def __new__(cls, func, name, docname=''):
        # Bypass _mpf.__new__: no value is stored, only the generating
        # function and metadata.
        a = object.__new__(cls)
        a.name = name
        a.func = func
        a.__doc__ = getattr(function_docs, docname, '')
        return a

    def __call__(self, prec=None, dps=None, rounding=None):
        # Evaluate at an explicit precision; missing arguments fall back
        # to the context defaults, and dps overrides prec.
        prec2, rounding2 = self.context._prec_rounding
        if not prec: prec = prec2
        if not rounding: rounding = rounding2
        if dps: prec = dps_to_prec(dps)
        return self.context.make_mpf(self.func(prec, rounding))

    @property
    def _mpf_(self):
        # Recomputed on every access so the value tracks the context's
        # current working precision.
        prec, rounding = self.context._prec_rounding
        return self.func(prec, rounding)

    def __repr__(self):
        return "<%s: %s~>" % (self.name, self.context.nstr(self(dps=15)))
+
+
class _mpc(mpnumeric):
    """
    An mpc represents a complex number using a pair of mpf:s (one
    for the real part and another for the imaginary part.) The mpc
    class behaves fairly similarly to Python's complex type.
    """

    # Only the raw (real_mpf_tuple, imag_mpf_tuple) pair is stored.
    __slots__ = ['_mpc_']

    def __new__(cls, real=0, imag=0):
        s = object.__new__(cls)
        if isinstance(real, complex_types):
            real, imag = real.real, real.imag
        elif hasattr(real, '_mpc_'):
            # Already an mpc-like value: share its raw pair directly.
            s._mpc_ = real._mpc_
            return s
        real = cls.context.mpf(real)
        imag = cls.context.mpf(imag)
        s._mpc_ = (real._mpf_, imag._mpf_)
        return s

    real = property(lambda self: self.context.make_mpf(self._mpc_[0]))
    imag = property(lambda self: self.context.make_mpf(self._mpc_[1]))

    def __getstate__(self):
        return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1])

    def __setstate__(self, val):
        self._mpc_ = from_pickable(val[0]), from_pickable(val[1])

    def __repr__(s):
        if s.context.pretty:
            return str(s)
        # Strip the "mpf('...')" wrapper, keeping just the quoted digits.
        r = repr(s.real)[4:-1]
        i = repr(s.imag)[4:-1]
        return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i)

    def __str__(s):
        return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits)

    def __complex__(s):
        return mpc_to_complex(s._mpc_, rnd=s.context._prec_rounding[1])

    def __pos__(s):
        # Unary plus re-rounds to the current working precision.
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpc_ = mpc_pos(s._mpc_, prec, rounding)
        return v

    def __abs__(s):
        # The modulus is real-valued, so an mpf is returned.
        prec, rounding = s.context._prec_rounding
        v = new(s.context.mpf)
        v._mpf_ = mpc_abs(s._mpc_, prec, rounding)
        return v

    def __neg__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpc_ = mpc_neg(s._mpc_, prec, rounding)
        return v

    def conjugate(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding)
        return v

    def __nonzero__(s):
        return mpc_is_nonzero(s._mpc_)

    __bool__ = __nonzero__

    def __hash__(s):
        return mpc_hash(s._mpc_)

    @classmethod
    def mpc_convert_lhs(cls, x):
        # Coerce an operand via the context; NotImplemented on failure so
        # Python can try the reflected operation.
        try:
            y = cls.context.convert(x)
            return y
        except TypeError:
            return NotImplemented

    def __eq__(s, t):
        if not hasattr(t, '_mpc_'):
            if isinstance(t, str):
                return False
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
        return s.real == t.real and s.imag == t.imag

    def __ne__(s, t):
        b = s.__eq__(t)
        if b is NotImplemented:
            return b
        return not b

    def _compare(*args):
        raise TypeError("no ordering relation is defined for complex numbers")

    # All four ordering comparisons must raise.  (Bug fix: previously
    # __gt__ was assigned twice and __lt__ was missing, so `mpc < mpc`
    # fell back to Python's generic unorderable-types TypeError instead
    # of this explanatory message.)
    __lt__ = _compare
    __le__ = _compare
    __gt__ = _compare
    __ge__ = _compare

    def __add__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
        v = new(cls)
        v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __sub__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
        v = new(cls)
        v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __mul__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            # Fast path for integer scaling.
            if isinstance(t, int_types):
                v = new(cls)
                v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
                return v
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
        if hasattr(t, '_mpf_'):
            v = new(cls)
            v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding)
            return v
        # t carries _mpc_ here (either originally or after conversion);
        # the redundant second mpc_convert_lhs call was removed.
        v = new(cls)
        v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __div__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if not hasattr(t, '_mpc_'):
            t = s.mpc_convert_lhs(t)
            if t is NotImplemented:
                return t
            if hasattr(t, '_mpf_'):
                v = new(cls)
                v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding)
                return v
        v = new(cls)
        v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding)
        return v

    def __pow__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        # Integer exponents use the specialized (exact-exponent) routine.
        if isinstance(t, int_types):
            v = new(cls)
            v._mpc_ = mpc_pow_int(s._mpc_, t, prec, rounding)
            return v
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        v = new(cls)
        if hasattr(t, '_mpf_'):
            v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding)
        else:
            v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding)
        return v

    # Addition is commutative.
    __radd__ = __add__

    def __rsub__(s, t):
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t - s

    def __rmul__(s, t):
        cls, new, (prec, rounding) = s._ctxdata
        if isinstance(t, int_types):
            v = new(cls)
            v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
            return v
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t * s

    def __rdiv__(s, t):
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t / s

    def __rpow__(s, t):
        t = s.mpc_convert_lhs(t)
        if t is NotImplemented:
            return t
        return t ** s

    # Python 3 division protocol (the __div__ names are Py2 remnants).
    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def ae(s, t, rel_eps=None, abs_eps=None):
        # Approximate equality; see context.almosteq for semantics.
        return s.context.almosteq(s, t, rel_eps, abs_eps)
+
+
+complex_types = (complex, _mpc)
+
+
+class PythonMPContext(object):
+
    def __init__(ctx):
        # Shared mutable [prec, rounding] list: mutating it in place is
        # immediately visible to all mpf/mpc instances via _ctxdata.
        ctx._prec_rounding = [53, round_nearest]
        # Per-context subclasses so each context carries independent state.
        ctx.mpf = type('mpf', (_mpf,), {})
        ctx.mpc = type('mpc', (_mpc,), {})
        ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
        ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding]
        ctx.mpf.context = ctx
        ctx.mpc.context = ctx
        ctx.constant = type('constant', (_constant,), {})
        # Constants materialize as mpf values, hence ctx.mpf here.
        ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
        ctx.constant.context = ctx
+
    def make_mpf(ctx, v):
        # Wrap a raw mpf tuple without rounding or validation.
        a = new(ctx.mpf)
        a._mpf_ = v
        return a
+
    def make_mpc(ctx, v):
        # Wrap a raw (real, imag) mpf-tuple pair without validation.
        a = new(ctx.mpc)
        a._mpc_ = v
        return a
+
    def default(ctx):
        # Reset to IEEE-double-like defaults: 53 bits / 15 digits.
        ctx._prec = ctx._prec_rounding[0] = 53
        ctx._dps = 15
        ctx.trap_complex = False
+
    def _set_prec(ctx, n):
        # Keep the shared _prec_rounding list in sync; dps follows prec.
        ctx._prec = ctx._prec_rounding[0] = max(1, int(n))
        ctx._dps = prec_to_dps(n)
+
    def _set_dps(ctx, n):
        # Keep the shared _prec_rounding list in sync; prec follows dps.
        ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n)
        ctx._dps = max(1, int(n))
+
    # Working precision in bits (prec) and decimal digits (dps); setting
    # either one updates the other.
    prec = property(lambda ctx: ctx._prec, _set_prec)
    dps = property(lambda ctx: ctx._dps, _set_dps)
+
+ def convert(ctx, x, strings=True):
+ """
+ Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``,
+ ``mpc``, ``int``, ``float``, ``complex``, the conversion
+ will be performed losslessly.
+
+ If *x* is a string, the result will be rounded to the present
+ working precision. Strings representing fractions or complex
+ numbers are permitted.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> mpmathify(3.5)
+ mpf('3.5')
+ >>> mpmathify('2.1')
+ mpf('2.1000000000000001')
+ >>> mpmathify('3/4')
+ mpf('0.75')
+ >>> mpmathify('2+3j')
+ mpc(real='2.0', imag='3.0')
+
+ """
+ if type(x) in ctx.types: return x
+ if isinstance(x, int_types): return ctx.make_mpf(from_int(x))
+ if isinstance(x, float): return ctx.make_mpf(from_float(x))
+ if isinstance(x, complex):
+ return ctx.make_mpc((from_float(x.real), from_float(x.imag)))
+ if type(x).__module__ == 'numpy': return ctx.npconvert(x)
+ if isinstance(x, numbers.Rational): # e.g. Fraction
+ try: x = rational.mpq(int(x.numerator), int(x.denominator))
+ except: pass
+ prec, rounding = ctx._prec_rounding
+ if isinstance(x, rational.mpq):
+ p, q = x._mpq_
+ return ctx.make_mpf(from_rational(p, q, prec))
+ if strings and isinstance(x, basestring):
+ try:
+ _mpf_ = from_str(x, prec, rounding)
+ return ctx.make_mpf(_mpf_)
+ except ValueError:
+ pass
+ if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_)
+ if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_)
+ if hasattr(x, '_mpmath_'):
+ return ctx.convert(x._mpmath_(prec, rounding))
+ if type(x).__module__ == 'decimal':
+ try: return ctx.make_mpf(from_Decimal(x, prec, rounding))
+ except: pass
+ return ctx._convert_fallback(x, strings)
+
    def npconvert(ctx, x):
        """
        Converts *x* to an ``mpf`` or ``mpc``. *x* should be a numpy
        scalar.
        """
        # Imported locally so numpy stays an optional dependency.
        import numpy as np
        if isinstance(x, np.integer): return ctx.make_mpf(from_int(int(x)))
        if isinstance(x, np.floating): return ctx.make_mpf(from_npfloat(x))
        if isinstance(x, np.complexfloating):
            return ctx.make_mpc((from_npfloat(x.real), from_npfloat(x.imag)))
        raise TypeError("cannot create mpf from " + repr(x))
+
    def isnan(ctx, x):
        """
        Return *True* if *x* is a NaN (not-a-number), or for a complex
        number, whether either the real or complex part is NaN;
        otherwise return *False*::

            >>> from mpmath import *
            >>> isnan(3.14)
            False
            >>> isnan(nan)
            True
            >>> isnan(mpc(3.14,2.72))
            False
            >>> isnan(mpc(3.14,nan))
            True

        """
        if hasattr(x, "_mpf_"):
            return x._mpf_ == fnan
        if hasattr(x, "_mpc_"):
            return fnan in x._mpc_
        # Integers and rationals are never NaN.
        if isinstance(x, int_types) or isinstance(x, rational.mpq):
            return False
        # Coerce anything else once, then re-dispatch.
        x = ctx.convert(x)
        if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
            return ctx.isnan(x)
        raise TypeError("isnan() needs a number as input")
+
    def isinf(ctx, x):
        """
        Return *True* if the absolute value of *x* is infinite;
        otherwise return *False*::

            >>> from mpmath import *
            >>> isinf(inf)
            True
            >>> isinf(-inf)
            True
            >>> isinf(3)
            False
            >>> isinf(3+4j)
            False
            >>> isinf(mpc(3,inf))
            True
            >>> isinf(mpc(inf,3))
            True

        """
        if hasattr(x, "_mpf_"):
            return x._mpf_ in (finf, fninf)
        if hasattr(x, "_mpc_"):
            # Infinite if either component is infinite.
            re, im = x._mpc_
            return re in (finf, fninf) or im in (finf, fninf)
        # Integers and rationals are always finite.
        if isinstance(x, int_types) or isinstance(x, rational.mpq):
            return False
        # Coerce anything else once, then re-dispatch.
        x = ctx.convert(x)
        if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
            return ctx.isinf(x)
        raise TypeError("isinf() needs a number as input")
+
    def isnormal(ctx, x):
        """
        Determine whether *x* is "normal" in the sense of floating-point
        representation; that is, return *False* if *x* is zero, an
        infinity or NaN; otherwise return *True*. By extension, a
        complex number *x* is considered "normal" if its magnitude is
        normal::

            >>> from mpmath import *
            >>> isnormal(3)
            True
            >>> isnormal(0)
            False
            >>> isnormal(inf); isnormal(-inf); isnormal(nan)
            False
            False
            False
            >>> isnormal(0+0j)
            False
            >>> isnormal(0+3j)
            True
            >>> isnormal(mpc(2,nan))
            False
        """
        # A raw mpf tuple is normal iff its mantissa is nonzero
        # (zero, inf and nan all have man == 0).
        if hasattr(x, "_mpf_"):
            return bool(x._mpf_[1])
        if hasattr(x, "_mpc_"):
            re, im = x._mpc_
            re_normal = bool(re[1])
            im_normal = bool(im[1])
            # A zero component defers to the other component.
            if re == fzero: return im_normal
            if im == fzero: return re_normal
            return re_normal and im_normal
        if isinstance(x, int_types) or isinstance(x, rational.mpq):
            return bool(x)
        # Coerce anything else once, then re-dispatch.
        x = ctx.convert(x)
        if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
            return ctx.isnormal(x)
        raise TypeError("isnormal() needs a number as input")
+
    def isint(ctx, x, gaussian=False):
        """
        Return *True* if *x* is integer-valued; otherwise return
        *False*::

            >>> from mpmath import *
            >>> isint(3)
            True
            >>> isint(mpf(3))
            True
            >>> isint(3.2)
            False
            >>> isint(inf)
            False

        Optionally, Gaussian integers can be checked for::

            >>> isint(3+0j)
            True
            >>> isint(3+2j)
            False
            >>> isint(3+2j, gaussian=True)
            True

        """
        if isinstance(x, int_types):
            return True
        if hasattr(x, "_mpf_"):
            # Integer iff exactly zero, or the exponent is non-negative
            # (man * 2**exp with exp >= 0 has no fractional part).
            sign, man, exp, bc = xval = x._mpf_
            return bool((man and exp >= 0) or xval == fzero)
        if hasattr(x, "_mpc_"):
            re, im = x._mpc_
            rsign, rman, rexp, rbc = re
            isign, iman, iexp, ibc = im
            re_isint = (rman and rexp >= 0) or re == fzero
            if gaussian:
                im_isint = (iman and iexp >= 0) or im == fzero
                return re_isint and im_isint
            # Without gaussian=True the imaginary part must vanish.
            return re_isint and im == fzero
        if isinstance(x, rational.mpq):
            p, q = x._mpq_
            return p % q == 0
        # Coerce anything else once, then re-dispatch.
        x = ctx.convert(x)
        if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
            return ctx.isint(x, gaussian)
        raise TypeError("isint() needs a number as input")
+
    def fsum(ctx, terms, absolute=False, squared=False):
        """
        Calculates a sum containing a finite number of terms (for infinite
        series, see :func:`~mpmath.nsum`). The terms will be converted to
        mpmath numbers. For len(terms) > 2, this function is generally
        faster and produces more accurate results than the builtin
        Python function :func:`sum`.

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> fsum([1, 2, 0.5, 7])
            mpf('10.5')

        With squared=True each term is squared, and with absolute=True
        the absolute value of each term is used.
        """
        prec, rnd = ctx._prec_rounding
        # Raw mpf tuples for the real and imaginary partial terms; the
        # actual summation is a single accurate mpf_sum pass per list.
        real = []
        imag = []
        for term in terms:
            reval = imval = 0
            if hasattr(term, "_mpf_"):
                reval = term._mpf_
            elif hasattr(term, "_mpc_"):
                reval, imval = term._mpc_
            else:
                # Coerce non-mpmath terms once.
                term = ctx.convert(term)
                if hasattr(term, "_mpf_"):
                    reval = term._mpf_
                elif hasattr(term, "_mpc_"):
                    reval, imval = term._mpc_
                else:
                    raise NotImplementedError
            if imval:
                if squared:
                    if absolute:
                        # |z|^2 = re^2 + im^2 (purely real contribution).
                        real.append(mpf_mul(reval,reval))
                        real.append(mpf_mul(imval,imval))
                    else:
                        # z^2, computed with guard bits.
                        reval, imval = mpc_pow_int((reval,imval),2,prec+10)
                        real.append(reval)
                        imag.append(imval)
                elif absolute:
                    real.append(mpc_abs((reval,imval), prec))
                else:
                    real.append(reval)
                    imag.append(imval)
            else:
                if squared:
                    reval = mpf_mul(reval, reval)
                elif absolute:
                    reval = mpf_abs(reval)
                real.append(reval)
        s = mpf_sum(real, prec, rnd, absolute)
        if imag:
            s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
        else:
            s = ctx.make_mpf(s)
        return s
+
def fdot(ctx, A, B=None, conjugate=False):
    r"""
    Computes the dot product of the iterables `A` and `B`,

    .. math ::

        \sum_{k=0} A_k B_k.

    Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs.
    In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent.
    The elements are automatically converted to mpmath numbers.

    With ``conjugate=True``, the elements in the second vector
    will be conjugated:

    .. math ::

        \sum_{k=0} A_k \overline{B_k}

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> A = [2, 1.5, 3]
        >>> B = [1, -1, 2]
        >>> fdot(A, B)
        mpf('6.5')
        >>> list(zip(A, B))
        [(2, 1), (1.5, -1), (3, 2)]
        >>> fdot(_)
        mpf('6.5')
        >>> A = [2, 1.5, 3j]
        >>> B = [1+j, 3, -1-j]
        >>> fdot(A, B)
        mpc(real='9.5', imag='-1.0')
        >>> fdot(A, B, conjugate=True)
        mpc(real='3.5', imag='-5.0')

    """
    if B is not None:
        A = zip(A, B)
    prec, rnd = ctx._prec_rounding
    # Raw mpf products are collected here; only the final mpf_sum calls
    # round, which keeps the dot product accurate.
    real = []
    imag = []
    hasattr_ = hasattr          # local alias, avoids a global lookup per item
    types = (ctx.mpf, ctx.mpc)
    for a, b in A:
        if type(a) not in types: a = ctx.convert(a)
        if type(b) not in types: b = ctx.convert(b)
        a_real = hasattr_(a, "_mpf_")
        b_real = hasattr_(b, "_mpf_")
        if a_real and b_real:
            # Real * real: one product, no imaginary contribution.
            real.append(mpf_mul(a._mpf_, b._mpf_))
            continue
        a_complex = hasattr_(a, "_mpc_")
        b_complex = hasattr_(b, "_mpc_")
        if a_real and b_complex:
            aval = a._mpf_
            bre, bim = b._mpc_
            if conjugate:
                bim = mpf_neg(bim)
            real.append(mpf_mul(aval, bre))
            imag.append(mpf_mul(aval, bim))
        elif b_real and a_complex:
            # b is real, so conjugation has no effect in this branch.
            are, aim = a._mpc_
            bval = b._mpf_
            real.append(mpf_mul(are, bval))
            imag.append(mpf_mul(aim, bval))
        elif a_complex and b_complex:
            #re, im = mpc_mul(a._mpc_, b._mpc_, prec+20)
            # Expand (are + i*aim)(bre + i*bim) into four componentwise
            # products instead of calling mpc_mul, deferring all rounding.
            are, aim = a._mpc_
            bre, bim = b._mpc_
            if conjugate:
                bim = mpf_neg(bim)
            real.append(mpf_mul(are, bre))
            real.append(mpf_neg(mpf_mul(aim, bim)))
            imag.append(mpf_mul(are, bim))
            imag.append(mpf_mul(aim, bre))
        else:
            raise NotImplementedError
    s = mpf_sum(real, prec, rnd)
    if imag:
        s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
    else:
        s = ctx.make_mpf(s)
    return s
+
def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc=""):
    """
    Given a low-level mpf_ function, and optionally similar functions
    for mpc_ and mpi_, defines the function as a context method.

    It is assumed that the return type is the same as that of
    the input; the exception is that propagation from mpf to mpc is possible
    by raising ComplexResult.

    Note: mpi_f is accepted for interface compatibility but is not used
    in this wrapper.
    """
    def f(x, **kwargs):
        if type(x) not in ctx.types:
            x = ctx.convert(x)
        prec, rounding = ctx._prec_rounding
        # Per-call overrides: prec, dps (converted to prec), rounding.
        if kwargs:
            prec = kwargs.get('prec', prec)
            if 'dps' in kwargs:
                prec = dps_to_prec(kwargs['dps'])
            rounding = kwargs.get('rounding', rounding)
        if hasattr(x, '_mpf_'):
            try:
                return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding))
            except ComplexResult:
                # Handle propagation to complex
                if ctx.trap_complex:
                    raise
                return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding))
        elif hasattr(x, '_mpc_'):
            return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding))
        # 'name' is bound below, before f can ever be called.
        raise NotImplementedError("%s of a %s" % (name, type(x)))
    # Derive the public name by stripping the "mpf_" prefix.
    name = mpf_f.__name__[4:]
    f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc)
    return f
+
# Called by SpecialFunctions.__init__()
@classmethod
def _wrap_specfun(cls, name, f, wrap):
    """Install special function f on the context class under `name`.

    When `wrap` is true, the installed method converts its positional
    arguments, evaluates f with 10 extra bits of working precision, and
    rounds the result back (via unary +) at the caller's precision.
    """
    if not wrap:
        wrapped = f
    else:
        def wrapped(ctx, *args, **kwargs):
            args = [ctx.convert(arg) for arg in args]
            saved_prec = ctx.prec
            try:
                # Guard bits: evaluate a little above the target precision.
                ctx.prec = saved_prec + 10
                result = f(ctx, *args, **kwargs)
            finally:
                # Always restore the caller's precision.
                ctx.prec = saved_prec
            # Unary + rounds the result to the (restored) precision.
            return +result
    wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
    setattr(cls, name, wrapped)
+
def _convert_param(ctx, x):
    """
    Convert x into a canonical (value, type-code) pair, where the code is
    one of 'Z' (Python int), 'Q' (rational, ctx.mpq), 'R' (real mpf),
    'C' (complex mpc) or 'U' (unrecognized; x passed through).

    Values are demoted to the most exact representation possible: a
    complex number with zero imaginary part is handled as real, and a
    real value with a small power-of-two denominator becomes 'Q' or 'Z'.
    """
    if hasattr(x, "_mpc_"):
        v, im = x._mpc_
        # Genuinely complex; otherwise fall through and handle the real
        # component v like an mpf value below.
        if im != fzero:
            return x, 'C'
    elif hasattr(x, "_mpf_"):
        v = x._mpf_
    else:
        if type(x) in int_types:
            return int(x), 'Z'
        p = None
        # Rational inputs: a (p, q) tuple, an mpq, or a "p/q" string.
        # NOTE: basestring here suggests Python 2 compatibility shims are
        # defined elsewhere in this module.
        if isinstance(x, tuple):
            p, q = x
        elif hasattr(x, '_mpq_'):
            p, q = x._mpq_
        elif isinstance(x, basestring) and '/' in x:
            p, q = x.split('/')
            p = int(p)
            q = int(q)
        if p is not None:
            if not p % q:
                # Exact division: demote to an integer.
                return p // q, 'Z'
            return ctx.mpq(p,q), 'Q'
        # Anything else: convert, then retry the mpf/mpc extraction.
        x = ctx.convert(x)
        if hasattr(x, "_mpc_"):
            v, im = x._mpc_
            if im != fzero:
                return x, 'C'
        elif hasattr(x, "_mpf_"):
            v = x._mpf_
        else:
            return x, 'U'
    # v is a raw mpf tuple: (sign, mantissa, exponent, bitcount).
    sign, man, exp, bc = v
    if man:
        if exp >= -4:
            if sign:
                man = -man
            if exp >= 0:
                # Nonnegative exponent: value is an exact integer.
                return int(man) << exp, 'Z'
            if exp >= -4:
                # Small power-of-two denominator: exact rational.
                p, q = int(man), (1<<(-exp))
                return ctx.mpq(p,q), 'Q'
        x = ctx.make_mpf(v)
        return x, 'R'
    elif not exp:
        # Zero mantissa and exponent: the value is zero.
        return 0, 'Z'
    else:
        # man == 0 with nonzero exp: non-regular value (presumably an
        # inf/nan encoding — confirm against libmp) — pass through.
        return x, 'U'
+
+ def _mpf_mag(ctx, x):
+ sign, man, exp, bc = x
+ if man:
+ return exp+bc
+ if x == fzero:
+ return ctx.ninf
+ if x == finf or x == fninf:
+ return ctx.inf
+ return ctx.nan
+
def mag(ctx, x):
    """
    Quick logarithmic magnitude estimate of a number. Returns an
    integer or infinity `m` such that `|x| <= 2^m`. It is not
    guaranteed that `m` is an optimal bound, but it will never
    be too large by more than 2 (and probably not more than 1).

    **Examples**

        >>> from mpmath import *
        >>> mp.pretty = True
        >>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2)))
        (4, 4, 4, 4)
        >>> mag(10j), mag(10+10j)
        (4, 5)
        >>> mag(0.01), int(ceil(log(0.01,2)))
        (-6, -6)
        >>> mag(0), mag(inf), mag(-inf), mag(nan)
        (-inf, +inf, +inf, nan)

    """
    if hasattr(x, "_mpf_"):
        return ctx._mpf_mag(x._mpf_)
    elif hasattr(x, "_mpc_"):
        r, i = x._mpc_
        # Purely imaginary or purely real: bound via the nonzero component.
        if r == fzero:
            return ctx._mpf_mag(i)
        if i == fzero:
            return ctx._mpf_mag(r)
        # |r + i*j| <= sqrt(2)*max(|r|, |i|) <= 2^(1 + max(mag(r), mag(i)))
        return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i))
    elif isinstance(x, int_types):
        # For a nonzero integer, the bit length is a tight power-of-two bound.
        if x:
            return bitcount(abs(x))
        return ctx.ninf
    elif isinstance(x, rational.mpq):
        p, q = x._mpq_
        if p:
            # |p/q| <= 2^(1 + bits(p) - bits(q)).
            return 1 + bitcount(abs(p)) - bitcount(q)
        return ctx.ninf
    else:
        # Unknown type: convert to an mpmath number and retry once.
        x = ctx.convert(x)
        if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
            return ctx.mag(x)
        else:
            raise TypeError("requires an mpf/mpc")
+
+
# Register with "numbers" ABC
# We do not subclass, hence we do not use the @abstractmethod checks. While
# this is less invasive it may turn out that we do not actually support
# parts of the expected interfaces. See
# http://docs.python.org/2/library/numbers.html for list of abstract
# methods.
# Registration makes isinstance(x, numbers.Real/Complex) succeed for the
# raw mpf/mpc types. The ImportError guard keeps the module importable on
# interpreters where the numbers module is unavailable (best-effort only).
try:
    import numbers
    numbers.Complex.register(_mpc)
    numbers.Real.register(_mpf)
except ImportError:
    pass
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/function_docs.py b/pythonProject/.venv/Lib/site-packages/mpmath/function_docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..73c071dc30a25c0ea1366e06a407a20206bd18a2
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/function_docs.py
@@ -0,0 +1,10201 @@
+"""
+Extended docstrings for functions.py
+"""
+
+
+pi = r"""
+`\pi`, roughly equal to 3.141592654, represents the area of the unit
+circle, the half-period of trigonometric functions, and many other
+things in mathematics.
+
+Mpmath can evaluate `\pi` to arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +pi
+ 3.1415926535897932384626433832795028841971693993751
+
+This shows digits 99991-100000 of `\pi` (the last digit is actually
+a 4 when the decimal expansion is truncated, but here the nearest
+rounding is used)::
+
+ >>> mp.dps = 100000
+ >>> str(pi)[-10:]
+ '5549362465'
+
+**Possible issues**
+
+:data:`pi` always rounds to the nearest floating-point
+number when used. This means that exact mathematical identities
+involving `\pi` will generally not be preserved in floating-point
+arithmetic. In particular, multiples of :data:`pi` (except for
+the trivial case ``0*pi``) are *not* the exact roots of
+:func:`~mpmath.sin`, but differ roughly by the current epsilon::
+
+ >>> mp.dps = 15
+ >>> sin(pi)
+ 1.22464679914735e-16
+
+One solution is to use the :func:`~mpmath.sinpi` function instead::
+
+ >>> sinpi(1)
+ 0.0
+
+See the documentation of trigonometric functions for additional
+details.
+
+**References**
+
+* [BorweinBorwein]_
+
+"""
+
+degree = r"""
+Represents one degree of angle, `1^{\circ} = \pi/180`, or
+about 0.01745329. This constant may be evaluated to arbitrary
+precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +degree
+ 0.017453292519943295769236907684886127134428718885417
+
+The :data:`degree` object is convenient for conversion
+to radians::
+
+ >>> sin(30 * degree)
+ 0.5
+ >>> asin(0.5) / degree
+ 30.0
+"""
+
+e = r"""
+The transcendental number `e` = 2.718281828... is the base of the
+natural logarithm (:func:`~mpmath.ln`) and of the exponential function
+(:func:`~mpmath.exp`).
+
+Mpmath can evaluate `e` to arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +e
+ 2.7182818284590452353602874713526624977572470937
+
+This shows digits 99991-100000 of `e` (the last digit is actually
+a 5 when the decimal expansion is truncated, but here the nearest
+rounding is used)::
+
+ >>> mp.dps = 100000
+ >>> str(e)[-10:]
+ '2100427166'
+
+**Possible issues**
+
+:data:`e` always rounds to the nearest floating-point number
+when used, and mathematical identities involving `e` may not
+hold in floating-point arithmetic. For example, ``ln(e)``
+might not evaluate exactly to 1.
+
+In particular, don't use ``e**x`` to compute the exponential
+function. Use ``exp(x)`` instead; this is both faster and more
+accurate.
+"""
+
+phi = r"""
+Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
+approximately equal to 1.6180339887. To high precision,
+its value is::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +phi
+ 1.6180339887498948482045868343656381177203091798058
+
+Formulas for the golden ratio include the following::
+
+ >>> (1+sqrt(5))/2
+ 1.6180339887498948482045868343656381177203091798058
+ >>> findroot(lambda x: x**2-x-1, 1)
+ 1.6180339887498948482045868343656381177203091798058
+ >>> limit(lambda n: fib(n+1)/fib(n), inf)
+ 1.6180339887498948482045868343656381177203091798058
+"""
+
+euler = r"""
+Euler's constant or the Euler-Mascheroni constant `\gamma`
+= 0.57721566... is a number of central importance to
+number theory and special functions. It is defined as the limit
+
+.. math ::
+
+ \gamma = \lim_{n\to\infty} H_n - \log n
+
+where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
+number (see :func:`~mpmath.harmonic`).
+
+Evaluation of `\gamma` is supported at arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +euler
+ 0.57721566490153286060651209008240243104215933593992
+
+We can also compute `\gamma` directly from the definition,
+although this is less efficient::
+
+ >>> limit(lambda n: harmonic(n)-log(n), inf)
+ 0.57721566490153286060651209008240243104215933593992
+
+This shows digits 9991-10000 of `\gamma` (the last digit is actually
+a 5 when the decimal expansion is truncated, but here the nearest
+rounding is used)::
+
+ >>> mp.dps = 10000
+ >>> str(euler)[-10:]
+ '4679858166'
+
+Integrals, series, and representations for `\gamma` in terms of
+special functions include the following (there are many others)::
+
+ >>> mp.dps = 25
+ >>> -quad(lambda x: exp(-x)*log(x), [0,inf])
+ 0.5772156649015328606065121
+ >>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
+ 0.5772156649015328606065121
+ >>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
+ 0.5772156649015328606065121
+ >>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
+ 0.5772156649015328606065121
+ >>> -diff(gamma, 1)
+ 0.5772156649015328606065121
+ >>> limit(lambda x: 1/x-gamma(x), 0)
+ 0.5772156649015328606065121
+ >>> limit(lambda x: zeta(x)-1/(x-1), 1)
+ 0.5772156649015328606065121
+ >>> (log(2*pi*nprod(lambda n:
+ ... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
+ 0.5772156649015328606065121
+
+For generalizations of the identities `\gamma = -\Gamma'(1)`
+and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
+:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
+
+**References**
+
+* [BorweinBailey]_
+
+"""
+
+catalan = r"""
+Catalan's constant `K` = 0.91596559... is given by the infinite
+series
+
+.. math ::
+
+ K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
+
+Mpmath can evaluate it to arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +catalan
+ 0.91596559417721901505460351493238411077414937428167
+
+One can also compute `K` directly from the definition, although
+this is significantly less efficient::
+
+ >>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
+ 0.91596559417721901505460351493238411077414937428167
+
+This shows digits 9991-10000 of `K` (the last digit is actually
+a 3 when the decimal expansion is truncated, but here the nearest
+rounding is used)::
+
+ >>> mp.dps = 10000
+ >>> str(catalan)[-10:]
+ '9537871504'
+
+Catalan's constant has numerous integral representations::
+
+ >>> mp.dps = 50
+ >>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
+ 0.91596559417721901505460351493238411077414937428167
+ >>> quad(lambda x: atan(x)/x, [0, 1])
+ 0.91596559417721901505460351493238411077414937428167
+ >>> quad(lambda x: ellipk(x**2)/2, [0, 1])
+ 0.91596559417721901505460351493238411077414937428167
+ >>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
+ 0.91596559417721901505460351493238411077414937428167
+
+As well as series representations::
+
+ >>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
+ ... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
+ 0.91596559417721901505460351493238411077414937428167
+ >>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
+ 0.91596559417721901505460351493238411077414937428167
+"""
+
+khinchin = r"""
+Khinchin's constant `K` = 2.68542... is a number that
+appears in the theory of continued fractions. Mpmath can evaluate
+it to arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +khinchin
+ 2.6854520010653064453097148354817956938203822939945
+
+An integral representation is::
+
+ >>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
+ >>> 2*exp(1/log(2)*I)
+ 2.6854520010653064453097148354817956938203822939945
+
+The computation of ``khinchin`` is based on an efficient
+implementation of the following series::
+
+ >>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
+ ... for k in range(1,2*int(n)))
+ >>> exp(nsum(f, [1,inf])/log(2))
+ 2.6854520010653064453097148354817956938203822939945
+"""
+
+glaisher = r"""
+Glaisher's constant `A`, also known as the Glaisher-Kinkelin
+constant, is a number approximately equal to 1.282427129 that
+sometimes appears in formulas related to gamma and zeta functions.
+It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
+
+The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
+`\zeta'(s)` denotes the derivative of the Riemann zeta function
+(see :func:`~mpmath.zeta`).
+
+Mpmath can evaluate Glaisher's constant to arbitrary precision:
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +glaisher
+ 1.282427129100622636875342568869791727767688927325
+
+We can verify that the value computed by :data:`glaisher` is
+correct using mpmath's facilities for numerical
+differentiation and arbitrary evaluation of the zeta function:
+
+ >>> exp(mpf(1)/12 - diff(zeta, -1))
+ 1.282427129100622636875342568869791727767688927325
+
+Here is an example of an integral that can be evaluated in
+terms of Glaisher's constant:
+
+ >>> mp.dps = 15
+ >>> quad(lambda x: log(gamma(x)), [1, 1.5])
+ -0.0428537406502909
+ >>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
+ -0.042853740650291
+
+Mpmath computes Glaisher's constant by applying Euler-Maclaurin
+summation to a slowly convergent series. The implementation is
+reasonably efficient up to about 10,000 digits. See the source
+code for additional details.
+
+References:
+http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
+"""
+
+apery = r"""
+Represents Apery's constant, which is the irrational number
+approximately equal to 1.2020569 given by
+
+.. math ::
+
+ \zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
+
+The calculation is based on an efficient hypergeometric
+series. To 50 decimal places, the value is given by::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +apery
+ 1.2020569031595942853997381615114499907649862923405
+
+Other ways to evaluate Apery's constant using mpmath
+include::
+
+ >>> zeta(3)
+ 1.2020569031595942853997381615114499907649862923405
+ >>> -psi(2,1)/2
+ 1.2020569031595942853997381615114499907649862923405
+ >>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
+ 1.2020569031595942853997381615114499907649862923405
+ >>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
+ >>> 7*pi**3/180 - nsum(f, [1,inf])
+ 1.2020569031595942853997381615114499907649862923405
+
+This shows digits 9991-10000 of Apery's constant::
+
+ >>> mp.dps = 10000
+ >>> str(apery)[-10:]
+ '3189504235'
+"""
+
+mertens = r"""
+Represents the Mertens or Meissel-Mertens constant, which is the
+prime number analog of Euler's constant:
+
+.. math ::
+
+ B_1 = \lim_{N\to\infty}
+ \left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right)
+
+Here `p_k` denotes the `k`-th prime number. Other names for this
+constant include the Hadamard-de la Vallee-Poussin constant or
+the prime reciprocal constant.
+
+The following gives the Mertens constant to 50 digits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +mertens
+ 0.2614972128476427837554268386086958590515666482612
+
+References:
+http://mathworld.wolfram.com/MertensConstant.html
+"""
+
+twinprime = r"""
+Represents the twin prime constant, which is the factor `C_2`
+featuring in the Hardy-Littlewood conjecture for the growth of the
+twin prime counting function,
+
+.. math ::
+
+ \pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}.
+
+It is given by the product over primes
+
+.. math ::
+
+ C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016
+
+Computing `C_2` to 50 digits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 50; mp.pretty = True
+ >>> +twinprime
+ 0.66016181584686957392781211001455577843262336028473
+
+References:
+http://mathworld.wolfram.com/TwinPrimesConstant.html
+"""
+
+ln = r"""
+Computes the natural logarithm of `x`, `\ln x`.
+See :func:`~mpmath.log` for additional documentation."""
+
+sqrt = r"""
+``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`.
+For positive real numbers, the principal root is simply the
+positive square root. For arbitrary complex numbers, the principal
+square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
+The function thus has a branch cut along the negative half real axis.
+
+For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
+performing ``x**0.5``.
+
+**Examples**
+
+Basic examples and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> sqrt(10)
+ 3.16227766016838
+ >>> sqrt(100)
+ 10.0
+ >>> sqrt(-4)
+ (0.0 + 2.0j)
+ >>> sqrt(1+1j)
+ (1.09868411346781 + 0.455089860562227j)
+ >>> sqrt(inf)
+ +inf
+
+Square root evaluation is fast at huge precision::
+
+ >>> mp.dps = 50000
+ >>> a = sqrt(3)
+ >>> str(a)[-10:]
+ '9329332815'
+
+:func:`mpmath.iv.sqrt` supports interval arguments::
+
+ >>> iv.dps = 15; iv.pretty = True
+ >>> iv.sqrt([16,100])
+ [4.0, 10.0]
+ >>> iv.sqrt(2)
+ [1.4142135623730949234, 1.4142135623730951455]
+ >>> iv.sqrt(2) ** 2
+ [1.9999999999999995559, 2.0000000000000004441]
+
+"""
+
+cbrt = r"""
+``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This
+function is faster and more accurate than raising to a floating-point
+fraction::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> 125**(mpf(1)/3)
+ mpf('4.9999999999999991')
+ >>> cbrt(125)
+ mpf('5.0')
+
+Every nonzero complex number has three cube roots. This function
+returns the cube root defined by `\exp(\log(x)/3)` where the
+principal branch of the natural logarithm is used. Note that this
+does not give a real cube root for negative real numbers::
+
+ >>> mp.pretty = True
+ >>> cbrt(-1)
+ (0.5 + 0.866025403784439j)
+"""
+
+exp = r"""
+Computes the exponential function,
+
+.. math ::
+
+ \exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}.
+
+For complex numbers, the exponential function also satisfies
+
+.. math ::
+
+ \exp(x+yi) = e^x (\cos y + i \sin y).
+
+**Basic examples**
+
+Some values of the exponential function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> exp(0)
+ 1.0
+ >>> exp(1)
+ 2.718281828459045235360287
+ >>> exp(-1)
+ 0.3678794411714423215955238
+ >>> exp(inf)
+ +inf
+ >>> exp(-inf)
+ 0.0
+
+Arguments can be arbitrarily large::
+
+ >>> exp(10000)
+ 8.806818225662921587261496e+4342
+ >>> exp(-10000)
+ 1.135483865314736098540939e-4343
+
+Evaluation is supported for interval arguments via
+:func:`mpmath.iv.exp`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.exp([-inf,0])
+ [0.0, 1.0]
+ >>> iv.exp([0,1])
+ [1.0, 2.71828182845904523536028749558]
+
+The exponential function can be evaluated efficiently to arbitrary
+precision::
+
+ >>> mp.dps = 10000
+ >>> exp(pi) #doctest: +ELLIPSIS
+ 23.140692632779269005729...8984304016040616
+
+**Functional properties**
+
+Numerical verification of Euler's identity for the complex
+exponential function::
+
+ >>> mp.dps = 15
+ >>> exp(j*pi)+1
+ (0.0 + 1.22464679914735e-16j)
+ >>> chop(exp(j*pi)+1)
+ 0.0
+
+This recovers the coefficients (reciprocal factorials) in the
+Maclaurin series expansion of exp::
+
+ >>> nprint(taylor(exp, 0, 5))
+ [1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333]
+
+The exponential function is its own derivative and antiderivative::
+
+ >>> exp(pi)
+ 23.1406926327793
+ >>> diff(exp, pi)
+ 23.1406926327793
+ >>> quad(exp, [-inf, pi])
+ 23.1406926327793
+
+The exponential function can be evaluated using various methods,
+including direct summation of the series, limits, and solving
+the defining differential equation::
+
+ >>> nsum(lambda k: pi**k/fac(k), [0,inf])
+ 23.1406926327793
+ >>> limit(lambda k: (1+pi/k)**k, inf)
+ 23.1406926327793
+ >>> odefun(lambda t, x: x, 0, 1)(pi)
+ 23.1406926327793
+"""
+
+cosh = r"""
+Computes the hyperbolic cosine of `x`,
+`\cosh(x) = (e^x + e^{-x})/2`. Values and limits include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> cosh(0)
+ 1.0
+ >>> cosh(1)
+ 1.543080634815243778477906
+ >>> cosh(-inf), cosh(+inf)
+ (+inf, +inf)
+
+The hyperbolic cosine is an even, convex function with
+a global minimum at `x = 0`, having a Maclaurin series
+that starts::
+
+ >>> nprint(chop(taylor(cosh, 0, 5)))
+ [1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0]
+
+Generalized to complex numbers, the hyperbolic cosine is
+equivalent to a cosine with the argument rotated
+in the imaginary direction, or `\cosh x = \cos ix`::
+
+ >>> cosh(2+3j)
+ (-3.724545504915322565473971 + 0.5118225699873846088344638j)
+ >>> cos(3-2j)
+ (-3.724545504915322565473971 + 0.5118225699873846088344638j)
+"""
+
+sinh = r"""
+Computes the hyperbolic sine of `x`,
+`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> sinh(0)
+ 0.0
+ >>> sinh(1)
+ 1.175201193643801456882382
+ >>> sinh(-inf), sinh(+inf)
+ (-inf, +inf)
+
+The hyperbolic sine is an odd function, with a Maclaurin
+series that starts::
+
+ >>> nprint(chop(taylor(sinh, 0, 5)))
+ [0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333]
+
+Generalized to complex numbers, the hyperbolic sine is
+essentially a sine with a rotation `i` applied to
+the argument; more precisely, `\sinh x = -i \sin ix`::
+
+ >>> sinh(2+3j)
+ (-3.590564589985779952012565 + 0.5309210862485198052670401j)
+ >>> j*sin(3-2j)
+ (-3.590564589985779952012565 + 0.5309210862485198052670401j)
+"""
+
+tanh = r"""
+Computes the hyperbolic tangent of `x`,
+`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> tanh(0)
+ 0.0
+ >>> tanh(1)
+ 0.7615941559557648881194583
+ >>> tanh(-inf), tanh(inf)
+ (-1.0, 1.0)
+
+The hyperbolic tangent is an odd, sigmoidal function, similar
+to the inverse tangent and error function. Its Maclaurin
+series is::
+
+ >>> nprint(chop(taylor(tanh, 0, 5)))
+ [0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333]
+
+Generalized to complex numbers, the hyperbolic tangent is
+essentially a tangent with a rotation `i` applied to
+the argument; more precisely, `\tanh x = -i \tan ix`::
+
+ >>> tanh(2+3j)
+ (0.9653858790221331242784803 - 0.009884375038322493720314034j)
+ >>> j*tan(3-2j)
+ (0.9653858790221331242784803 - 0.009884375038322493720314034j)
+"""
+
+cos = r"""
+Computes the cosine of `x`, `\cos(x)`.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> cos(pi/3)
+ 0.5
+ >>> cos(100000001)
+ -0.9802850113244713353133243
+ >>> cos(2+3j)
+ (-4.189625690968807230132555 - 9.109227893755336597979197j)
+ >>> cos(inf)
+ nan
+ >>> nprint(chop(taylor(cos, 0, 6)))
+ [1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889]
+
+Intervals are supported via :func:`mpmath.iv.cos`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.cos([0,1])
+ [0.540302305868139717400936602301, 1.0]
+ >>> iv.cos([0,2])
+ [-0.41614683654714238699756823214, 1.0]
+"""
+
+sin = r"""
+Computes the sine of `x`, `\sin(x)`.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> sin(pi/3)
+ 0.8660254037844386467637232
+ >>> sin(100000001)
+ 0.1975887055794968911438743
+ >>> sin(2+3j)
+ (9.1544991469114295734673 - 4.168906959966564350754813j)
+ >>> sin(inf)
+ nan
+ >>> nprint(chop(taylor(sin, 0, 6)))
+ [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0]
+
+Intervals are supported via :func:`mpmath.iv.sin`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.sin([0,1])
+ [0.0, 0.841470984807896506652502331201]
+ >>> iv.sin([0,2])
+ [0.0, 1.0]
+"""
+
+tan = r"""
+Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`.
+The tangent function is singular at `x = (n+1/2)\pi`, but
+``tan(x)`` always returns a finite result since `(n+1/2)\pi`
+cannot be represented exactly using floating-point arithmetic.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> tan(pi/3)
+ 1.732050807568877293527446
+ >>> tan(100000001)
+ -0.2015625081449864533091058
+ >>> tan(2+3j)
+ (-0.003764025641504248292751221 + 1.003238627353609801446359j)
+ >>> tan(inf)
+ nan
+ >>> nprint(chop(taylor(tan, 0, 6)))
+ [0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0]
+
+Intervals are supported via :func:`mpmath.iv.tan`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.tan([0,1])
+ [0.0, 1.55740772465490223050697482944]
+ >>> iv.tan([0,2]) # Interval includes a singularity
+ [-inf, +inf]
+"""
+
+sec = r"""
+Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`.
+The secant function is singular at `x = (n+1/2)\pi`, but
+``sec(x)`` always returns a finite result since `(n+1/2)\pi`
+cannot be represented exactly using floating-point arithmetic.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> sec(pi/3)
+ 2.0
+ >>> sec(10000001)
+ -1.184723164360392819100265
+ >>> sec(2+3j)
+ (-0.04167496441114427004834991 + 0.0906111371962375965296612j)
+ >>> sec(inf)
+ nan
+ >>> nprint(chop(taylor(sec, 0, 6)))
+ [1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222]
+
+Intervals are supported via :func:`mpmath.iv.sec`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.sec([0,1])
+ [1.0, 1.85081571768092561791175326276]
+ >>> iv.sec([0,2]) # Interval includes a singularity
+ [-inf, +inf]
+"""
+
+csc = r"""
+Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`.
+This cosecant function is singular at `x = n \pi`, but with the
+exception of the point `x = 0`, ``csc(x)`` returns a finite result
+since `n \pi` cannot be represented exactly using floating-point
+arithmetic.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> csc(pi/3)
+ 1.154700538379251529018298
+ >>> csc(10000001)
+ -1.864910497503629858938891
+ >>> csc(2+3j)
+ (0.09047320975320743980579048 + 0.04120098628857412646300981j)
+ >>> csc(inf)
+ nan
+
+Intervals are supported via :func:`mpmath.iv.csc`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.csc([0,1]) # Interval includes a singularity
+ [1.18839510577812121626159943988, +inf]
+ >>> iv.csc([0,2])
+ [1.0, +inf]
+"""
+
+cot = r"""
+Computes the cotangent of `x`,
+`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`.
+This cotangent function is singular at `x = n \pi`, but with the
+exception of the point `x = 0`, ``cot(x)`` returns a finite result
+since `n \pi` cannot be represented exactly using floating-point
+arithmetic.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> cot(pi/3)
+ 0.5773502691896257645091488
+ >>> cot(10000001)
+ 1.574131876209625656003562
+ >>> cot(2+3j)
+ (-0.003739710376336956660117409 - 0.9967577965693583104609688j)
+ >>> cot(inf)
+ nan
+
+Intervals are supported via :func:`mpmath.iv.cot`::
+
+ >>> iv.dps = 25; iv.pretty = True
+ >>> iv.cot([0,1]) # Interval includes a singularity
+ [0.642092615934330703006419974862, +inf]
+ >>> iv.cot([1,2])
+ [-inf, +inf]
+"""
+
+acos = r"""
+Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`.
+Since `-1 \le \cos(x) \le 1` for real `x`, the inverse
+cosine is real-valued only for `-1 \le x \le 1`. On this interval,
+:func:`~mpmath.acos` is defined to be a monotonically decreasing
+function assuming values between `+\pi` and `0`.
+
+Basic values are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> acos(-1)
+ 3.141592653589793238462643
+ >>> acos(0)
+ 1.570796326794896619231322
+ >>> acos(1)
+ 0.0
+ >>> nprint(chop(taylor(acos, 0, 6)))
+ [1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0]
+
+:func:`~mpmath.acos` is defined so as to be a proper inverse function of
+`\cos(\theta)` for `0 \le \theta < \pi`.
+We have `\cos(\cos^{-1}(x)) = x` for all `x`, but
+`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`::
+
+ >>> for x in [1, 10, -1, 2+3j, 10+3j]:
+ ... print("%s %s" % (cos(acos(x)), acos(cos(x))))
+ ...
+ 1.0 1.0
+ (10.0 + 0.0j) 2.566370614359172953850574
+ -1.0 1.0
+ (2.0 + 3.0j) (2.0 + 3.0j)
+ (10.0 + 3.0j) (2.566370614359172953850574 - 3.0j)
+
+The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos`
+places the branch cuts along the line segments `(-\infty, -1)` and
+`(+1, +\infty)`. In general,
+
+.. math ::
+
+ \cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right)
+
+where the principal-branch log and square root are implied.
+"""
+
+asin = r"""
+Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`.
+Since `-1 \le \sin(x) \le 1` for real `x`, the inverse
+sine is real-valued only for `-1 \le x \le 1`.
+On this interval, it is defined to be a monotonically increasing
+function assuming values between `-\pi/2` and `\pi/2`.
+
+Basic values are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> asin(-1)
+ -1.570796326794896619231322
+ >>> asin(0)
+ 0.0
+ >>> asin(1)
+ 1.570796326794896619231322
+ >>> nprint(chop(taylor(asin, 0, 6)))
+ [0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0]
+
+:func:`~mpmath.asin` is defined so as to be a proper inverse function of
+`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`.
+We have `\sin(\sin^{-1}(x)) = x` for all `x`, but
+`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
+
+ >>> for x in [1, 10, -1, 1+3j, -2+3j]:
+ ... print("%s %s" % (chop(sin(asin(x))), asin(sin(x))))
+ ...
+ 1.0 1.0
+ 10.0 -0.5752220392306202846120698
+ -1.0 -1.0
+ (1.0 + 3.0j) (1.0 + 3.0j)
+ (-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j)
+
+The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin`
+places the branch cuts along the line segments `(-\infty, -1)` and
+`(+1, +\infty)`. In general,
+
+.. math ::
+
+ \sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right)
+
+where the principal-branch log and square root are implied.
+"""
+
+atan = r"""
+Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`.
+This is a real-valued function for all real `x`, with range
+`(-\pi/2, \pi/2)`.
+
+Basic values are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> atan(-inf)
+ -1.570796326794896619231322
+ >>> atan(-1)
+ -0.7853981633974483096156609
+ >>> atan(0)
+ 0.0
+ >>> atan(1)
+ 0.7853981633974483096156609
+ >>> atan(inf)
+ 1.570796326794896619231322
+ >>> nprint(chop(taylor(atan, 0, 6)))
+ [0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0]
+
+The inverse tangent is often used to compute angles. However,
+the atan2 function is often better for this as it preserves sign
+(see :func:`~mpmath.atan2`).
+
+:func:`~mpmath.atan` is defined so as to be a proper inverse function of
+`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`.
+We have `\tan(\tan^{-1}(x)) = x` for all `x`, but
+`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
+
+ >>> mp.dps = 25
+ >>> for x in [1, 10, -1, 1+3j, -2+3j]:
+ ... print("%s %s" % (tan(atan(x)), atan(tan(x))))
+ ...
+ 1.0 1.0
+ 10.0 0.5752220392306202846120698
+ -1.0 -1.0
+ (1.0 + 3.0j) (1.000000000000000000000001 + 3.0j)
+ (-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j)
+
+The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan`
+places the branch cuts along the line segments `(-i \infty, -i)` and
+`(+i, +i \infty)`. In general,
+
+.. math ::
+
+ \tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right)
+
+where the principal-branch log is implied.
+"""
+
+acot = r"""Computes the inverse cotangent of `x`,
+`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`."""
+
+asec = r"""Computes the inverse secant of `x`,
+`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`."""
+
+acsc = r"""Computes the inverse cosecant of `x`,
+`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`."""
+
+coth = r"""Computes the hyperbolic cotangent of `x`,
+`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`.
+"""
+
+sech = r"""Computes the hyperbolic secant of `x`,
+`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`.
+"""
+
+csch = r"""Computes the hyperbolic cosecant of `x`,
+`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`.
+"""
+
+acosh = r"""Computes the inverse hyperbolic cosine of `x`,
+`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`.
+"""
+
+asinh = r"""Computes the inverse hyperbolic sine of `x`,
+`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`.
+"""
+
+atanh = r"""Computes the inverse hyperbolic tangent of `x`,
+`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`.
+"""
+
+acoth = r"""Computes the inverse hyperbolic cotangent of `x`,
+`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`."""
+
+asech = r"""Computes the inverse hyperbolic secant of `x`,
+`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`."""
+
+acsch = r"""Computes the inverse hyperbolic cosecant of `x`,
+`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`."""
+
+
+
+sinpi = r"""
+Computes `\sin(\pi x)`, more accurately than the expression
+``sin(pi*x)``::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> sinpi(10**10), sin(pi*(10**10))
+ (0.0, -2.23936276195592e-6)
+ >>> sinpi(10**10+0.5), sin(pi*(10**10+0.5))
+ (1.0, 0.999999999998721)
+"""
+
+cospi = r"""
+Computes `\cos(\pi x)`, more accurately than the expression
+``cos(pi*x)``::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> cospi(10**10), cos(pi*(10**10))
+ (1.0, 0.999999999997493)
+ >>> cospi(10**10+0.5), cos(pi*(10**10+0.5))
+ (0.0, 1.59960492420134e-6)
+"""
+
+sinc = r"""
+``sinc(x)`` computes the unnormalized sinc function, defined as
+
+.. math ::
+
+ \mathrm{sinc}(x) = \begin{cases}
+ \sin(x)/x, & \mbox{if } x \ne 0 \\
+ 1, & \mbox{if } x = 0.
+ \end{cases}
+
+See :func:`~mpmath.sincpi` for the normalized sinc function.
+
+Simple values and limits include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> sinc(0)
+ 1.0
+ >>> sinc(1)
+ 0.841470984807897
+ >>> sinc(inf)
+ 0.0
+
+The integral of the sinc function is the sine integral Si::
+
+ >>> quad(sinc, [0, 1])
+ 0.946083070367183
+ >>> si(1)
+ 0.946083070367183
+"""
+
+sincpi = r"""
+``sincpi(x)`` computes the normalized sinc function, defined as
+
+.. math ::
+
+ \mathrm{sinc}_{\pi}(x) = \begin{cases}
+ \sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\
+ 1, & \mbox{if } x = 0.
+ \end{cases}
+
+Equivalently, we have
+`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`.
+
+The normalization entails that the function integrates
+to unity over the entire real line::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> quadosc(sincpi, [-inf, inf], period=2.0)
+ 1.0
+
+Like :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately
+at its roots::
+
+ >>> sincpi(10)
+ 0.0
+"""
+
+expj = r"""
+Convenience function for computing `e^{ix}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> expj(0)
+ (1.0 + 0.0j)
+ >>> expj(-1)
+ (0.5403023058681397174009366 - 0.8414709848078965066525023j)
+ >>> expj(j)
+ (0.3678794411714423215955238 + 0.0j)
+ >>> expj(1+j)
+ (0.1987661103464129406288032 + 0.3095598756531121984439128j)
+"""
+
+expjpi = r"""
+Convenience function for computing `e^{i \pi x}`.
+Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`,
+:func:`~mpmath.sinpi`)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> expjpi(0)
+ (1.0 + 0.0j)
+ >>> expjpi(1)
+ (-1.0 + 0.0j)
+ >>> expjpi(0.5)
+ (0.0 + 1.0j)
+ >>> expjpi(-1)
+ (-1.0 + 0.0j)
+ >>> expjpi(j)
+ (0.04321391826377224977441774 + 0.0j)
+ >>> expjpi(1+j)
+ (-0.04321391826377224977441774 + 0.0j)
+"""
+
+floor = r"""
+Computes the floor of `x`, `\lfloor x \rfloor`, defined as
+the largest integer less than or equal to `x`::
+
+ >>> from mpmath import *
+ >>> mp.pretty = False
+ >>> floor(3.5)
+ mpf('3.0')
+
+.. note ::
+
+ :func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a
+ floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is
+ too large to be represented exactly at the present working precision,
+ the result will be rounded, not necessarily in the direction
+ implied by the mathematical definition of the function.
+
+To avoid rounding, use *prec=0*::
+
+ >>> mp.dps = 15
+ >>> print(int(floor(10**30+1)))
+ 1000000000000000019884624838656
+ >>> print(int(floor(10**30+1, prec=0)))
+ 1000000000000000000000000000001
+
+The floor function is defined for complex numbers and
+acts on the real and imaginary parts separately::
+
+ >>> floor(3.25+4.75j)
+ mpc(real='3.0', imag='4.0')
+"""
+
+ceil = r"""
+Computes the ceiling of `x`, `\lceil x \rceil`, defined as
+the smallest integer greater than or equal to `x`::
+
+ >>> from mpmath import *
+ >>> mp.pretty = False
+ >>> ceil(3.5)
+ mpf('4.0')
+
+The ceiling function is defined for complex numbers and
+acts on the real and imaginary parts separately::
+
+ >>> ceil(3.25+4.75j)
+ mpc(real='4.0', imag='5.0')
+
+See notes about rounding for :func:`~mpmath.floor`.
+"""
+
+nint = r"""
+Evaluates the nearest integer function, `\mathrm{nint}(x)`.
+This gives the nearest integer to `x`; on a tie, it
+gives the nearest even integer::
+
+ >>> from mpmath import *
+ >>> mp.pretty = False
+ >>> nint(3.2)
+ mpf('3.0')
+ >>> nint(3.8)
+ mpf('4.0')
+ >>> nint(3.5)
+ mpf('4.0')
+ >>> nint(4.5)
+ mpf('4.0')
+
+The nearest integer function is defined for complex numbers and
+acts on the real and imaginary parts separately::
+
+ >>> nint(3.25+4.75j)
+ mpc(real='3.0', imag='5.0')
+
+See notes about rounding for :func:`~mpmath.floor`.
+"""
+
+frac = r"""
+Gives the fractional part of `x`, defined as
+`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`).
+In effect, this computes `x` modulo 1, or `x+n` where
+`n \in \mathbb{Z}` is such that `x+n \in [0,1)`::
+
+ >>> from mpmath import *
+ >>> mp.pretty = False
+ >>> frac(1.25)
+ mpf('0.25')
+ >>> frac(3)
+ mpf('0.0')
+ >>> frac(-1.25)
+ mpf('0.75')
+
+For a complex number, the fractional part function applies to
+the real and imaginary parts separately::
+
+ >>> frac(2.25+3.75j)
+ mpc(real='0.25', imag='0.75')
+
+Plotted, the fractional part function gives a sawtooth
+wave. The Fourier series coefficients have a simple
+form::
+
+ >>> mp.dps = 15
+ >>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4))
+ ([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775])
+ >>> nprint([-1/(pi*k) for k in range(1,5)])
+ [-0.31831, -0.159155, -0.106103, -0.0795775]
+
+.. note::
+
+ The fractional part is sometimes defined as a symmetric
+ function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`.
+ This convention is used, for instance, by Mathematica's
+ ``FractionalPart``.
+
+"""
+
+sign = r"""
+Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|`
+(with the special case `\mathrm{sign}(0) = 0`)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> sign(10)
+ mpf('1.0')
+ >>> sign(-10)
+ mpf('-1.0')
+ >>> sign(0)
+ mpf('0.0')
+
+Note that the sign function is also defined for complex numbers,
+for which it gives the projection onto the unit circle::
+
+ >>> mp.dps = 15; mp.pretty = True
+ >>> sign(1+j)
+ (0.707106781186547 + 0.707106781186547j)
+
+"""
+
+arg = r"""
+Computes the complex argument (phase) of `x`, defined as the
+signed angle between the positive real axis and `x` in the
+complex plane::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> arg(3)
+ 0.0
+ >>> arg(3+3j)
+ 0.785398163397448
+ >>> arg(3j)
+ 1.5707963267949
+ >>> arg(-3)
+ 3.14159265358979
+ >>> arg(-3j)
+ -1.5707963267949
+
+The angle is defined to satisfy `-\pi < \arg(x) \le \pi`, with the
+sign convention that a nonnegative imaginary part results in a
+nonnegative argument.
+
+The value returned by :func:`~mpmath.arg` is an ``mpf`` instance.
+"""
+
+fabs = r"""
+Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`,
+:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``)
+into mpmath numbers::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> fabs(3)
+ mpf('3.0')
+ >>> fabs(-3)
+ mpf('3.0')
+ >>> fabs(3+4j)
+ mpf('5.0')
+"""
+
+re = r"""
+Returns the real part of `x`, `\Re(x)`. :func:`~mpmath.re`
+converts a non-mpmath number to an mpmath number::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> re(3)
+ mpf('3.0')
+ >>> re(-1+4j)
+ mpf('-1.0')
+"""
+
+im = r"""
+Returns the imaginary part of `x`, `\Im(x)`. :func:`~mpmath.im`
+converts a non-mpmath number to an mpmath number::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> im(3)
+ mpf('0.0')
+ >>> im(-1+4j)
+ mpf('4.0')
+"""
+
+conj = r"""
+Returns the complex conjugate of `x`, `\overline{x}`. Unlike
+``x.conjugate()``, :func:`~mpmath.conj` converts `x` to an mpmath number::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> conj(3)
+ mpf('3.0')
+ >>> conj(-1+4j)
+ mpc(real='-1.0', imag='-4.0')
+"""
+
+polar = r"""
+Returns the polar representation of the complex number `z`
+as a pair `(r, \phi)` such that `z = r e^{i \phi}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> polar(-2)
+ (2.0, 3.14159265358979)
+ >>> polar(3-4j)
+ (5.0, -0.927295218001612)
+"""
+
+rect = r"""
+Returns the complex number represented by polar
+coordinates `(r, \phi)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> chop(rect(2, pi))
+ -2.0
+ >>> rect(sqrt(2), -pi/4)
+ (1.0 - 1.0j)
+"""
+
+expm1 = r"""
+Computes `e^x - 1`, accurately for small `x`.
+
+Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from
+potentially catastrophic cancellation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> exp(1e-10)-1; print(expm1(1e-10))
+ 1.00000008274037e-10
+ 1.00000000005e-10
+ >>> exp(1e-20)-1; print(expm1(1e-20))
+ 0.0
+ 1.0e-20
+ >>> 1/(exp(1e-20)-1)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError
+ >>> 1/expm1(1e-20)
+ 1.0e+20
+
+Evaluation works for extremely tiny values::
+
+ >>> expm1(0)
+ 0.0
+ >>> expm1('1e-10000000')
+ 1.0e-10000000
+
+"""
+
+log1p = r"""
+Computes `\log(1+x)`, accurately for small `x`.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> log(1+1e-10); print(mp.log1p(1e-10))
+ 1.00000008269037e-10
+ 9.9999999995e-11
+ >>> mp.log1p(1e-100j)
+ (5.0e-201 + 1.0e-100j)
+ >>> mp.log1p(0)
+ 0.0
+
+"""
+
+
+powm1 = r"""
+Computes `x^y - 1`, accurately when `x^y` is very close to 1.
+
+This avoids potentially catastrophic cancellation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> power(0.99999995, 1e-10) - 1
+ 0.0
+ >>> powm1(0.99999995, 1e-10)
+ -5.00000012791934e-18
+
+Powers exactly equal to 1, and only those powers, yield 0 exactly::
+
+ >>> powm1(-j, 4)
+ (0.0 + 0.0j)
+ >>> powm1(3, 0)
+ 0.0
+ >>> powm1(fadd(-1, 1e-100, exact=True), 4)
+ -4.0e-100
+
+Evaluation works for extremely tiny `y`::
+
+ >>> powm1(2, '1e-100000')
+ 6.93147180559945e-100001
+ >>> powm1(j, '1e-1000')
+ (-1.23370055013617e-2000 + 1.5707963267949e-1000j)
+
+"""
+
+root = r"""
+``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number
+`r` that (up to possible approximation error) satisfies `r^n = z`.
+(``nthroot`` is available as an alias for ``root``.)
+
+Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are
+equidistant points on a circle with radius `|z|^{1/n}`, centered around the
+origin. A specific root may be selected using the optional index
+`k`. The roots are indexed counterclockwise, starting with `k = 0` for the root
+closest to the positive real half-axis.
+
+The `k = 0` root is the so-called principal `n`-th root, often denoted by
+`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is
+a positive real number, the principal root is just the unique positive
+`n`-th root of `z`. Under some circumstances, non-principal real roots exist:
+for positive real `z`, `n` even, there is a negative root given by `k = n/2`;
+for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`.
+
+To obtain all roots with a simple expression, use
+``[root(z,n,k) for k in range(n)]``.
+
+An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of
+unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots`
+provides a slightly more convenient way to obtain the roots of unity,
+including the option to compute only the primitive roots of unity.
+
+Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be
+reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or
+the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed.
+
+:func:`~mpmath.root` is implemented to use Newton's method for small
+`n`. At high precision, this makes `x^{1/n}` not much more
+expensive than the regular exponentiation, `x^n`. For very large
+`n`, :func:`~mpmath.nthroot` falls back to use the exponential function.
+
+**Examples**
+
+:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a
+floating-point fraction::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = False
+ >>> 16807 ** (mpf(1)/5)
+ mpf('7.0000000000000009')
+ >>> root(16807, 5)
+ mpf('7.0')
+ >>> nthroot(16807, 5) # Alias
+ mpf('7.0')
+
+A high-precision root::
+
+ >>> mp.dps = 50; mp.pretty = True
+ >>> nthroot(10, 5)
+ 1.584893192461113485202101373391507013269442133825
+ >>> nthroot(10, 5) ** 5
+ 10.0
+
+Computing principal and non-principal square and cube roots::
+
+ >>> mp.dps = 15
+ >>> root(10, 2)
+ 3.16227766016838
+ >>> root(10, 2, 1)
+ -3.16227766016838
+ >>> root(-10, 3)
+ (1.07721734501594 + 1.86579517236206j)
+ >>> root(-10, 3, 1)
+ -2.15443469003188
+ >>> root(-10, 3, 2)
+ (1.07721734501594 - 1.86579517236206j)
+
+All the 7th roots of a complex number::
+
+ >>> for r in [root(3+4j, 7, k) for k in range(7)]:
+ ... print("%s %s" % (r, r**7))
+ ...
+ (1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j)
+ (0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j)
+ (-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j)
+ (-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j)
+ (-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j)
+ (-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j)
+ (0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j)
+
+Cube roots of unity::
+
+ >>> for k in range(3): print(root(1, 3, k))
+ ...
+ 1.0
+ (-0.5 + 0.866025403784439j)
+ (-0.5 - 0.866025403784439j)
+
+Some exact high order roots::
+
+ >>> root(75**210, 105)
+ 5625.0
+ >>> root(1, 128, 96)
+ (0.0 - 1.0j)
+ >>> root(4**128, 128, 96)
+ (0.0 - 4.0j)
+
+"""
+
+unitroots = r"""
+``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`,
+all the distinct `n`-th roots of unity, as a list. If the option
+*primitive=True* is passed, only the primitive roots are returned.
+
+Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct
+roots for each `n` (`\zeta_k` and `\zeta_j` are the same when
+`k = j \pmod n`), which form a regular polygon with vertices on the unit
+circle. They are ordered counterclockwise with increasing `k`, starting
+with `\zeta_0 = 1`.
+
+**Examples**
+
+The roots of unity up to `n = 4`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> nprint(unitroots(1))
+ [1.0]
+ >>> nprint(unitroots(2))
+ [1.0, -1.0]
+ >>> nprint(unitroots(3))
+ [1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)]
+ >>> nprint(unitroots(4))
+ [1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)]
+
+Roots of unity form a geometric series that sums to 0::
+
+ >>> mp.dps = 50
+ >>> chop(fsum(unitroots(25)))
+ 0.0
+
+Primitive roots up to `n = 4`::
+
+ >>> mp.dps = 15
+ >>> nprint(unitroots(1, primitive=True))
+ [1.0]
+ >>> nprint(unitroots(2, primitive=True))
+ [-1.0]
+ >>> nprint(unitroots(3, primitive=True))
+ [(-0.5 + 0.866025j), (-0.5 - 0.866025j)]
+ >>> nprint(unitroots(4, primitive=True))
+ [(0.0 + 1.0j), (0.0 - 1.0j)]
+
+There are only four primitive 12th roots::
+
+ >>> nprint(unitroots(12, primitive=True))
+ [(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)]
+
+The `n`-th roots of unity form a group, the cyclic group of order `n`.
+Any primitive root `r` is a generator for this group, meaning that
+`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in
+some permuted order)::
+
+ >>> for r in unitroots(6): print(r)
+ ...
+ 1.0
+ (0.5 + 0.866025403784439j)
+ (-0.5 + 0.866025403784439j)
+ -1.0
+ (-0.5 - 0.866025403784439j)
+ (0.5 - 0.866025403784439j)
+ >>> r = unitroots(6, primitive=True)[1]
+ >>> for k in range(6): print(chop(r**k))
+ ...
+ 1.0
+ (0.5 - 0.866025403784439j)
+ (-0.5 - 0.866025403784439j)
+ -1.0
+ (-0.5 + 0.866025403784438j)
+ (0.5 + 0.866025403784438j)
+
+The number of primitive roots equals the Euler totient function `\phi(n)`::
+
+ >>> [len(unitroots(n, primitive=True)) for n in range(1,20)]
+ [1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18]
+
+"""
+
+
+log = r"""
+Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
+unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm
+and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm
+is defined in terms of the natural logarithm as
+`\log_b(x) = \ln(x)/\ln(b)`.
+
+By convention, we take `\log(0) = -\infty`.
+
+The natural logarithm is real if `x > 0` and complex if `x < 0` or if
+`x` is complex. The principal branch of the complex logarithm is
+used, meaning that `-\pi < \Im(\ln(x)) = \arg(x) \le \pi`.
+
+**Examples**
+
+Some basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> log(1)
+ 0.0
+ >>> log(2)
+ 0.693147180559945
+ >>> log(1000,10)
+ 3.0
+ >>> log(4, 16)
+ 0.5
+ >>> log(j)
+ (0.0 + 1.5707963267949j)
+ >>> log(-1)
+ (0.0 + 3.14159265358979j)
+ >>> log(0)
+ -inf
+ >>> log(inf)
+ +inf
+
+The natural logarithm is the antiderivative of `1/x`::
+
+ >>> quad(lambda x: 1/x, [1, 5])
+ 1.6094379124341
+ >>> log(5)
+ 1.6094379124341
+ >>> diff(log, 10)
+ 0.1
+
+The Taylor series expansion of the natural logarithm around
+`x = 1` has coefficients `(-1)^{n+1}/n`::
+
+ >>> nprint(taylor(log, 1, 7))
+ [0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
+
+:func:`~mpmath.log` supports arbitrary precision evaluation::
+
+ >>> mp.dps = 50
+ >>> log(pi)
+ 1.1447298858494001741434273513530587116472948129153
+ >>> log(pi, pi**3)
+ 0.33333333333333333333333333333333333333333333333333
+ >>> mp.dps = 25
+ >>> log(3+4j)
+ (1.609437912434100374600759 + 0.9272952180016122324285125j)
+"""
+
+log10 = r"""
+Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
+is equivalent to ``log(x, 10)``.
+"""
+
+fmod = r"""
+Converts `x` and `y` to mpmath numbers and returns `x \mod y`.
+For mpmath numbers, this is equivalent to ``x % y``.
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> fmod(100, pi)
+ 2.61062773871641
+
+You can use :func:`~mpmath.fmod` to compute fractional parts of numbers::
+
+ >>> fmod(10.25, 1)
+ 0.25
+
+"""
+
+radians = r"""
+Converts the degree angle `x` to radians::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> radians(60)
+ 1.0471975511966
+"""
+
+degrees = r"""
+Converts the radian angle `x` to a degree angle::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> degrees(pi/3)
+ 60.0
+"""
+
+atan2 = r"""
+Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`,
+giving the signed angle between the positive `x`-axis and the
+point `(x, y)` in the 2D plane. This function is defined for
+real `x` and `y` only.
+
+The two-argument arctangent essentially computes
+`\mathrm{atan}(y/x)`, but accounts for the signs of both
+`x` and `y` to give the angle for the correct quadrant. The
+following examples illustrate the difference::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> atan2(1,1), atan(1/1.)
+ (0.785398163397448, 0.785398163397448)
+ >>> atan2(1,-1), atan(1/-1.)
+ (2.35619449019234, -0.785398163397448)
+ >>> atan2(-1,1), atan(-1/1.)
+ (-0.785398163397448, -0.785398163397448)
+ >>> atan2(-1,-1), atan(-1/-1.)
+ (-2.35619449019234, 0.785398163397448)
+
+The angle convention is the same as that used for the complex
+argument; see :func:`~mpmath.arg`.
+"""
+
+fibonacci = r"""
+``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The
+Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)`
+with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci`
+extends this definition to arbitrary real and complex arguments
+using the formula
+
+.. math ::
+
+ F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
+
+where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this
+continuous formula to compute `F(n)` for extremely large `n`, where
+calculating the exact integer would be wasteful.
+
+For convenience, :func:`~mpmath.fib` is available as an alias for
+:func:`~mpmath.fibonacci`.
+
+**Basic examples**
+
+Some small Fibonacci numbers are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for i in range(10):
+ ... print(fibonacci(i))
+ ...
+ 0.0
+ 1.0
+ 1.0
+ 2.0
+ 3.0
+ 5.0
+ 8.0
+ 13.0
+ 21.0
+ 34.0
+ >>> fibonacci(50)
+ 12586269025.0
+
+The recurrence for `F(n)` extends backwards to negative `n`::
+
+ >>> for i in range(10):
+ ... print(fibonacci(-i))
+ ...
+ 0.0
+ 1.0
+ -1.0
+ 2.0
+ -3.0
+ 5.0
+ -8.0
+ 13.0
+ -21.0
+ 34.0
+
+Large Fibonacci numbers will be computed approximately unless
+the precision is set high enough::
+
+ >>> fib(200)
+ 2.8057117299251e+41
+ >>> mp.dps = 45
+ >>> fib(200)
+ 280571172992510140037611932413038677189525.0
+
+:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers
+of stupendous size::
+
+ >>> mp.dps = 15
+ >>> fibonacci(10**25)
+ 3.49052338550226e+2089876402499787337692720
+
+**Real and complex arguments**
+
+The extended Fibonacci function is an analytic function. The
+property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
+
+ >>> mp.dps = 15
+ >>> fib(pi)
+ 2.1170270579161
+ >>> fib(pi-1) + fib(pi-2)
+ 2.1170270579161
+ >>> fib(3+4j)
+ (-5248.51130728372 - 14195.962288353j)
+ >>> fib(2+4j) + fib(1+4j)
+ (-5248.51130728372 - 14195.962288353j)
+
+The Fibonacci function has infinitely many roots on the
+negative half-real axis. The first root is at 0, the second is
+close to -0.18, and then there are infinitely many roots that
+asymptotically approach `-n+1/2`::
+
+ >>> findroot(fib, -0.2)
+ -0.183802359692956
+ >>> findroot(fib, -2)
+ -1.57077646820395
+ >>> findroot(fib, -17)
+ -16.4999999596115
+ >>> findroot(fib, -24)
+ -23.5000000000479
+
+**Mathematical relationships**
+
+For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
+
+ >>> mp.dps = 50
+ >>> fibonacci(101)/fibonacci(100)
+ 1.6180339887498948482045868343656381177203127439638
+ >>> +phi
+ 1.6180339887498948482045868343656381177203091798058
+
+The sum of reciprocal Fibonacci numbers converges to an irrational
+number for which no closed form expression is known::
+
+ >>> mp.dps = 15
+ >>> nsum(lambda n: 1/fib(n), [1, inf])
+ 3.35988566624318
+
+Amazingly, however, the sum of odd-index reciprocal Fibonacci
+numbers can be expressed in terms of a Jacobi theta function::
+
+ >>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
+ 1.82451515740692
+ >>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
+ 1.82451515740692
+
+Some related sums can be done in closed form::
+
+ >>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
+ 1.11803398874989
+ >>> phi - 0.5
+ 1.11803398874989
+ >>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
+ >>> nsum(f, [1, inf])
+ 0.618033988749895
+ >>> phi-1
+ 0.618033988749895
+
+**References**
+
+1. http://mathworld.wolfram.com/FibonacciNumber.html
+"""
+
+altzeta = r"""
+Gives the Dirichlet eta function, `\eta(s)`, also known as the
+alternating zeta function. This function is defined in analogy
+with the Riemann zeta function as providing the sum of the
+alternating series
+
+.. math ::
+
+    \eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{(k+1)^s}
+ = 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
+
+The eta function, unlike the Riemann zeta function, is an entire
+function, having a finite value for all complex `s`. The special case
+`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
+
+The alternating zeta function may be expressed using the Riemann zeta function
+as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
+in terms of the Hurwitz zeta function, for example using
+:func:`~mpmath.dirichlet` (see documentation for that function).
+
+**Examples**
+
+Some special values are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> altzeta(1)
+ 0.693147180559945
+ >>> altzeta(0)
+ 0.5
+ >>> altzeta(-1)
+ 0.25
+ >>> altzeta(-2)
+ 0.0
+
+An example of a sum that can be computed more accurately and
+efficiently via :func:`~mpmath.altzeta` than via numerical summation::
+
+ >>> sum(-(-1)**n / mpf(n)**2.5 for n in range(1, 100))
+ 0.867204951503984
+ >>> altzeta(2.5)
+ 0.867199889012184
+
+At positive even integers, the Dirichlet eta function
+evaluates to a rational multiple of a power of `\pi`::
+
+ >>> altzeta(2)
+ 0.822467033424113
+ >>> pi**2/12
+ 0.822467033424113
+
+Like the Riemann zeta function, `\eta(s)` approaches 1
+as `s` approaches positive infinity, although it does
+so from below rather than from above::
+
+ >>> altzeta(30)
+ 0.999999999068682
+ >>> altzeta(inf)
+ 1.0
+ >>> mp.pretty = False
+ >>> altzeta(1000, rounding='d')
+ mpf('0.99999999999999989')
+ >>> altzeta(1000, rounding='u')
+ mpf('1.0')
+
+**References**
+
+1. http://mathworld.wolfram.com/DirichletEtaFunction.html
+
+2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
+"""
+
+factorial = r"""
+Computes the factorial, `x!`. For integers `n \ge 0`, we have
+`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
+is defined for real or complex `x` by `x! = \Gamma(x+1)`.
+
+**Examples**
+
+Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for k in range(6):
+ ... print("%s %s" % (k, fac(k)))
+ ...
+ 0 1.0
+ 1 1.0
+ 2 2.0
+ 3 6.0
+ 4 24.0
+ 5 120.0
+ >>> fac(inf)
+ +inf
+ >>> fac(0.5), sqrt(pi)/2
+ (0.886226925452758, 0.886226925452758)
+
+For large positive `x`, `x!` can be approximated by
+Stirling's formula::
+
+ >>> x = 10**10
+ >>> fac(x)
+ 2.32579620567308e+95657055186
+ >>> sqrt(2*pi*x)*(x/e)**x
+ 2.32579597597705e+95657055186
+
+:func:`~mpmath.fac` supports evaluation for astronomically large values::
+
+ >>> fac(10**30)
+ 6.22311232304258e+29565705518096748172348871081098
+
+Reciprocal factorials appear in the Taylor series of the
+exponential function (among many other contexts)::
+
+ >>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
+ (2.71828182845905, 2.71828182845905)
+ >>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
+ (23.1406926327793, 23.1406926327793)
+
+"""
+
+gamma = r"""
+Computes the gamma function, `\Gamma(x)`. The gamma function is a
+shifted version of the ordinary factorial, satisfying
+`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
+is defined by
+
+.. math ::
+
+ \Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
+
+for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
+by analytic continuation.
+
+**Examples**
+
+Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for k in range(1, 6):
+ ... print("%s %s" % (k, gamma(k)))
+ ...
+ 1 1.0
+ 2 1.0
+ 3 2.0
+ 4 6.0
+ 5 24.0
+ >>> gamma(inf)
+ +inf
+ >>> gamma(0)
+ Traceback (most recent call last):
+ ...
+ ValueError: gamma function pole
+
+The gamma function of a half-integer is a rational multiple of
+`\sqrt{\pi}`::
+
+ >>> gamma(0.5), sqrt(pi)
+ (1.77245385090552, 1.77245385090552)
+ >>> gamma(1.5), sqrt(pi)/2
+ (0.886226925452758, 0.886226925452758)
+
+We can check the integral definition::
+
+ >>> gamma(3.5)
+ 3.32335097044784
+ >>> quad(lambda t: t**2.5*exp(-t), [0,inf])
+ 3.32335097044784
+
+:func:`~mpmath.gamma` supports arbitrary-precision evaluation and
+complex arguments::
+
+ >>> mp.dps = 50
+ >>> gamma(sqrt(3))
+ 0.91510229697308632046045539308226554038315280564184
+ >>> mp.dps = 25
+ >>> gamma(2j)
+ (0.009902440080927490985955066 - 0.07595200133501806872408048j)
+
+Arguments can also be large. Note that the gamma function grows
+very quickly::
+
+ >>> mp.dps = 15
+ >>> gamma(10**20)
+ 1.9328495143101e+1956570551809674817225
+
+**References**
+
+* [Spouge]_
+
+"""
+
+psi = r"""
+Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
+Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
+the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
+functions are defined as the logarithmic derivatives of the gamma
+function:
+
+.. math ::
+
+ \psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
+
+In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
+present implementation of :func:`~mpmath.psi`, the order `m` must be a
+nonnegative integer, while the argument `z` may be an arbitrary
+complex number (with exception for the polygamma function's poles
+at `z = 0, -1, -2, \ldots`).
+
+**Examples**
+
+For various rational arguments, the polygamma function reduces to
+a combination of standard mathematical constants::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> psi(0, 1), -euler
+ (-0.5772156649015328606065121, -0.5772156649015328606065121)
+ >>> psi(1, '1/4'), pi**2+8*catalan
+ (17.19732915450711073927132, 17.19732915450711073927132)
+ >>> psi(2, '1/2'), -14*apery
+ (-16.82879664423431999559633, -16.82879664423431999559633)
+
+The polygamma functions are derivatives of each other::
+
+ >>> diff(lambda x: psi(3, x), pi), psi(4, pi)
+ (-0.1105749312578862734526952, -0.1105749312578862734526952)
+ >>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
+ (-0.375, -0.375)
+
+The digamma function diverges logarithmically as `z \to \infty`,
+while higher orders tend to zero::
+
+ >>> psi(0,inf), psi(1,inf), psi(2,inf)
+ (+inf, 0.0, 0.0)
+
+Evaluation for a complex argument::
+
+ >>> psi(2, -1-2j)
+ (0.03902435405364952654838445 + 0.1574325240413029954685366j)
+
+Evaluation is supported for large orders `m` and/or large
+arguments `z`::
+
+ >>> psi(3, 10**100)
+ 2.0e-300
+ >>> psi(250, 10**30+10**20*j)
+ (-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
+
+**Application to infinite series**
+
+Any infinite series where the summand is a rational function of
+the index `k` can be evaluated in closed form in terms of polygamma
+functions of the roots and poles of the summand::
+
+ >>> a = sqrt(2)
+ >>> b = sqrt(3)
+ >>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
+ 0.4049668927517857061917531
+ >>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
+ 0.4049668927517857061917531
+
+This follows from the series representation (`m > 0`)
+
+.. math ::
+
+ \psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
+ \frac{1}{(z+k)^{m+1}}.
+
+Since the roots of a polynomial may be complex, it is sometimes
+necessary to use the complex polygamma function to evaluate
+an entirely real-valued sum::
+
+ >>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf])
+ 1.694361433907061256154665
+ >>> nprint(polyroots([1,-2,3]))
+ [(1.0 - 1.41421j), (1.0 + 1.41421j)]
+ >>> r1 = 1-sqrt(2)*j
+ >>> r2 = r1.conjugate()
+ >>> (psi(0,-r2)-psi(0,-r1))/(r1-r2)
+ (1.694361433907061256154665 + 0.0j)
+
+"""
+
+digamma = r"""
+Shortcut for ``psi(0,z)``.
+"""
+
+harmonic = r"""
+If `n` is an integer, ``harmonic(n)`` gives a floating-point
+approximation of the `n`-th harmonic number `H(n)`, defined as
+
+.. math ::
+
+ H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}
+
+The first few harmonic numbers are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(8):
+ ... print("%s %s" % (n, harmonic(n)))
+ ...
+ 0 0.0
+ 1 1.0
+ 2 1.5
+ 3 1.83333333333333
+ 4 2.08333333333333
+ 5 2.28333333333333
+ 6 2.45
+ 7 2.59285714285714
+
+The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges::
+
+ >>> harmonic(inf)
+ +inf
+
+:func:`~mpmath.harmonic` is evaluated using the digamma function rather
+than by summing the harmonic series term by term. It can therefore
+be computed quickly for arbitrarily large `n`, and even for
+nonintegral arguments::
+
+ >>> harmonic(10**100)
+ 230.835724964306
+ >>> harmonic(0.5)
+ 0.613705638880109
+ >>> harmonic(3+4j)
+ (2.24757548223494 + 0.850502209186044j)
+
+:func:`~mpmath.harmonic` supports arbitrary precision evaluation::
+
+ >>> mp.dps = 50
+ >>> harmonic(11)
+ 3.0198773448773448773448773448773448773448773448773
+ >>> harmonic(pi)
+ 1.8727388590273302654363491032336134987519132374152
+
+The harmonic series diverges, but at a glacial pace. It is possible
+to calculate the exact number of terms required before the sum
+exceeds a given amount, say 100::
+
+ >>> mp.dps = 50
+ >>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10)
+ >>> v
+ 15092688622113788323693563264538101449859496.864101
+ >>> v = int(ceil(v))
+ >>> print(v)
+ 15092688622113788323693563264538101449859497
+ >>> harmonic(v-1)
+ 99.999999999999999999999999999999999999999999942747
+ >>> harmonic(v)
+ 100.000000000000000000000000000000000000000000009
+
+"""
+
+bernoulli = r"""
+Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`.
+
+The Bernoulli numbers are rational numbers, but this function
+returns a floating-point approximation. To obtain an exact
+fraction, use :func:`~mpmath.bernfrac` instead.
+
+**Examples**
+
+Numerical values of the first few Bernoulli numbers::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(15):
+ ... print("%s %s" % (n, bernoulli(n)))
+ ...
+ 0 1.0
+ 1 -0.5
+ 2 0.166666666666667
+ 3 0.0
+ 4 -0.0333333333333333
+ 5 0.0
+ 6 0.0238095238095238
+ 7 0.0
+ 8 -0.0333333333333333
+ 9 0.0
+ 10 0.0757575757575758
+ 11 0.0
+ 12 -0.253113553113553
+ 13 0.0
+ 14 1.16666666666667
+
+Bernoulli numbers can be approximated with arbitrary precision::
+
+ >>> mp.dps = 50
+ >>> bernoulli(100)
+ -2.8382249570693706959264156336481764738284680928013e+78
+
+Arbitrarily large `n` are supported::
+
+ >>> mp.dps = 15
+ >>> bernoulli(10**20 + 2)
+ 3.09136296657021e+1876752564973863312327
+
+The Bernoulli numbers are related to the Riemann zeta function
+at integer arguments::
+
+ >>> -bernoulli(8) * (2*pi)**8 / (2*fac(8))
+ 1.00407735619794
+ >>> zeta(8)
+ 1.00407735619794
+
+**Algorithm**
+
+For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence
+formula due to Ramanujan. All results in this range are cached,
+so sequential computation of small Bernoulli numbers is
+guaranteed to be fast.
+
+For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
+function.
+"""
+
+stieltjes = r"""
+For a nonnegative integer `n`, ``stieltjes(n)`` computes the
+`n`-th Stieltjes constant `\gamma_n`, defined as the
+`n`-th coefficient in the Laurent series expansion of the
+Riemann zeta function around the pole at `s = 1`. That is,
+we have:
+
+.. math ::
+
+ \zeta(s) = \frac{1}{s-1} + \sum_{n=0}^{\infty}
+ \frac{(-1)^n}{n!} \gamma_n (s-1)^n
+
+More generally, ``stieltjes(n, a)`` gives the corresponding
+coefficient `\gamma_n(a)` for the Hurwitz zeta function
+`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
+
+**Examples**
+
+The zeroth Stieltjes constant is just Euler's constant `\gamma`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> stieltjes(0)
+ 0.577215664901533
+
+Some more values are::
+
+ >>> stieltjes(1)
+ -0.0728158454836767
+ >>> stieltjes(10)
+ 0.000205332814909065
+ >>> stieltjes(30)
+ 0.00355772885557316
+ >>> stieltjes(1000)
+ -1.57095384420474e+486
+ >>> stieltjes(2000)
+ 2.680424678918e+1109
+ >>> stieltjes(1, 2.5)
+ -0.23747539175716
+
+An alternative way to compute `\gamma_1`::
+
+ >>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1)
+ -0.0728158454836767
+
+:func:`~mpmath.stieltjes` supports arbitrary precision evaluation::
+
+ >>> mp.dps = 50
+ >>> stieltjes(2)
+ -0.0096903631928723184845303860352125293590658061013408
+
+**Algorithm**
+
+:func:`~mpmath.stieltjes` numerically evaluates the integral in
+the following representation due to Ainsworth, Howell and
+Coffey [1], [2]:
+
+.. math ::
+
+ \gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
+ \frac{2}{a} \Re \int_0^{\infty}
+ \frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
+
+For some reference values with `a = 1`, see e.g. [4].
+
+**References**
+
+1. O. R. Ainsworth & L. W. Howell, "An integral representation of
+ the generalized Euler-Mascheroni constants", NASA Technical
+ Paper 2456 (1985),
+ http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
+
+2. M. W. Coffey, "The Stieltjes constants, their relation to the
+ `\eta_j` coefficients, and representation of the Hurwitz
+ zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
+
+3. http://mathworld.wolfram.com/StieltjesConstants.html
+
+4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
+
+"""
+
+gammaprod = r"""
+Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
+product / quotient of gamma functions:
+
+.. math ::
+
+ \frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
+ {\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
+
+Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
+the entire product as a limit and evaluates this limit properly if
+any of the numerator or denominator arguments are nonpositive
+integers such that poles of the gamma function are encountered.
+That is, :func:`~mpmath.gammaprod` evaluates
+
+.. math ::
+
+ \lim_{\epsilon \to 0}
+ \frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
+ \Gamma(a_p+\epsilon)}
+ {\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
+ \Gamma(b_q+\epsilon)}
+
+In particular:
+
+* If there are equally many poles in the numerator and the
+ denominator, the limit is a rational number times the remaining,
+ regular part of the product.
+
+* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
+ returns ``+inf``.
+
+* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
+ returns 0.
+
+**Examples**
+
+The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15
+ >>> gammaprod([], [0])
+ 0.0
+
+A limit::
+
+ >>> gammaprod([-4], [-3])
+ -0.25
+ >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
+ -0.25
+ >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
+ -0.25
+
+"""
+
+beta = r"""
+Computes the beta function,
+`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
+The beta function is also commonly defined by the integral
+representation
+
+.. math ::
+
+ B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
+
+**Examples**
+
+For integer and half-integer arguments where all three gamma
+functions are finite, the beta function becomes either a rational
+number or a rational multiple of `\pi`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> beta(5, 2)
+ 0.0333333333333333
+ >>> beta(1.5, 2)
+ 0.266666666666667
+ >>> 16*beta(2.5, 1.5)
+ 3.14159265358979
+
+Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole
+of the beta function is taken to result in ``+inf``::
+
+ >>> beta(-0.5, 0.5)
+ 0.0
+ >>> beta(-3, 3)
+ -0.333333333333333
+ >>> beta(-2, 3)
+ +inf
+ >>> beta(inf, 1)
+ 0.0
+ >>> beta(inf, 0)
+ nan
+
+:func:`~mpmath.beta` supports complex numbers and arbitrary precision
+evaluation::
+
+ >>> beta(1, 2+j)
+ (0.4 - 0.2j)
+ >>> mp.dps = 25
+ >>> beta(j,0.5)
+ (1.079424249270925780135675 - 1.410032405664160838288752j)
+ >>> mp.dps = 50
+ >>> beta(pi, e)
+ 0.037890298781212201348153837138927165984170287886464
+
+Various integrals can be computed by means of the
+beta function::
+
+ >>> mp.dps = 15
+ >>> quad(lambda t: t**2.5*(1-t)**2, [0, 1])
+ 0.0230880230880231
+ >>> beta(3.5, 3)
+ 0.0230880230880231
+ >>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
+ 0.319504062596158
+ >>> beta(2.5, 0.75)/2
+ 0.319504062596158
+
+"""
+
+betainc = r"""
+``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized
+incomplete beta function,
+
+.. math ::
+
+ I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
+
+When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
+beta function `B(a,b)`; see :func:`~mpmath.beta`.
+
+With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
+computes the regularized incomplete beta function
+`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
+beta distribution with parameters `a`, `b`.
+
+.. note ::
+
+ Implementations of the incomplete beta function in some other
+ software use a different argument order. For example, Mathematica uses the
+ reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's
+ three-argument incomplete beta integral (implicitly with `x1 = 0`), use
+ ``betainc(a,b,0,x2,regularized=True)``.
+
+**Examples**
+
+Verifying that :func:`~mpmath.betainc` computes the integral in the
+definition::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> x,y,a,b = 3, 4, 0, 6
+ >>> betainc(x, y, a, b)
+ -4010.4
+ >>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b])
+ -4010.4
+
+The arguments may be arbitrary complex numbers::
+
+ >>> betainc(0.75, 1-4j, 0, 2+3j)
+ (0.2241657956955709603655887 + 0.3619619242700451992411724j)
+
+With regularization::
+
+ >>> betainc(1, 2, 0, 0.25, regularized=True)
+ 0.4375
+ >>> betainc(pi, e, 0, 1, regularized=True) # Complete
+ 1.0
+
+The beta integral satisfies some simple argument transformation
+symmetries::
+
+ >>> mp.dps = 15
+ >>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4)
+ (56.0833333333333, 56.0833333333333, 56.0833333333333)
+
+The beta integral can often be evaluated analytically. For integer and
+rational arguments, the incomplete beta function typically reduces to a
+simple algebraic-logarithmic expression::
+
+ >>> mp.dps = 25
+ >>> identify(chop(betainc(0, 0, 3, 4)))
+ '-(log((9/8)))'
+ >>> identify(betainc(2, 3, 4, 5))
+ '(673/12)'
+ >>> identify(betainc(1.5, 1, 1, 2))
+ '((-12+sqrt(1152))/18)'
+
+"""
+
+binomial = r"""
+Computes the binomial coefficient
+
+.. math ::
+
+ {n \choose k} = \frac{n!}{k!(n-k)!}.
+
+The binomial coefficient gives the number of ways that `k` items
+can be chosen from a set of `n` items. More generally, the binomial
+coefficient is a well-defined function of arbitrary real or
+complex `n` and `k`, via the gamma function.
+
+**Examples**
+
+Generate Pascal's triangle::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(5):
+ ... nprint([binomial(n,k) for k in range(n+1)])
+ ...
+ [1.0]
+ [1.0, 1.0]
+ [1.0, 2.0, 1.0]
+ [1.0, 3.0, 3.0, 1.0]
+ [1.0, 4.0, 6.0, 4.0, 1.0]
+
+There is 1 way to select 0 items from the empty set, and 0 ways to
+select 1 item from the empty set::
+
+ >>> binomial(0, 0)
+ 1.0
+ >>> binomial(0, 1)
+ 0.0
+
+:func:`~mpmath.binomial` supports large arguments::
+
+ >>> binomial(10**20, 10**20-5)
+ 8.33333333333333e+97
+ >>> binomial(10**20, 10**10)
+ 2.60784095465201e+104342944813
+
+Nonintegral binomial coefficients find use in series
+expansions::
+
+ >>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
+ [1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
+ >>> nprint([binomial(0.25, k) for k in range(5)])
+ [1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
+
+An integral representation::
+
+ >>> n, k = 5, 3
+ >>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
+ >>> chop(quad(f, [-pi,pi])/(2*pi))
+ 10.0
+ >>> binomial(n,k)
+ 10.0
+
+"""
+
+rf = r"""
+Computes the rising factorial or Pochhammer symbol,
+
+.. math ::
+
+ x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
+
+where the rightmost expression is valid for nonintegral `n`.
+
+**Examples**
+
+For integral `n`, the rising factorial is a polynomial::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(5):
+ ... nprint(taylor(lambda x: rf(x,n), 0, n))
+ ...
+ [1.0]
+ [0.0, 1.0]
+ [0.0, 1.0, 1.0]
+ [0.0, 2.0, 3.0, 1.0]
+ [0.0, 6.0, 11.0, 6.0, 1.0]
+
+Evaluation is supported for arbitrary arguments::
+
+ >>> rf(2+3j, 5.5)
+ (-7202.03920483347 - 3777.58810701527j)
+"""
+
+ff = r"""
+Computes the falling factorial,
+
+.. math ::
+
+ (x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
+
+where the rightmost expression is valid for nonintegral `n`.
+
+**Examples**
+
+For integral `n`, the falling factorial is a polynomial::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(5):
+ ... nprint(taylor(lambda x: ff(x,n), 0, n))
+ ...
+ [1.0]
+ [0.0, 1.0]
+ [0.0, -1.0, 1.0]
+ [0.0, 2.0, -3.0, 1.0]
+ [0.0, -6.0, 11.0, -6.0, 1.0]
+
+Evaluation is supported for arbitrary arguments::
+
+ >>> ff(2+3j, 5.5)
+ (-720.41085888203 + 316.101124983878j)
+"""
+
+fac2 = r"""
+Computes the double factorial `x!!`, defined for integers
+`x > 0` by
+
+.. math ::
+
+ x!! = \begin{cases}
+ 1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
+ 2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
+ \end{cases}
+
+and more generally by [1]
+
+.. math ::
+
+ x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
+ \Gamma\left(\frac{x}{2}+1\right).
+
+**Examples**
+
+The integer sequence of double factorials begins::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> nprint([fac2(n) for n in range(10)])
+ [1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
+
+For large `x`, double factorials follow a Stirling-like asymptotic
+approximation::
+
+ >>> x = mpf(10000)
+ >>> fac2(x)
+ 5.97272691416282e+17830
+ >>> sqrt(pi)*x**((x+1)/2)*exp(-x/2)
+ 5.97262736954392e+17830
+
+The recurrence formula `x!! = x (x-2)!!` can be reversed to
+define the double factorial of negative odd integers (but
+not negative even integers)::
+
+ >>> fac2(-1), fac2(-3), fac2(-5), fac2(-7)
+ (1.0, -1.0, 0.333333333333333, -0.0666666666666667)
+ >>> fac2(-2)
+ Traceback (most recent call last):
+ ...
+ ValueError: gamma function pole
+
+With the exception of the poles at negative even integers,
+:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments.
+The recurrence formula is valid generally::
+
+ >>> fac2(pi+2j)
+ (-1.3697207890154e-12 + 3.93665300979176e-12j)
+ >>> (pi+2j)*fac2(pi-2+2j)
+ (-1.3697207890154e-12 + 3.93665300979176e-12j)
+
+Double factorials should not be confused with nested factorials,
+which are immensely larger::
+
+ >>> fac(fac(20))
+ 5.13805976125208e+43675043585825292774
+ >>> fac2(20)
+ 3715891200.0
+
+Double factorials appear, among other things, in series expansions
+of Gaussian functions and the error function. Infinite series
+include::
+
+ >>> nsum(lambda k: 1/fac2(k), [0, inf])
+ 3.05940740534258
+ >>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
+ 3.05940740534258
+ >>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
+ 4.06015693855741
+ >>> e * erf(1) * sqrt(pi)
+ 4.06015693855741
+
+A beautiful Ramanujan sum::
+
+ >>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
+ 0.90917279454693
+ >>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
+ 0.90917279454693
+
+**References**
+
+1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
+
+2. http://mathworld.wolfram.com/DoubleFactorial.html
+
+"""
+
+hyper = r"""
+Evaluates the generalized hypergeometric function
+
+.. math ::
+
+ \,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
+ \sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
+ {(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
+
+where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`).
+
+The parameter lists ``a_s`` and ``b_s`` may contain integers,
+real numbers, complex numbers, as well as exact fractions given in
+the form of tuples `(p, q)`. :func:`~mpmath.hyper` is optimized to handle
+integers and fractions more efficiently than arbitrary
+floating-point parameters (since rational parameters are by
+far the most common).
+
+**Examples**
+
+Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by
+comparison with :func:`~mpmath.nsum`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a,b,c,d = 2,3,4,5
+ >>> x = 0.25
+ >>> hyper([a,b],[c,d],x)
+ 1.078903941164934876086237
+ >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n)
+ >>> nsum(fn, [0, inf])
+ 1.078903941164934876086237
+
+The parameters can be any combination of integers, fractions,
+floats and complex numbers::
+
+ >>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3)
+ >>> x = 0.2j
+ >>> hyper([a,b],[c,d,e],x)
+ (0.9923571616434024810831887 - 0.005753848733883879742993122j)
+ >>> b, e = -0.5, mpf(2)/3
+ >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n)
+ >>> nsum(fn, [0, inf])
+ (0.9923571616434024810831887 - 0.005753848733883879742993122j)
+
+The `\,_0F_0` and `\,_1F_0` series are just elementary functions::
+
+ >>> a, z = sqrt(2), +pi
+ >>> hyper([],[],z)
+ 23.14069263277926900572909
+ >>> exp(z)
+ 23.14069263277926900572909
+ >>> hyper([a],[],z)
+ (-0.09069132879922920160334114 + 0.3283224323946162083579656j)
+ >>> (1-z)**(-a)
+ (-0.09069132879922920160334114 + 0.3283224323946162083579656j)
+
+If any `a_k` coefficient is a nonpositive integer, the series terminates
+into a finite polynomial::
+
+ >>> hyper([1,1,1,-3],[2,5],1)
+ 0.7904761904761904761904762
+ >>> identify(_)
+ '(83/105)'
+
+If any `b_k` is a nonpositive integer, the function is undefined (unless the
+series terminates before the division by zero occurs)::
+
+ >>> hyper([1,1,1,-3],[-2,5],1)
+ Traceback (most recent call last):
+ ...
+ ZeroDivisionError: pole in hypergeometric series
+ >>> hyper([1,1,1,-1],[-2,5],1)
+ 1.1
+
+Except for polynomial cases, the radius of convergence `R` of the hypergeometric
+series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or
+`R = 0` (if `p > q+1`).
+
+The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`,
+`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions
+can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2`
+are available to handle the most common cases (see their documentation),
+but functions of higher degree are also supported via :func:`~mpmath.hyper`::
+
+ >>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point
+ 1.141783505526870731311423
+ >>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole
+ +inf
+ >>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4
+ (1.543998916527972259717257 - 0.5876309929580408028816365j)
+ >>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5
+ (0.9996565821853579063502466 + 0.0129721075905630604445669j)
+
+Near `z = 1` with noninteger parameters::
+
+ >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1)
+ 2.219433352235586121250027
+ >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1)
+ +inf
+ >>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))()
+ >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1)
+ 2923978034.412973409330956
+
+Please note that, as currently implemented, evaluation of `\,_pF_{p-1}`
+with `p \ge 3` may be slow or inaccurate when `|z-1|` is small,
+for some parameter values.
+
+Evaluation may be aborted if convergence appears to be too slow.
+The optional ``maxterms`` (limiting the number of series terms) and ``maxprec``
+(limiting the internal precision) keyword arguments can be used
+to control evaluation::
+
+ >>> hyper([1,2,3], [4,5,6], 10000) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([1,2,3], [4,5,6], 10000, maxterms=10**6)
+ 7.622806053177969474396918e+4310
+
+Additional options include ``force_series`` (which forces direct use of
+a hypergeometric series even if another evaluation method might work better)
+and ``asymp_tol`` which controls the target tolerance for using
+asymptotic series.
+
+When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
+series. For `\,_2F_0` the Borel sum has an analytic solution and can be
+computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the function
+is evaluated first by attempting to sum it directly as an asymptotic
+series (this only works for tiny `|z|`), and then by evaluating the Borel
+regularized sum using numerical integration. Except for
+special parameter combinations, this can be extremely slow::
+
+ >>> hyper([1,1], [], 0.5) # regularization of 2F0
+ (1.340965419580146562086448 + 0.8503366631752726568782447j)
+ >>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1
+ (1.108287213689475145830699 + 0.5327107430640678181200491j)
+
+With the following magnitude of argument, the asymptotic series for `\,_3F_1`
+gives only a few digits. Using Borel summation, ``hyper`` can produce
+a value with full accuracy::
+
+ >>> mp.dps = 15
+ >>> hyper([2,0.5,4], [5.25], '0.08', force_series=True) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4)
+ 1.0725535790737
+ >>> hyper([2,0.5,4], [5.25], '0.08')
+ (1.07269542893559 + 5.54668863216891e-5j)
+ >>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4)
+ 0.946344925484879
+ >>> hyper([2,0.5,4], [5.25], '-0.08')
+ 0.946312503737771
+ >>> mp.dps = 25
+ >>> hyper([2,0.5,4], [5.25], '-0.08')
+ 0.9463125037377662296700858
+
+Note that with the positive `z` value, there is a complex part in the
+correct result, which falls below the tolerance of the asymptotic series.
+
+By default, a parameter that appears in both ``a_s`` and ``b_s`` will be removed
+unless it is a nonpositive integer. This generally speeds up evaluation
+by producing a hypergeometric function of lower order.
+This optimization can be disabled by passing ``eliminate=False``.
+
+ >>> hyper([1,2,3], [4,5,3], 10000)
+ 1.268943190440206905892212e+4321
+ >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False, maxterms=10**6)
+ 1.268943190440206905892212e+4321
+
+If a nonpositive integer `-n` appears in both ``a_s`` and ``b_s``, this parameter
+cannot be unambiguously removed since it creates a term 0 / 0.
+In this case the hypergeometric series is understood to terminate before
+the division by zero occurs. This convention is consistent with Mathematica.
+An alternative convention of eliminating the parameters can be toggled
+with ``eliminate_all=True``::
+
+ >>> hyper([2,-1], [-1], 3)
+ 7.0
+ >>> hyper([2,-1], [-1], 3, eliminate_all=True)
+ 0.25
+ >>> hyper([2], [], 3)
+ 0.25
+
+"""
+
+hypercomb = r"""
+Computes a weighted combination of hypergeometric functions
+
+.. math ::
+
+ \sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}}
+ \frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r}
+ \Gamma(\beta_{r,k})}
+ \,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1},
+ \ldots, b_{r,q}; z_r)\right].
+
+Typically the parameters are linear combinations of a small set of base
+parameters; :func:`~mpmath.hypercomb` permits computing a correct value in
+the case that some of the `\alpha`, `\beta`, `b` turn out to be
+nonpositive integers, or if division by zero occurs for some `w^c`,
+assuming that there are opposing singularities that cancel out.
+The limit is computed by evaluating the function with the base
+parameters perturbed, at a higher working precision.
+
+The first argument should be a function that takes the perturbable
+base parameters ``params`` as input and returns `N` tuples
+``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``,
+gamma factors ``alpha``, ``beta``, and hypergeometric coefficients
+``a``, ``b`` each should be lists of numbers, and ``z`` should be a single
+number.
+
+**Examples**
+
+The following evaluates
+
+.. math ::
+
+ (a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1)
+
+with `a=1, z=3`. There is a zero factor, two gamma function poles, and
+the 1F1 function is singular; all singularities cancel out to give a finite
+value::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1])
+ -180.769832308689
+ >>> -9*exp(3)
+ -180.769832308689
+
+"""
+
+hyp0f1 = r"""
+Gives the hypergeometric function `\,_0F_1`, sometimes known as the
+confluent limit function, defined as
+
+.. math ::
+
+ \,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}.
+
+This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`,
+and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`).
+
+``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for
+:func:`~mpmath.hyper` for more information.
+
+**Examples**
+
+Evaluation for arbitrary arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyp0f1(2, 0.25)
+ 1.130318207984970054415392
+ >>> hyp0f1((1,2), 1234567)
+ 6.27287187546220705604627e+964
+ >>> hyp0f1(3+4j, 1000000j)
+ (3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j)
+
+Evaluation is supported for arbitrarily large values of `z`,
+using asymptotic expansions::
+
+ >>> hyp0f1(1, 10**50)
+ 2.131705322874965310390701e+8685889638065036553022565
+ >>> hyp0f1(1, -10**50)
+ 1.115945364792025420300208e-13
+
+Verifying the differential equation::
+
+ >>> a = 2.5
+ >>> f = lambda z: hyp0f1(a,z)
+ >>> for z in [0, 10, 3+4j]:
+ ... chop(z*diff(f,z,2) + a*diff(f,z) - f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+
+"""
+
+hyp1f1 = r"""
+Gives the confluent hypergeometric function of the first kind,
+
+.. math ::
+
+ \,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!},
+
+also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This
+function gives one solution to the confluent (Kummer's) differential equation
+
+.. math ::
+
+ z f''(z) + (b-z) f'(z) - af(z) = 0.
+
+A second solution is given by the `U` function; see :func:`~mpmath.hyperu`.
+Solutions are also given in an alternate form by the Whittaker
+functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`).
+
+``hyp1f1(a,b,z)`` is equivalent
+to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more
+information.
+
+**Examples**
+
+Evaluation for real and complex values of the argument `z`, with
+fixed parameters `a = 2, b = -1/3`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyp1f1(2, (-1,3), 3.25)
+ -2815.956856924817275640248
+ >>> hyp1f1(2, (-1,3), -3.25)
+ -1.145036502407444445553107
+ >>> hyp1f1(2, (-1,3), 1000)
+ -8.021799872770764149793693e+441
+ >>> hyp1f1(2, (-1,3), -1000)
+ 0.000003131987633006813594535331
+ >>> hyp1f1(2, (-1,3), 100+100j)
+ (-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
+
+Parameters may be complex::
+
+ >>> hyp1f1(2+3j, -1+j, 10j)
+ (261.8977905181045142673351 + 160.8930312845682213562172j)
+
+Arbitrarily large values of `z` are supported::
+
+ >>> hyp1f1(3, 4, 10**20)
+ 3.890569218254486878220752e+43429448190325182745
+ >>> hyp1f1(3, 4, -10**20)
+ 6.0e-60
+ >>> hyp1f1(3, 4, 10**20*j)
+ (-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
+
+Verifying the differential equation::
+
+ >>> a, b = 1.5, 2
+ >>> f = lambda z: hyp1f1(a,b,z)
+ >>> for z in [0, -10, 3, 3+4j]:
+ ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+
+An integral representation::
+
+ >>> a, b = 1.5, 3
+ >>> z = 1.5
+ >>> hyp1f1(a,b,z)
+ 2.269381460919952778587441
+ >>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1)
+ >>> gammaprod([b],[a,b-a])*quad(g, [0,1])
+ 2.269381460919952778587441
+
+
+"""
+
+hyp1f2 = r"""
+Gives the hypergeometric function `\,_1F_2(a_1;b_1,b_2; z)`.
+The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to
+``hyper([a1],[b1,b2],z)``.
+
+Evaluation works for complex and arbitrarily large arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a, b, c = 1.5, (-1,3), 2.25
+ >>> hyp1f2(a, b, c, 10**20)
+ -1.159388148811981535941434e+8685889639
+ >>> hyp1f2(a, b, c, -10**20)
+ -12.60262607892655945795907
+ >>> hyp1f2(a, b, c, 10**20*j)
+ (4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
+ >>> hyp1f2(2+3j, -2j, 0.5j, 10-20j)
+ (135881.9905586966432662004 - 86681.95885418079535738828j)
+
+"""
+
+hyp2f2 = r"""
+Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`.
+The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to
+``hyper([a1,a2],[b1,b2],z)``.
+
+Evaluation works for complex and arbitrarily large arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a, b, c, d = 1.5, (-1,3), 2.25, 4
+ >>> hyp2f2(a, b, c, d, 10**20)
+ -5.275758229007902299823821e+43429448190325182663
+ >>> hyp2f2(a, b, c, d, -10**20)
+ 2561445.079983207701073448
+ >>> hyp2f2(a, b, c, d, 10**20*j)
+ (2218276.509664121194836667 - 1280722.539991603850462856j)
+ >>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j)
+ (80500.68321405666957342788 - 20346.82752982813540993502j)
+
+"""
+
+hyp2f3 = r"""
+Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`.
+The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to
+``hyper([a1,a2],[b1,b2,b3],z)``.
+
+Evaluation works for arbitrarily large arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5)
+ >>> hyp2f3(a1,a2,b1,b2,b3,10**20)
+ -4.169178177065714963568963e+8685889590
+ >>> hyp2f3(a1,a2,b1,b2,b3,-10**20)
+ 7064472.587757755088178629
+ >>> hyp2f3(a1,a2,b1,b2,b3,10**20*j)
+ (-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j)
+ >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j)
+ (-2280.938956687033150740228 + 13620.97336609573659199632j)
+ >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j)
+ (4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j)
+
+"""
+
+hyp2f1 = r"""
+Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as
+*the* hypergeometric function), defined for `|z| < 1` as
+
+.. math ::
+
+ \,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty}
+ \frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}.
+
+and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)`
+when necessary.
+
+Special cases of this function include many of the orthogonal polynomials as
+well as the incomplete beta function and other functions. Properties of the
+Gauss hypergeometric function are documented comprehensively in many references,
+for example Abramowitz & Stegun, section 15.
+
+The implementation supports the analytic continuation as well as evaluation
+close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)``
+is equivalent to ``hyper([a,b],[c],z)``.
+
+**Examples**
+
+Evaluation with `z` inside, outside and on the unit circle, for
+fixed parameters::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyp2f1(2, (1,2), 4, 0.75)
+ 1.303703703703703703703704
+ >>> hyp2f1(2, (1,2), 4, -1.75)
+ 0.7431290566046919177853916
+ >>> hyp2f1(2, (1,2), 4, 1.75)
+ (1.418075801749271137026239 - 1.114976146679907015775102j)
+ >>> hyp2f1(2, (1,2), 4, 1)
+ 1.6
+ >>> hyp2f1(2, (1,2), 4, -1)
+ 0.8235498012182875315037882
+ >>> hyp2f1(2, (1,2), 4, j)
+ (0.9144026291433065674259078 + 0.2050415770437884900574923j)
+ >>> hyp2f1(2, (1,2), 4, 2+j)
+ (0.9274013540258103029011549 + 0.7455257875808100868984496j)
+ >>> hyp2f1(2, (1,2), 4, 0.25j)
+ (0.9931169055799728251931672 + 0.06154836525312066938147793j)
+
+Evaluation with complex parameter values::
+
+ >>> hyp2f1(1+j, 0.75, 10j, 1+5j)
+ (0.8834833319713479923389638 + 0.7053886880648105068343509j)
+
+Evaluation with `z = 1`::
+
+ >>> hyp2f1(-2.5, 3.5, 1.5, 1)
+ 0.0
+ >>> hyp2f1(-2.5, 3, 4, 1)
+ 0.06926406926406926406926407
+ >>> hyp2f1(2, 3, 4, 1)
+ +inf
+
+Evaluation for huge arguments::
+
+ >>> hyp2f1((-1,3), 1.75, 4, '1e100')
+ (7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j)
+ >>> hyp2f1((-1,3), 1.75, 4, '1e1000000')
+ (7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j)
+ >>> hyp2f1((-1,3), 1.75, 4, '1e1000000j')
+ (1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j)
+
+An integral representation::
+
+ >>> a,b,c,z = -0.5, 1, 2.5, 0.25
+ >>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a)
+ >>> gammaprod([c],[b,c-b]) * quad(g, [0,1])
+ 0.9480458814362824478852618
+ >>> hyp2f1(a,b,c,z)
+ 0.9480458814362824478852618
+
+Verifying the hypergeometric differential equation::
+
+ >>> f = lambda z: hyp2f1(a,b,c,z)
+ >>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z))
+ 0.0
+
+"""
+
+hyp3f2 = r"""
+Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1`
+as
+
+.. math ::
+
+ \,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty}
+ \frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}.
+
+and for `|z| \ge 1` by analytic continuation. The analytic structure of this
+function is similar to that of `\,_2F_1`, generally with a singularity at
+`z = 1` and a branch cut on `(1, \infty)`.
+
+Evaluation is supported inside, on, and outside
+the circle of convergence `|z| = 1`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyp3f2(1,2,3,4,5,0.25)
+ 1.083533123380934241548707
+ >>> hyp3f2(1,2+2j,3,4,5,-10+10j)
+ (0.1574651066006004632914361 - 0.03194209021885226400892963j)
+ >>> hyp3f2(1,2,3,4,5,-10)
+ 0.3071141169208772603266489
+ >>> hyp3f2(1,2,3,4,5,10)
+ (-0.4857045320523947050581423 - 0.5988311440454888436888028j)
+ >>> hyp3f2(0.25,1,1,2,1.5,1)
+ 1.157370995096772047567631
+ >>> (8-pi-2*ln2)/3
+ 1.157370995096772047567631
+ >>> hyp3f2(1+j,0.5j,2,1,-2j,-1)
+ (1.74518490615029486475959 + 0.1454701525056682297614029j)
+ >>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j))
+ (0.9829816481834277511138055 - 0.4059040020276937085081127j)
+ >>> hyp3f2(-3,2,1,-5,4,1)
+ 1.41
+ >>> hyp3f2(-3,2,1,-5,4,2)
+ 2.12
+
+Evaluation very close to the unit circle::
+
+ >>> hyp3f2(1,2,3,4,5,'1.0001')
+ (1.564877796743282766872279 - 3.76821518787438186031973e-11j)
+ >>> hyp3f2(1,2,3,4,5,'1+0.0001j')
+ (1.564747153061671573212831 + 0.0001305757570366084557648482j)
+ >>> hyp3f2(1,2,3,4,5,'0.9999')
+ 1.564616644881686134983664
+ >>> hyp3f2(1,2,3,4,5,'-0.9999')
+ 0.7823896253461678060196207
+
+.. note ::
+
+ Evaluation for `|z-1|` small can currently be inaccurate or slow
+ for some parameter combinations.
+
+For various parameter combinations, `\,_3F_2` admits representation in terms
+of hypergeometric functions of lower degree, or in terms of
+simpler functions::
+
+ >>> for a, b, z in [(1,2,-1), (2,0.5,1)]:
+ ... hyp2f1(a,b,a+b+0.5,z)**2
+ ... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z)
+ ...
+ 0.4246104461966439006086308
+ 0.4246104461966439006086308
+ 7.111111111111111111111111
+ 7.111111111111111111111111
+
+ >>> z = 2+3j
+ >>> hyp3f2(0.5,1,1.5,2,2,z)
+ (0.7621440939243342419729144 + 0.4249117735058037649915723j)
+ >>> 4*(pi-2*ellipe(z))/(pi*z)
+ (0.7621440939243342419729144 + 0.4249117735058037649915723j)
+
+"""
+
+hyperu = r"""
+Gives the Tricomi confluent hypergeometric function `U`, also known as
+the Kummer or confluent hypergeometric function of the second kind. This
+function gives a second linearly independent solution to the confluent
+hypergeometric differential equation (the first is provided by `\,_1F_1` --
+see :func:`~mpmath.hyp1f1`).
+
+**Examples**
+
+Evaluation for arbitrary complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyperu(2,3,4)
+ 0.0625
+ >>> hyperu(0.25, 5, 1000)
+ 0.1779949416140579573763523
+ >>> hyperu(0.25, 5, -1000)
+ (0.1256256609322773150118907 - 0.1256256609322773150118907j)
+
+The `U` function may be singular at `z = 0`::
+
+ >>> hyperu(1.5, 2, 0)
+ +inf
+ >>> hyperu(1.5, -2, 0)
+ 0.1719434921288400112603671
+
+Verifying the differential equation::
+
+ >>> a, b = 1.5, 2
+ >>> f = lambda z: hyperu(a,b,z)
+ >>> for z in [-10, 3, 3+4j]:
+ ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+
+An integral representation::
+
+ >>> a,b,z = 2, 3.5, 4.25
+ >>> hyperu(a,b,z)
+ 0.06674960718150520648014567
+ >>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a)
+ 0.06674960718150520648014567
+
+
+[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
+"""
+
+hyp2f0 = r"""
+Gives the hypergeometric function `\,_2F_0`, defined formally by the
+series
+
+.. math ::
+
+ \,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}.
+
+This series usually does not converge. For small enough `z`, it can be viewed
+as an asymptotic series that may be summed directly with an appropriate
+truncation. When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum,
+or equivalently, it uses a representation in terms of the
+hypergeometric U function [1]. The series also converges when either `a` or `b`
+is a nonpositive integer, as it then terminates into a polynomial
+after `-a` or `-b` terms.
+
+**Examples**
+
+Evaluation is supported for arbitrary complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hyp2f0((2,3), 1.25, -100)
+ 0.07095851870980052763312791
+ >>> hyp2f0((2,3), 1.25, 100)
+ (-0.03254379032170590665041131 + 0.07269254613282301012735797j)
+ >>> hyp2f0(-0.75, 1-j, 4j)
+ (-0.3579987031082732264862155 - 3.052951783922142735255881j)
+
+Even with real arguments, the regularized value of 2F0 is often complex-valued,
+but the imaginary part decreases exponentially as `z \to 0`. In the following
+example, the first call uses complex evaluation while the second has a small
+enough `z` to evaluate using the direct series and thus the returned value
+is strictly real (this should be taken to indicate that the imaginary
+part is less than ``eps``)::
+
+ >>> mp.dps = 15
+ >>> hyp2f0(1.5, 0.5, 0.05)
+ (1.04166637647907 + 8.34584913683906e-8j)
+ >>> hyp2f0(1.5, 0.5, 0.0005)
+ 1.00037535207621
+
+The imaginary part can be retrieved by increasing the working precision::
+
+ >>> mp.dps = 80
+ >>> nprint(hyp2f0(1.5, 0.5, 0.009).imag)
+ 1.23828e-46
+
+In the polynomial case (the series terminating), 2F0 can evaluate exactly::
+
+ >>> mp.dps = 15
+ >>> hyp2f0(-6,-6,2)
+ 291793.0
+ >>> identify(hyp2f0(-2,1,0.25))
+ '(5/8)'
+
+The coefficients of the polynomials can be recovered using Taylor expansion::
+
+ >>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10))
+ [1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ >>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10))
+ [1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+
+
+[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm
+"""
+
+
+gammainc = r"""
+``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete
+gamma function with integration limits `[a, b]`:
+
+.. math ::
+
+ \Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt
+
+The generalized incomplete gamma function reduces to the
+following special cases when one or both endpoints are fixed:
+
+* `\Gamma(z,0,\infty)` is the standard ("complete")
+ gamma function, `\Gamma(z)` (available directly
+ as the mpmath function :func:`~mpmath.gamma`)
+* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma
+ function, `\Gamma(z,a)`
+* `\Gamma(z,0,b)` is the "lower" incomplete gamma
+ function, `\gamma(z,b)`.
+
+Of course, we have
+`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)`
+for all `z` and `x`.
+
+Note however that some authors reverse the order of the
+arguments when defining the lower and upper incomplete
+gamma function, so one should be careful to get the correct
+definition.
+
+If also given the keyword argument ``regularized=True``,
+:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma
+function
+
+.. math ::
+
+ P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}.
+
+**Examples**
+
+We can compare with numerical quadrature to verify that
+:func:`~mpmath.gammainc` computes the integral in the definition::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> gammainc(2+3j, 4, 10)
+ (0.00977212668627705160602312 - 0.0770637306312989892451977j)
+ >>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10])
+ (0.00977212668627705160602312 - 0.0770637306312989892451977j)
+
+Argument symmetries follow directly from the integral definition::
+
+ >>> gammainc(3, 4, 5) + gammainc(3, 5, 4)
+ 0.0
+ >>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4)
+ 1.523793388892911312363331
+ 1.523793388892911312363331
+ >>> findroot(lambda z: gammainc(2,z,3), 1)
+ 3.0
+
+Evaluation for arbitrarily large arguments::
+
+ >>> gammainc(10, 100)
+ 4.083660630910611272288592e-26
+ >>> gammainc(10, 10000000000000000)
+ 5.290402449901174752972486e-4342944819032375
+ >>> gammainc(3+4j, 1000000+1000000j)
+ (-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j)
+
+Evaluation of a generalized incomplete gamma function automatically chooses
+the representation that gives a more accurate result, depending on which
+parameter is larger::
+
+ >>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad
+ 0.0
+ >>> gammainc(10000000, 2, 3) # Good
+ 1.755146243738946045873491e+4771204
+ >>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad
+ 0.0
+ >>> gammainc(2, 100000000, 100000001) # Good
+ 4.078258353474186729184421e-43429441
+
+The incomplete gamma functions satisfy simple recurrence
+relations::
+
+ >>> mp.dps = 25
+ >>> z, a = mpf(3.5), mpf(2)
+ >>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a)
+ 10.60130296933533459267329
+ 10.60130296933533459267329
+ >>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a)
+ 1.030425427232114336470932
+ 1.030425427232114336470932
+
+Evaluation at integers and poles::
+
+ >>> gammainc(-3, -4, -5)
+ (-0.2214577048967798566234192 + 0.0j)
+ >>> gammainc(-3, 0, 5)
+ +inf
+
+If `z` is an integer, the recurrence reduces the incomplete gamma
+function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and
+`Q` are polynomials::
+
+ >>> gammainc(1, 2); exp(-2)
+ 0.1353352832366126918939995
+ 0.1353352832366126918939995
+ >>> mp.dps = 50
+ >>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)'])
+ '(326*exp(-1) + (-872)*exp(-2))'
+
+The incomplete gamma functions reduce to functions such as
+the exponential integral Ei and the error function for special
+arguments::
+
+ >>> mp.dps = 25
+ >>> gammainc(0, 4); -ei(-4)
+ 0.00377935240984890647887486
+ 0.00377935240984890647887486
+ >>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2))
+ 1.691806732945198336509541
+ 1.691806732945198336509541
+
+"""
+
+erf = r"""
+Computes the error function, `\mathrm{erf}(x)`. The error
+function is the normalized antiderivative of the Gaussian function
+`\exp(-t^2)`. More precisely,
+
+.. math::
+
+ \mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt
+
+**Basic examples**
+
+Simple values and limits include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> erf(0)
+ 0.0
+ >>> erf(1)
+ 0.842700792949715
+ >>> erf(-1)
+ -0.842700792949715
+ >>> erf(inf)
+ 1.0
+ >>> erf(-inf)
+ -1.0
+
+For large real `x`, `\mathrm{erf}(x)` approaches 1 very
+rapidly::
+
+ >>> erf(3)
+ 0.999977909503001
+ >>> erf(5)
+ 0.999999999998463
+
+The error function is an odd function::
+
+ >>> nprint(chop(taylor(erf, 0, 5)))
+ [0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838]
+
+:func:`~mpmath.erf` implements arbitrary-precision evaluation and
+supports complex numbers::
+
+ >>> mp.dps = 50
+ >>> erf(0.5)
+ 0.52049987781304653768274665389196452873645157575796
+ >>> mp.dps = 25
+ >>> erf(1+j)
+ (1.316151281697947644880271 + 0.1904534692378346862841089j)
+
+Evaluation is supported for large arguments::
+
+ >>> mp.dps = 25
+ >>> erf('1e1000')
+ 1.0
+ >>> erf('-1e1000')
+ -1.0
+ >>> erf('1e-1000')
+ 1.128379167095512573896159e-1000
+ >>> erf('1e7j')
+ (0.0 + 8.593897639029319267398803e+43429448190317j)
+ >>> erf('1e7+1e7j')
+ (0.9999999858172446172631323 + 3.728805278735270407053139e-8j)
+
+**Related functions**
+
+See also :func:`~mpmath.erfc`, which is more accurate for large `x`,
+and :func:`~mpmath.erfi` which gives the antiderivative of
+`\exp(t^2)`.
+
+The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc`
+are also related to the error function.
+"""
+
+erfc = r"""
+Computes the complementary error function,
+`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
+This function avoids cancellation that occurs when naively
+computing the complementary error function as ``1-erf(x)``::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> 1 - erf(10)
+ 0.0
+ >>> erfc(10)
+ 2.08848758376254e-45
+
+:func:`~mpmath.erfc` works accurately even for ludicrously large
+arguments::
+
+ >>> erfc(10**10)
+ 4.3504398860243e-43429448190325182776
+
+Complex arguments are supported::
+
+ >>> erfc(500+50j)
+ (1.19739830969552e-107492 + 1.46072418957528e-107491j)
+
+"""
+
+
+erfi = r"""
+Computes the imaginary error function, `\mathrm{erfi}(x)`.
+The imaginary error function is defined in analogy with the
+error function, but with a positive sign in the integrand:
+
+.. math ::
+
+ \mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
+
+Whereas the error function rapidly converges to 1 as `x` grows,
+the imaginary error function rapidly diverges to infinity.
+The functions are related as
+`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
+numbers `x`.
+
+**Examples**
+
+Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> erfi(0)
+ 0.0
+ >>> erfi(1)
+ 1.65042575879754
+ >>> erfi(-1)
+ -1.65042575879754
+ >>> erfi(inf)
+ +inf
+ >>> erfi(-inf)
+ -inf
+
+Note the symmetry between erf and erfi::
+
+ >>> erfi(3j)
+ (0.0 + 0.999977909503001j)
+ >>> erf(3)
+ 0.999977909503001
+ >>> erf(1+2j)
+ (-0.536643565778565 - 5.04914370344703j)
+ >>> erfi(2+1j)
+ (-5.04914370344703 - 0.536643565778565j)
+
+Large arguments are supported::
+
+ >>> erfi(1000)
+ 1.71130938718796e+434291
+ >>> erfi(10**10)
+ 7.3167287567024e+43429448190325182754
+ >>> erfi(-10**10)
+ -7.3167287567024e+43429448190325182754
+ >>> erfi(1000-500j)
+ (2.49895233563961e+325717 + 2.6846779342253e+325717j)
+ >>> erfi(100000j)
+ (0.0 + 1.0j)
+ >>> erfi(-100000j)
+ (0.0 - 1.0j)
+
+
+"""
+
+erfinv = r"""
+Computes the inverse error function, satisfying
+
+.. math ::
+
+ \mathrm{erf}(\mathrm{erfinv}(x)) =
+ \mathrm{erfinv}(\mathrm{erf}(x)) = x.
+
+This function is defined only for `-1 \le x \le 1`.
+
+**Examples**
+
+Special values include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> erfinv(0)
+ 0.0
+ >>> erfinv(1)
+ +inf
+ >>> erfinv(-1)
+ -inf
+
+The domain is limited to the standard interval::
+
+ >>> erfinv(2)
+ Traceback (most recent call last):
+ ...
+ ValueError: erfinv(x) is defined only for -1 <= x <= 1
+
+It is simple to check that :func:`~mpmath.erfinv` computes inverse values of
+:func:`~mpmath.erf` as promised::
+
+ >>> erf(erfinv(0.75))
+ 0.75
+ >>> erf(erfinv(-0.995))
+ -0.995
+
+:func:`~mpmath.erfinv` supports arbitrary-precision evaluation::
+
+ >>> mp.dps = 50
+ >>> x = erf(2)
+ >>> x
+ 0.99532226501895273416206925636725292861089179704006
+ >>> erfinv(x)
+ 2.0
+
+A definite integral involving the inverse error function::
+
+ >>> mp.dps = 15
+ >>> quad(erfinv, [0, 1])
+ 0.564189583547756
+ >>> 1/sqrt(pi)
+ 0.564189583547756
+
+The inverse error function can be used to generate random numbers
+with a Gaussian distribution (although this is a relatively
+inefficient algorithm)::
+
+ >>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
+ [-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
+
+"""
+
+npdf = r"""
+``npdf(x, mu=0, sigma=1)`` evaluates the probability density
+function of a normal distribution with mean value `\mu`
+and variance `\sigma^2`.
+
+Elementary properties of the probability distribution can
+be verified using numerical integration::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> quad(npdf, [-inf, inf])
+ 1.0
+ >>> quad(lambda x: npdf(x, 3), [3, inf])
+ 0.5
+ >>> quad(lambda x: npdf(x, 3, 2), [3, inf])
+ 0.5
+
+See also :func:`~mpmath.ncdf`, which gives the cumulative
+distribution.
+"""
+
+ncdf = r"""
+``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
+function of a normal distribution with mean value `\mu`
+and variance `\sigma^2`.
+
+See also :func:`~mpmath.npdf`, which gives the probability density.
+
+Elementary properties include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> ncdf(pi, mu=pi)
+ 0.5
+ >>> ncdf(-inf)
+ 0.0
+ >>> ncdf(+inf)
+ 1.0
+
+The cumulative distribution is the integral of the density
+function having identical mu and sigma::
+
+ >>> mp.dps = 15
+ >>> diff(ncdf, 2)
+ 0.053990966513188
+ >>> npdf(2)
+ 0.053990966513188
+ >>> diff(lambda x: ncdf(x, 1, 0.5), 0)
+ 0.107981933026376
+ >>> npdf(0, 1, 0.5)
+ 0.107981933026376
+"""
+
+expint = r"""
+:func:`~mpmath.expint(n,z)` gives the generalized exponential integral
+or En-function,
+
+.. math ::
+
+ \mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt,
+
+where `n` and `z` may both be complex numbers. The case with `n = 1` is
+also given by :func:`~mpmath.e1`.
+
+**Examples**
+
+Evaluation at real and complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> expint(1, 6.25)
+ 0.0002704758872637179088496194
+ >>> expint(-3, 2+3j)
+ (0.00299658467335472929656159 + 0.06100816202125885450319632j)
+ >>> expint(2+3j, 4-5j)
+ (0.001803529474663565056945248 - 0.002235061547756185403349091j)
+
+At negative integer values of `n`, `E_n(z)` reduces to a
+rational-exponential function::
+
+ >>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\
+ ... exp(z)/z**(n+2)
+ >>> n = 3
+ >>> z = 1/pi
+ >>> expint(-n,z)
+ 584.2604820613019908668219
+ >>> f(n,z)
+ 584.2604820613019908668219
+ >>> n = 5
+ >>> expint(-n,z)
+ 115366.5762594725451811138
+ >>> f(n,z)
+ 115366.5762594725451811138
+"""
+
+e1 = r"""
+Computes the exponential integral `\mathrm{E}_1(z)`, given by
+
+.. math ::
+
+ \mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt.
+
+This is equivalent to :func:`~mpmath.expint` with `n = 1`.
+
+**Examples**
+
+Two ways to evaluate this function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> e1(6.25)
+ 0.0002704758872637179088496194
+ >>> expint(1,6.25)
+ 0.0002704758872637179088496194
+
+The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`)
+with negated argument, except for an imaginary branch cut term::
+
+ >>> e1(2.5)
+ 0.02491491787026973549562801
+ >>> -ei(-2.5)
+ 0.02491491787026973549562801
+ >>> e1(-2.5)
+ (-7.073765894578600711923552 - 3.141592653589793238462643j)
+ >>> -ei(2.5)
+ -7.073765894578600711923552
+
+"""
+
+ei = r"""
+Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
+The exponential integral is defined as
+
+.. math ::
+
+ \mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
+
+When the integration range includes `t = 0`, the exponential
+integral is interpreted as providing the Cauchy principal value.
+
+For real `x`, the Ei-function behaves roughly like
+`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
+
+The Ei-function is related to the more general family of exponential
+integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`.
+
+**Basic examples**
+
+Some basic values and limits are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> ei(0)
+ -inf
+ >>> ei(1)
+ 1.89511781635594
+ >>> ei(inf)
+ +inf
+ >>> ei(-inf)
+ 0.0
+
+For `x < 0`, the defining integral can be evaluated
+numerically as a reference::
+
+ >>> ei(-4)
+ -0.00377935240984891
+ >>> quad(lambda t: exp(t)/t, [-inf, -4])
+ -0.00377935240984891
+
+:func:`~mpmath.ei` supports complex arguments and arbitrary
+precision evaluation::
+
+ >>> mp.dps = 50
+ >>> ei(pi)
+ 10.928374389331410348638445906907535171566338835056
+ >>> mp.dps = 25
+ >>> ei(3+4j)
+ (-4.154091651642689822535359 + 4.294418620024357476985535j)
+
+**Related functions**
+
+The exponential integral is closely related to the logarithmic
+integral. See :func:`~mpmath.li` for additional information.
+
+The exponential integral is related to the hyperbolic
+and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`,
+:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary
+exponential function is related to the hyperbolic and
+trigonometric functions::
+
+ >>> mp.dps = 15
+ >>> ei(3)
+ 9.93383257062542
+ >>> chi(3) + shi(3)
+ 9.93383257062542
+ >>> chop(ci(3j) - j*si(3j) - pi*j/2)
+ 9.93383257062542
+
+Beware that logarithmic corrections, as in the last example
+above, are required to obtain the correct branch in general.
+For details, see [1].
+
+The exponential integral is also a special case of the
+hypergeometric function `\,_2F_2`::
+
+ >>> z = 0.6
+ >>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
+ 0.769881289937359
+ >>> ei(z)
+ 0.769881289937359
+
+**References**
+
+1. Relations between Ei and other functions:
+ http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
+
+2. Abramowitz & Stegun, section 5:
+ http://people.math.sfu.ca/~cbm/aands/page_228.htm
+
+3. Asymptotic expansion for Ei:
+ http://mathworld.wolfram.com/En-Function.html
+"""
+
+li = r"""
+Computes the logarithmic integral or li-function
+`\mathrm{li}(x)`, defined by
+
+.. math ::
+
+ \mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
+
+The logarithmic integral has a singularity at `x = 1`.
+
+Alternatively, ``li(x, offset=True)`` computes the offset
+logarithmic integral (used in number theory)
+
+.. math ::
+
+ \mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt.
+
+These two functions are related via the simple identity
+`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
+
+The logarithmic integral should also not be confused with
+the polylogarithm (also denoted by Li), which is implemented
+as :func:`~mpmath.polylog`.
+
+**Examples**
+
+Some basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 30; mp.pretty = True
+ >>> li(0)
+ 0.0
+ >>> li(1)
+ -inf
+ >>> li(1)
+ -inf
+ >>> li(2)
+ 1.04516378011749278484458888919
+ >>> findroot(li, 2)
+ 1.45136923488338105028396848589
+ >>> li(inf)
+ +inf
+ >>> li(2, offset=True)
+ 0.0
+ >>> li(1, offset=True)
+ -inf
+ >>> li(0, offset=True)
+ -1.04516378011749278484458888919
+ >>> li(10, offset=True)
+ 5.12043572466980515267839286347
+
+The logarithmic integral can be evaluated for arbitrary
+complex arguments::
+
+ >>> mp.dps = 20
+ >>> li(3+4j)
+ (3.1343755504645775265 + 2.6769247817778742392j)
+
+The logarithmic integral is related to the exponential integral::
+
+ >>> ei(log(3))
+ 2.1635885946671919729
+ >>> li(3)
+ 2.1635885946671919729
+
+The logarithmic integral grows like `O(x/\log(x))`::
+
+ >>> mp.dps = 15
+ >>> x = 10**100
+ >>> x/log(x)
+ 4.34294481903252e+97
+ >>> li(x)
+ 4.3619719871407e+97
+
+The prime number theorem states that the number of primes less
+than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
+`\mathrm{li}(x)`). For example, it is known that there are
+exactly 1,925,320,391,606,803,968,923 prime numbers less than
+`10^{23}` [1]. The logarithmic integral provides a very
+accurate estimate::
+
+ >>> li(10**23, offset=True)
+ 1.92532039161405e+21
+
+A definite integral is::
+
+ >>> quad(li, [0, 1])
+ -0.693147180559945
+ >>> -ln(2)
+ -0.693147180559945
+
+**References**
+
+1. http://mathworld.wolfram.com/PrimeCountingFunction.html
+
+2. http://mathworld.wolfram.com/LogarithmicIntegral.html
+
+"""
+
+ci = r"""
+Computes the cosine integral,
+
+.. math ::
+
+ \mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
+ = \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
+
+**Examples**
+
+Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> ci(0)
+ -inf
+ >>> ci(1)
+ 0.3374039229009681346626462
+ >>> ci(pi)
+ 0.07366791204642548599010096
+ >>> ci(inf)
+ 0.0
+ >>> ci(-inf)
+ (0.0 + 3.141592653589793238462643j)
+ >>> ci(2+3j)
+ (1.408292501520849518759125 - 2.983617742029605093121118j)
+
+The cosine integral behaves roughly like the sinc function
+(see :func:`~mpmath.sinc`) for large real `x`::
+
+ >>> ci(10**10)
+ -4.875060251748226537857298e-11
+ >>> sinc(10**10)
+ -4.875060250875106915277943e-11
+ >>> chop(limit(ci, inf))
+ 0.0
+
+It has infinitely many roots on the positive real axis::
+
+ >>> findroot(ci, 1)
+ 0.6165054856207162337971104
+ >>> findroot(ci, 2)
+ 3.384180422551186426397851
+
+Evaluation is supported for `z` anywhere in the complex plane::
+
+ >>> ci(10**6*(1+j))
+ (4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j)
+
+We can evaluate the defining integral as a reference::
+
+ >>> mp.dps = 15
+ >>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
+ -0.190029749656644
+ >>> ci(5)
+ -0.190029749656644
+
+Some infinite series can be evaluated using the
+cosine integral::
+
+ >>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
+ -0.239811742000565
+ >>> ci(1) - euler
+ -0.239811742000565
+
+"""
+
+si = r"""
+Computes the sine integral,
+
+.. math ::
+
+ \mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
+
+The sine integral is thus the antiderivative of the sinc
+function (see :func:`~mpmath.sinc`).
+
+**Examples**
+
+Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> si(0)
+ 0.0
+ >>> si(1)
+ 0.9460830703671830149413533
+ >>> si(-1)
+ -0.9460830703671830149413533
+ >>> si(pi)
+ 1.851937051982466170361053
+ >>> si(inf)
+ 1.570796326794896619231322
+ >>> si(-inf)
+ -1.570796326794896619231322
+ >>> si(2+3j)
+ (4.547513889562289219853204 + 1.399196580646054789459839j)
+
+The sine integral approaches `\pi/2` for large real `x`::
+
+ >>> si(10**10)
+ 1.570796326707584656968511
+ >>> pi/2
+ 1.570796326794896619231322
+
+Evaluation is supported for `z` anywhere in the complex plane::
+
+ >>> si(10**6*(1+j))
+ (-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j)
+
+We can evaluate the defining integral as a reference::
+
+ >>> mp.dps = 15
+ >>> quad(sinc, [0, 5])
+ 1.54993124494467
+ >>> si(5)
+ 1.54993124494467
+
+Some infinite series can be evaluated using the
+sine integral::
+
+ >>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf])
+ 0.946083070367183
+ >>> si(1)
+ 0.946083070367183
+
+"""
+
+chi = r"""
+Computes the hyperbolic cosine integral, defined
+in analogy with the cosine integral (see :func:`~mpmath.ci`) as
+
+.. math ::
+
+ \mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt
+ = \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt
+
+Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> chi(0)
+ -inf
+ >>> chi(1)
+ 0.8378669409802082408946786
+ >>> chi(inf)
+ +inf
+ >>> findroot(chi, 0.5)
+ 0.5238225713898644064509583
+ >>> chi(2+3j)
+ (-0.1683628683277204662429321 + 2.625115880451325002151688j)
+
+Evaluation is supported for `z` anywhere in the complex plane::
+
+ >>> chi(10**6*(1+j))
+ (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
+
+"""
+
+shi = r"""
+Computes the hyperbolic sine integral, defined
+in analogy with the sine integral (see :func:`~mpmath.si`) as
+
+.. math ::
+
+ \mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt.
+
+Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> shi(0)
+ 0.0
+ >>> shi(1)
+ 1.057250875375728514571842
+ >>> shi(-1)
+ -1.057250875375728514571842
+ >>> shi(inf)
+ +inf
+ >>> shi(2+3j)
+ (-0.1931890762719198291678095 + 2.645432555362369624818525j)
+
+Evaluation is supported for `z` anywhere in the complex plane::
+
+ >>> shi(10**6*(1+j))
+ (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
+
+"""
+
+fresnels = r"""
+Computes the Fresnel sine integral
+
+.. math ::
+
+ S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt
+
+Note that some sources define this function
+without the normalization factor `\pi/2`.
+
+**Examples**
+
+Some basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> fresnels(0)
+ 0.0
+ >>> fresnels(inf)
+ 0.5
+ >>> fresnels(-inf)
+ -0.5
+ >>> fresnels(1)
+ 0.4382591473903547660767567
+ >>> fresnels(1+2j)
+ (36.72546488399143842838788 + 15.58775110440458732748279j)
+
+Comparing with the definition::
+
+ >>> fresnels(3)
+ 0.4963129989673750360976123
+ >>> quad(lambda t: sin(pi*t**2/2), [0,3])
+ 0.4963129989673750360976123
+"""
+
+fresnelc = r"""
+Computes the Fresnel cosine integral
+
+.. math ::
+
+ C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt
+
+Note that some sources define this function
+without the normalization factor `\pi/2`.
+
+**Examples**
+
+Some basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> fresnelc(0)
+ 0.0
+ >>> fresnelc(inf)
+ 0.5
+ >>> fresnelc(-inf)
+ -0.5
+ >>> fresnelc(1)
+ 0.7798934003768228294742064
+ >>> fresnelc(1+2j)
+ (16.08787137412548041729489 - 36.22568799288165021578758j)
+
+Comparing with the definition::
+
+ >>> fresnelc(3)
+ 0.6057207892976856295561611
+ >>> quad(lambda t: cos(pi*t**2/2), [0,3])
+ 0.6057207892976856295561611
+"""
+
+airyai = r"""
+Computes the Airy function `\operatorname{Ai}(z)`, which is
+the solution of the Airy differential equation `f''(z) - z f(z) = 0`
+with initial conditions
+
+.. math ::
+
+ \operatorname{Ai}(0) =
+ \frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)}
+
+ \operatorname{Ai}'(0) =
+ -\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}.
+
+Other common ways of defining the Ai-function include
+integrals such as
+
+.. math ::
+
+ \operatorname{Ai}(x) = \frac{1}{\pi}
+ \int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt
+ \qquad x \in \mathbb{R}
+
+ \operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi}
+ \int_0^{\infty}
+ \exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt.
+
+The Ai-function is an entire function with a turning point,
+behaving roughly like a slowly decaying sine wave for `z < 0` and
+like a rapidly decreasing exponential for `z > 0`.
+A second solution of the Airy differential equation
+is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`).
+
+Optionally, with *derivative=alpha*, :func:`airyai` can compute the
+`\alpha`-th order fractional derivative with respect to `z`.
+For `\alpha = n = 1,2,3,\ldots` this gives the derivative
+`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
+this gives the `n`-fold iterated integral
+
+.. math ::
+
+ f_0(z) = \operatorname{Ai}(z)
+
+ f_n(z) = \int_0^z f_{n-1}(t) dt.
+
+The Ai-function has infinitely many zeros, all located along the
+negative half of the real axis. They can be computed with
+:func:`~mpmath.airyaizero`.
+
+**Plots**
+
+.. literalinclude :: /plots/ai.py
+.. image :: /plots/ai.png
+.. literalinclude :: /plots/ai_c.py
+.. image :: /plots/ai_c.png
+
+**Basic examples**
+
+Limits and values include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> airyai(0); 1/(power(3,'2/3')*gamma('2/3'))
+ 0.3550280538878172392600632
+ 0.3550280538878172392600632
+ >>> airyai(1)
+ 0.1352924163128814155241474
+ >>> airyai(-1)
+ 0.5355608832923521187995166
+ >>> airyai(inf); airyai(-inf)
+ 0.0
+ 0.0
+
+Evaluation is supported for large magnitudes of the argument::
+
+ >>> airyai(-100)
+ 0.1767533932395528780908311
+ >>> airyai(100)
+ 2.634482152088184489550553e-291
+ >>> airyai(50+50j)
+ (-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j)
+ >>> airyai(-50+50j)
+ (1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j)
+
+Huge arguments are also fine::
+
+ >>> airyai(10**10)
+ 1.162235978298741779953693e-289529654602171
+ >>> airyai(-10**10)
+ 0.0001736206448152818510510181
+ >>> w = airyai(10**10*(1+j))
+ >>> w.real
+ 5.711508683721355528322567e-186339621747698
+ >>> w.imag
+ 1.867245506962312577848166e-186339621747697
+
+The first root of the Ai-function is::
+
+ >>> findroot(airyai, -2)
+ -2.338107410459767038489197
+ >>> airyaizero(1)
+ -2.338107410459767038489197
+
+**Properties and relations**
+
+Verifying the Airy differential equation::
+
+ >>> for z in [-3.4, 0, 2.5, 1+2j]:
+ ... chop(airyai(z,2) - z*airyai(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+
+The first few terms of the Taylor series expansion around `z = 0`
+(every third term is zero)::
+
+ >>> nprint(taylor(airyai, 0, 5))
+ [0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0]
+
+The Airy functions satisfy the Wronskian relation
+`\operatorname{Ai}(z) \operatorname{Bi}'(z) -
+\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`::
+
+ >>> z = -0.5
+ >>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z)
+ 0.3183098861837906715377675
+ >>> 1/pi
+ 0.3183098861837906715377675
+
+The Airy functions can be expressed in terms of Bessel
+functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
+
+ >>> z = -3
+ >>> airyai(z)
+ -0.3788142936776580743472439
+ >>> y = 2*power(-z,'3/2')/3
+ >>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3
+ -0.3788142936776580743472439
+
+**Derivatives and integrals**
+
+Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`)::
+
+ >>> airyai(-3,1); diff(airyai,-3)
+ 0.3145837692165988136507873
+ 0.3145837692165988136507873
+ >>> airyai(-3,2); diff(airyai,-3,2)
+ 1.136442881032974223041732
+ 1.136442881032974223041732
+ >>> airyai(1000,1); diff(airyai,1000)
+ -2.943133917910336090459748e-9156
+ -2.943133917910336090459748e-9156
+
+Several derivatives at `z = 0`::
+
+ >>> airyai(0,0); airyai(0,1); airyai(0,2)
+ 0.3550280538878172392600632
+ -0.2588194037928067984051836
+ 0.0
+ >>> airyai(0,3); airyai(0,4); airyai(0,5)
+ 0.3550280538878172392600632
+ -0.5176388075856135968103671
+ 0.0
+ >>> airyai(0,15); airyai(0,16); airyai(0,17)
+ 1292.30211615165475090663
+ -3188.655054727379756351861
+ 0.0
+
+The integral of the Ai-function::
+
+ >>> airyai(3,-1); quad(airyai, [0,3])
+ 0.3299203760070217725002701
+ 0.3299203760070217725002701
+ >>> airyai(-10,-1); quad(airyai, [0,-10])
+ -0.765698403134212917425148
+ -0.765698403134212917425148
+
+Integrals of high or fractional order::
+
+ >>> airyai(-2,0.5); differint(airyai,-2,0.5,0)
+ (0.0 + 0.2453596101351438273844725j)
+ (0.0 + 0.2453596101351438273844725j)
+ >>> airyai(-2,-4); differint(airyai,-2,-4,0)
+ 0.2939176441636809580339365
+ 0.2939176441636809580339365
+ >>> airyai(0,-1); airyai(0,-2); airyai(0,-3)
+ 0.0
+ 0.0
+ 0.0
+
+Integrals of the Ai-function can be evaluated at limit points::
+
+ >>> airyai(-1000000,-1); airyai(-inf,-1)
+ -0.6666843728311539978751512
+ -0.6666666666666666666666667
+ >>> airyai(10,-1); airyai(+inf,-1)
+ 0.3333333332991690159427932
+ 0.3333333333333333333333333
+ >>> airyai(+inf,-2); airyai(+inf,-3)
+ +inf
+ +inf
+ >>> airyai(-1000000,-2); airyai(-inf,-2)
+ 666666.4078472650651209742
+ +inf
+ >>> airyai(-1000000,-3); airyai(-inf,-3)
+ -333333074513.7520264995733
+ -inf
+
+**References**
+
+1. [DLMF]_ Chapter 9: Airy and Related Functions
+2. [WolframFunctions]_ section: Bessel-Type Functions
+
+"""
+
+airybi = r"""
+Computes the Airy function `\operatorname{Bi}(z)`, which is
+the solution of the Airy differential equation `f''(z) - z f(z) = 0`
+with initial conditions
+
+.. math ::
+
+ \operatorname{Bi}(0) =
+ \frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)}
+
+ \operatorname{Bi}'(0) =
+ \frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}.
+
+Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function
+is oscillatory for `z < 0`, but it grows rather than decreases
+for `z > 0`.
+
+Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals
+and fractional derivatives can be computed with the *derivative*
+parameter.
+
+The Bi-function has infinitely many zeros along the negative
+half-axis, as well as complex zeros, which can all be computed
+with :func:`~mpmath.airybizero`.
+
+**Plots**
+
+.. literalinclude :: /plots/bi.py
+.. image :: /plots/bi.png
+.. literalinclude :: /plots/bi_c.py
+.. image :: /plots/bi_c.png
+
+**Basic examples**
+
+Limits and values include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> airybi(0); 1/(power(3,'1/6')*gamma('2/3'))
+ 0.6149266274460007351509224
+ 0.6149266274460007351509224
+ >>> airybi(1)
+ 1.207423594952871259436379
+ >>> airybi(-1)
+ 0.10399738949694461188869
+ >>> airybi(inf); airybi(-inf)
+ +inf
+ 0.0
+
+Evaluation is supported for large magnitudes of the argument::
+
+ >>> airybi(-100)
+ 0.02427388768016013160566747
+ >>> airybi(100)
+ 6.041223996670201399005265e+288
+ >>> airybi(50+50j)
+ (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
+ >>> airybi(-50+50j)
+ (-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j)
+
+Huge arguments::
+
+ >>> airybi(10**10)
+ 1.369385787943539818688433e+289529654602165
+ >>> airybi(-10**10)
+ 0.001775656141692932747610973
+ >>> w = airybi(10**10*(1+j))
+ >>> w.real
+ -6.559955931096196875845858e+186339621747689
+ >>> w.imag
+ -6.822462726981357180929024e+186339621747690
+
+The first real root of the Bi-function is::
+
+ >>> findroot(airybi, -1); airybizero(1)
+ -1.17371322270912792491998
+ -1.17371322270912792491998
+
+**Properties and relations**
+
+Verifying the Airy differential equation::
+
+ >>> for z in [-3.4, 0, 2.5, 1+2j]:
+ ... chop(airybi(z,2) - z*airybi(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+
+The first few terms of the Taylor series expansion around `z = 0`
+(every third term is zero)::
+
+ >>> nprint(taylor(airybi, 0, 5))
+ [0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0]
+
+The Airy functions can be expressed in terms of Bessel
+functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
+
+ >>> z = -3
+ >>> airybi(z)
+ -0.1982896263749265432206449
+ >>> p = 2*power(-z,'3/2')/3
+ >>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p))
+ -0.1982896263749265432206449
+
+**Derivatives and integrals**
+
+Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`)::
+
+ >>> airybi(-3,1); diff(airybi,-3)
+ -0.675611222685258537668032
+ -0.675611222685258537668032
+ >>> airybi(-3,2); diff(airybi,-3,2)
+ 0.5948688791247796296619346
+ 0.5948688791247796296619346
+ >>> airybi(1000,1); diff(airybi,1000)
+ 1.710055114624614989262335e+9156
+ 1.710055114624614989262335e+9156
+
+Several derivatives at `z = 0`::
+
+ >>> airybi(0,0); airybi(0,1); airybi(0,2)
+ 0.6149266274460007351509224
+ 0.4482883573538263579148237
+ 0.0
+ >>> airybi(0,3); airybi(0,4); airybi(0,5)
+ 0.6149266274460007351509224
+ 0.8965767147076527158296474
+ 0.0
+ >>> airybi(0,15); airybi(0,16); airybi(0,17)
+ 2238.332923903442675949357
+ 5522.912562599140729510628
+ 0.0
+
+The integral of the Bi-function::
+
+ >>> airybi(3,-1); quad(airybi, [0,3])
+ 10.06200303130620056316655
+ 10.06200303130620056316655
+ >>> airybi(-10,-1); quad(airybi, [0,-10])
+ -0.01504042480614002045135483
+ -0.01504042480614002045135483
+
+Integrals of high or fractional order::
+
+ >>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0)
+ (0.0 + 0.5019859055341699223453257j)
+ (0.0 + 0.5019859055341699223453257j)
+ >>> airybi(-2,-4); differint(airybi,-2,-4,0)
+ 0.2809314599922447252139092
+ 0.2809314599922447252139092
+ >>> airybi(0,-1); airybi(0,-2); airybi(0,-3)
+ 0.0
+ 0.0
+ 0.0
+
+Integrals of the Bi-function can be evaluated at limit points::
+
+ >>> airybi(-1000000,-1); airybi(-inf,-1)
+ 0.000002191261128063434047966873
+ 0.0
+ >>> airybi(10,-1); airybi(+inf,-1)
+ 147809803.1074067161675853
+ +inf
+ >>> airybi(+inf,-2); airybi(+inf,-3)
+ +inf
+ +inf
+ >>> airybi(-1000000,-2); airybi(-inf,-2)
+ 0.4482883750599908479851085
+ 0.4482883573538263579148237
+ >>> gamma('2/3')*power(3,'2/3')/(2*pi)
+ 0.4482883573538263579148237
+ >>> airybi(-100000,-3); airybi(-inf,-3)
+ -44828.52827206932872493133
+ -inf
+ >>> airybi(-100000,-4); airybi(-inf,-4)
+ 2241411040.437759489540248
+ +inf
+
+"""
+
+airyaizero = r"""
+Gives the `k`-th zero of the Airy Ai-function,
+i.e. the `k`-th number `a_k` ordered by magnitude for which
+`\operatorname{Ai}(a_k) = 0`.
+
+Optionally, with *derivative=1*, the corresponding
+zero `a'_k` of the derivative function, i.e.
+`\operatorname{Ai}'(a'_k) = 0`, is computed.
+
+**Examples**
+
+Some values of `a_k`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> airyaizero(1)
+ -2.338107410459767038489197
+ >>> airyaizero(2)
+ -4.087949444130970616636989
+ >>> airyaizero(3)
+ -5.520559828095551059129856
+ >>> airyaizero(1000)
+ -281.0315196125215528353364
+
+Some values of `a'_k`::
+
+ >>> airyaizero(1,1)
+ -1.018792971647471089017325
+ >>> airyaizero(2,1)
+ -3.248197582179836537875424
+ >>> airyaizero(3,1)
+ -4.820099211178735639400616
+ >>> airyaizero(1000,1)
+ -280.9378080358935070607097
+
+Verification::
+
+ >>> chop(airyai(airyaizero(1)))
+ 0.0
+ >>> chop(airyai(airyaizero(1,1),1))
+ 0.0
+
+"""
+
+airybizero = r"""
+With *complex=False*, gives the `k`-th real zero of the Airy Bi-function,
+i.e. the `k`-th number `b_k` ordered by magnitude for which
+`\operatorname{Bi}(b_k) = 0`.
+
+With *complex=True*, gives the `k`-th complex zero in the upper
+half plane `\beta_k`. Also the conjugate `\overline{\beta_k}`
+is a zero.
+
+Optionally, with *derivative=1*, the corresponding
+zero `b'_k` or `\beta'_k` of the derivative function, i.e.
+`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`,
+is computed.
+
+**Examples**
+
+Some values of `b_k`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> airybizero(1)
+ -1.17371322270912792491998
+ >>> airybizero(2)
+ -3.271093302836352715680228
+ >>> airybizero(3)
+ -4.830737841662015932667709
+ >>> airybizero(1000)
+ -280.9378112034152401578834
+
+Some values of `b'_k`::
+
+ >>> airybizero(1,1)
+ -2.294439682614123246622459
+ >>> airybizero(2,1)
+ -4.073155089071828215552369
+ >>> airybizero(3,1)
+ -5.512395729663599496259593
+ >>> airybizero(1000,1)
+ -281.0315164471118527161362
+
+Some values of `\beta_k`::
+
+ >>> airybizero(1,complex=True)
+ (0.9775448867316206859469927 + 2.141290706038744575749139j)
+ >>> airybizero(2,complex=True)
+ (1.896775013895336346627217 + 3.627291764358919410440499j)
+ >>> airybizero(3,complex=True)
+ (2.633157739354946595708019 + 4.855468179979844983174628j)
+ >>> airybizero(1000,complex=True)
+ (140.4978560578493018899793 + 243.3907724215792121244867j)
+
+Some values of `\beta'_k`::
+
+ >>> airybizero(1,1,complex=True)
+ (0.2149470745374305676088329 + 1.100600143302797880647194j)
+ >>> airybizero(2,1,complex=True)
+ (1.458168309223507392028211 + 2.912249367458445419235083j)
+ >>> airybizero(3,1,complex=True)
+ (2.273760763013482299792362 + 4.254528549217097862167015j)
+ >>> airybizero(1000,1,complex=True)
+ (140.4509972835270559730423 + 243.3096175398562811896208j)
+
+Verification::
+
+ >>> chop(airybi(airybizero(1)))
+ 0.0
+ >>> chop(airybi(airybizero(1,1),1))
+ 0.0
+ >>> u = airybizero(1,complex=True)
+ >>> chop(airybi(u))
+ 0.0
+ >>> chop(airybi(conj(u)))
+ 0.0
+
+The complex zeros (in the upper and lower half-planes respectively)
+asymptotically approach the rays `z = R \exp(\pm i \pi /3)`::
+
+ >>> arg(airybizero(1,complex=True))
+ 1.142532510286334022305364
+ >>> arg(airybizero(1000,complex=True))
+ 1.047271114786212061583917
+ >>> arg(airybizero(1000000,complex=True))
+ 1.047197624741816183341355
+ >>> pi/3
+ 1.047197551196597746154214
+
+"""
+
+
+ellipk = r"""
+Evaluates the complete elliptic integral of the first kind,
+`K(m)`, defined by
+
+.. math ::
+
+ K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \,
+ \frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right).
+
+Note that the argument is the parameter `m = k^2`,
+not the modulus `k` which is sometimes used.
+
+**Plots**
+
+.. literalinclude :: /plots/ellipk.py
+.. image :: /plots/ellipk.png
+
+**Examples**
+
+Values and limits include::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> ellipk(0)
+ 1.570796326794896619231322
+ >>> ellipk(inf)
+ (0.0 + 0.0j)
+ >>> ellipk(-inf)
+ 0.0
+ >>> ellipk(1)
+ +inf
+ >>> ellipk(-1)
+ 1.31102877714605990523242
+ >>> ellipk(2)
+ (1.31102877714605990523242 - 1.31102877714605990523242j)
+
+Verifying the defining integral and hypergeometric
+representation::
+
+ >>> ellipk(0.5)
+ 1.85407467730137191843385
+ >>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2])
+ 1.85407467730137191843385
+ >>> pi/2*hyp2f1(0.5,0.5,1,0.5)
+ 1.85407467730137191843385
+
+Evaluation is supported for arbitrary complex `m`::
+
+ >>> ellipk(3+4j)
+ (0.9111955638049650086562171 + 0.6313342832413452438845091j)
+
+A definite integral::
+
+ >>> quad(ellipk, [0, 1])
+ 2.0
+"""
+
+agm = r"""
+``agm(a, b)`` computes the arithmetic-geometric mean of `a` and
+`b`, defined as the limit of the following iteration:
+
+.. math ::
+
+ a_0 = a
+
+ b_0 = b
+
+ a_{n+1} = \frac{a_n+b_n}{2}
+
+ b_{n+1} = \sqrt{a_n b_n}
+
+This function can be called with a single argument, computing
+`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`.
+
+**Examples**
+
+It is a well-known theorem that the geometric mean of
+two distinct positive numbers is less than the arithmetic
+mean. It follows that the arithmetic-geometric mean lies
+between the two means::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> a = mpf(3)
+ >>> b = mpf(4)
+ >>> sqrt(a*b)
+ 3.46410161513775
+ >>> agm(a,b)
+ 3.48202767635957
+ >>> (a+b)/2
+ 3.5
+
+The arithmetic-geometric mean is scale-invariant::
+
+ >>> agm(10*e, 10*pi)
+ 29.261085515723
+ >>> 10*agm(e, pi)
+ 29.261085515723
+
+As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x`
+for large `x`::
+
+ >>> agm(10**10)
+ 643448704.760133
+ >>> agm(10**50)
+ 1.34814309345871e+48
+
+For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`::
+
+ >>> agm('0.01')
+ 0.262166887202249
+ >>> -pi/2/log('0.0025')
+ 0.262172347753122
+
+The arithmetic-geometric mean can also be computed for complex
+numbers::
+
+ >>> agm(3, 2+j)
+ (2.51055133276184 + 0.547394054060638j)
+
+The AGM iteration converges very quickly (each step doubles
+the number of correct digits), so :func:`~mpmath.agm` supports efficient
+high-precision evaluation::
+
+ >>> mp.dps = 10000
+ >>> a = agm(1,2)
+ >>> str(a)[-10:]
+ '1679581912'
+
+**Mathematical relations**
+
+The arithmetic-geometric mean may be used to evaluate the
+following two parametric definite integrals:
+
+.. math ::
+
+ I_1 = \int_0^{\infty}
+ \frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx
+
+ I_2 = \int_0^{\pi/2}
+ \frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx
+
+We have::
+
+ >>> mp.dps = 15
+ >>> a = 3
+ >>> b = 4
+ >>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5
+ >>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5
+ >>> quad(f1, [0, inf])
+ 0.451115405388492
+ >>> quad(f2, [0, pi/2])
+ 0.451115405388492
+ >>> pi/(2*agm(a,b))
+ 0.451115405388492
+
+A formula for `\Gamma(1/4)`::
+
+ >>> gamma(0.25)
+ 3.62560990822191
+ >>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2)))
+ 3.62560990822191
+
+**Possible issues**
+
+The branch cut chosen for complex `a` and `b` is somewhat
+arbitrary.
+
+"""
+
+gegenbauer = r"""
+Evaluates the Gegenbauer polynomial, or ultraspherical polynomial,
+
+.. math ::
+
+ C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a;
+ a+\frac{1}{2}; \frac{1}{2}(1-z)\right).
+
+When `n` is a nonnegative integer, this formula gives a polynomial
+in `z` of degree `n`, but all parameters are permitted to be
+complex numbers. With `a = 1/2`, the Gegenbauer polynomial
+reduces to a Legendre polynomial.
+
+**Examples**
+
+Evaluation for arbitrary arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> gegenbauer(3, 0.5, -10)
+ -2485.0
+ >>> gegenbauer(1000, 10, 100)
+ 3.012757178975667428359374e+2322
+ >>> gegenbauer(2+3j, -0.75, -1000j)
+ (-5038991.358609026523401901 + 9414549.285447104177860806j)
+
+Evaluation at negative integer orders::
+
+ >>> gegenbauer(-4, 2, 1.75)
+ -1.0
+ >>> gegenbauer(-4, 3, 1.75)
+ 0.0
+ >>> gegenbauer(-4, 2j, 1.75)
+ 0.0
+ >>> gegenbauer(-7, 0.5, 3)
+ 8989.0
+
+The Gegenbauer polynomials solve the differential equation::
+
+ >>> n, a = 4.5, 1+2j
+ >>> f = lambda z: gegenbauer(n, a, z)
+ >>> for z in [0, 0.75, -0.5j]:
+ ... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+
+The Gegenbauer polynomials have generating function
+`(1-2zt+t^2)^{-a}`::
+
+ >>> a, z = 2.5, 1
+ >>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3)
+ [1.0, 5.0, 15.0, 35.0]
+ >>> [gegenbauer(n,a,z) for n in range(4)]
+ [1.0, 5.0, 15.0, 35.0]
+
+The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect
+to the weight `(1-z^2)^{a-\frac{1}{2}}`::
+
+ >>> a, n, m = 2.5, 4, 5
+ >>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000)
+ >>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000)
+ >>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1]))
+ 0.0
+"""
+
+laguerre = r"""
+Gives the generalized (associated) Laguerre polynomial, defined by
+
+.. math ::
+
+ L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)}
+ \,_1F_1(-n, a+1, z).
+
+With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary
+Laguerre polynomial, the sequence of which begins
+`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = \frac{1}{2}(z^2-4z+2), \ldots`.
+
+The Laguerre polynomials are orthogonal with respect to the weight
+`z^a e^{-z}` on `[0, \infty)`.
+
+**Plots**
+
+.. literalinclude :: /plots/laguerre.py
+.. image :: /plots/laguerre.png
+
+**Examples**
+
+Evaluation for arbitrary arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> laguerre(5, 0, 0.25)
+ 0.03726399739583333333333333
+ >>> laguerre(1+j, 0.5, 2+3j)
+ (4.474921610704496808379097 - 11.02058050372068958069241j)
+ >>> laguerre(2, 0, 10000)
+ 49980001.0
+ >>> laguerre(2.5, 0, 10000)
+ -9.327764910194842158583189e+4328
+
+The first few Laguerre polynomials, normalized to have integer
+coefficients::
+
+ >>> for n in range(7):
+ ... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n))
+ ...
+ [1.0]
+ [1.0, -1.0]
+ [2.0, -4.0, 1.0]
+ [6.0, -18.0, 9.0, -1.0]
+ [24.0, -96.0, 72.0, -16.0, 1.0]
+ [120.0, -600.0, 600.0, -200.0, 25.0, -1.0]
+ [720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0]
+
+Verifying orthogonality::
+
+ >>> Lm = lambda t: laguerre(m,a,t)
+ >>> Ln = lambda t: laguerre(n,a,t)
+ >>> a, n, m = 2.5, 2, 3
+ >>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf]))
+ 0.0
+
+
+"""
+
+hermite = r"""
+Evaluates the Hermite polynomial `H_n(z)`, which may be defined using
+the recurrence
+
+.. math ::
+
+ H_0(z) = 1
+
+ H_1(z) = 2z
+
+ H_{n+1} = 2z H_n(z) - 2n H_{n-1}(z).
+
+The Hermite polynomials are orthogonal on `(-\infty, \infty)` with
+respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex
+values of `n`, the Hermite function `H_n(z)` is defined as
+
+.. math ::
+
+ H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2},
+ -\frac{1}{z^2}\right)
+
+for `\Re{z} > 0`, or generally
+
+.. math ::
+
+ H_n(z) = 2^n \sqrt{\pi} \left(
+ \frac{1}{\Gamma\left(\frac{1-n}{2}\right)}
+ \,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) -
+ \frac{2z}{\Gamma\left(-\frac{n}{2}\right)}
+ \,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right)
+ \right).
+
+**Plots**
+
+.. literalinclude :: /plots/hermite.py
+.. image :: /plots/hermite.png
+
+**Examples**
+
+Evaluation for arbitrary arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hermite(0, 10)
+ 1.0
+ >>> hermite(1, 10); hermite(2, 10)
+ 20.0
+ 398.0
+ >>> hermite(10000, 2)
+ 4.950440066552087387515653e+19334
+ >>> hermite(3, -10**8)
+ -7999999999999998800000000.0
+ >>> hermite(-3, -10**8)
+ 1.675159751729877682920301e+4342944819032534
+ >>> hermite(2+3j, -1+2j)
+ (-0.07652130602993513389421901 - 0.1084662449961914580276007j)
+
+Coefficients of the first few Hermite polynomials are::
+
+ >>> for n in range(7):
+ ... chop(taylor(lambda z: hermite(n, z), 0, n))
+ ...
+ [1.0]
+ [0.0, 2.0]
+ [-2.0, 0.0, 4.0]
+ [0.0, -12.0, 0.0, 8.0]
+ [12.0, 0.0, -48.0, 0.0, 16.0]
+ [0.0, 120.0, 0.0, -160.0, 0.0, 32.0]
+ [-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0]
+
+Values at `z = 0`::
+
+ >>> for n in range(-5, 9):
+ ... hermite(n, 0)
+ ...
+ 0.02769459142039868792653387
+ 0.08333333333333333333333333
+ 0.2215567313631895034122709
+ 0.5
+ 0.8862269254527580136490837
+ 1.0
+ 0.0
+ -2.0
+ 0.0
+ 12.0
+ 0.0
+ -120.0
+ 0.0
+ 1680.0
+
+Hermite functions satisfy the differential equation::
+
+ >>> n = 4
+ >>> f = lambda z: hermite(n, z)
+ >>> z = 1.5
+ >>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z))
+ 0.0
+
+Verifying orthogonality::
+
+ >>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf]))
+ 0.0
+
+"""
+
+jacobi = r"""
+``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial
+`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special
+case of the hypergeometric function `\,_2F_1` given by:
+
+.. math ::
+
+ P_n^{(a,b)}(x) = {n+a \choose n}
+ \,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right).
+
+Note that this definition generalizes to nonintegral values
+of `n`. When `n` is an integer, the hypergeometric series
+terminates after a finite number of terms, giving
+a polynomial in `x`.
+
+**Evaluation of Jacobi polynomials**
+
+A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> jacobi(4, 0.5, 0.25, 1)
+ 2.4609375
+ >>> binomial(4+0.5, 4)
+ 2.4609375
+
+A Jacobi polynomial of degree `n` is equal to its
+Taylor polynomial of degree `n`. The explicit
+coefficients of Jacobi polynomials can therefore
+be recovered easily using :func:`~mpmath.taylor`::
+
+ >>> for n in range(5):
+ ... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
+ ...
+ [1.0]
+ [-0.5, 2.5]
+ [-0.75, -1.5, 5.25]
+ [0.5, -3.5, -3.5, 10.5]
+ [0.625, 2.5, -11.25, -7.5, 20.625]
+
+For nonintegral `n`, the Jacobi "polynomial" is no longer
+a polynomial::
+
+ >>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
+ [0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
+
+**Orthogonality**
+
+The Jacobi polynomials are orthogonal on the interval
+`[-1, 1]` with respect to the weight function
+`w(x) = (1-x)^a (1+x)^b`. That is,
+`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
+zero if `m \ne n` and to a nonzero number if `m = n`.
+
+The orthogonality is easy to verify using numerical
+quadrature::
+
+ >>> P = jacobi
+ >>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
+ >>> a = 2
+ >>> b = 3
+ >>> m, n = 3, 4
+ >>> chop(quad(f, [-1, 1]), 1)
+ 0.0
+ >>> m, n = 4, 4
+ >>> quad(f, [-1, 1])
+ 1.9047619047619
+
+**Differential equation**
+
+The Jacobi polynomials are solutions of the differential
+equation
+
+.. math ::
+
+ (1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
+
+We can verify that :func:`~mpmath.jacobi` approximately satisfies
+this equation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15
+ >>> a = 2.5
+ >>> b = 4
+ >>> n = 3
+ >>> y = lambda x: jacobi(n,a,b,x)
+ >>> x = pi
+ >>> A0 = n*(n+a+b+1)*y(x)
+ >>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
+ >>> A2 = (1-x**2)*diff(y,x,2)
+ >>> nprint(A2 + A1 + A0, 1)
+ 4.0e-12
+
+The difference of order `10^{-12}` is as close to zero as
+it could be at 15-digit working precision, since the terms
+are large::
+
+ >>> A0, A1, A2
+ (26560.2328981879, -21503.7641037294, -5056.46879445852)
+
+"""
+
+legendre = r"""
+``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
+The Legendre polynomials are given by the formula
+
+.. math ::
+
+ P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
+
+Alternatively, they can be computed recursively using
+
+.. math ::
+
+ P_0(x) = 1
+
+ P_1(x) = x
+
+ (n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
+
+A third definition is in terms of the hypergeometric function
+`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
+
+.. math ::
+
+ P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
+
+**Plots**
+
+.. literalinclude :: /plots/legendre.py
+.. image :: /plots/legendre.png
+
+**Basic evaluation**
+
+The Legendre polynomials assume fixed values at the points
+`x = -1` and `x = 1`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> nprint([legendre(n, 1) for n in range(6)])
+ [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+ >>> nprint([legendre(n, -1) for n in range(6)])
+ [1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
+
+The coefficients of Legendre polynomials can be recovered
+using degree-`n` Taylor expansion::
+
+ >>> for n in range(5):
+ ... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
+ ...
+ [1.0]
+ [0.0, 1.0]
+ [-0.5, 0.0, 1.5]
+ [0.0, -1.5, 0.0, 2.5]
+ [0.375, 0.0, -3.75, 0.0, 4.375]
+
+The roots of Legendre polynomials are located symmetrically
+on the interval `[-1, 1]`::
+
+ >>> for n in range(5):
+ ... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
+ ...
+ []
+ [0.0]
+ [-0.57735, 0.57735]
+ [-0.774597, 0.0, 0.774597]
+ [-0.861136, -0.339981, 0.339981, 0.861136]
+
+An example of an evaluation for arbitrary `n`::
+
+ >>> legendre(0.75, 2+4j)
+ (1.94952805264875 + 2.1071073099422j)
+
+**Orthogonality**
+
+The Legendre polynomials are orthogonal on `[-1, 1]` with respect
+to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
+integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
+
+ >>> m, n = 3, 4
+ >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
+ 0.0
+ >>> m, n = 4, 4
+ >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
+ 0.222222222222222
+
+**Differential equation**
+
+The Legendre polynomials satisfy the differential equation
+
+.. math ::
+
+ ((1-x^2) y')' + n(n+1) y = 0.
+
+We can verify this numerically::
+
+ >>> n = 3.6
+ >>> x = 0.73
+ >>> P = legendre
+ >>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
+ >>> B = n*(n+1)*P(n,x)
+ >>> nprint(A+B,1)
+ 9.0e-16
+
+"""
+
+
+legenp = r"""
+Calculates the (associated) Legendre function of the first kind of
+degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary
+Legendre function of the first kind, `P_n(z)`. The parameters may be
+complex numbers.
+
+In terms of the Gauss hypergeometric function, the (associated) Legendre
+function is defined as
+
+.. math ::
+
+ P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}}
+ \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
+
+With *type=3* instead of *type=2*, the alternative
+definition
+
+.. math ::
+
+ \hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}}
+ \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
+
+is used. These functions correspond respectively to ``LegendreP[n,m,2,z]``
+and ``LegendreP[n,m,3,z]`` in Mathematica.
+
+The general solution of the (associated) Legendre differential equation
+
+.. math ::
+
+ (1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0
+
+is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants
+`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the
+second kind as implemented by :func:`~mpmath.legenq`.
+
+**Examples**
+
+Evaluation for arbitrary parameters and arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> legenp(2, 0, 10); legendre(2, 10)
+ 149.5
+ 149.5
+ >>> legenp(-2, 0.5, 2.5)
+ (1.972260393822275434196053 - 1.972260393822275434196053j)
+ >>> legenp(2+3j, 1-j, -0.5+4j)
+ (-3.335677248386698208736542 - 5.663270217461022307645625j)
+ >>> chop(legenp(3, 2, -1.5, type=2))
+ 28.125
+ >>> chop(legenp(3, 2, -1.5, type=3))
+ -28.125
+
+Verifying the associated Legendre differential equation::
+
+ >>> n, m = 2, -0.5
+ >>> C1, C2 = 1, -3
+ >>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z)
+ >>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \
+ ... (n*(n+1)-m**2/(1-z**2))*f(z)
+ >>> for z in [0, 2, -1.5, 0.5+2j]:
+ ... chop(deq(mpmathify(z)))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+"""
+
+legenq = r"""
+Calculates the (associated) Legendre function of the second kind of
+degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary
+Legendre function of the second kind, `Q_n(z)`. The parameters may be
+complex numbers.
+
+The Legendre functions of the second kind give a second set of
+solutions to the (associated) Legendre differential equation.
+(See :func:`~mpmath.legenp`.)
+Unlike the Legendre functions of the first kind, they are not
+polynomials of `z` for integer `n`, `m` but rational or logarithmic
+functions with poles at `z = \pm 1`.
+
+There are various ways to define Legendre functions of
+the second kind, giving rise to different complex structure.
+A version can be selected using the *type* keyword argument.
+The *type=2* and *type=3* functions are given respectively by
+
+.. math ::
+
+ Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)}
+ \left( \cos(\pi m) P_n^m(z) -
+ \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right)
+
+ \hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m}
+ \left( \hat{P}_n^m(z) -
+ \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right)
+
+where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions
+of the first kind. The formulas above should be understood as limits
+when `m` is an integer.
+
+These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``)
+and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function
+is essentially the same as the function defined in
+Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead
+of `(z^2-1)^{m/2}`, giving slightly different branches.
+
+**Examples**
+
+Evaluation for arbitrary parameters and arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> legenq(2, 0, 0.5)
+ -0.8186632680417568557122028
+ >>> legenq(-1.5, -2, 2.5)
+ (0.6655964618250228714288277 + 0.3937692045497259717762649j)
+ >>> legenq(2-j, 3+4j, -6+5j)
+ (-10001.95256487468541686564 - 6011.691337610097577791134j)
+
+Different versions of the function::
+
+ >>> legenq(2, 1, 0.5)
+ 0.7298060598018049369381857
+ >>> legenq(2, 1, 1.5)
+ (-7.902916572420817192300921 + 0.1998650072605976600724502j)
+ >>> legenq(2, 1, 0.5, type=3)
+ (2.040524284763495081918338 - 0.7298060598018049369381857j)
+ >>> chop(legenq(2, 1, 1.5, type=3))
+ -0.1998650072605976600724502
+
+"""
+
+chebyt = r"""
+``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
+kind `T_n(x)`, defined by the identity
+
+.. math ::
+
+ T_n(\cos x) = \cos(n x).
+
+The Chebyshev polynomials of the first kind are a special
+case of the Jacobi polynomials, and by extension of the
+hypergeometric function `\,_2F_1`. They can thus also be
+evaluated for nonintegral `n`.
+
+**Plots**
+
+.. literalinclude :: /plots/chebyt.py
+.. image :: /plots/chebyt.png
+
+**Basic evaluation**
+
+The coefficients of the `n`-th polynomial can be recovered
+using a degree-`n` Taylor expansion::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(5):
+ ... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
+ ...
+ [1.0]
+ [0.0, 1.0]
+ [-1.0, 0.0, 2.0]
+ [0.0, -3.0, 0.0, 4.0]
+ [1.0, 0.0, -8.0, 0.0, 8.0]
+
+**Orthogonality**
+
+The Chebyshev polynomials of the first kind are orthogonal
+on the interval `[-1, 1]` with respect to the weight
+function `w(x) = 1/\sqrt{1-x^2}`::
+
+ >>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
+ >>> m, n = 3, 4
+ >>> nprint(quad(f, [-1, 1]),1)
+ 0.0
+ >>> m, n = 4, 4
+ >>> quad(f, [-1, 1])
+ 1.57079632596448
+
+"""
+
+chebyu = r"""
+``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
+kind `U_n(x)`, defined by the identity
+
+.. math ::
+
+ U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
+
+The Chebyshev polynomials of the second kind are a special
+case of the Jacobi polynomials, and by extension of the
+hypergeometric function `\,_2F_1`. They can thus also be
+evaluated for nonintegral `n`.
+
+**Plots**
+
+.. literalinclude :: /plots/chebyu.py
+.. image :: /plots/chebyu.png
+
+**Basic evaluation**
+
+The coefficients of the `n`-th polynomial can be recovered
+using a degree-`n` Taylor expansion::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(5):
+ ... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
+ ...
+ [1.0]
+ [0.0, 2.0]
+ [-1.0, 0.0, 4.0]
+ [0.0, -4.0, 0.0, 8.0]
+ [1.0, 0.0, -12.0, 0.0, 16.0]
+
+**Orthogonality**
+
+The Chebyshev polynomials of the second kind are orthogonal
+on the interval `[-1, 1]` with respect to the weight
+function `w(x) = \sqrt{1-x^2}`::
+
+ >>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
+ >>> m, n = 3, 4
+ >>> quad(f, [-1, 1])
+ 0.0
+ >>> m, n = 4, 4
+ >>> quad(f, [-1, 1])
+ 1.5707963267949
+"""
+
+besselj = r"""
+``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind
+`J_n(x)`. Bessel functions of the first kind are defined as
+solutions of the differential equation
+
+.. math ::
+
+ x^2 y'' + x y' + (x^2 - n^2) y = 0
+
+which appears, among other things, when solving the radial
+part of Laplace's equation in cylindrical coordinates. This
+equation has two solutions for given `n`, where the
+`J_n`-function is the solution that is nonsingular at `x = 0`.
+For positive integer `n`, `J_n(x)` behaves roughly like a sine
+(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
+that decays slowly as `x \to \pm\infty`.
+
+Generally, `J_n` is a special case of the hypergeometric
+function `\,_0F_1`:
+
+.. math ::
+
+ J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
+ \,_0F_1\left(n+1,-\frac{x^2}{4}\right)
+
+With *derivative* = `m \ne 0`, the `m`-th derivative
+
+.. math ::
+
+ \frac{d^m}{dx^m} J_n(x)
+
+is computed.
+
+**Plots**
+
+.. literalinclude :: /plots/besselj.py
+.. image :: /plots/besselj.png
+.. literalinclude :: /plots/besselj_c.py
+.. image :: /plots/besselj_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary arguments, and at
+arbitrary precision::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> besselj(2, 1000)
+ -0.024777229528606
+ >>> besselj(4, 0.75)
+ 0.000801070086542314
+ >>> besselj(2, 1000j)
+ (-2.48071721019185e+432 + 6.41567059811949e-437j)
+ >>> mp.dps = 25
+ >>> besselj(0.75j, 3+4j)
+ (-2.778118364828153309919653 - 1.5863603889018621585533j)
+ >>> mp.dps = 50
+ >>> besselj(1, pi)
+ 0.28461534317975275734531059968613140570981118184947
+
+Arguments may be large::
+
+ >>> mp.dps = 25
+ >>> besselj(0, 10000)
+ -0.007096160353388801477265164
+ >>> besselj(0, 10**10)
+ 0.000002175591750246891726859055
+ >>> besselj(2, 10**100)
+ 7.337048736538615712436929e-51
+ >>> besselj(2, 10**5*j)
+ (-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j)
+
+The Bessel functions of the first kind satisfy simple
+symmetries around `x = 0`::
+
+ >>> mp.dps = 15
+ >>> nprint([besselj(n,0) for n in range(5)])
+ [1.0, 0.0, 0.0, 0.0, 0.0]
+ >>> nprint([besselj(n,pi) for n in range(5)])
+ [-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
+ >>> nprint([besselj(n,-pi) for n in range(5)])
+ [-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
+
+Roots of Bessel functions are often used::
+
+ >>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
+ [2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
+ >>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
+ [3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
+
+The roots are not periodic, but the distance between successive
+roots asymptotically approaches `2 \pi`. Bessel functions of
+the first kind have the following normalization::
+
+ >>> quadosc(j0, [0, inf], period=2*pi)
+ 1.0
+ >>> quadosc(j1, [0, inf], period=2*pi)
+ 1.0
+
+For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a
+trigonometric function::
+
+ >>> x = 10
+ >>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x)
+ (-0.13726373575505, -0.13726373575505)
+ >>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x)
+ (-0.211708866331398, -0.211708866331398)
+
+Derivatives of any order can be computed (negative orders
+correspond to integration)::
+
+ >>> mp.dps = 25
+ >>> besselj(0, 7.5, 1)
+ -0.1352484275797055051822405
+ >>> diff(lambda x: besselj(0,x), 7.5)
+ -0.1352484275797055051822405
+ >>> besselj(0, 7.5, 10)
+ -0.1377811164763244890135677
+ >>> diff(lambda x: besselj(0,x), 7.5, 10)
+ -0.1377811164763244890135677
+ >>> besselj(0,7.5,-1) - besselj(0,3.5,-1)
+ -0.1241343240399987693521378
+ >>> quad(j0, [3.5, 7.5])
+ -0.1241343240399987693521378
+
+Differentiation with a noninteger order gives the fractional derivative
+in the sense of the Riemann-Liouville differintegral, as computed by
+:func:`~mpmath.differint`::
+
+ >>> mp.dps = 15
+ >>> besselj(1, 3.5, 0.75)
+ -0.385977722939384
+ >>> differint(lambda x: besselj(1, x), 3.5, 0.75)
+ -0.385977722939384
+
+"""
+
+besseli = r"""
+``besseli(n, x, derivative=0)`` gives the modified Bessel function of the
+first kind,
+
+.. math ::
+
+ I_n(x) = i^{-n} J_n(ix).
+
+With *derivative* = `m \ne 0`, the `m`-th derivative
+
+.. math ::
+
+ \frac{d^m}{dx^m} I_n(x)
+
+is computed.
+
+**Plots**
+
+.. literalinclude :: /plots/besseli.py
+.. image :: /plots/besseli.png
+.. literalinclude :: /plots/besseli_c.py
+.. image :: /plots/besseli_c.png
+
+**Examples**
+
+Some values of `I_n(x)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> besseli(0,0)
+ 1.0
+ >>> besseli(1,0)
+ 0.0
+ >>> besseli(0,1)
+ 1.266065877752008335598245
+ >>> besseli(3.5, 2+3j)
+ (-0.2904369752642538144289025 - 0.4469098397654815837307006j)
+
+Arguments may be large::
+
+ >>> besseli(2, 1000)
+ 2.480717210191852440616782e+432
+ >>> besseli(2, 10**10)
+ 4.299602851624027900335391e+4342944813
+ >>> besseli(2, 6000+10000j)
+ (-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j)
+
+For integers `n`, the following integral representation holds::
+
+ >>> mp.dps = 15
+ >>> n = 3
+ >>> x = 2.3
+ >>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi
+ 0.349223221159309
+ >>> besseli(n,x)
+ 0.349223221159309
+
+Derivatives and antiderivatives of any order can be computed::
+
+ >>> mp.dps = 25
+ >>> besseli(2, 7.5, 1)
+ 195.8229038931399062565883
+ >>> diff(lambda x: besseli(2,x), 7.5)
+ 195.8229038931399062565883
+ >>> besseli(2, 7.5, 10)
+ 153.3296508971734525525176
+ >>> diff(lambda x: besseli(2,x), 7.5, 10)
+ 153.3296508971734525525176
+ >>> besseli(2,7.5,-1) - besseli(2,3.5,-1)
+ 202.5043900051930141956876
+ >>> quad(lambda x: besseli(2,x), [3.5, 7.5])
+ 202.5043900051930141956876
+
+"""
+
+bessely = r"""
+``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind,
+
+.. math ::
+
+ Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}.
+
+For `n` an integer, this formula should be understood as a
+limit. With *derivative* = `m \ne 0`, the `m`-th derivative
+
+.. math ::
+
+ \frac{d^m}{dx^m} Y_n(x)
+
+is computed.
+
+**Plots**
+
+.. literalinclude :: /plots/bessely.py
+.. image :: /plots/bessely.png
+.. literalinclude :: /plots/bessely_c.py
+.. image :: /plots/bessely_c.png
+
+**Examples**
+
+Some values of `Y_n(x)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> bessely(0,0), bessely(1,0), bessely(2,0)
+ (-inf, -inf, -inf)
+ >>> bessely(1, pi)
+ 0.3588729167767189594679827
+ >>> bessely(0.5, 3+4j)
+ (9.242861436961450520325216 - 3.085042824915332562522402j)
+
+Arguments may be large::
+
+ >>> bessely(0, 10000)
+ 0.00364780555898660588668872
+ >>> bessely(2.5, 10**50)
+ -4.8952500412050989295774e-26
+ >>> bessely(2.5, -10**50)
+ (0.0 + 4.8952500412050989295774e-26j)
+
+Derivatives and antiderivatives of any order can be computed::
+
+ >>> bessely(2, 3.5, 1)
+ 0.3842618820422660066089231
+ >>> diff(lambda x: bessely(2, x), 3.5)
+ 0.3842618820422660066089231
+ >>> bessely(0.5, 3.5, 1)
+ -0.2066598304156764337900417
+ >>> diff(lambda x: bessely(0.5, x), 3.5)
+ -0.2066598304156764337900417
+ >>> diff(lambda x: bessely(2, x), 0.5, 10)
+ -208173867409.5547350101511
+ >>> bessely(2, 0.5, 10)
+ -208173867409.5547350101511
+ >>> bessely(2, 100.5, 100)
+ 0.02668487547301372334849043
+ >>> quad(lambda x: bessely(2,x), [1,3])
+ -1.377046859093181969213262
+ >>> bessely(2,3,-1) - bessely(2,1,-1)
+ -1.377046859093181969213262
+
+"""
+
+besselk = r"""
+``besselk(n, x)`` gives the modified Bessel function of the
+second kind,
+
+.. math ::
+
+ K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)}
+
+For `n` an integer, this formula should be understood as a
+limit.
+
+**Plots**
+
+.. literalinclude :: /plots/besselk.py
+.. image :: /plots/besselk.png
+.. literalinclude :: /plots/besselk_c.py
+.. image :: /plots/besselk_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> besselk(0,1)
+ 0.4210244382407083333356274
+ >>> besselk(0, -1)
+ (0.4210244382407083333356274 - 3.97746326050642263725661j)
+ >>> besselk(3.5, 2+3j)
+ (-0.02090732889633760668464128 + 0.2464022641351420167819697j)
+ >>> besselk(2+3j, 0.5)
+ (0.9615816021726349402626083 + 0.1918250181801757416908224j)
+
+Arguments may be large::
+
+ >>> besselk(0, 100)
+ 4.656628229175902018939005e-45
+ >>> besselk(1, 10**6)
+ 4.131967049321725588398296e-434298
+ >>> besselk(1, 10**6*j)
+ (0.001140348428252385844876706 - 0.0005200017201681152909000961j)
+ >>> besselk(4.5, fmul(10**50, j, exact=True))
+ (1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j)
+
+The point `x = 0` is a singularity (logarithmic if `n = 0`)::
+
+ >>> besselk(0,0)
+ +inf
+ >>> besselk(1,0)
+ +inf
+ >>> for n in range(-4, 5):
+ ... print(besselk(n, '1e-1000'))
+ ...
+ 4.8e+4001
+ 8.0e+3000
+ 2.0e+2000
+ 1.0e+1000
+ 2302.701024509704096466802
+ 1.0e+1000
+ 2.0e+2000
+ 8.0e+3000
+ 4.8e+4001
+
+"""
+
+hankel1 = r"""
+``hankel1(n,x)`` computes the Hankel function of the first kind,
+which is the complex combination of Bessel functions given by
+
+.. math ::
+
+ H_n^{(1)}(x) = J_n(x) + i Y_n(x).
+
+**Plots**
+
+.. literalinclude :: /plots/hankel1.py
+.. image :: /plots/hankel1.png
+.. literalinclude :: /plots/hankel1_c.py
+.. image :: /plots/hankel1_c.png
+
+**Examples**
+
+The Hankel function is generally complex-valued::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hankel1(2, pi)
+ (0.4854339326315091097054957 - 0.0999007139290278787734903j)
+ >>> hankel1(3.5, pi)
+ (0.2340002029630507922628888 - 0.6419643823412927142424049j)
+"""
+
+hankel2 = r"""
+``hankel2(n,x)`` computes the Hankel function of the second kind,
+which is the complex combination of Bessel functions given by
+
+.. math ::
+
+ H_n^{(2)}(x) = J_n(x) - i Y_n(x).
+
+**Plots**
+
+.. literalinclude :: /plots/hankel2.py
+.. image :: /plots/hankel2.png
+.. literalinclude :: /plots/hankel2_c.py
+.. image :: /plots/hankel2_c.png
+
+**Examples**
+
+The Hankel function is generally complex-valued::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> hankel2(2, pi)
+ (0.4854339326315091097054957 + 0.0999007139290278787734903j)
+ >>> hankel2(3.5, pi)
+ (0.2340002029630507922628888 + 0.6419643823412927142424049j)
+"""
+
+lambertw = r"""
+The Lambert W function `W(z)` is defined as the inverse function
+of `w \exp(w)`. In other words, the value of `W(z)` is such that
+`z = W(z) \exp(W(z))` for any complex number `z`.
+
+The Lambert W function is a multivalued function with infinitely
+many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch
+gives a different solution `w` of the equation `z = w \exp(w)`.
+All branches are supported by :func:`~mpmath.lambertw`:
+
+* ``lambertw(z)`` gives the principal solution (branch 0)
+
+* ``lambertw(z, k)`` gives the solution on branch `k`
+
+The Lambert W function has two partially real branches: the
+principal branch (`k = 0`) is real for real `z > -1/e`, and the
+`k = -1` branch is real for `-1/e < z < 0`. All branches except
+`k = 0` have a logarithmic singularity at `z = 0`.
+
+The definition, implementation and choice of branches
+is based on [Corless]_.
+
+**Plots**
+
+.. literalinclude :: /plots/lambertw.py
+.. image :: /plots/lambertw.png
+.. literalinclude :: /plots/lambertw_c.py
+.. image :: /plots/lambertw_c.png
+
+**Basic examples**
+
+The Lambert W function is the inverse of `w \exp(w)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> w = lambertw(1)
+ >>> w
+ 0.5671432904097838729999687
+ >>> w*exp(w)
+ 1.0
+
+Any branch gives a valid inverse::
+
+ >>> w = lambertw(1, k=3)
+ >>> w
+ (-2.853581755409037807206819 + 17.11353553941214591260783j)
+ >>> w = lambertw(1, k=25)
+ >>> w
+ (-5.047020464221569709378686 + 155.4763860949415867162066j)
+ >>> chop(w*exp(w))
+ 1.0
+
+**Applications to equation-solving**
+
+The Lambert W function may be used to solve various kinds of
+equations, such as finding the value of the infinite power
+tower `z^{z^{z^{\ldots}}}`::
+
+ >>> def tower(z, n):
+ ... if n == 0:
+ ... return z
+ ... return z ** tower(z, n-1)
+ ...
+ >>> tower(mpf(0.5), 100)
+ 0.6411857445049859844862005
+ >>> -lambertw(-log(0.5))/log(0.5)
+ 0.6411857445049859844862005
+
+**Properties**
+
+The Lambert W function grows roughly like the natural logarithm
+for large arguments::
+
+ >>> lambertw(1000); log(1000)
+ 5.249602852401596227126056
+ 6.907755278982137052053974
+ >>> lambertw(10**100); log(10**100)
+ 224.8431064451185015393731
+ 230.2585092994045684017991
+
+The principal branch of the Lambert W function has a rational
+Taylor series expansion around `z = 0`::
+
+ >>> nprint(taylor(lambertw, 0, 6), 10)
+ [0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
+
+Some special values and limits are::
+
+ >>> lambertw(0)
+ 0.0
+ >>> lambertw(1)
+ 0.5671432904097838729999687
+ >>> lambertw(e)
+ 1.0
+ >>> lambertw(inf)
+ +inf
+ >>> lambertw(0, k=-1)
+ -inf
+ >>> lambertw(0, k=3)
+ -inf
+ >>> lambertw(inf, k=2)
+ (+inf + 12.56637061435917295385057j)
+ >>> lambertw(inf, k=3)
+ (+inf + 18.84955592153875943077586j)
+ >>> lambertw(-inf, k=3)
+ (+inf + 21.9911485751285526692385j)
+
+The `k = 0` and `k = -1` branches join at `z = -1/e` where
+`W(z) = -1` for both branches. Since `-1/e` can only be represented
+approximately with binary floating-point numbers, evaluating the
+Lambert W function at this point only gives `-1` approximately::
+
+ >>> lambertw(-1/e, 0)
+ -0.9999999999998371330228251
+ >>> lambertw(-1/e, -1)
+ -1.000000000000162866977175
+
+If `-1/e` happens to round in the negative direction, there might be
+a small imaginary part::
+
+ >>> mp.dps = 15
+ >>> lambertw(-1/e)
+ (-1.0 + 8.22007971483662e-9j)
+ >>> lambertw(-1/e+eps)
+ -0.999999966242188
+
+**References**
+
+1. [Corless]_
+"""
+
+barnesg = r"""
+Evaluates the Barnes G-function, which generalizes the
+superfactorial (:func:`~mpmath.superfac`) and by extension also the
+hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers
+in an analogous way to how the gamma function generalizes
+the ordinary factorial.
+
+The Barnes G-function may be defined in terms of a Weierstrass
+product:
+
+.. math ::
+
+ G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
+ \prod_{n=1}^\infty
+ \left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
+
+For positive integers `n`, we have the relation to superfactorials
+`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
+
+**Examples**
+
+Some elementary values and limits of the Barnes G-function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> barnesg(1), barnesg(2), barnesg(3)
+ (1.0, 1.0, 1.0)
+ >>> barnesg(4)
+ 2.0
+ >>> barnesg(5)
+ 12.0
+ >>> barnesg(6)
+ 288.0
+ >>> barnesg(7)
+ 34560.0
+ >>> barnesg(8)
+ 24883200.0
+ >>> barnesg(inf)
+ +inf
+ >>> barnesg(0), barnesg(-1), barnesg(-2)
+ (0.0, 0.0, 0.0)
+
+Closed-form values are known for some rational arguments::
+
+ >>> barnesg('1/2')
+ 0.603244281209446
+ >>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
+ 0.603244281209446
+ >>> barnesg('1/4')
+ 0.29375596533861
+ >>> nthroot(exp('3/8')/exp(catalan/pi)/
+ ... gamma(0.25)**3/sqrt(glaisher)**9, 4)
+ 0.29375596533861
+
+The Barnes G-function satisfies the functional equation
+`G(z+1) = \Gamma(z) G(z)`::
+
+ >>> z = pi
+ >>> barnesg(z+1)
+ 2.39292119327948
+ >>> gamma(z)*barnesg(z)
+ 2.39292119327948
+
+The asymptotic growth rate of the Barnes G-function is related to
+the Glaisher-Kinkelin constant::
+
+ >>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
+ ... (2*pi)**(n/2)*exp(-3*n**2/4)), inf)
+ 0.847536694177301
+ >>> exp('1/12')/glaisher
+ 0.847536694177301
+
+The Barnes G-function can be differentiated in closed form::
+
+ >>> z = 3
+ >>> diff(barnesg, z)
+ 0.264507203401607
+ >>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2)
+ 0.264507203401607
+
+Evaluation is supported for arbitrary arguments and at arbitrary
+precision::
+
+ >>> barnesg(6.5)
+ 2548.7457695685
+ >>> barnesg(-pi)
+ 0.00535976768353037
+ >>> barnesg(3+4j)
+ (-0.000676375932234244 - 4.42236140124728e-5j)
+ >>> mp.dps = 50
+ >>> barnesg(1/sqrt(2))
+ 0.81305501090451340843586085064413533788206204124732
+ >>> q = barnesg(10j)
+ >>> q.real
+ 0.000000000021852360840356557241543036724799812371995850552234
+ >>> q.imag
+ -0.00000000000070035335320062304849020654215545839053210041457588
+ >>> mp.dps = 15
+ >>> barnesg(100)
+ 3.10361006263698e+6626
+ >>> barnesg(-101)
+ 0.0
+ >>> barnesg(-10.5)
+ 5.94463017605008e+25
+ >>> barnesg(-10000.5)
+ -6.14322868174828e+167480422
+ >>> barnesg(1000j)
+ (5.21133054865546e-1173597 + 4.27461836811016e-1173597j)
+ >>> barnesg(-1000+1000j)
+ (2.43114569750291e+1026623 + 2.24851410674842e+1026623j)
+
+
+**References**
+
+1. Whittaker & Watson, *A Course of Modern Analysis*,
+ Cambridge University Press, 4th edition (1927), p.264
+2. http://en.wikipedia.org/wiki/Barnes_G-function
+3. http://mathworld.wolfram.com/BarnesG-Function.html
+
+"""
+
+superfac = r"""
+Computes the superfactorial, defined as the product of
+consecutive factorials
+
+.. math ::
+
+ \mathrm{sf}(n) = \prod_{k=1}^n k!
+
+For general complex `z`, `\mathrm{sf}(z)` is defined
+in terms of the Barnes G-function (see :func:`~mpmath.barnesg`).
+
+**Examples**
+
+The first few superfactorials are (OEIS A000178)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(10):
+ ... print("%s %s" % (n, superfac(n)))
+ ...
+ 0 1.0
+ 1 1.0
+ 2 2.0
+ 3 12.0
+ 4 288.0
+ 5 34560.0
+ 6 24883200.0
+ 7 125411328000.0
+ 8 5.05658474496e+15
+ 9 1.83493347225108e+21
+
+Superfactorials grow very rapidly::
+
+ >>> superfac(1000)
+ 3.24570818422368e+1177245
+ >>> superfac(10**10)
+ 2.61398543581249e+467427913956904067453
+
+Evaluation is supported for arbitrary arguments::
+
+ >>> mp.dps = 25
+ >>> superfac(pi)
+ 17.20051550121297985285333
+ >>> superfac(2+3j)
+ (-0.005915485633199789627466468 + 0.008156449464604044948738263j)
+ >>> diff(superfac, 1)
+ 0.2645072034016070205673056
+
+**References**
+
+1. http://oeis.org/A000178
+
+"""
+
+
+hyperfac = r"""
+Computes the hyperfactorial, defined for integers as the product
+
+.. math ::
+
+ H(n) = \prod_{k=1}^n k^k.
+
+
+The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`.
+It can be defined more generally in terms of the Barnes G-function (see
+:func:`~mpmath.barnesg`) and the gamma function by the formula
+
+.. math ::
+
+ H(z) = \frac{\Gamma(z+1)^z}{G(z)}.
+
+The extension to complex numbers can also be done via
+the integral representation
+
+.. math ::
+
+ H(z) = (2\pi)^{-z/2} \exp \left[
+ {z+1 \choose 2} + \int_0^z \log(t!)\,dt
+ \right].
+
+**Examples**
+
+The rapidly-growing sequence of hyperfactorials begins
+(OEIS A002109)::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(10):
+ ... print("%s %s" % (n, hyperfac(n)))
+ ...
+ 0 1.0
+ 1 1.0
+ 2 4.0
+ 3 108.0
+ 4 27648.0
+ 5 86400000.0
+ 6 4031078400000.0
+ 7 3.3197663987712e+18
+ 8 5.56964379417266e+25
+ 9 2.15779412229419e+34
+
+Some even larger hyperfactorials are::
+
+ >>> hyperfac(1000)
+ 5.46458120882585e+1392926
+ >>> hyperfac(10**10)
+ 4.60408207642219e+489142638002418704309
+
+The hyperfactorial can be evaluated for arbitrary arguments::
+
+ >>> hyperfac(0.5)
+ 0.880449235173423
+ >>> diff(hyperfac, 1)
+ 0.581061466795327
+ >>> hyperfac(pi)
+ 205.211134637462
+ >>> hyperfac(-10+1j)
+ (3.01144471378225e+46 - 2.45285242480185e+46j)
+
+The recurrence property of the hyperfactorial holds
+generally::
+
+ >>> z = 3-4*j
+ >>> hyperfac(z)
+ (-4.49795891462086e-7 - 6.33262283196162e-7j)
+ >>> z**z * hyperfac(z-1)
+ (-4.49795891462086e-7 - 6.33262283196162e-7j)
+ >>> z = mpf(-0.6)
+ >>> chop(z**z * hyperfac(z-1))
+ 1.28170142849352
+ >>> hyperfac(z)
+ 1.28170142849352
+
+The hyperfactorial may also be computed using the integral
+definition::
+
+ >>> z = 2.5
+ >>> hyperfac(z)
+ 15.9842119922237
+ >>> (2*pi)**(-z/2)*exp(binomial(z+1,2) +
+ ... quad(lambda t: loggamma(t+1), [0, z]))
+ 15.9842119922237
+
+:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation::
+
+ >>> mp.dps = 50
+ >>> hyperfac(10)
+ 215779412229418562091680268288000000000000000.0
+ >>> hyperfac(1/sqrt(2))
+ 0.89404818005227001975423476035729076375705084390942
+
+**References**
+
+1. http://oeis.org/A002109
+2. http://mathworld.wolfram.com/Hyperfactorial.html
+
+"""
+
+rgamma = r"""
+Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This
+function evaluates to zero at the poles
+of the gamma function, `z = 0, -1, -2, \ldots`.
+
+**Examples**
+
+Basic examples::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> rgamma(1)
+ 1.0
+ >>> rgamma(4)
+ 0.1666666666666666666666667
+ >>> rgamma(0); rgamma(-1)
+ 0.0
+ 0.0
+ >>> rgamma(1000)
+ 2.485168143266784862783596e-2565
+ >>> rgamma(inf)
+ 0.0
+
+A definite integral that can be evaluated in terms of elementary
+integrals::
+
+ >>> quad(rgamma, [0,inf])
+ 2.807770242028519365221501
+ >>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf])
+ 2.807770242028519365221501
+"""
+
+loggamma = r"""
+Computes the principal branch of the log-gamma function,
+`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many
+complex branch cuts, the principal log-gamma function only has a single
+branch cut along the negative half-axis. The principal branch
+continuously matches the asymptotic Stirling expansion
+
+.. math ::
+
+ \ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} +
+ \left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}).
+
+The real parts of both functions agree, but their imaginary
+parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`.
+They coincide for `z \in \mathbb{R}, z > 0`.
+
+Computationally, it is advantageous to use :func:`~mpmath.loggamma`
+instead of :func:`~mpmath.gamma` for extremely large arguments.
+
+**Examples**
+
+Comparing with `\ln(\Gamma(z))`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> loggamma('13.2'); log(gamma('13.2'))
+ 20.49400419456603678498394
+ 20.49400419456603678498394
+ >>> loggamma(3+4j)
+ (-1.756626784603784110530604 + 4.742664438034657928194889j)
+ >>> log(gamma(3+4j))
+ (-1.756626784603784110530604 - 1.540520869144928548730397j)
+ >>> log(gamma(3+4j)) + 2*pi*j
+ (-1.756626784603784110530604 + 4.742664438034657928194889j)
+
+Note the imaginary parts for negative arguments::
+
+ >>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5)
+ (1.265512123484645396488946 - 3.141592653589793238462643j)
+ (0.8600470153764810145109327 - 6.283185307179586476925287j)
+ (-0.05624371649767405067259453 - 9.42477796076937971538793j)
+
+Some special values::
+
+ >>> loggamma(1); loggamma(2)
+ 0.0
+ 0.0
+ >>> loggamma(3); +ln2
+ 0.6931471805599453094172321
+ 0.6931471805599453094172321
+ >>> loggamma(3.5); log(15*sqrt(pi)/8)
+ 1.200973602347074224816022
+ 1.200973602347074224816022
+ >>> loggamma(inf)
+ +inf
+
+Huge arguments are permitted::
+
+ >>> loggamma('1e30')
+ 6.807755278982137052053974e+31
+ >>> loggamma('1e300')
+ 6.897755278982137052053974e+302
+ >>> loggamma('1e3000')
+ 6.906755278982137052053974e+3003
+ >>> loggamma('1e100000000000000000000')
+ 2.302585092994045684007991e+100000000000000000020
+ >>> loggamma('1e30j')
+ (-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j)
+ >>> loggamma('1e300j')
+ (-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j)
+ >>> loggamma('1e3000j')
+ (-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j)
+
+The log-gamma function can be integrated analytically
+on any interval of unit length::
+
+ >>> z = 0
+ >>> quad(loggamma, [z,z+1]); log(2*pi)/2
+ 0.9189385332046727417803297
+ 0.9189385332046727417803297
+ >>> z = 3+4j
+ >>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2
+ (-0.9619286014994750641314421 + 5.219637303741238195688575j)
+ (-0.9619286014994750641314421 + 5.219637303741238195688575j)
+
+The derivatives of the log-gamma function are given by the
+polygamma function (:func:`~mpmath.psi`)::
+
+ >>> diff(loggamma, -4+3j); psi(0, -4+3j)
+ (1.688493531222971393607153 + 2.554898911356806978892748j)
+ (1.688493531222971393607153 + 2.554898911356806978892748j)
+ >>> diff(loggamma, -4+3j, 2); psi(1, -4+3j)
+ (-0.1539414829219882371561038 - 0.1020485197430267719746479j)
+ (-0.1539414829219882371561038 - 0.1020485197430267719746479j)
+
+The log-gamma function satisfies an additive form of the
+recurrence relation for the ordinary gamma function::
+
+ >>> z = 2+3j
+ >>> loggamma(z); loggamma(z+1) - log(z)
+ (-2.092851753092733349564189 + 2.302396543466867626153708j)
+ (-2.092851753092733349564189 + 2.302396543466867626153708j)
+
+"""
+
+siegeltheta = r"""
+Computes the Riemann-Siegel theta function,
+
+.. math ::
+
+ \theta(t) = \frac{
+ \log\Gamma\left(\frac{1+2it}{4}\right) -
+ \log\Gamma\left(\frac{1-2it}{4}\right)
+ }{2i} - \frac{\log \pi}{2} t.
+
+The Riemann-Siegel theta function is important in
+providing the phase factor for the Z-function
+(see :func:`~mpmath.siegelz`). Evaluation is supported for real and
+complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> siegeltheta(0)
+ 0.0
+ >>> siegeltheta(inf)
+ +inf
+ >>> siegeltheta(-inf)
+ -inf
+ >>> siegeltheta(1)
+ -1.767547952812290388302216
+ >>> siegeltheta(10+0.25j)
+ (-3.068638039426838572528867 + 0.05804937947429712998395177j)
+
+Arbitrary derivatives may be computed with the optional *derivative* keyword argument::
+
+ >>> siegeltheta(1234, derivative=2)
+ 0.0004051864079114053109473741
+ >>> diff(siegeltheta, 1234, n=2)
+ 0.0004051864079114053109473741
+
+
+The Riemann-Siegel theta function has odd symmetry around `t = 0`,
+two local extreme points and three real roots including 0 (located
+symmetrically)::
+
+ >>> nprint(chop(taylor(siegeltheta, 0, 5)))
+ [0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
+ >>> findroot(diffun(siegeltheta), 7)
+ 6.28983598883690277966509
+ >>> findroot(siegeltheta, 20)
+ 17.84559954041086081682634
+
+For large `t`, there is a famous asymptotic formula
+for `\theta(t)`, to first order given by::
+
+ >>> t = mpf(10**6)
+ >>> siegeltheta(t)
+ 5488816.353078403444882823
+ >>> -t*log(2*pi/t)/2-t/2
+ 5488816.745777464310273645
+"""
+
+grampoint = r"""
+Gives the `n`-th Gram point `g_n`, defined as the solution
+to the equation `\theta(g_n) = \pi n` where `\theta(t)`
+is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`).
+
+The first few Gram points are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> grampoint(0)
+ 17.84559954041086081682634
+ >>> grampoint(1)
+ 23.17028270124630927899664
+ >>> grampoint(2)
+ 27.67018221781633796093849
+ >>> grampoint(3)
+ 31.71797995476405317955149
+
+Checking the definition::
+
+ >>> siegeltheta(grampoint(3))
+ 9.42477796076937971538793
+ >>> 3*pi
+ 9.42477796076937971538793
+
+A large Gram point::
+
+ >>> grampoint(10**10)
+ 3293531632.728335454561153
+
+Gram points are useful when studying the Z-function
+(:func:`~mpmath.siegelz`). See the documentation of that function
+for additional examples.
+
+:func:`~mpmath.grampoint` can solve the defining equation for
+nonintegral `n`. There is a fixed point where `g(x) = x`::
+
+ >>> findroot(lambda x: grampoint(x) - x, 10000)
+ 9146.698193171459265866198
+
+**References**
+
+1. http://mathworld.wolfram.com/GramPoint.html
+
+"""
+
+siegelz = r"""
+Computes the Z-function, also known as the Riemann-Siegel Z function,
+
+.. math ::
+
+ Z(t) = e^{i \theta(t)} \zeta(1/2+it)
+
+where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`)
+and where `\theta(t)` denotes the Riemann-Siegel theta function
+(see :func:`~mpmath.siegeltheta`).
+
+Evaluation is supported for real and complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> siegelz(1)
+ -0.7363054628673177346778998
+ >>> siegelz(3+4j)
+ (-0.1852895764366314976003936 - 0.2773099198055652246992479j)
+
+The first four derivatives are supported, using the
+optional *derivative* keyword argument::
+
+ >>> siegelz(1234567, derivative=3)
+ 56.89689348495089294249178
+ >>> diff(siegelz, 1234567, n=3)
+ 56.89689348495089294249178
+
+
+The Z-function has a Maclaurin expansion::
+
+ >>> nprint(chop(taylor(siegelz, 0, 4)))
+ [-1.46035, 0.0, 2.73588, 0.0, -8.39357]
+
+The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
+critical line `s = 1/2+it` (i.e. for real arguments `t`
+to `Z`). Its zeros coincide with those of the Riemann zeta
+function::
+
+ >>> findroot(siegelz, 14)
+ 14.13472514173469379045725
+ >>> findroot(siegelz, 20)
+ 21.02203963877155499262848
+ >>> findroot(zeta, 0.5+14j)
+ (0.5 + 14.13472514173469379045725j)
+ >>> findroot(zeta, 0.5+20j)
+ (0.5 + 21.02203963877155499262848j)
+
+Since the Z-function is real-valued on the critical line
+(and, unlike `|\zeta(s)|`, analytic), it is useful for
+investigating the zeros of the Riemann zeta function.
+For example, one can use a root-finding algorithm based
+on sign changes::
+
+ >>> findroot(siegelz, [100, 200], solver='bisect')
+ 176.4414342977104188888926
+
+To locate roots, Gram points `g_n` which can be computed
+by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is
+positive for two consecutive `n`, then `Z(t)` must have
+a zero between those points::
+
+ >>> g10 = grampoint(10)
+ >>> g11 = grampoint(11)
+ >>> (-1)**10 * siegelz(g10) > 0
+ True
+ >>> (-1)**11 * siegelz(g11) > 0
+ True
+ >>> findroot(siegelz, [g10, g11], solver='bisect')
+ 56.44624769706339480436776
+ >>> g10, g11
+ (54.67523744685325626632663, 57.54516517954725443703014)
+
+"""
+
+riemannr = r"""
+Evaluates the Riemann R function, a smooth approximation of the
+prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann
+R function gives a fast numerical approximation useful e.g. to
+roughly estimate the number of primes in a given interval.
+
+The Riemann R function is computed using the rapidly convergent Gram
+series,
+
+.. math ::
+
+ R(x) = 1 + \sum_{k=1}^{\infty}
+ \frac{\log^k x}{k k! \zeta(k+1)}.
+
+From the Gram series, one sees that the Riemann R function is a
+well-defined analytic function (except for a branch cut along
+the negative real half-axis); it can be evaluated for arbitrary
+real or complex arguments.
+
+The Riemann R function gives a very accurate approximation
+of the prime counting function. For example, it is wrong by at
+most 2 for `x < 1000`, and for `x = 10^9` differs from the exact
+value of `\pi(x)` by 79, or less than two parts in a million.
+It is about 10 times more accurate than the logarithmic integral
+estimate (see :func:`~mpmath.li`), which however is even faster to evaluate.
+It is orders of magnitude more accurate than the extremely
+fast `x/\log x` estimate.
+
+**Examples**
+
+For small arguments, the Riemann R function almost exactly
+gives the prime counting function if rounded to the nearest
+integer::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> primepi(50), riemannr(50)
+ (15, 14.9757023241462)
+ >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100))
+ 1
+ >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300))
+ 2
+
+The Riemann R function can be evaluated for arguments far too large
+for exact determination of `\pi(x)` to be computationally
+feasible with any presently known algorithm::
+
+ >>> riemannr(10**30)
+ 1.46923988977204e+28
+ >>> riemannr(10**100)
+ 4.3619719871407e+97
+ >>> riemannr(10**1000)
+ 4.3448325764012e+996
+
+A comparison of the Riemann R function and logarithmic integral estimates
+for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`.
+The fractional error is shown in parentheses::
+
+ >>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534]
+ >>> for n, p in enumerate(exact):
+ ... n += 1
+ ... r, l = riemannr(10**n), li(10**n)
+ ... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3)
+ ... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr))
+ ...
+ 1 4 4.56458314100509(0.141) 6.1655995047873(0.541)
+ 2 25 25.6616332669242(0.0265) 30.1261415840796(0.205)
+ 3 168 168.359446281167(0.00214) 177.609657990152(0.0572)
+ 4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139)
+ 5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394)
+ 6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165)
+ 7 664579 664667.447564748(0.000133) 664918.405048569(0.000511)
+ 8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131)
+ 9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5)
+
+The derivative of the Riemann R function gives the approximate
+probability for a number of magnitude `x` to be prime::
+
+ >>> diff(riemannr, 1000)
+ 0.141903028110784
+ >>> mpf(primepi(1050) - primepi(950)) / 100
+ 0.15
+
+Evaluation is supported for arbitrary arguments and at arbitrary
+precision::
+
+ >>> mp.dps = 30
+ >>> riemannr(7.5)
+ 3.72934743264966261918857135136
+ >>> riemannr(-4+2j)
+ (-0.551002208155486427591793957644 + 2.16966398138119450043195899746j)
+
+"""
+
+primepi = r"""
+Evaluates the prime counting function, `\pi(x)`, which gives
+the number of primes less than or equal to `x`. The argument
+`x` may be fractional.
+
+The prime counting function is very expensive to evaluate
+precisely for large `x`, and the present implementation is
+not optimized in any way. For numerical approximation of the
+prime counting function, it is better to use :func:`~mpmath.primepi2`
+or :func:`~mpmath.riemannr`.
+
+Some values of the prime counting function::
+
+ >>> from mpmath import *
+ >>> [primepi(k) for k in range(20)]
+ [0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8]
+ >>> primepi(3.5)
+ 2
+ >>> primepi(100000)
+ 9592
+
+"""
+
+primepi2 = r"""
+Returns an interval (as an ``mpi`` instance) providing bounds
+for the value of the prime counting function `\pi(x)`. For small
+`x`, :func:`~mpmath.primepi2` returns an exact interval based on
+the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval
+based on Schoenfeld's inequality
+
+.. math ::
+
+ |\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi}
+
+is returned. This estimate is rigorous assuming the truth of
+the Riemann hypothesis, and can be computed very quickly.
+
+**Examples**
+
+Exact values of the prime counting function for small `x`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> iv.dps = 15; iv.pretty = True
+ >>> primepi2(10)
+ [4.0, 4.0]
+ >>> primepi2(100)
+ [25.0, 25.0]
+ >>> primepi2(1000)
+ [168.0, 168.0]
+
+Loose intervals are generated for moderately large `x`::
+
+ >>> primepi2(10000), primepi(10000)
+ ([1209.0, 1283.0], 1229)
+ >>> primepi2(50000), primepi(50000)
+ ([5070.0, 5263.0], 5133)
+
+As `x` increases, the absolute error gets worse while the relative
+error improves. The exact value of `\pi(10^{23})` is
+1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant
+digits::
+
+ >>> p = primepi2(10**23)
+ >>> p
+ [1.9253203909477020467e+21, 1.925320392280406229e+21]
+ >>> mpf(p.delta) / mpf(p.a)
+ 6.9219865355293e-10
+
+A more precise, nonrigorous estimate for `\pi(x)` can be
+obtained using the Riemann R function (:func:`~mpmath.riemannr`).
+For large enough `x`, the value returned by :func:`~mpmath.primepi2`
+essentially amounts to a small perturbation of the value returned by
+:func:`~mpmath.riemannr`::
+
+ >>> primepi2(10**100)
+ [4.3619719871407024816e+97, 4.3619719871407032404e+97]
+ >>> riemannr(10**100)
+ 4.3619719871407e+97
+"""
+
+primezeta = r"""
+Computes the prime zeta function, which is defined
+in analogy with the Riemann zeta function (:func:`~mpmath.zeta`)
+as
+
+.. math ::
+
+ P(s) = \sum_p \frac{1}{p^s}
+
+where the sum is taken over all prime numbers `p`. Although
+this sum only converges for `\mathrm{Re}(s) > 1`, the
+function is defined by analytic continuation in the
+half-plane `\mathrm{Re}(s) > 0`.
+
+**Examples**
+
+Arbitrary-precision evaluation for real and complex arguments is
+supported::
+
+ >>> from mpmath import *
+ >>> mp.dps = 30; mp.pretty = True
+ >>> primezeta(2)
+ 0.452247420041065498506543364832
+ >>> primezeta(pi)
+ 0.15483752698840284272036497397
+ >>> mp.dps = 50
+ >>> primezeta(3)
+ 0.17476263929944353642311331466570670097541212192615
+ >>> mp.dps = 20
+ >>> primezeta(3+4j)
+ (-0.12085382601645763295 - 0.013370403397787023602j)
+
+The prime zeta function has a logarithmic pole at `s = 1`,
+with residue equal to the difference of the Mertens and
+Euler constants::
+
+ >>> primezeta(1)
+ +inf
+ >>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps)
+ -0.31571845205389007685
+ >>> mertens-euler
+ -0.31571845205389007685
+
+The analytic continuation to `0 < \mathrm{Re}(s) \le 1`
+is implemented. In this strip the function exhibits
+very complex behavior; on the unit interval, it has poles at
+`1/n` for every squarefree integer `n`::
+
+ >>> primezeta(0.5) # Pole at s = 1/2
+ (-inf + 3.1415926535897932385j)
+ >>> primezeta(0.25)
+ (-1.0416106801757269036 + 0.52359877559829887308j)
+ >>> primezeta(0.5+10j)
+ (0.54892423556409790529 + 0.45626803423487934264j)
+
+Although evaluation works in principle for any `\mathrm{Re}(s) > 0`,
+it should be noted that the evaluation time increases exponentially
+as `s` approaches the imaginary axis.
+
+For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`::
+
+ >>> primezeta(inf)
+ 0.0
+ >>> primezeta(10), mpf(2)**-10
+ (0.00099360357443698021786, 0.0009765625)
+ >>> primezeta(1000)
+ 9.3326361850321887899e-302
+ >>> primezeta(1000+1000j)
+ (-3.8565440833654995949e-302 - 8.4985390447553234305e-302j)
+
+**References**
+
+Carl-Erik Froberg, "On the prime zeta function",
+BIT 8 (1968), pp. 187-202.
+
+"""
+
+bernpoly = r"""
+Evaluates the Bernoulli polynomial `B_n(z)`.
+
+The first few Bernoulli polynomials are::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(6):
+ ... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
+ ...
+ [1.0]
+ [-0.5, 1.0]
+ [0.166667, -1.0, 1.0]
+ [0.0, 0.5, -1.5, 1.0]
+ [-0.0333333, 0.0, 1.0, -2.0, 1.0]
+ [0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
+
+At `z = 0`, the Bernoulli polynomial evaluates to a
+Bernoulli number (see :func:`~mpmath.bernoulli`)::
+
+ >>> bernpoly(12, 0), bernoulli(12)
+ (-0.253113553113553, -0.253113553113553)
+ >>> bernpoly(13, 0), bernoulli(13)
+ (0.0, 0.0)
+
+Evaluation is accurate for large `n` and small `z`::
+
+ >>> mp.dps = 25
+ >>> bernpoly(100, 0.5)
+ 2.838224957069370695926416e+78
+ >>> bernpoly(1000, 10.5)
+ 5.318704469415522036482914e+1769
+
+"""
+
+polylog = r"""
+Computes the polylogarithm, defined by the sum
+
+.. math ::
+
+ \mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}.
+
+This series is convergent only for `|z| < 1`, so elsewhere
+the analytic continuation is implied.
+
+The polylogarithm should not be confused with the logarithmic
+integral (also denoted by Li or li), which is implemented
+as :func:`~mpmath.li`.
+
+**Examples**
+
+The polylogarithm satisfies a huge number of functional identities.
+A sample of polylogarithm evaluations is shown below::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> polylog(1,0.5), log(2)
+ (0.693147180559945, 0.693147180559945)
+ >>> polylog(2,0.5), (pi**2-6*log(2)**2)/12
+ (0.582240526465012, 0.582240526465012)
+ >>> polylog(2,-phi), -log(phi)**2-pi**2/10
+ (-1.21852526068613, -1.21852526068613)
+ >>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6
+ (0.53721319360804, 0.53721319360804)
+
+:func:`~mpmath.polylog` can evaluate the analytic continuation of the
+polylogarithm when `s` is an integer::
+
+ >>> polylog(2, 10)
+ (0.536301287357863 - 7.23378441241546j)
+ >>> polylog(2, -10)
+ -4.1982778868581
+ >>> polylog(2, 10j)
+ (-3.05968879432873 + 3.71678149306807j)
+ >>> polylog(-2, 10)
+ -0.150891632373114
+ >>> polylog(-2, -10)
+ 0.067618332081142
+ >>> polylog(-2, 10j)
+ (0.0384353698579347 + 0.0912451798066779j)
+
+Some more examples, with arguments on the unit circle (note that
+the series definition cannot be used for computation here)::
+
+ >>> polylog(2,j)
+ (-0.205616758356028 + 0.915965594177219j)
+ >>> j*catalan-pi**2/48
+ (-0.205616758356028 + 0.915965594177219j)
+ >>> polylog(3,exp(2*pi*j/3))
+ (-0.534247512515375 + 0.765587078525922j)
+ >>> -4*zeta(3)/9 + 2*j*pi**3/81
+ (-0.534247512515375 + 0.765587078525921j)
+
+Polylogarithms of different order are related by integration
+and differentiation::
+
+ >>> s, z = 3, 0.5
+ >>> polylog(s+1, z)
+ 0.517479061673899
+ >>> quad(lambda t: polylog(s,t)/t, [0, z])
+ 0.517479061673899
+ >>> z*diff(lambda t: polylog(s+2,t), z)
+ 0.517479061673899
+
+Taylor series expansions around `z = 0` are::
+
+ >>> for n in range(-3, 4):
+ ... nprint(taylor(lambda x: polylog(n,x), 0, 5))
+ ...
+ [0.0, 1.0, 8.0, 27.0, 64.0, 125.0]
+ [0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
+ [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
+ [0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+ [0.0, 1.0, 0.5, 0.333333, 0.25, 0.2]
+ [0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04]
+ [0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008]
+
+The series defining the polylogarithm is simultaneously
+a Taylor series and an L-series. For certain values of `z`, the
+polylogarithm reduces to a pure zeta function::
+
+ >>> polylog(pi, 1), zeta(pi)
+ (1.17624173838258, 1.17624173838258)
+ >>> polylog(pi, -1), -altzeta(pi)
+ (-0.909670702980385, -0.909670702980385)
+
+Evaluation for arbitrary, nonintegral `s` is supported
+for `z` within the unit circle:
+
+ >>> polylog(3+4j, 0.25)
+ (0.24258605789446 - 0.00222938275488344j)
+ >>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf])
+ (0.24258605789446 - 0.00222938275488344j)
+
+It is also supported outside of the unit circle::
+
+ >>> polylog(1+j, 20+40j)
+ (-7.1421172179728 - 3.92726697721369j)
+ >>> polylog(1+j, 200+400j)
+ (-5.41934747194626 - 9.94037752563927j)
+
+**References**
+
+1. Richard Crandall, "Note on fast polylogarithm computation"
+ http://www.reed.edu/physics/faculty/crandall/papers/Polylog.pdf
+2. http://en.wikipedia.org/wiki/Polylogarithm
+3. http://mathworld.wolfram.com/Polylogarithm.html
+
+"""
+
+bell = r"""
+For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell
+polynomial `B_n(x)`, the first few of which are
+
+.. math ::
+
+ B_0(x) = 1
+
+ B_1(x) = x
+
+ B_2(x) = x^2+x
+
+ B_3(x) = x^3+3x^2+x
+
+If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it
+gives the `n`-th Bell number `B_n`, which is the number of
+partitions of a set with `n` elements. By setting the precision to
+at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast
+calculation of exact Bell numbers.
+
+In general, :func:`~mpmath.bell` computes
+
+.. math ::
+
+ B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right)
+
+where `E_n(x)` is the generalized exponential function implemented
+by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1],
+where the modification is the sinc term ensuring that `B_n(x)` is
+continuous in `n`; :func:`~mpmath.bell` can thus be evaluated,
+differentiated, etc for arbitrary complex arguments.
+
+**Examples**
+
+Simple evaluations::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> bell(0, 2.5)
+ 1.0
+ >>> bell(1, 2.5)
+ 2.5
+ >>> bell(2, 2.5)
+ 8.75
+
+Evaluation for arbitrary complex arguments::
+
+ >>> bell(5.75+1j, 2-3j)
+ (-10767.71345136587098445143 - 15449.55065599872579097221j)
+
+The first few Bell polynomials::
+
+ >>> for k in range(7):
+ ... nprint(taylor(lambda x: bell(k,x), 0, k))
+ ...
+ [1.0]
+ [0.0, 1.0]
+ [0.0, 1.0, 1.0]
+ [0.0, 1.0, 3.0, 1.0]
+ [0.0, 1.0, 7.0, 6.0, 1.0]
+ [0.0, 1.0, 15.0, 25.0, 10.0, 1.0]
+ [0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0]
+
+The first few Bell numbers and complementary Bell numbers::
+
+ >>> [int(bell(k)) for k in range(10)]
+ [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147]
+ >>> [int(bell(k,-1)) for k in range(10)]
+ [1, -1, 0, 1, 1, -2, -9, -9, 50, 267]
+
+Large Bell numbers::
+
+ >>> mp.dps = 50
+ >>> bell(50)
+ 185724268771078270438257767181908917499221852770.0
+ >>> bell(50,-1)
+ -29113173035759403920216141265491160286912.0
+
+Some even larger values::
+
+ >>> mp.dps = 25
+ >>> bell(1000,-1)
+ -1.237132026969293954162816e+1869
+ >>> bell(1000)
+ 2.989901335682408421480422e+1927
+ >>> bell(1000,2)
+ 6.591553486811969380442171e+1987
+ >>> bell(1000,100.5)
+ 9.101014101401543575679639e+2529
+
+A determinant identity satisfied by Bell numbers::
+
+ >>> mp.dps = 15
+ >>> N = 8
+ >>> det([[bell(k+j) for j in range(N)] for k in range(N)])
+ 125411328000.0
+ >>> superfac(N-1)
+ 125411328000.0
+
+**References**
+
+1. http://mathworld.wolfram.com/DobinskisFormula.html
+
+"""
+
+polyexp = r"""
+Evaluates the polyexponential function, defined for arbitrary
+complex `s`, `z` by the series
+
+.. math ::
+
+ E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
+
+`E_s(z)` is constructed from the exponential function analogously
+to how the polylogarithm is constructed from the ordinary
+logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
+It is an entire function of both `s` and `z`.
+
+The polyexponential function provides a generalization of the
+Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
+In terms of the Bell polynomials,
+
+.. math ::
+
+ E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
+
+Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
+is a nonzero integer, but not otherwise. In particular, they differ
+at `n = 0`.
+
+**Examples**
+
+Evaluating a series::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
+ 2.101755547733791780315904
+ >>> polyexp(0.5,1)
+ 2.101755547733791780315904
+
+Evaluation for arbitrary arguments::
+
+ >>> polyexp(-3-4j, 2.5+2j)
+ (2.351660261190434618268706 + 1.202966666673054671364215j)
+
+Evaluation is accurate for tiny function values::
+
+ >>> polyexp(4, -100)
+ 3.499471750566824369520223e-36
+
+If `n` is a nonpositive integer, `E_n` reduces to a special
+instance of the hypergeometric function `\,_pF_q`::
+
+ >>> n = 3
+ >>> x = pi
+ >>> polyexp(-n,x)
+ 4.042192318847986561771779
+ >>> x*hyper([1]*(n+1), [2]*(n+1), x)
+ 4.042192318847986561771779
+
+"""
+
+cyclotomic = r"""
+Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
+
+.. math ::
+
+ \Phi_n(x) = \prod_{\zeta} (x - \zeta)
+
+where `\zeta` ranges over all primitive `n`-th roots of unity
+(see :func:`~mpmath.unitroots`). An equivalent representation, used
+for computation, is
+
+.. math ::
+
+    \Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
+
+where `\mu(m)` denotes the Moebius function. The cyclotomic
+polynomials are integer polynomials, the first of which can be
+written explicitly as
+
+.. math ::
+
+ \Phi_0(x) = 1
+
+ \Phi_1(x) = x - 1
+
+ \Phi_2(x) = x + 1
+
+    \Phi_3(x) = x^2 + x + 1
+
+ \Phi_4(x) = x^2 + 1
+
+ \Phi_5(x) = x^4 + x^3 + x^2 + x + 1
+
+ \Phi_6(x) = x^2 - x + 1
+
+**Examples**
+
+The coefficients of low-order cyclotomic polynomials can be recovered
+using Taylor expansion::
+
+ >>> from mpmath import *
+ >>> mp.dps = 15; mp.pretty = True
+ >>> for n in range(9):
+ ... p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
+ ... print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
+ ...
+ 0 [1.0]
+ 1 [-1.0, 1.0]
+ 2 [1.0, 1.0]
+ 3 [1.0, 1.0, 1.0]
+ 4 [1.0, 0.0, 1.0]
+ 5 [1.0, 1.0, 1.0, 1.0, 1.0]
+ 6 [1.0, -1.0, 1.0]
+ 7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+ 8 [1.0, 0.0, 0.0, 0.0, 1.0]
+
+The definition as a product over primitive roots may be checked
+by computing the product explicitly (for a real argument, this
+method will generally introduce numerical noise in the imaginary
+part)::
+
+ >>> mp.dps = 25
+ >>> z = 3+4j
+ >>> cyclotomic(10, z)
+ (-419.0 - 360.0j)
+ >>> fprod(z-r for r in unitroots(10, primitive=True))
+ (-419.0 - 360.0j)
+ >>> z = 3
+ >>> cyclotomic(10, z)
+ 61.0
+ >>> fprod(z-r for r in unitroots(10, primitive=True))
+ (61.0 - 3.146045605088568607055454e-25j)
+
+Up to permutation, the roots of a given cyclotomic polynomial
+can be checked to agree with the list of primitive roots::
+
+ >>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
+ >>> for r in polyroots(p[::-1]):
+ ... print(r)
+ ...
+ (0.5 - 0.8660254037844386467637232j)
+ (0.5 + 0.8660254037844386467637232j)
+ >>>
+ >>> for r in unitroots(6, primitive=True):
+ ... print(r)
+ ...
+ (0.5 + 0.8660254037844386467637232j)
+ (0.5 - 0.8660254037844386467637232j)
+
+"""
+
+meijerg = r"""
+Evaluates the Meijer G-function, defined as
+
+.. math ::
+
+ G^{m,n}_{p,q} \left( \left. \begin{matrix}
+ a_1, \dots, a_n ; a_{n+1} \dots a_p \\
+ b_1, \dots, b_m ; b_{m+1} \dots b_q
+ \end{matrix}\; \right| \; z ; r \right) =
+ \frac{1}{2 \pi i} \int_L
+ \frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)}
+ {\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)}
+ z^{-s/r} ds
+
+for an appropriate choice of the contour `L` (see references).
+
+There are `p` elements `a_j`.
+The argument *a_s* should be a pair of lists, the first containing the
+`n` elements `a_1, \ldots, a_n` and the second containing
+the `p-n` elements `a_{n+1}, \ldots a_p`.
+
+There are `q` elements `b_j`.
+The argument *b_s* should be a pair of lists, the first containing the
+`m` elements `b_1, \ldots, b_m` and the second containing
+the `q-m` elements `b_{m+1}, \ldots b_q`.
+
+The implicit tuple `(m, n, p, q)` constitutes the order or degree of the
+Meijer G-function, and is determined by the lengths of the coefficient
+vectors. Confusingly, the indices in this tuple appear in a different order
+from the coefficients, but this notation is standard. The many examples
+given below should hopefully clear up any potential confusion.
+
+**Algorithm**
+
+The Meijer G-function is evaluated as a combination of hypergeometric series.
+There are two versions of the function, which can be selected with
+the optional *series* argument.
+
+*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z`
+
+*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z`
+
+The default series is chosen based on the degree and `|z|` in order
+to be consistent with Mathematica's. This definition of the Meijer G-function
+has a discontinuity at `|z| = 1` for some orders, which can
+be avoided by explicitly specifying a series.
+
+Keyword arguments are forwarded to :func:`~mpmath.hypercomb`.
+
+**Examples**
+
+Many standard functions are special cases of the Meijer G-function
+(possibly rescaled and/or with branch cut corrections). We define
+some test parameters::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> a = mpf(0.75)
+ >>> b = mpf(1.5)
+ >>> z = mpf(2.25)
+
+The exponential function:
+`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \;
+\right| \; -z \right)`
+
+ >>> meijerg([[],[]], [[0],[]], -z)
+ 9.487735836358525720550369
+ >>> exp(z)
+ 9.487735836358525720550369
+
+The natural logarithm:
+`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0
+\end{matrix} \; \right| \; -z \right)`
+
+ >>> meijerg([[1,1],[]], [[1],[0]], z)
+ 1.178654996341646117219023
+ >>> log(1+z)
+ 1.178654996341646117219023
+
+A rational function:
+`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1
+\end{matrix} \; \right| \; z \right)`
+
+ >>> meijerg([[1,1],[]], [[1],[1]], z)
+ 0.6923076923076923076923077
+ >>> z/(z+1)
+ 0.6923076923076923076923077
+
+The sine and cosine functions:
+
+`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
+- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)`
+
+`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
+- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)`
+
+ >>> meijerg([[],[]], [[0.5],[0]], (z/2)**2)
+ 0.4389807929218676682296453
+ >>> sin(z)/sqrt(pi)
+ 0.4389807929218676682296453
+ >>> meijerg([[],[]], [[0],[0.5]], (z/2)**2)
+ -0.3544090145996275423331762
+ >>> cos(z)/sqrt(pi)
+ -0.3544090145996275423331762
+
+Bessel functions:
+
+`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
+\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
+\end{matrix} \; \right| \; z \right)`
+
+`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left.
+\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2}
+\end{matrix} \; \right| \; z \right)`
+
+`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
+\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
+\end{matrix} \; \right| \; -z \right)`
+
+`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left.
+\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
+\end{matrix} \; \right| \; z \right)`
+
+As the example with the Bessel *I* function shows, a branch
+factor is required for some arguments when inverting the square root.
+
+ >>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2)
+ 0.5059425789597154858527264
+ >>> besselj(a,z)
+ 0.5059425789597154858527264
+ >>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2)
+ 0.1853868950066556941442559
+ >>> bessely(a, z)
+ 0.1853868950066556941442559
+ >>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2)
+ (0.8685913322427653875717476 + 2.096964974460199200551738j)
+ >>> (-z)**(a/2) / z**(a/2) * besseli(a, z)
+ (0.8685913322427653875717476 + 2.096964974460199200551738j)
+ >>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2)
+ 0.09334163695597828403796071
+ >>> besselk(a,z)
+ 0.09334163695597828403796071
+
+Error functions:
+
+`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left.
+\begin{matrix} a \\ a-1, a-\frac{1}{2}
+\end{matrix} \; \right| \; z, \frac{1}{2} \right)`
+
+ >>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5)
+ 0.00172839843123091957468712
+ >>> sqrt(pi) * z**(2*a-2) * erfc(z)
+ 0.00172839843123091957468712
+
+A Meijer G-function of higher degree, (1,1,2,3):
+
+ >>> meijerg([[a],[b]], [[a],[b,a-1]], z)
+ 1.55984467443050210115617
+ >>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1)
+ 1.55984467443050210115617
+
+A Meijer G-function of still higher degree, (4,1,2,4), that can
+be expanded as a messy combination of exponential integrals:
+
+ >>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z)
+ 0.3323667133658557271898061
+ >>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\
+ ... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z)))
+ 0.3323667133658557271898061
+
+In the following case, different series give different values::
+
+ >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2))
+ -0.06417628097442437076207337
+ >>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1)
+ 0.1428699426155117511873047
+ >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2))
+ -0.06417628097442437076207337
+
+**References**
+
+1. http://en.wikipedia.org/wiki/Meijer_G-function
+
+2. http://mathworld.wolfram.com/MeijerG-Function.html
+
+3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/
+
+4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/
+
+"""
+
+clsin = r"""
+Computes the Clausen sine function, defined formally by the series
+
+.. math ::
+
+ \mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}.
+
+The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical
+"Clausen function". More generally, the Clausen function is defined for
+complex `s` and `z`, even when the series does not converge. The
+Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as
+
+.. math ::
+
+ \mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) -
+ \mathrm{Li}_s\left(e^{-iz}\right)\right)
+
+ = \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}),
+
+and this representation can be taken to provide the analytic continuation of the
+series. The complementary function :func:`~mpmath.clcos` gives the corresponding
+cosine sum.
+
+**Examples**
+
+Evaluation for arbitrarily chosen `s` and `z`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> s, z = 3, 4
+ >>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf])
+ -0.6533010136329338746275795
+ -0.6533010136329338746275795
+
+Using `z + \pi` instead of `z` gives an alternating series::
+
+ >>> clsin(s, z+pi)
+ 0.8860032351260589402871624
+ >>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf])
+ 0.8860032351260589402871624
+
+With `s = 1`, the sum can be expressed in closed form
+using elementary functions::
+
+ >>> z = 1 + sqrt(3)
+ >>> clsin(1, z)
+ 0.2047709230104579724675985
+ >>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j))
+ 0.2047709230104579724675985
+ >>> nsum(lambda k: sin(k*z)/k, [1,inf])
+ 0.2047709230104579724675985
+
+The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the
+value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for
+`0 < \theta < 2 \pi`::
+
+ >>> cl2 = lambda t: clsin(2, t)
+ >>> cl2(3.5)
+ -0.2465045302347694216534255
+ >>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5])
+ -0.2465045302347694216534255
+
+This function is symmetric about `\theta = \pi` with zeros and extreme
+points::
+
+ >>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi))
+ 0.0
+ 1.014941606409653625021203
+ 0.0
+ -1.014941606409653625021203
+ 0.0
+
+Catalan's constant is a special value::
+
+ >>> cl2(pi/2)
+ 0.9159655941772190150546035
+ >>> +catalan
+ 0.9159655941772190150546035
+
+The Clausen sine function can be expressed in closed form when
+`s` is an odd integer (becoming zero when `s` < 0)::
+
+ >>> z = 1 + sqrt(2)
+ >>> clsin(1, z); (pi-z)/2
+ 0.3636895456083490948304773
+ 0.3636895456083490948304773
+ >>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12
+ 0.5661751584451144991707161
+ 0.5661751584451144991707161
+ >>> clsin(-1, z)
+ 0.0
+ >>> clsin(-3, z)
+ 0.0
+
+It can also be expressed in closed form for even integer `s \le 0`,
+providing a finite sum for series such as
+`\sin(z) + \sin(2z) + \sin(3z) + \ldots`::
+
+ >>> z = 1 + sqrt(2)
+ >>> clsin(0, z)
+ 0.1903105029507513881275865
+ >>> cot(z/2)/2
+ 0.1903105029507513881275865
+ >>> clsin(-2, z)
+ -0.1089406163841548817581392
+ >>> -cot(z/2)*csc(z/2)**2/4
+ -0.1089406163841548817581392
+
+Call with ``pi=True`` to multiply `z` by `\pi` exactly::
+
+ >>> clsin(3, 3*pi)
+ -8.892316224968072424732898e-26
+ >>> clsin(3, 3, pi=True)
+ 0.0
+
+Evaluation for complex `s`, `z` in a nonconvergent case::
+
+ >>> s, z = -1-j, 1+2j
+ >>> clsin(s, z)
+ (-0.593079480117379002516034 + 0.9038644233367868273362446j)
+ >>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf])
+ (-0.593079480117379002516034 + 0.9038644233367868273362446j)
+
+"""
+
+clcos = r"""
+Computes the Clausen cosine function, defined formally by the series
+
+.. math ::
+
+ \mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}.
+
+This function is complementary to the Clausen sine function
+:func:`~mpmath.clsin`. In terms of the polylogarithm,
+
+.. math ::
+
+ \mathrm{\widetilde{Cl}}_s(z) =
+ \frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) +
+ \mathrm{Li}_s\left(e^{-iz}\right)\right)
+
+ = \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}).
+
+**Examples**
+
+Evaluation for arbitrarily chosen `s` and `z`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> s, z = 3, 4
+ >>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf])
+ -0.6518926267198991308332759
+ -0.6518926267198991308332759
+
+Using `z + \pi` instead of `z` gives an alternating series::
+
+ >>> s, z = 3, 0.5
+ >>> clcos(s, z+pi)
+ -0.8155530586502260817855618
+ >>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf])
+ -0.8155530586502260817855618
+
+With `s = 1`, the sum can be expressed in closed form
+using elementary functions::
+
+ >>> z = 1 + sqrt(3)
+ >>> clcos(1, z)
+ -0.6720334373369714849797918
+ >>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z))))
+ -0.6720334373369714849797918
+ >>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real
+ -0.6720334373369714849797918
+ >>> nsum(lambda k: cos(k*z)/k, [1,inf])
+ -0.6720334373369714849797918
+
+It can also be expressed in closed form when `s` is an even integer.
+For example,
+
+ >>> clcos(2,z)
+ -0.7805359025135583118863007
+ >>> pi**2/6 - pi*z/2 + z**2/4
+ -0.7805359025135583118863007
+
+The case `s = 0` gives the renormalized sum of
+`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for
+any value of `z`)::
+
+ >>> clcos(0, z)
+ -0.5
+ >>> nsum(lambda k: cos(k*z), [1,inf])
+ -0.5
+
+Also the sums
+
+.. math ::
+
+ \cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots
+
+and
+
+.. math ::
+
+ \cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots
+
+for higher integer powers `n = -s` can be done in closed form. They are zero
+when `n` is positive and even (`s` negative and even)::
+
+ >>> clcos(-1, z); 1/(2*cos(z)-2)
+ -0.2607829375240542480694126
+ -0.2607829375240542480694126
+ >>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8
+ 0.1472635054979944390848006
+ 0.1472635054979944390848006
+ >>> clcos(-2, z); clcos(-4, z); clcos(-6, z)
+ 0.0
+ 0.0
+ 0.0
+
+With `z = \pi`, the series reduces to that of the Riemann zeta function
+(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta
+function values)::
+
+ >>> clcos(2.5, 0); zeta(2.5)
+ 1.34148725725091717975677
+ 1.34148725725091717975677
+ >>> clcos(2.5, pi); -altzeta(2.5)
+ -0.8671998890121841381913472
+ -0.8671998890121841381913472
+
+Call with ``pi=True`` to multiply `z` by `\pi` exactly::
+
+ >>> clcos(-3, 2*pi)
+ 2.997921055881167659267063e+102
+ >>> clcos(-3, 2, pi=True)
+ 0.008333333333333333333333333
+
+Evaluation for complex `s`, `z` in a nonconvergent case::
+
+ >>> s, z = -1-j, 1+2j
+ >>> clcos(s, z)
+ (0.9407430121562251476136807 + 0.715826296033590204557054j)
+ >>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf])
+ (0.9407430121562251476136807 + 0.715826296033590204557054j)
+
+"""
+
+whitm = r"""
+Evaluates the Whittaker function `M(k,m,z)`, which gives a solution
+to the Whittaker differential equation
+
+.. math ::
+
+ \frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+
+ \frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0.
+
+A second solution is given by :func:`~mpmath.whitw`.
+
+The Whittaker functions are defined in Abramowitz & Stegun, section 13.1.
+They are alternate forms of the confluent hypergeometric functions
+`\,_1F_1` and `U`:
+
+.. math ::
+
+ M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
+ \,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z)
+
+ W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
+ U(\tfrac{1}{2}+m-k, 1+2m, z).
+
+**Examples**
+
+Evaluation for arbitrary real and complex arguments is supported::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> whitm(1, 1, 1)
+ 0.7302596799460411820509668
+ >>> whitm(1, 1, -1)
+ (0.0 - 1.417977827655098025684246j)
+ >>> whitm(j, j/2, 2+3j)
+ (3.245477713363581112736478 - 0.822879187542699127327782j)
+ >>> whitm(2, 3, 100000)
+ 4.303985255686378497193063e+21707
+
+Evaluation at zero::
+
+ >>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0)
+ +inf
+ nan
+ 0.0
+
+We can verify that :func:`~mpmath.whitm` numerically satisfies the
+differential equation for arbitrarily chosen values::
+
+ >>> k = mpf(0.25)
+ >>> m = mpf(1.5)
+ >>> f = lambda z: whitm(k,m,z)
+ >>> for z in [-1, 2.5, 3, 1+2j]:
+ ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+
+An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`,
+verifying evaluation along the real axis::
+
+ >>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf])
+ 3.438869842576800225207341
+ >>> 128/(21*sqrt(pi))
+ 3.438869842576800225207341
+
+"""
+
+whitw = r"""
+Evaluates the Whittaker function `W(k,m,z)`, which gives a second
+solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.)
+
+**Examples**
+
+Evaluation for arbitrary real and complex arguments is supported::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> whitw(1, 1, 1)
+ 1.19532063107581155661012
+ >>> whitw(1, 1, -1)
+ (-0.9424875979222187313924639 - 0.2607738054097702293308689j)
+ >>> whitw(j, j/2, 2+3j)
+ (0.1782899315111033879430369 - 0.01609578360403649340169406j)
+ >>> whitw(2, 3, 100000)
+ 1.887705114889527446891274e-21705
+ >>> whitw(-1, -1, 100)
+ 1.905250692824046162462058e-24
+
+Evaluation at zero::
+
+ >>> for m in [-1, -0.5, 0, 0.5, 1]:
+ ... whitw(1, m, 0)
+ ...
+ +inf
+ nan
+ 0.0
+ nan
+ +inf
+
+We can verify that :func:`~mpmath.whitw` numerically satisfies the
+differential equation for arbitrarily chosen values::
+
+ >>> k = mpf(0.25)
+ >>> m = mpf(1.5)
+ >>> f = lambda z: whitw(k,m,z)
+ >>> for z in [-1, 2.5, 3, 1+2j]:
+ ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
+ ...
+ 0.0
+ 0.0
+ 0.0
+ 0.0
+
+"""
+
+ber = r"""
+Computes the Kelvin function ber, which for real arguments gives the real part
+of the Bessel J function of a rotated argument
+
+.. math ::
+
+ J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x).
+
+The imaginary part is given by :func:`~mpmath.bei`.
+
+**Plots**
+
+.. literalinclude :: /plots/ber.py
+.. image :: /plots/ber.png
+
+**Examples**
+
+Verifying the defining relation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> n, x = 2, 3.5
+ >>> ber(n,x)
+ 1.442338852571888752631129
+ >>> bei(n,x)
+ -0.948359035324558320217678
+ >>> besselj(n, x*root(1,8,3))
+ (1.442338852571888752631129 - 0.948359035324558320217678j)
+
+The ber and bei functions are also defined by analytic continuation
+for complex arguments::
+
+ >>> ber(1+j, 2+3j)
+ (4.675445984756614424069563 - 15.84901771719130765656316j)
+ >>> bei(1+j, 2+3j)
+ (15.83886679193707699364398 + 4.684053288183046528703611j)
+
+"""
+
+bei = r"""
+Computes the Kelvin function bei, which for real arguments gives the
+imaginary part of the Bessel J function of a rotated argument.
+See :func:`~mpmath.ber`.
+"""
+
+ker = r"""
+Computes the Kelvin function ker, which for real arguments gives the real part
+of the (rescaled) Bessel K function of a rotated argument
+
+.. math ::
+
+ e^{-\pi i/2} K_n\left(x e^{3\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x).
+
+The imaginary part is given by :func:`~mpmath.kei`.
+
+**Plots**
+
+.. literalinclude :: /plots/ker.py
+.. image :: /plots/ker.png
+
+**Examples**
+
+Verifying the defining relation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> n, x = 2, 4.5
+ >>> ker(n,x)
+ 0.02542895201906369640249801
+ >>> kei(n,x)
+ -0.02074960467222823237055351
+ >>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1))
+ (0.02542895201906369640249801 - 0.02074960467222823237055351j)
+
+The ker and kei functions are also defined by analytic continuation
+for complex arguments::
+
+ >>> ker(1+j, 3+4j)
+ (1.586084268115490421090533 - 2.939717517906339193598719j)
+ >>> kei(1+j, 3+4j)
+ (-2.940403256319453402690132 - 1.585621643835618941044855j)
+
+"""
+
+kei = r"""
+Computes the Kelvin function kei, which for real arguments gives the
+imaginary part of the (rescaled) Bessel K function of a rotated argument.
+See :func:`~mpmath.ker`.
+"""
+
+struveh = r"""
+Gives the Struve function
+
+.. math ::
+
+ \,\mathbf{H}_n(z) =
+ \sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2})
+ \Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1}
+
+which is a solution to the Struve differential equation
+
+.. math ::
+
+ z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
+
+**Examples**
+
+Evaluation for arbitrary real and complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> struveh(0, 3.5)
+ 0.3608207733778295024977797
+ >>> struveh(-1, 10)
+ -0.255212719726956768034732
+ >>> struveh(1, -100.5)
+ 0.5819566816797362287502246
+ >>> struveh(2.5, 10000000000000)
+ 3153915652525200060.308937
+ >>> struveh(2.5, -10000000000000)
+ (0.0 - 3153915652525200060.308937j)
+ >>> struveh(1+j, 1000000+4000000j)
+ (-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j)
+
+A Struve function of half-integer order is elementary; for example::
+
+ >>> z = 3
+ >>> struveh(0.5, 3)
+ 0.9167076867564138178671595
+ >>> sqrt(2/(pi*z))*(1-cos(z))
+ 0.9167076867564138178671595
+
+Numerically verifying the differential equation::
+
+ >>> z = mpf(4.5)
+ >>> n = 3
+ >>> f = lambda z: struveh(n,z)
+ >>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z)
+ >>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
+ >>> lhs
+ 17.40359302709875496632744
+ >>> rhs
+ 17.40359302709875496632744
+
+"""
+
+struvel = r"""
+Gives the modified Struve function
+
+.. math ::
+
+ \,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z)
+
+which solves the modified Struve differential equation
+
+.. math ::
+
+ z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
+
+**Examples**
+
+Evaluation for arbitrary real and complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> struvel(0, 3.5)
+ 7.180846515103737996249972
+ >>> struvel(-1, 10)
+ 2670.994904980850550721511
+ >>> struvel(1, -100.5)
+ 1.757089288053346261497686e+42
+ >>> struvel(2.5, 10000000000000)
+ 4.160893281017115450519948e+4342944819025
+ >>> struvel(2.5, -10000000000000)
+ (0.0 - 4.160893281017115450519948e+4342944819025j)
+ >>> struvel(1+j, 700j)
+ (-0.1721150049480079451246076 + 0.1240770953126831093464055j)
+ >>> struvel(1+j, 1000000+4000000j)
+ (-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j)
+
+Numerically verifying the differential equation::
+
+ >>> z = mpf(3.5)
+ >>> n = 3
+ >>> f = lambda z: struvel(n,z)
+ >>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z)
+ >>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
+ >>> lhs
+ 6.368850306060678353018165
+ >>> rhs
+ 6.368850306060678353018165
+"""
+
+appellf1 = r"""
+Gives the Appell F1 hypergeometric function of two variables,
+
+.. math ::
+
+ F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
+ \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
+ \frac{x^m y^n}{m! n!}.
+
+This series is only generally convergent when `|x| < 1` and `|y| < 1`,
+although :func:`~mpmath.appellf1` can evaluate an analytic continuation
+with respect to either variable, and sometimes both.
+
+**Examples**
+
+Evaluation is supported for real and complex parameters::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> appellf1(1,0,0.5,1,0.5,0.25)
+ 1.154700538379251529018298
+ >>> appellf1(1,1+j,0.5,1,0.5,0.5j)
+ (1.138403860350148085179415 + 1.510544741058517621110615j)
+
+For some integer parameters, the F1 series reduces to a polynomial::
+
+ >>> appellf1(2,-4,-3,1,2,5)
+ -816.0
+ >>> appellf1(-5,1,2,1,4,5)
+ -20528.0
+
+The analytic continuation with respect to either `x` or `y`,
+and sometimes with respect to both, can be evaluated::
+
+ >>> appellf1(2,3,4,5,100,0.5)
+ (0.0006231042714165329279738662 + 0.0000005769149277148425774499857j)
+ >>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j)
+ (-0.1782604566893954897128702 + 0.002472407104546216117161499j)
+ >>> appellf1(1,2,3,4,10,12)
+ -0.07122993830066776374929313
+
+For certain arguments, F1 reduces to an ordinary hypergeometric function::
+
+ >>> appellf1(1,2,3,5,0.5,0.25)
+ 1.547902270302684019335555
+ >>> 4*hyp2f1(1,2,5,'1/3')/3
+ 1.547902270302684019335555
+ >>> appellf1(1,2,3,4,0,1.5)
+ (-1.717202506168937502740238 - 2.792526803190927323077905j)
+ >>> hyp2f1(1,3,4,1.5)
+ (-1.717202506168937502740238 - 2.792526803190927323077905j)
+
+The F1 function satisfies a system of partial differential equations::
+
+ >>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25])
+ >>> F = lambda x,y: appellf1(a,b1,b2,c,x,y)
+ >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
+ ... y*(1-x)*diff(F,(x,y),(1,1)) +
+ ... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
+ ... b1*y*diff(F,(x,y),(0,1)) -
+ ... a*b1*F(x,y))
+ 0.0
+ >>>
+ >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
+ ... x*(1-y)*diff(F,(x,y),(1,1)) +
+ ... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
+ ... b2*x*diff(F,(x,y),(1,0)) -
+ ... a*b2*F(x,y))
+ 0.0
+
+The Appell F1 function allows for closed-form evaluation of various
+integrals, such as any integral of the form
+`\int x^r (x+a)^p (x+b)^q dx`::
+
+ >>> def integral(a,b,p,q,r,x1,x2):
+ ... a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2])
+ ... f = lambda x: x**r * (x+a)**p * (x+b)**q
+ ... def F(x):
+ ... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q
+ ... v *= (1+x/a)**(-p)
+ ... v *= (1+x/b)**(-q)
+ ... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b)
+ ... return v
+ ... print("Num. quad: %s" % quad(f, [x1,x2]))
+ ... print("Appell F1: %s" % (F(x2)-F(x1)))
+ ...
+ >>> integral('1/5','4/3','-2','3','1/2',0,1)
+ Num. quad: 9.073335358785776206576981
+ Appell F1: 9.073335358785776206576981
+ >>> integral('3/2','4/3','-2','3','1/2',0,1)
+ Num. quad: 1.092829171999626454344678
+ Appell F1: 1.092829171999626454344678
+ >>> integral('3/2','4/3','-2','3','1/2',12,25)
+ Num. quad: 1106.323225040235116498927
+ Appell F1: 1106.323225040235116498927
+
+Also incomplete elliptic integrals fall into this category [1]::
+
+ >>> def E(z, m):
+ ... if (pi/2).ae(z):
+ ... return ellipe(m)
+ ... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\
+ ... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
+ ...
+ >>> z, m = 1, 0.5
+ >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
+ 0.9273298836244400669659042
+ 0.9273298836244400669659042
+ >>> z, m = 3, 2
+ >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
+ (1.057495752337234229715836 + 1.198140234735592207439922j)
+ (1.057495752337234229715836 + 1.198140234735592207439922j)
+
+**References**
+
+1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/
+2. [SrivastavaKarlsson]_
+3. [CabralRosetti]_
+4. [Vidunas]_
+5. [Slater]_
+
+"""
+
+angerj = r"""
+Gives the Anger function
+
+.. math ::
+
+ \mathbf{J}_{\nu}(z) = \frac{1}{\pi}
+ \int_0^{\pi} \cos(\nu t - z \sin t) dt
+
+which is an entire function of both the parameter `\nu` and
+the argument `z`. It solves the inhomogeneous Bessel differential
+equation
+
+.. math ::
+
+ f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
+ = \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu).
+
+**Examples**
+
+Evaluation for real and complex parameter and argument::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> angerj(2,3)
+ 0.4860912605858910769078311
+ >>> angerj(-3+4j, 2+5j)
+ (-5033.358320403384472395612 + 585.8011892476145118551756j)
+ >>> angerj(3.25, 1e6j)
+ (4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j)
+ >>> angerj(-1.5, 1e6)
+ 0.0002795719747073879393087011
+
+The Anger function coincides with the Bessel J-function when `\nu`
+is an integer::
+
+ >>> angerj(1,3); besselj(1,3)
+ 0.3390589585259364589255146
+ 0.3390589585259364589255146
+ >>> angerj(1.5,3); besselj(1.5,3)
+ 0.4088969848691080859328847
+ 0.4777182150870917715515015
+
+Verifying the differential equation::
+
+ >>> v,z = mpf(2.25), 0.75
+ >>> f = lambda z: angerj(v,z)
+ >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
+ -0.6002108774380707130367995
+ >>> (z-v)/(pi*z**2) * sinpi(v)
+ -0.6002108774380707130367995
+
+Verifying the integral representation::
+
+ >>> angerj(v,z)
+ 0.1145380759919333180900501
+ >>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi])
+ 0.1145380759919333180900501
+
+**References**
+
+1. [DLMF]_ section 11.10: Anger-Weber Functions
+"""
+
+webere = r"""
+Gives the Weber function
+
+.. math ::
+
+ \mathbf{E}_{\nu}(z) = \frac{1}{\pi}
+ \int_0^{\pi} \sin(\nu t - z \sin t) dt
+
+which is an entire function of both the parameter `\nu` and
+the argument `z`. It solves the inhomogeneous Bessel differential
+equation
+
+.. math ::
+
+ f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
+ = -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)).
+
+**Examples**
+
+Evaluation for real and complex parameter and argument::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> webere(2,3)
+ -0.1057668973099018425662646
+ >>> webere(-3+4j, 2+5j)
+ (-585.8081418209852019290498 - 5033.314488899926921597203j)
+ >>> webere(3.25, 1e6j)
+ (-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j)
+ >>> webere(3.25, 1e6)
+ -0.00002812518265894315604914453
+
+Up to addition of a rational function of `z`, the Weber function coincides
+with the Struve H-function when `\nu` is an integer::
+
+ >>> webere(1,3); 2/pi-struveh(1,3)
+ -0.3834897968188690177372881
+ -0.3834897968188690177372881
+ >>> webere(5,3); 26/(35*pi)-struveh(5,3)
+ 0.2009680659308154011878075
+ 0.2009680659308154011878075
+
+Verifying the differential equation::
+
+ >>> v,z = mpf(2.25), 0.75
+ >>> f = lambda z: webere(v,z)
+ >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
+ -1.097441848875479535164627
+ >>> -(z+v+(z-v)*cospi(v))/(pi*z**2)
+ -1.097441848875479535164627
+
+Verifying the integral representation::
+
+ >>> webere(v,z)
+ 0.1486507351534283744485421
+ >>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi])
+ 0.1486507351534283744485421
+
+**References**
+
+1. [DLMF]_ section 11.10: Anger-Weber Functions
+"""
+
+lommels1 = r"""
+Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}`
+
+.. math ::
+
+ s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)}
+ \,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2};
+ -\frac{z^2}{4} \right)
+
+which solves the inhomogeneous Bessel equation
+
+.. math ::
+
+ z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}.
+
+A second solution is given by :func:`~mpmath.lommels2`.
+
+**Plots**
+
+.. literalinclude :: /plots/lommels1.py
+.. image :: /plots/lommels1.png
+
+**Examples**
+
+An integral representation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> u,v,z = 0.25, 0.125, mpf(0.75)
+ >>> lommels1(u,v,z)
+ 0.4276243877565150372999126
+ >>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \
+ ... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2)
+ 0.4276243877565150372999126
+
+A special value::
+
+ >>> lommels1(v,v,z)
+ 0.5461221367746048054932553
+ >>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z)
+ 0.5461221367746048054932553
+
+Verifying the differential equation::
+
+ >>> f = lambda z: lommels1(u,v,z)
+ >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
+ 0.6979536443265746992059141
+ >>> z**(u+1)
+ 0.6979536443265746992059141
+
+**References**
+
+1. [GradshteynRyzhik]_
+2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
+"""
+
+lommels2 = r"""
+Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}`
+
+.. math ::
+
+ S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1}
+ \Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right)
+ \Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times
+
+ \left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) -
+ \cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z)
+ \right]
+
+which solves the same differential equation as
+:func:`~mpmath.lommels1`.
+
+**Plots**
+
+.. literalinclude :: /plots/lommels2.py
+.. image :: /plots/lommels2.png
+
+**Examples**
+
+For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> lommels2(10,2,30000)
+ 1.968299831601008419949804e+40
+ >>> power(30000,9)
+ 1.9683e+40
+
+A special value::
+
+ >>> u,v,z = 0.5, 0.125, mpf(0.75)
+ >>> lommels2(v,v,z)
+ 0.9589683199624672099969765
+ >>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5)
+ 0.9589683199624672099969765
+
+Verifying the differential equation::
+
+ >>> f = lambda z: lommels2(u,v,z)
+ >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
+ 0.6495190528383289850727924
+ >>> z**(u+1)
+ 0.6495190528383289850727924
+
+**References**
+
+1. [GradshteynRyzhik]_
+2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
+"""
+
+appellf2 = r"""
+Gives the Appell F2 hypergeometric function of two variables
+
+.. math ::
+
+ F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
+ \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n}
+ \frac{x^m y^n}{m! n!}.
+
+The series is generally absolutely convergent for `|x| + |y| < 1`.
+
+**Examples**
+
+Evaluation for real and complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> appellf2(1,2,3,4,5,0.25,0.125)
+ 1.257417193533135344785602
+ >>> appellf2(1,-3,-4,2,3,2,3)
+ -42.8
+ >>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25)
+ (0.9880539519421899867041719 + 0.01497616165031102661476978j)
+ >>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25))
+ 1.201311219287411337955192
+ >>> appellf2(1,1,1,4,6,0.125,16)
+ (-0.09455532250274744282125152 - 0.7647282253046207836769297j)
+
+A transformation formula::
+
+ >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125])
+ >>> appellf2(a,b1,b2,c1,c2,x,y)
+ 0.2299211717841180783309688
+ >>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x))
+ 0.2299211717841180783309688
+
+A system of partial differential equations satisfied by F2::
+
+ >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625])
+ >>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y)
+ >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
+ ... x*y*diff(F,(x,y),(1,1)) +
+ ... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
+ ... b1*y*diff(F,(x,y),(0,1)) -
+ ... a*b1*F(x,y))
+ 0.0
+ >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
+ ... x*y*diff(F,(x,y),(1,1)) +
+ ... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
+ ... b2*x*diff(F,(x,y),(1,0)) -
+ ... a*b2*F(x,y))
+ 0.0
+
+**References**
+
+See references for :func:`~mpmath.appellf1`.
+"""
+
+appellf3 = r"""
+Gives the Appell F3 hypergeometric function of two variables
+
+.. math ::
+
+ F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
+ \frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}}
+ \frac{x^m y^n}{m! n!}.
+
+The series is generally absolutely convergent for `|x| < 1, |y| < 1`.
+
+**Examples**
+
+Evaluation for various parameters and variables::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> appellf3(1,2,3,4,5,0.5,0.25)
+ 2.221557778107438938158705
+ >>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6)
+ (-0.5189554589089861284537389 - 0.1454441043328607980769742j)
+ (-0.5189554589089861284537389 - 0.1454441043328607980769742j)
+ >>> appellf3(1,-2,-3,1,1,4,6)
+ -17.4
+ >>> appellf3(1,2,-3,1,1,4,6)
+ (17.7876136773677356641825 + 19.54768762233649126154534j)
+ >>> appellf3(1,2,-3,1,1,6,4)
+ (85.02054175067929402953645 + 148.4402528821177305173599j)
+ >>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25))
+ 1.719992169545200286696007
+
+Many transformations and evaluations for special combinations
+of the parameters are possible, e.g.:
+
+ >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
+ >>> appellf3(a,c-a,b,c-b,c,x,y)
+ 1.093432340896087107444363
+ >>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y)
+ 1.093432340896087107444363
+ >>> x**2*appellf3(1,1,1,1,3,x,-x)
+ 0.01568646277445385390945083
+ >>> polylog(2,x**2)
+ 0.01568646277445385390945083
+ >>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125])
+ >>> appellf3(a1,a2,b1,b2,c,x,1)
+ 1.03947361709111140096947
+ >>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x)
+ 1.03947361709111140096947
+
+The Appell F3 function satisfies a pair of partial
+differential equations::
+
+ >>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625])
+ >>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y)
+ >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
+ ... y*diff(F,(x,y),(1,1)) +
+ ... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) -
+ ... a1*b1*F(x,y))
+ 0.0
+ >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
+ ... x*diff(F,(x,y),(1,1)) +
+ ... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) -
+ ... a2*b2*F(x,y))
+ 0.0
+
+**References**
+
+See references for :func:`~mpmath.appellf1`.
+"""
+
+appellf4 = r"""
+Gives the Appell F4 hypergeometric function of two variables
+
+.. math ::
+
+ F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
+ \frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n}
+ \frac{x^m y^n}{m! n!}.
+
+The series is generally absolutely convergent for
+`\sqrt{|x|} + \sqrt{|y|} < 1`.
+
+**Examples**
+
+Evaluation for various parameters and arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> appellf4(1,1,2,2,0.25,0.125)
+ 1.286182069079718313546608
+ >>> appellf4(-2,-3,4,5,4,5)
+ 34.8
+ >>> appellf4(5,4,2,3,0.25j,-0.125j)
+ (-0.2585967215437846642163352 + 2.436102233553582711818743j)
+
+Reduction to `\,_2F_1` in a special case::
+
+ >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
+ >>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x))
+ 1.129143488466850868248364
+ >>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y)
+ 1.129143488466850868248364
+
+A system of partial differential equations satisfied by F4::
+
+ >>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625])
+ >>> F = lambda x,y: appellf4(a,b,c1,c2,x,y)
+ >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
+ ... y**2*diff(F,(x,y),(0,2)) -
+ ... 2*x*y*diff(F,(x,y),(1,1)) +
+ ... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) -
+ ... ((a+b+1)*y)*diff(F,(x,y),(0,1)) -
+ ... a*b*F(x,y))
+ 0.0
+ >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
+ ... x**2*diff(F,(x,y),(2,0)) -
+ ... 2*x*y*diff(F,(x,y),(1,1)) +
+ ... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) -
+ ... ((a+b+1)*x)*diff(F,(x,y),(1,0)) -
+ ... a*b*F(x,y))
+ 0.0
+
+**References**
+
+See references for :func:`~mpmath.appellf1`.
+"""
+
+zeta = r"""
+Computes the Riemann zeta function
+
+.. math ::
+
+ \zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots
+
+or, with `a \ne 1`, the more general Hurwitz zeta function
+
+.. math ::
+
+ \zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}.
+
+Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with
+respect to `s`,
+
+.. math ::
+
+ \zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}.
+
+Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz
+zeta functions are defined through analytic continuation for arbitrary
+complex `s \ne 1` (`s = 1` is a pole).
+
+The implementation uses three algorithms: the Borwein algorithm for
+the Riemann zeta function when `s` is close to the real line;
+the Riemann-Siegel formula for the Riemann zeta function when `s` is
+large imaginary, and Euler-Maclaurin summation in all other cases.
+The reflection formula for `\Re(s) < 0` is implemented in some cases.
+The algorithm can be chosen with ``method = 'borwein'``,
+``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``.
+
+The parameter `a` is usually a rational number `a = p/q`, and may be specified
+as such by passing an integer tuple `(p, q)`. Evaluation is supported for
+arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for
+nonrational `a` or when computing derivatives.
+
+**Examples**
+
+Some values of the Riemann zeta function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> zeta(2); pi**2 / 6
+ 1.644934066848226436472415
+ 1.644934066848226436472415
+ >>> zeta(0)
+ -0.5
+ >>> zeta(-1)
+ -0.08333333333333333333333333
+ >>> zeta(-2)
+ 0.0
+
+For large positive `s`, `\zeta(s)` rapidly approaches 1::
+
+ >>> zeta(50)
+ 1.000000000000000888178421
+ >>> zeta(100)
+ 1.0
+ >>> zeta(inf)
+ 1.0
+ >>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler
+ 0.5772156649015328606065121
+ 0.5772156649015328606065121
+ >>> nsum(lambda k: zeta(k)-1, [2, inf])
+ 1.0
+
+Evaluation is supported for complex `s` and `a`::
+
+ >>> zeta(-3+4j)
+ (-0.03373057338827757067584698 + 0.2774499251557093745297677j)
+ >>> zeta(2+3j, -1+j)
+ (389.6841230140842816370741 + 295.2674610150305334025962j)
+
+The Riemann zeta function has so-called nontrivial zeros on
+the critical line `s = 1/2 + it`::
+
+ >>> findroot(zeta, 0.5+14j); zetazero(1)
+ (0.5 + 14.13472514173469379045725j)
+ (0.5 + 14.13472514173469379045725j)
+ >>> findroot(zeta, 0.5+21j); zetazero(2)
+ (0.5 + 21.02203963877155499262848j)
+ (0.5 + 21.02203963877155499262848j)
+ >>> findroot(zeta, 0.5+25j); zetazero(3)
+ (0.5 + 25.01085758014568876321379j)
+ (0.5 + 25.01085758014568876321379j)
+ >>> chop(zeta(zetazero(10)))
+ 0.0
+
+Evaluation on and near the critical line is supported for large
+heights `t` by means of the Riemann-Siegel formula (currently
+for `a = 1`, `n \le 4`)::
+
+ >>> zeta(0.5+100000j)
+ (1.073032014857753132114076 + 5.780848544363503984261041j)
+ >>> zeta(0.75+1000000j)
+ (0.9535316058375145020351559 + 0.9525945894834273060175651j)
+ >>> zeta(0.5+10000000j)
+ (11.45804061057709254500227 - 8.643437226836021723818215j)
+ >>> zeta(0.5+100000000j, derivative=1)
+ (51.12433106710194942681869 + 43.87221167872304520599418j)
+ >>> zeta(0.5+100000000j, derivative=2)
+ (-444.2760822795430400549229 - 896.3789978119185981665403j)
+ >>> zeta(0.5+100000000j, derivative=3)
+ (3230.72682687670422215339 + 14374.36950073615897616781j)
+ >>> zeta(0.5+100000000j, derivative=4)
+ (-11967.35573095046402130602 - 218945.7817789262839266148j)
+ >>> zeta(1+10000000j) # off the line
+ (2.859846483332530337008882 + 0.491808047480981808903986j)
+ >>> zeta(1+10000000j, derivative=1)
+ (-4.333835494679647915673205 - 0.08405337962602933636096103j)
+ >>> zeta(1+10000000j, derivative=4)
+ (453.2764822702057701894278 - 581.963625832768189140995j)
+
+For investigation of the zeta function zeros, the Riemann-Siegel
+Z-function is often more convenient than working with the Riemann
+zeta function directly (see :func:`~mpmath.siegelz`).
+
+Some values of the Hurwitz zeta function::
+
+ >>> zeta(2, 3); -5./4 + pi**2/6
+ 0.3949340668482264364724152
+ 0.3949340668482264364724152
+ >>> zeta(2, (3,4)); pi**2 - 8*catalan
+ 2.541879647671606498397663
+ 2.541879647671606498397663
+
+For positive integer values of `s`, the Hurwitz zeta function is
+equivalent to a polygamma function (except for a normalizing factor)::
+
+ >>> zeta(4, (1,5)); psi(3, '1/5')/6
+ 625.5408324774542966919938
+ 625.5408324774542966919938
+
+Evaluation of derivatives::
+
+ >>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2
+ (-2.675565317808456852310934 + 4.742664438034657928194889j)
+ (-2.675565317808456852310934 + 4.742664438034657928194889j)
+ >>> zeta(2, 1, 20)
+ 2432902008176640000.000242
+ >>> zeta(3+4j, 5.5+2j, 4)
+ (-0.140075548947797130681075 - 0.3109263360275413251313634j)
+ >>> zeta(0.5+100000j, 1, 4)
+ (-10407.16081931495861539236 + 13777.78669862804508537384j)
+ >>> zeta(-100+0.5j, (1,3), derivative=4)
+ (4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j)
+
+Generating a Taylor series at `s = 2` using derivatives::
+
+ >>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k))
+ ...
+ 1.644934066848226436472415 * (s-2)^0
+ -0.9375482543158437537025741 * (s-2)^1
+ 0.9946401171494505117104293 * (s-2)^2
+ -1.000024300473840810940657 * (s-2)^3
+ 1.000061933072352565457512 * (s-2)^4
+ -1.000006869443931806408941 * (s-2)^5
+ 1.000000173233769531820592 * (s-2)^6
+ -0.9999999569989868493432399 * (s-2)^7
+ 0.9999999937218844508684206 * (s-2)^8
+ -0.9999999996355013916608284 * (s-2)^9
+ 1.000000000004610645020747 * (s-2)^10
+
+Evaluation at zero and for negative integer `s`::
+
+ >>> zeta(0, 10)
+ -9.5
+ >>> zeta(-2, (2,3)); mpf(1)/81
+ 0.01234567901234567901234568
+ 0.01234567901234567901234568
+ >>> zeta(-3+4j, (5,4))
+ (0.2899236037682695182085988 + 0.06561206166091757973112783j)
+ >>> zeta(-3.25, 1/pi)
+ -0.0005117269627574430494396877
+ >>> zeta(-3.5, pi, 1)
+ 11.156360390440003294709
+ >>> zeta(-100.5, (8,3))
+ -4.68162300487989766727122e+77
+ >>> zeta(-10.5, (-8,3))
+ (-0.01521913704446246609237979 + 29907.72510874248161608216j)
+ >>> zeta(-1000.5, (-8,3))
+ (1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j)
+ >>> zeta(-1+j, 3+4j)
+ (-16.32988355630802510888631 - 22.17706465801374033261383j)
+ >>> zeta(-1+j, 3+4j, 2)
+ (32.48985276392056641594055 - 51.11604466157397267043655j)
+ >>> diff(lambda s: zeta(s, 3+4j), -1+j, 2)
+ (32.48985276392056641594055 - 51.11604466157397267043655j)
+
+**References**
+
+1. http://mathworld.wolfram.com/RiemannZetaFunction.html
+
+2. http://mathworld.wolfram.com/HurwitzZetaFunction.html
+
+3. [BorweinZeta]_
+
+"""
+
+dirichlet = r"""
+Evaluates the Dirichlet L-function
+
+.. math ::
+
+ L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}.
+
+where `\chi` is a periodic sequence of length `q` which should be supplied
+in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`.
+Strictly, `\chi` should be a Dirichlet character, but any periodic
+sequence will work.
+
+For example, ``dirichlet(s, [1])`` gives the ordinary
+Riemann zeta function and ``dirichlet(s, [-1,1])`` gives
+the alternating zeta function (Dirichlet eta function).
+
+Also the derivative with respect to `s` (currently only a first
+derivative) can be evaluated.
+
+**Examples**
+
+The ordinary Riemann zeta function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> dirichlet(3, [1]); zeta(3)
+ 1.202056903159594285399738
+ 1.202056903159594285399738
+ >>> dirichlet(1, [1])
+ +inf
+
+The alternating zeta function::
+
+ >>> dirichlet(1, [-1,1]); ln(2)
+ 0.6931471805599453094172321
+ 0.6931471805599453094172321
+
+The following defines the Dirichlet beta function
+`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies
+several values of this function::
+
+ >>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d)
+ >>> B(0); 1./2
+ 0.5
+ 0.5
+ >>> B(1); pi/4
+ 0.7853981633974483096156609
+ 0.7853981633974483096156609
+ >>> B(2); +catalan
+ 0.9159655941772190150546035
+ 0.9159655941772190150546035
+ >>> B(2,1); diff(B, 2)
+ 0.08158073611659279510291217
+ 0.08158073611659279510291217
+ >>> B(-1,1); 2*catalan/pi
+ 0.5831218080616375602767689
+ 0.5831218080616375602767689
+ >>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2)))
+ 0.3915943927068367764719453
+ 0.3915943927068367764719454
+ >>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25)))
+ 0.1929013167969124293631898
+ 0.1929013167969124293631898
+
+A custom L-series of period 3::
+
+ >>> dirichlet(2, [2,0,1])
+ 0.7059715047839078092146831
+ >>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \
+ ... nsum(lambda k: (3*k+2)**-2, [0,inf])
+ 0.7059715047839078092146831
+
+"""
+
+coulombf = r"""
+Calculates the regular Coulomb wave function
+
+.. math ::
+
+ F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz)
+
+where the normalization constant `C_l(\eta)` is as calculated by
+:func:`~mpmath.coulombc`. This function solves the differential equation
+
+.. math ::
+
+ f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0.
+
+A second linearly independent solution is given by the irregular
+Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`)
+and thus the general solution is
+`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary
+constants `C_1`, `C_2`.
+Physically, the Coulomb wave functions give the radial solution
+to the Schrodinger equation for a point particle in a `1/z` potential; `z` is
+then the radius and `l`, `\eta` are quantum numbers.
+
+The Coulomb wave functions with real parameters are defined
+in Abramowitz & Stegun, section 14. However, all parameters are permitted
+to be complex in this implementation (see references).
+
+**Plots**
+
+.. literalinclude :: /plots/coulombf.py
+.. image :: /plots/coulombf.png
+.. literalinclude :: /plots/coulombf_c.py
+.. image :: /plots/coulombf_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary magnitudes of `z`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> coulombf(2, 1.5, 3.5)
+ 0.4080998961088761187426445
+ >>> coulombf(-2, 1.5, 3.5)
+ 0.7103040849492536747533465
+ >>> coulombf(2, 1.5, '1e-10')
+ 4.143324917492256448770769e-33
+ >>> coulombf(2, 1.5, 1000)
+ 0.4482623140325567050716179
+ >>> coulombf(2, 1.5, 10**10)
+ -0.066804196437694360046619
+
+Verifying the differential equation::
+
+ >>> l, eta, z = 2, 3, mpf(2.75)
+ >>> A, B = 1, 2
+ >>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z)
+ >>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z))
+ 0.0
+
+A Wronskian relation satisfied by the Coulomb wave functions::
+
+ >>> l = 2
+ >>> eta = 1.5
+ >>> F = lambda z: coulombf(l,eta,z)
+ >>> G = lambda z: coulombg(l,eta,z)
+ >>> for z in [3.5, -1, 2+3j]:
+ ... chop(diff(F,z)*G(z) - F(z)*diff(G,z))
+ ...
+ 1.0
+ 1.0
+ 1.0
+
+Another Wronskian relation::
+
+ >>> F = coulombf
+ >>> G = coulombg
+ >>> for z in [3.5, -1, 2+3j]:
+ ... chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2))
+ ...
+ 0.0
+ 0.0
+ 0.0
+
+An integral identity connecting the regular and irregular wave functions::
+
+ >>> l, eta, z = 4+j, 2-j, 5+2j
+ >>> coulombf(l,eta,z) + j*coulombg(l,eta,z)
+ (0.7997977752284033239714479 + 0.9294486669502295512503127j)
+ >>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta)
+ >>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf])
+ (0.7997977752284033239714479 + 0.9294486669502295512503127j)
+
+Some test cases with complex parameters, taken from Michel [2]::
+
+ >>> mp.dps = 15
+ >>> coulombf(1+0.1j, 50+50j, 100.156)
+ (-1.02107292320897e+15 - 2.83675545731519e+15j)
+ >>> coulombg(1+0.1j, 50+50j, 100.156)
+ (2.83675545731519e+15 - 1.02107292320897e+15j)
+ >>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j)
+ (4.30566371247811e-14 - 9.03347835361657e-19j)
+ >>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j)
+ (778709182061.134 + 18418936.2660553j)
+
+The following reproduces a table in Abramowitz & Stegun, at twice
+the precision::
+
+ >>> mp.dps = 10
+ >>> eta = 2; z = 5
+ >>> for l in [5, 4, 3, 2, 1, 0]:
+ ... print("%s %s %s" % (l, coulombf(l,eta,z),
+ ... diff(lambda z: coulombf(l,eta,z), z)))
+ ...
+ 5 0.09079533488 0.1042553261
+ 4 0.2148205331 0.2029591779
+ 3 0.4313159311 0.320534053
+ 2 0.7212774133 0.3952408216
+ 1 0.9935056752 0.3708676452
+ 0 1.143337392 0.2937960375
+
+**References**
+
+1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex
+ Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986.
+
+2. N. Michel, "Precise Coulomb wave functions for a wide range of
+ complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1
+
+"""
+
+coulombg = r"""
+Calculates the irregular Coulomb wave function
+
+.. math ::
+
+ G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)}
+
+where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi`
+and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`.
+
+See :func:`~mpmath.coulombf` for additional information.
+
+**Plots**
+
+.. literalinclude :: /plots/coulombg.py
+.. image :: /plots/coulombg.png
+.. literalinclude :: /plots/coulombg_c.py
+.. image :: /plots/coulombg_c.png
+
+**Examples**
+
+Evaluation is supported for arbitrary magnitudes of `z`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> coulombg(-2, 1.5, 3.5)
+ 1.380011900612186346255524
+ >>> coulombg(2, 1.5, 3.5)
+ 1.919153700722748795245926
+ >>> coulombg(-2, 1.5, '1e-10')
+ 201126715824.7329115106793
+ >>> coulombg(-2, 1.5, 1000)
+ 0.1802071520691149410425512
+ >>> coulombg(-2, 1.5, 10**10)
+ 0.652103020061678070929794
+
+The following reproduces a table in Abramowitz & Stegun,
+at twice the precision::
+
+ >>> mp.dps = 10
+ >>> eta = 2; z = 5
+ >>> for l in [1, 2, 3, 4, 5]:
+ ... print("%s %s %s" % (l, coulombg(l,eta,z),
+ ... -diff(lambda z: coulombg(l,eta,z), z)))
+ ...
+ 1 1.08148276 0.6028279961
+ 2 1.496877075 0.5661803178
+ 3 2.048694714 0.7959909551
+ 4 3.09408669 1.731802374
+ 5 5.629840456 4.549343289
+
+Evaluation close to the singularity at `z = 0`::
+
+ >>> mp.dps = 15
+ >>> coulombg(0,10,1)
+ 3088184933.67358
+ >>> coulombg(0,10,'1e-10')
+ 5554866000719.8
+ >>> coulombg(0,10,'1e-100')
+ 5554866221524.1
+
+Evaluation with a half-integer value for `l`::
+
+ >>> coulombg(1.5, 1, 10)
+ 0.852320038297334
+"""
+
+coulombc = r"""
+Gives the normalizing Gamow constant for Coulomb wave functions,
+
+.. math ::
+
+ C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) +
+ \ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right),
+
+where the log gamma function with continuous imaginary part
+away from the negative half axis (see :func:`~mpmath.loggamma`) is implied.
+
+This function is used internally for the calculation of
+Coulomb wave functions, and automatically cached to make multiple
+evaluations with fixed `l`, `\eta` fast.
+"""
+
+ellipfun = r"""
+Computes any of the Jacobi elliptic functions, defined
+in terms of Jacobi theta functions as
+
+.. math ::
+
+ \mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)}
+ \frac{\vartheta_1(t,q)}{\vartheta_4(t,q)}
+
+ \mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)}
+ \frac{\vartheta_2(t,q)}{\vartheta_4(t,q)}
+
+ \mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)}
+ \frac{\vartheta_3(t,q)}{\vartheta_4(t,q)},
+
+or more generally computes a ratio of two such functions. Here
+`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see
+:func:`~mpmath.nome`). Optionally, you can specify the nome directly
+instead of `m` by passing ``q=``, or you can directly
+specify the elliptic parameter `k` with ``k=``.
+
+The first argument should be a two-character string specifying the
+function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These
+letters respectively denote the basic functions
+`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`.
+The identifier specifies the ratio of two such functions.
+For example, ``'ns'`` identifies the function
+
+.. math ::
+
+ \mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)}
+
+and ``'cd'`` identifies the function
+
+.. math ::
+
+ \mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}.
+
+If called with only the first argument, a function object
+evaluating the chosen function for given arguments is returned.
+
+**Examples**
+
+Basic evaluation::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> ellipfun('cd', 3.5, 0.5)
+ -0.9891101840595543931308394
+ >>> ellipfun('cd', 3.5, q=0.25)
+ 0.07111979240214668158441418
+
+The sn-function is doubly periodic in the complex plane with periods
+`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`)::
+
+ >>> sn = ellipfun('sn')
+ >>> sn(2, 0.25)
+ 0.9628981775982774425751399
+ >>> sn(2+4*ellipk(0.25), 0.25)
+ 0.9628981775982774425751399
+ >>> chop(sn(2+2*j*ellipk(1-0.25), 0.25))
+ 0.9628981775982774425751399
+
+The cn-function is doubly periodic with periods `4 K(m)` and `2 K(m) + 2 i K(1-m)`::
+
+ >>> cn = ellipfun('cn')
+ >>> cn(2, 0.25)
+ -0.2698649654510865792581416
+ >>> cn(2+4*ellipk(0.25), 0.25)
+ -0.2698649654510865792581416
+ >>> chop(cn(2+2*ellipk(0.25)+2*j*ellipk(1-0.25), 0.25))
+ -0.2698649654510865792581416
+
+The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`::
+
+ >>> dn = ellipfun('dn')
+ >>> dn(2, 0.25)
+ 0.8764740583123262286931578
+ >>> dn(2+2*ellipk(0.25), 0.25)
+ 0.8764740583123262286931578
+ >>> chop(dn(2+4*j*ellipk(1-0.25), 0.25))
+ 0.8764740583123262286931578
+
+"""
+
+
+jtheta = r"""
+Computes the Jacobi theta function `\vartheta_n(z, q)`, where
+`n = 1, 2, 3, 4`, defined by the infinite series:
+
+.. math ::
+
+ \vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
+ (-1)^n q^{n^2+n\,} \sin((2n+1)z)
+
+ \vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
+ q^{n^{2\,} + n} \cos((2n+1)z)
+
+ \vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty}
+ q^{n^2\,} \cos(2 n z)
+
+ \vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty}
+ (-q)^{n^2\,} \cos(2 n z)
+
+The theta functions are functions of two variables:
+
+* `z` is the *argument*, an arbitrary real or complex number
+
+* `q` is the *nome*, which must be a real or complex number
+ in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the
+ series converge very quickly, so the Jacobi theta functions
+ can efficiently be evaluated to high precision.
+
+The compact notations `\vartheta_n(q) = \vartheta_n(0,q)`
+and `\vartheta_n = \vartheta_n(0,q)` are also frequently
+encountered. Finally, Jacobi theta functions are frequently
+considered as functions of the half-period ratio `\tau`
+and then usually denoted by `\vartheta_n(z|\tau)`.
+
+Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes
+a `d`-th derivative with respect to `z`.
+
+**Examples and basic properties**
+
+Considered as functions of `z`, the Jacobi theta functions may be
+viewed as generalizations of the ordinary trigonometric functions
+cos and sin. They are periodic functions::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> jtheta(1, 0.25, '0.2')
+ 0.2945120798627300045053104
+ >>> jtheta(1, 0.25 + 2*pi, '0.2')
+ 0.2945120798627300045053104
+
+Indeed, the series defining the theta functions are essentially
+trigonometric Fourier series. The coefficients can be retrieved
+using :func:`~mpmath.fourier`::
+
+ >>> mp.dps = 10
+ >>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4))
+ ([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0])
+
+The Jacobi theta functions are also so-called quasiperiodic
+functions of `z` and `\tau`, meaning that for fixed `\tau`,
+`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same
+except for an exponential factor::
+
+ >>> mp.dps = 25
+ >>> tau = 3*j/10
+ >>> q = exp(pi*j*tau)
+ >>> z = 10
+ >>> jtheta(4, z+tau*pi, q)
+ (-0.682420280786034687520568 + 1.526683999721399103332021j)
+ >>> -exp(-2*j*z)/q * jtheta(4, z, q)
+ (-0.682420280786034687520568 + 1.526683999721399103332021j)
+
+The Jacobi theta functions satisfy a huge number of other
+functional equations, such as the following identity (valid for
+any `q`)::
+
+ >>> q = mpf(3)/10
+ >>> jtheta(3,0,q)**4
+ 6.823744089352763305137427
+ >>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4
+ 6.823744089352763305137427
+
+Extensive listings of identities satisfied by the Jacobi theta
+functions can be found in standard reference works.
+
+The Jacobi theta functions are related to the gamma function
+for special arguments::
+
+ >>> jtheta(3, 0, exp(-pi))
+ 1.086434811213308014575316
+ >>> pi**(1/4.) / gamma(3/4.)
+ 1.086434811213308014575316
+
+:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex
+arguments::
+
+ >>> mp.dps = 50
+ >>> jtheta(4, sqrt(2), 0.5)
+ 2.0549510717571539127004115835148878097035750653737
+ >>> mp.dps = 25
+ >>> jtheta(4, 1+2j, (1+j)/5)
+ (7.180331760146805926356634 - 1.634292858119162417301683j)
+
+Evaluation of derivatives::
+
+ >>> mp.dps = 25
+ >>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7)
+ 1.209857192844475388637236
+ 1.209857192844475388637236
+ >>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2)
+ -0.2598718791650217206533052
+ -0.2598718791650217206533052
+ >>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7)
+ -1.150231437070259644461474
+ -1.150231437070259644461474
+ >>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2)
+ -0.6226636990043777445898114
+ -0.6226636990043777445898114
+ >>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7)
+ -0.9990312046096634316587882
+ -0.9990312046096634316587882
+ >>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2)
+ -0.1530388693066334936151174
+ -0.1530388693066334936151174
+ >>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7)
+ 0.9820995967262793943571139
+ 0.9820995967262793943571139
+ >>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2)
+ 0.3936902850291437081667755
+ 0.3936902850291437081667755
+
+**Possible issues**
+
+For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises
+``ValueError``. This exception is also raised for `|q|` extremely
+close to 1 (or equivalently `\tau` very close to 0), since the
+series would converge too slowly::
+
+ >>> jtheta(1, 10, 0.99999999 * exp(0.5*j))
+ Traceback (most recent call last):
+ ...
+ ValueError: abs(q) > THETA_Q_LIM = 1.000000
+
+"""
+
+eulernum = r"""
+Gives the `n`-th Euler number, defined as the `n`-th derivative of
+`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the
+Euler numbers give the coefficients of the Taylor series
+
+.. math ::
+
+ \mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n.
+
+The Euler numbers are closely related to Bernoulli numbers
+and Bernoulli polynomials. They can also be evaluated in terms of
+Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`.
+
+**Examples**
+
+Computing the first few Euler numbers and verifying that they
+agree with the Taylor series::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> [eulernum(n) for n in range(11)]
+ [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
+ >>> chop(diffs(sech, 0, 10))
+ [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
+
+Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently
+computes numerical approximations for large indices::
+
+ >>> eulernum(50)
+ -6.053285248188621896314384e+54
+ >>> eulernum(1000)
+ 3.887561841253070615257336e+2371
+ >>> eulernum(10**20)
+ 4.346791453661149089338186e+1936958564106659551331
+
+Comparing with an asymptotic formula for the Euler numbers::
+
+ >>> n = 10**5
+ >>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n
+ 3.69919063017432362805663e+436961
+ >>> eulernum(n)
+ 3.699193712834466537941283e+436961
+
+Pass ``exact=True`` to obtain exact values of Euler numbers as integers::
+
+ >>> print(eulernum(50, exact=True))
+ -6053285248188621896314383785111649088103498225146815121
+ >>> print(eulernum(200, exact=True) % 10**10)
+ 1925859625
+ >>> eulernum(1001, exact=True)
+ 0
+"""
+
+eulerpoly = r"""
+Evaluates the Euler polynomial `E_n(z)`, defined by the generating function
+representation
+
+.. math ::
+
+ \frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}.
+
+The Euler polynomials may also be represented in terms of
+Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for
+example
+
+.. math ::
+
+ E_n(z) = \frac{2}{n+1} \left(
+ B_n(z)-2^{n+1}B_n\left(\frac{z}{2}\right)
+ \right).
+
+Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see
+:func:`~mpmath.eulernum`).
+
+**Examples**
+
+Computing the coefficients of the first few Euler polynomials::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> for n in range(6):
+ ... chop(taylor(lambda z: eulerpoly(n,z), 0, n))
+ ...
+ [1.0]
+ [-0.5, 1.0]
+ [0.0, -1.0, 1.0]
+ [0.25, 0.0, -1.5, 1.0]
+ [0.0, 1.0, 0.0, -2.0, 1.0]
+ [-0.5, 0.0, 2.5, 0.0, -2.5, 1.0]
+
+Evaluation for arbitrary `z`::
+
+ >>> eulerpoly(2,3)
+ 6.0
+ >>> eulerpoly(5,4)
+ 423.5
+ >>> eulerpoly(35, 11111111112)
+ 3.994957561486776072734601e+351
+ >>> eulerpoly(4, 10+20j)
+ (-47990.0 - 235980.0j)
+ >>> eulerpoly(2, '-3.5e-5')
+ 0.000035001225
+ >>> eulerpoly(3, 0.5)
+ 0.0
+ >>> eulerpoly(55, -10**80)
+ -1.0e+4400
+ >>> eulerpoly(5, -inf)
+ -inf
+ >>> eulerpoly(6, -inf)
+ +inf
+
+Computing Euler numbers::
+
+ >>> 2**26 * eulerpoly(26,0.5)
+ -4087072509293123892361.0
+ >>> eulernum(26)
+ -4087072509293123892361.0
+
+Evaluation is accurate for large `n` and small `z`::
+
+ >>> eulerpoly(100, 0.5)
+ 2.29047999988194114177943e+108
+ >>> eulerpoly(1000, 10.5)
+ 3.628120031122876847764566e+2070
+ >>> eulerpoly(10000, 10.5)
+ 1.149364285543783412210773e+30688
+"""
+
+spherharm = r"""
+Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`,
+
+.. math ::
+
+ Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}
+ P_l^m(\cos \theta) e^{i m \phi}
+
+where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`).
+
+Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging
+from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the
+azimuthal coordinate on a sphere. Care should be used since many different
+conventions for spherical coordinate variables are used.
+
+Usually spherical harmonics are considered for `l \in \mathbb{N}`,
+`m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi`
+are permitted to be complex numbers.
+
+.. note ::
+
+ :func:`~mpmath.spherharm` returns a complex number, even if the value is
+ purely real.
+
+**Plots**
+
+.. literalinclude :: /plots/spherharm40.py
+
+`Y_{4,0}`:
+
+.. image :: /plots/spherharm40.png
+
+`Y_{4,1}`:
+
+.. image :: /plots/spherharm41.png
+
+`Y_{4,2}`:
+
+.. image :: /plots/spherharm42.png
+
+`Y_{4,3}`:
+
+.. image :: /plots/spherharm43.png
+
+`Y_{4,4}`:
+
+.. image :: /plots/spherharm44.png
+
+**Examples**
+
+Some low-order spherical harmonics with reference values::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> theta = pi/4
+ >>> phi = pi/3
+ >>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0)
+ (0.2820947917738781434740397 + 0.0j)
+ (0.2820947917738781434740397 + 0.0j)
+ >>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta)
+ (0.1221506279757299803965962 - 0.2115710938304086076055298j)
+ (0.1221506279757299803965962 - 0.2115710938304086076055298j)
+ >>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0)
+ (0.3454941494713354792652446 + 0.0j)
+ (0.3454941494713354792652446 + 0.0j)
+ >>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta)
+ (-0.1221506279757299803965962 - 0.2115710938304086076055298j)
+ (-0.1221506279757299803965962 - 0.2115710938304086076055298j)
+
+With the normalization convention used, the spherical harmonics are orthonormal
+on the unit sphere::
+
+ >>> sphere = [0,pi], [0,2*pi]
+ >>> dS = lambda t,p: fp.sin(t) # differential element
+ >>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p)
+ >>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p))
+ >>> l1 = l2 = 3; m1 = m2 = 2
+ >>> fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))
+ 1.0000000000000007
+ >>> m2 = 1 # m1 != m2
+ >>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)))
+ 0.0
+
+Evaluation is accurate for large orders::
+
+ >>> spherharm(1000,750,0.5,0.25)
+ (3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j)
+
+Evaluation works with complex parameter values::
+
+ >>> spherharm(1+j, 2j, 2+3j, -0.5j)
+ (64.44922331113759992154992 + 1981.693919841408089681743j)
+"""
+
+scorergi = r"""
+Evaluates the Scorer function
+
+.. math ::
+
+ \operatorname{Gi}(z) =
+ \operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt +
+ \operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt
+
+which gives a particular solution to the inhomogeneous Airy
+differential equation `f''(z) - z f(z) = 1/\pi`. Another
+particular solution is given by the Scorer Hi-function
+(:func:`~mpmath.scorerhi`). The two functions are related as
+`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`.
+
+**Plots**
+
+.. literalinclude :: /plots/gi.py
+.. image :: /plots/gi.png
+.. literalinclude :: /plots/gi_c.py
+.. image :: /plots/gi_c.png
+
+**Examples**
+
+Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3'))
+ 0.2049755424820002450503075
+ 0.2049755424820002450503075
+ >>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3'))
+ 0.1494294524512754526382746
+ 0.1494294524512754526382746
+ >>> scorergi(+inf); scorergi(-inf)
+ 0.0
+ 0.0
+ >>> scorergi(1)
+ 0.2352184398104379375986902
+ >>> scorergi(-1)
+ -0.1166722172960152826494198
+
+Evaluation for large arguments::
+
+ >>> scorergi(10)
+ 0.03189600510067958798062034
+ >>> scorergi(100)
+ 0.003183105228162961476590531
+ >>> scorergi(1000000)
+ 0.0000003183098861837906721743873
+ >>> 1/(pi*1000000)
+ 0.0000003183098861837906715377675
+ >>> scorergi(-1000)
+ -0.08358288400262780392338014
+ >>> scorergi(-100000)
+ 0.02886866118619660226809581
+ >>> scorergi(50+10j)
+ (0.0061214102799778578790984 - 0.001224335676457532180747917j)
+ >>> scorergi(-50-10j)
+ (5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j)
+ >>> scorergi(100000j)
+ (-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j)
+
+Verifying the connection between Gi and Hi::
+
+ >>> z = 0.25
+ >>> scorergi(z) + scorerhi(z)
+ 0.7287469039362150078694543
+ >>> airybi(z)
+ 0.7287469039362150078694543
+
+Verifying the differential equation::
+
+ >>> for z in [-3.4, 0, 2.5, 1+2j]:
+ ... chop(diff(scorergi,z,2) - z*scorergi(z))
+ ...
+ -0.3183098861837906715377675
+ -0.3183098861837906715377675
+ -0.3183098861837906715377675
+ -0.3183098861837906715377675
+
+Verifying the integral representation::
+
+ >>> z = 0.5
+ >>> scorergi(z)
+ 0.2447210432765581976910539
+ >>> Ai,Bi = airyai,airybi
+ >>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1))
+ 0.2447210432765581976910539
+
+**References**
+
+1. [DLMF]_ section 9.12: Scorer Functions
+
+"""
+
+scorerhi = r"""
+Evaluates the second Scorer function
+
+.. math ::
+
+ \operatorname{Hi}(z) =
+ \operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt -
+ \operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt
+
+which gives a particular solution to the inhomogeneous Airy
+differential equation `f''(z) - z f(z) = 1/\pi`. See also
+:func:`~mpmath.scorergi`.
+
+**Plots**
+
+.. literalinclude :: /plots/hi.py
+.. image :: /plots/hi.png
+.. literalinclude :: /plots/hi_c.py
+.. image :: /plots/hi_c.png
+
+**Examples**
+
+Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3'))
+ 0.4099510849640004901006149
+ 0.4099510849640004901006149
+ >>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3'))
+ 0.2988589049025509052765491
+ 0.2988589049025509052765491
+ >>> scorerhi(+inf); scorerhi(-inf)
+ +inf
+ 0.0
+ >>> scorerhi(1)
+ 0.9722051551424333218376886
+ >>> scorerhi(-1)
+ 0.2206696067929598945381098
+
+Evaluation for large arguments::
+
+ >>> scorerhi(10)
+ 455641153.5163291358991077
+ >>> scorerhi(100)
+ 6.041223996670201399005265e+288
+ >>> scorerhi(1000000)
+ 7.138269638197858094311122e+289529652
+ >>> scorerhi(-10)
+ 0.0317685352825022727415011
+ >>> scorerhi(-100)
+ 0.003183092495767499864680483
+ >>> scorerhi(100j)
+ (-6.366197716545672122983857e-9 + 0.003183098861710582761688475j)
+ >>> scorerhi(50+50j)
+ (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
+ >>> scorerhi(-1000-1000j)
+ (0.0001591549432510502796565538 - 0.000159154943091895334973109j)
+
+Verifying the differential equation::
+
+ >>> for z in [-3.4, 0, 2, 1+2j]:
+ ... chop(diff(scorerhi,z,2) - z*scorerhi(z))
+ ...
+ 0.3183098861837906715377675
+ 0.3183098861837906715377675
+ 0.3183098861837906715377675
+ 0.3183098861837906715377675
+
+Verifying the integral representation::
+
+ >>> z = 0.5
+ >>> scorerhi(z)
+ 0.6095559998265972956089949
+ >>> Ai,Bi = airyai,airybi
+ >>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1))
+ 0.6095559998265972956089949
+
+"""
+
+
+stirling1 = r"""
+Gives the Stirling number of the first kind `s(n,k)`, defined by
+
+.. math ::
+
+ x(x-1)(x-2)\cdots(x-n+1) = \sum_{k=0}^n s(n,k) x^k.
+
+The value is computed using an integer recurrence. The implementation
+is not optimized for approximating large values quickly.
+
+**Examples**
+
+Comparing with the generating function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> taylor(lambda x: ff(x, 5), 0, 5)
+ [0.0, 24.0, -50.0, 35.0, -10.0, 1.0]
+ >>> [stirling1(5, k) for k in range(6)]
+ [0.0, 24.0, -50.0, 35.0, -10.0, 1.0]
+
+Recurrence relation::
+
+ >>> n, k = 5, 3
+ >>> stirling1(n+1,k) + n*stirling1(n,k) - stirling1(n,k-1)
+ 0.0
+
+The matrices of Stirling numbers of first and second kind are inverses
+of each other::
+
+ >>> A = matrix(5, 5); B = matrix(5, 5)
+ >>> for n in range(5):
+ ... for k in range(5):
+ ... A[n,k] = stirling1(n,k)
+ ... B[n,k] = stirling2(n,k)
+ ...
+ >>> A * B
+ [1.0 0.0 0.0 0.0 0.0]
+ [0.0 1.0 0.0 0.0 0.0]
+ [0.0 0.0 1.0 0.0 0.0]
+ [0.0 0.0 0.0 1.0 0.0]
+ [0.0 0.0 0.0 0.0 1.0]
+
+Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
+
+ >>> stirling1(42, 5)
+ -2.864498971768501633736628e+50
+ >>> print(stirling1(42, 5, exact=True))
+ -286449897176850163373662803014001546235808317440000
+
+"""
+
+stirling2 = r"""
+Gives the Stirling number of the second kind `S(n,k)`, defined by
+
+.. math ::
+
+ x^n = \sum_{k=0}^n S(n,k) x(x-1)(x-2)\cdots(x-k+1)
+
+The value is computed using integer arithmetic to evaluate a power sum.
+The implementation is not optimized for approximating large values quickly.
+
+**Examples**
+
+Comparing with the generating function::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> taylor(lambda x: sum(stirling2(5,k) * ff(x,k) for k in range(6)), 0, 5)
+ [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
+
+Recurrence relation::
+
+ >>> n, k = 5, 3
+ >>> stirling2(n+1,k) - k*stirling2(n,k) - stirling2(n,k-1)
+ 0.0
+
+Pass ``exact=True`` to obtain exact values of Stirling numbers as integers::
+
+ >>> stirling2(52, 10)
+ 2.641822121003543906807485e+45
+ >>> print(stirling2(52, 10, exact=True))
+ 2641822121003543906807485307053638921722527655
+
+
+"""
+
+squarew = r"""
+Computes the square wave function using the definition:
+
+.. math::
+ x(t) = A(-1)^{\left\lfloor{2t / P}\right\rfloor}
+
+where `P` is the period of the wave and `A` is the amplitude.
+
+**Examples**
+
+Square wave with period = 2, amplitude = 1 ::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> squarew(0,1,2)
+ 1.0
+ >>> squarew(0.5,1,2)
+ 1.0
+ >>> squarew(1,1,2)
+ -1.0
+ >>> squarew(1.5,1,2)
+ -1.0
+ >>> squarew(2,1,2)
+ 1.0
+"""
+
+trianglew = r"""
+Computes the triangle wave function using the definition:
+
+.. math::
+    x(t) = 2A\left(\frac{1}{2}-\left|1-2 \operatorname{frac}\left(\frac{t}{P}+\frac{1}{4}\right)\right|\right)
+
+where :math:`\operatorname{frac}\left(\frac{t}{P}\right) = \frac{t}{P}-\left\lfloor{\frac{t}{P}}\right\rfloor`
+is the fractional part, `P` is the period of the wave, and `A` is the amplitude.
+
+**Examples**
+
+Triangle wave with period = 2, amplitude = 1 ::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> trianglew(0,1,2)
+ 0.0
+ >>> trianglew(0.25,1,2)
+ 0.5
+ >>> trianglew(0.5,1,2)
+ 1.0
+ >>> trianglew(1,1,2)
+ 0.0
+ >>> trianglew(1.5,1,2)
+ -1.0
+ >>> trianglew(2,1,2)
+ 0.0
+"""
+
+sawtoothw = r"""
+Computes the sawtooth wave function using the definition:
+
+.. math::
+    x(t) = A\operatorname{frac}\left(\frac{t}{P}\right)
+
+where :math:`\operatorname{frac}\left(\frac{t}{P}\right) = \frac{t}{P}-\left\lfloor{\frac{t}{P}}\right\rfloor`,
+`P` is the period of the wave, and `A` is the amplitude.
+
+**Examples**
+
+Sawtooth wave with period = 2, amplitude = 1 ::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> sawtoothw(0,1,2)
+ 0.0
+ >>> sawtoothw(0.5,1,2)
+ 0.25
+ >>> sawtoothw(1,1,2)
+ 0.5
+ >>> sawtoothw(1.5,1,2)
+ 0.75
+ >>> sawtoothw(2,1,2)
+ 0.0
+"""
+
+unit_triangle = r"""
+Computes the unit triangle using the definition:
+
+.. math::
+ x(t) = A(-\left| t \right| + 1)
+
+where `A` is the amplitude.
+
+**Examples**
+
+Unit triangle with amplitude = 1 ::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> unit_triangle(-1,1)
+ 0.0
+ >>> unit_triangle(-0.5,1)
+ 0.5
+ >>> unit_triangle(0,1)
+ 1.0
+ >>> unit_triangle(0.5,1)
+ 0.5
+ >>> unit_triangle(1,1)
+ 0.0
+"""
+
+sigmoid = r"""
+Computes the sigmoid function using the definition:
+
+.. math::
+ x(t) = \frac{A}{1 + e^{-t}}
+
+where `A` is the amplitude.
+
+**Examples**
+
+Sigmoid function with amplitude = 1 ::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> sigmoid(-1,1)
+ 0.2689414213699951207488408
+ >>> sigmoid(-0.5,1)
+ 0.3775406687981454353610994
+ >>> sigmoid(0,1)
+ 0.5
+ >>> sigmoid(0.5,1)
+ 0.6224593312018545646389006
+ >>> sigmoid(1,1)
+ 0.7310585786300048792511592
+
+"""
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/functions/bessel.py b/pythonProject/.venv/Lib/site-packages/mpmath/functions/bessel.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b41d87bb0118de61d5561433dabcb181f872f84
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/functions/bessel.py
@@ -0,0 +1,1108 @@
+from .functions import defun, defun_wrapped
+
+@defun
+def j0(ctx, x):
+ """Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`."""
+ return ctx.besselj(0, x)
+
+@defun
+def j1(ctx, x):
+ """Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`."""
+ return ctx.besselj(1, x)
+
+@defun
+def besselj(ctx, n, z, derivative=0, **kwargs):
+ if type(n) is int:
+ n_isint = True
+ else:
+ n = ctx.convert(n)
+ n_isint = ctx.isint(n)
+ if n_isint:
+ n = int(ctx._re(n))
+ if n_isint and n < 0:
+ return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs)
+ z = ctx.convert(z)
+ M = ctx.mag(z)
+ if derivative:
+ d = ctx.convert(derivative)
+ # TODO: the integer special-casing shouldn't be necessary.
+ # However, the hypergeometric series gets inaccurate for large d
+ # because of inaccurate pole cancellation at a pole far from
+ # zero (needs to be fixed in hypercomb or hypsum)
+ if ctx.isint(d) and d >= 0:
+ d = int(d)
+ orig = ctx.prec
+ try:
+ ctx.prec += 15
+ v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z)
+ for k in range(d+1))
+ finally:
+ ctx.prec = orig
+ v *= ctx.mpf(2)**(-d)
+ else:
+ def h(n,d):
+ r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True)
+ B = [0.5*(n-d+1), 0.5*(n-d+2)]
+ T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)]
+ return T
+ v = ctx.hypercomb(h, [n,d], **kwargs)
+ else:
+ # Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation
+ if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20:
+ try:
+ return ctx._besselj(n, z)
+ except NotImplementedError:
+ pass
+ if not z:
+ if not n:
+ v = ctx.one + n+z
+ elif ctx.re(n) > 0:
+ v = n*z
+ else:
+ v = ctx.inf + z + n
+ else:
+ #v = 0
+ orig = ctx.prec
+ try:
+ # XXX: workaround for accuracy in low level hypergeometric series
+ # when alternating, large arguments
+ ctx.prec += min(3*abs(M), ctx.prec)
+ w = ctx.fmul(z, 0.5, exact=True)
+ def h(n):
+ r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True)
+ return [([w], [n], [], [n+1], [], [n+1], r)]
+ v = ctx.hypercomb(h, [n], **kwargs)
+ finally:
+ ctx.prec = orig
+ v = +v
+ return v
+
+@defun
+def besseli(ctx, n, z, derivative=0, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ if not z:
+ if derivative:
+ raise ValueError
+ if not n:
+ # I(0,0) = 1
+ return 1+n+z
+ if ctx.isint(n):
+ return 0*(n+z)
+ r = ctx.re(n)
+ if r == 0:
+ return ctx.nan*(n+z)
+ elif r > 0:
+ return 0*(n+z)
+ else:
+ return ctx.inf+(n+z)
+ M = ctx.mag(z)
+ if derivative:
+ d = ctx.convert(derivative)
+ def h(n,d):
+ r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True)
+ B = [0.5*(n-d+1), 0.5*(n-d+2), n+1]
+ T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)]
+ return T
+ v = ctx.hypercomb(h, [n,d], **kwargs)
+ else:
+ def h(n):
+ w = ctx.fmul(z, 0.5, exact=True)
+ r = ctx.fmul(w, w, prec=max(0,ctx.prec+M))
+ return [([w], [n], [], [n+1], [], [n+1], r)]
+ v = ctx.hypercomb(h, [n], **kwargs)
+ return v
+
+@defun_wrapped
+def bessely(ctx, n, z, derivative=0, **kwargs):
+ if not z:
+ if derivative:
+ # Not implemented
+ raise ValueError
+ if not n:
+ # ~ log(z/2)
+ return -ctx.inf + (n+z)
+ if ctx.im(n):
+ return ctx.nan * (n+z)
+ r = ctx.re(n)
+ q = n+0.5
+ if ctx.isint(q):
+ if n > 0:
+ return -ctx.inf + (n+z)
+ else:
+ return 0 * (n+z)
+ if r < 0 and int(ctx.floor(q)) % 2:
+ return ctx.inf + (n+z)
+ else:
+ return ctx.ninf + (n+z)
+ # XXX: use hypercomb
+ ctx.prec += 10
+ m, d = ctx.nint_distance(n)
+ if d < -ctx.prec:
+ h = +ctx.eps
+ ctx.prec *= 2
+ n += h
+ elif d < 0:
+ ctx.prec -= d
+ # TODO: avoid cancellation for imaginary arguments
+ cos, sin = ctx.cospi_sinpi(n)
+ return (ctx.besselj(n,z,derivative,**kwargs)*cos - \
+ ctx.besselj(-n,z,derivative,**kwargs))/sin
+
+@defun_wrapped
+def besselk(ctx, n, z, **kwargs):
+ if not z:
+ return ctx.inf
+ M = ctx.mag(z)
+ if M < 1:
+ # Represent as limit definition
+ def h(n):
+ r = (z/2)**2
+ T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r
+ T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r
+ return T1, T2
+ # We could use the limit definition always, but it leads
+ # to very bad cancellation (of exponentially large terms)
+ # for large real z
+ # Instead represent in terms of 2F0
+ else:
+ ctx.prec += M
+ def h(n):
+ return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \
+ [n+0.5, 0.5-n], [], -1/(2*z))]
+ return ctx.hypercomb(h, [n], **kwargs)
+
+@defun_wrapped
+def hankel1(ctx,n,x,**kwargs):
+ return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs)
+
+@defun_wrapped
+def hankel2(ctx,n,x,**kwargs):
+ return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs)
+
+@defun_wrapped
+def whitm(ctx,k,m,z,**kwargs):
+ if z == 0:
+ # M(k,m,z) = 0^(1/2+m)
+ if ctx.re(m) > -0.5:
+ return z
+ elif ctx.re(m) < -0.5:
+ return ctx.inf + z
+ else:
+ return ctx.nan * z
+ x = ctx.fmul(-0.5, z, exact=True)
+ y = 0.5+m
+ return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs)
+
+@defun_wrapped
+def whitw(ctx,k,m,z,**kwargs):
+ if z == 0:
+ g = abs(ctx.re(m))
+ if g < 0.5:
+ return z
+ elif g > 0.5:
+ return ctx.inf + z
+ else:
+ return ctx.nan * z
+ x = ctx.fmul(-0.5, z, exact=True)
+ y = 0.5+m
+ return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs)
+
+@defun
+def hyperu(ctx, a, b, z, **kwargs):
+ a, atype = ctx._convert_param(a)
+ b, btype = ctx._convert_param(b)
+ z = ctx.convert(z)
+ if not z:
+ if ctx.re(b) <= 1:
+ return ctx.gammaprod([1-b],[a-b+1])
+ else:
+ return ctx.inf + z
+ bb = 1+a-b
+ bb, bbtype = ctx._convert_param(bb)
+ try:
+ orig = ctx.prec
+ try:
+ ctx.prec += 10
+ v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec)
+ return v / z**a
+ finally:
+ ctx.prec = orig
+ except ctx.NoConvergence:
+ pass
+ def h(a,b):
+ w = ctx.sinpi(b)
+ T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z)
+ T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z)
+ return T1, T2
+ return ctx.hypercomb(h, [a,b], **kwargs)
+
+@defun
+def struveh(ctx,n,z, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ # http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/
+ def h(n):
+ return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)]
+ return ctx.hypercomb(h, [n], **kwargs)
+
+@defun
+def struvel(ctx,n,z, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ # http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/
+ def h(n):
+ return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)]
+ return ctx.hypercomb(h, [n], **kwargs)
+
+def _anger(ctx,which,v,z,**kwargs):
+ v = ctx._convert_param(v)[0]
+ z = ctx.convert(z)
+ def h(v):
+ b = ctx.mpq_1_2
+ u = v*b
+ m = b*3
+ a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u
+ c, s = ctx.cospi_sinpi(u)
+ if which == 0:
+ A, B = [b*z, s], [c]
+ if which == 1:
+ A, B = [b*z, -c], [s]
+ w = ctx.square_exp_arg(z, mult=-0.25)
+ T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w
+ T2 = B, [1], [], [b1,b2], [1], [b1,b2], w
+ return T1, T2
+ return ctx.hypercomb(h, [v], **kwargs)
+
+@defun
+def angerj(ctx, v, z, **kwargs):
+ return _anger(ctx, 0, v, z, **kwargs)
+
+@defun
+def webere(ctx, v, z, **kwargs):
+ return _anger(ctx, 1, v, z, **kwargs)
+
+@defun
+def lommels1(ctx, u, v, z, **kwargs):
+ u = ctx._convert_param(u)[0]
+ v = ctx._convert_param(v)[0]
+ z = ctx.convert(z)
+ def h(u,v):
+ b = ctx.mpq_1_2
+ w = ctx.square_exp_arg(z, mult=-0.25)
+ return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \
+ [b*(u-v+3),b*(u+v+3)], w),
+ return ctx.hypercomb(h, [u,v], **kwargs)
+
+@defun
+def lommels2(ctx, u, v, z, **kwargs):
+ u = ctx._convert_param(u)[0]
+ v = ctx._convert_param(v)[0]
+ z = ctx.convert(z)
+ # Asymptotic expansion (GR p. 947) -- need to be careful
+ # not to use for small arguments
+ # def h(u,v):
+ # b = ctx.mpq_1_2
+ # w = -(z/2)**(-2)
+ # return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w),
+ def h(u,v):
+ b = ctx.mpq_1_2
+ w = ctx.square_exp_arg(z, mult=-0.25)
+ T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w
+ T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w
+ T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w
+ #c1 = ctx.cospi((u-v)*b)
+ #c2 = ctx.cospi((u+v)*b)
+ #s = ctx.sinpi(v)
+ #r1 = (u-v+1)*b
+ #r2 = (u+v+1)*b
+ #T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w
+ #T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w
+ #T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w
+ #T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w
+ return T1, T2, T3
+ return ctx.hypercomb(h, [u,v], **kwargs)
+
+@defun
+def ber(ctx, n, z, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/
+ def h(n):
+ r = -(z/4)**4
+ cos, sin = ctx.cospi_sinpi(-0.75*n)
+ T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
+ T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
+ return T1, T2
+ return ctx.hypercomb(h, [n], **kwargs)
+
+@defun
+def bei(ctx, n, z, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/
+ def h(n):
+ r = -(z/4)**4
+ cos, sin = ctx.cospi_sinpi(0.75*n)
+ T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
+ T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
+ return T1, T2
+ return ctx.hypercomb(h, [n], **kwargs)
+
+@defun
+def ker(ctx, n, z, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/
+ def h(n):
+ r = -(z/4)**4
+ cos1, sin1 = ctx.cospi_sinpi(0.25*n)
+ cos2, sin2 = ctx.cospi_sinpi(0.75*n)
+ T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r
+ T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r
+ T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
+ T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
+ return T1, T2, T3, T4
+ return ctx.hypercomb(h, [n], **kwargs)
+
+@defun
+def kei(ctx, n, z, **kwargs):
+ n = ctx.convert(n)
+ z = ctx.convert(z)
+ # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/
+ def h(n):
+ r = -(z/4)**4
+ cos1, sin1 = ctx.cospi_sinpi(0.75*n)
+ cos2, sin2 = ctx.cospi_sinpi(0.25*n)
+ T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
+ T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
+ T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r
+ T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r
+ return T1, T2, T3, T4
+ return ctx.hypercomb(h, [n], **kwargs)
+
+# TODO: do this more generically?
+def c_memo(f):
+ name = f.__name__
+ def f_wrapped(ctx):
+ cache = ctx._misc_const_cache
+ prec = ctx.prec
+ p,v = cache.get(name, (-1,0))
+ if p >= prec:
+ return +v
+ else:
+ cache[name] = (prec, f(ctx))
+ return cache[name][1]
+ return f_wrapped
+
+@c_memo
+def _airyai_C1(ctx):
+ return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3))
+
+@c_memo
+def _airyai_C2(ctx):
+ return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3))
+
+@c_memo
+def _airybi_C1(ctx):
+ return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3))
+
+@c_memo
+def _airybi_C2(ctx):
+ return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3)
+
+def _airybi_n2_inf(ctx):
+ prec = ctx.prec
+ try:
+ v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi)
+ finally:
+ ctx.prec = prec
+ return +v
+
+# Derivatives at z = 0
+# TODO: could be expressed more elegantly using triple factorials
+def _airyderiv_0(ctx, z, n, ntype, which):
+ if ntype == 'Z':
+ if n < 0:
+ return z
+ r = ctx.mpq_1_3
+ prec = ctx.prec
+ try:
+ ctx.prec += 10
+ v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi
+ if which == 0:
+ v *= ctx.sinpi(2*(n+1)*r)
+ v /= ctx.power(3,'2/3')
+ else:
+ v *= abs(ctx.sinpi(2*(n+1)*r))
+ v /= ctx.power(3,'1/6')
+ finally:
+ ctx.prec = prec
+ return +v + z
+ else:
+ # singular (does the limit exist?)
+ raise NotImplementedError
+
+@defun
+def airyai(ctx, z, derivative=0, **kwargs):
+ z = ctx.convert(z)
+ if derivative:
+ n, ntype = ctx._convert_param(derivative)
+ else:
+ n = 0
+ # Values at infinities
+ if not ctx.isnormal(z) and z:
+ if n and ntype == 'Z':
+ if n == -1:
+ if z == ctx.inf:
+ return ctx.mpf(1)/3 + 1/z
+ if z == ctx.ninf:
+ return ctx.mpf(-2)/3 + 1/z
+ if n < -1:
+ if z == ctx.inf:
+ return z
+ if z == ctx.ninf:
+ return (-1)**n * (-z)
+ if (not n) and z == ctx.inf or z == ctx.ninf:
+ return 1/z
+ # TODO: limits
+ raise ValueError("essential singularity of Ai(z)")
+ # Account for exponential scaling
+ if z:
+ extraprec = max(0, int(1.5*ctx.mag(z)))
+ else:
+ extraprec = 0
+ if n:
+ if n == 1:
+ def h():
+ # http://functions.wolfram.com/03.07.06.0005.01
+ if ctx._re(z) > 4:
+ ctx.prec += extraprec
+ w = z**1.5; r = -0.75/w; u = -2*w/3
+ ctx.prec -= extraprec
+ C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4)
+ return ([C],[1],[],[],[(-1,6),(7,6)],[],r),
+ # http://functions.wolfram.com/03.07.26.0001.01
+ else:
+ ctx.prec += extraprec
+ w = z**3 / 9
+ ctx.prec -= extraprec
+ C1 = _airyai_C1(ctx) * 0.5
+ C2 = _airyai_C2(ctx)
+ T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
+ T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
+ return T1, T2
+ return ctx.hypercomb(h, [], **kwargs)
+ else:
+ if z == 0:
+ return _airyderiv_0(ctx, z, n, ntype, 0)
+ # http://functions.wolfram.com/03.05.20.0004.01
+ def h(n):
+ ctx.prec += extraprec
+ w = z**3/9
+ ctx.prec -= extraprec
+ q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
+ a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
+ T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \
+ [a1,a2], [b1,b2,b3], w
+ a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
+ T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \
+ [a1,a2], [b1,b2,b3], w
+ return T1, T2
+ v = ctx.hypercomb(h, [n], **kwargs)
+ if ctx._is_real_type(z) and ctx.isint(n):
+ v = ctx._re(v)
+ return v
+ else:
+ def h():
+ if ctx._re(z) > 4:
+ # We could use 1F1, but it results in huge cancellation;
+ # the following expansion is better.
+ # TODO: asymptotic series for derivatives
+ ctx.prec += extraprec
+ w = z**1.5; r = -0.75/w; u = -2*w/3
+ ctx.prec -= extraprec
+ C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4))
+ return ([C],[1],[],[],[(1,6),(5,6)],[],r),
+ else:
+ ctx.prec += extraprec
+ w = z**3 / 9
+ ctx.prec -= extraprec
+ C1 = _airyai_C1(ctx)
+ C2 = _airyai_C2(ctx)
+ T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
+ T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
+ return T1, T2
+ return ctx.hypercomb(h, [], **kwargs)
+
+@defun
+def airybi(ctx, z, derivative=0, **kwargs):
+ z = ctx.convert(z)
+ if derivative:
+ n, ntype = ctx._convert_param(derivative)
+ else:
+ n = 0
+ # Values at infinities
+ if not ctx.isnormal(z) and z:
+ if n and ntype == 'Z':
+ if z == ctx.inf:
+ return z
+ if z == ctx.ninf:
+ if n == -1:
+ return 1/z
+ if n == -2:
+ return _airybi_n2_inf(ctx)
+ if n < -2:
+ return (-1)**n * (-z)
+ if not n:
+ if z == ctx.inf:
+ return z
+ if z == ctx.ninf:
+ return 1/z
+ # TODO: limits
+ raise ValueError("essential singularity of Bi(z)")
+ if z:
+ extraprec = max(0, int(1.5*ctx.mag(z)))
+ else:
+ extraprec = 0
+ if n:
+ if n == 1:
+ # http://functions.wolfram.com/03.08.26.0001.01
+ def h():
+ ctx.prec += extraprec
+ w = z**3 / 9
+ ctx.prec -= extraprec
+ C1 = _airybi_C1(ctx)*0.5
+ C2 = _airybi_C2(ctx)
+ T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
+ T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
+ return T1, T2
+ return ctx.hypercomb(h, [], **kwargs)
+ else:
+ if z == 0:
+ return _airyderiv_0(ctx, z, n, ntype, 1)
+ def h(n):
+ ctx.prec += extraprec
+ w = z**3/9
+ ctx.prec -= extraprec
+ q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
+ q16 = ctx.mpq_1_6
+ q56 = ctx.mpq_5_6
+ a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
+ T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \
+ [a1,a2], [b1,b2,b3], w
+ a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
+ T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \
+ [a1,a2], [b1,b2,b3], w
+ return T1, T2
+ v = ctx.hypercomb(h, [n], **kwargs)
+ if ctx._is_real_type(z) and ctx.isint(n):
+ v = ctx._re(v)
+ return v
+ else:
+ def h():
+ ctx.prec += extraprec
+ w = z**3 / 9
+ ctx.prec -= extraprec
+ C1 = _airybi_C1(ctx)
+ C2 = _airybi_C2(ctx)
+ T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
+ T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
+ return T1, T2
+ return ctx.hypercomb(h, [], **kwargs)
+
+def _airy_zero(ctx, which, k, derivative, complex=False):
+ # Asymptotic formulas are given in DLMF section 9.9
+ def U(t): return t**(2/3.)*(1-7/(t**2*48))
+ def T(t): return t**(2/3.)*(1+5/(t**2*48))
+ k = int(k)
+ if k < 1:
+ raise ValueError("k cannot be less than 1")
+ if not derivative in (0,1):
+ raise ValueError("Derivative should lie between 0 and 1")
+ if which == 0:
+ if derivative:
+ return ctx.findroot(lambda z: ctx.airyai(z,1),
+ -U(3*ctx.pi*(4*k-3)/8))
+ return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8))
+ if which == 1 and complex == False:
+ if derivative:
+ return ctx.findroot(lambda z: ctx.airybi(z,1),
+ -U(3*ctx.pi*(4*k-1)/8))
+ return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8))
+ if which == 1 and complex == True:
+ if derivative:
+ t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2
+ s = ctx.expjpi(ctx.mpf(1)/3) * T(t)
+ return ctx.findroot(lambda z: ctx.airybi(z,1), s)
+ t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2
+ s = ctx.expjpi(ctx.mpf(1)/3) * U(t)
+ return ctx.findroot(ctx.airybi, s)
+
+@defun
+def airyaizero(ctx, k, derivative=0):
+ return _airy_zero(ctx, 0, k, derivative, False)
+
+@defun
+def airybizero(ctx, k, derivative=0, complex=False):
+ return _airy_zero(ctx, 1, k, derivative, complex)
+
+def _scorer(ctx, z, which, kwargs):
+ z = ctx.convert(z)
+ if ctx.isinf(z):
+ if z == ctx.inf:
+ if which == 0: return 1/z
+ if which == 1: return z
+ if z == ctx.ninf:
+ return 1/z
+ raise ValueError("essential singularity")
+ if z:
+ extraprec = max(0, int(1.5*ctx.mag(z)))
+ else:
+ extraprec = 0
+ if kwargs.get('derivative'):
+ raise NotImplementedError
+ # Direct asymptotic expansions, to avoid
+ # exponentially large cancellation
+ try:
+ if ctx.mag(z) > 3:
+ if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999:
+ def h():
+ return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
+ return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
+ if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999:
+ def h():
+ return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
+ return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
+ except ctx.NoConvergence:
+ pass
+ def h():
+ A = ctx.airybi(z, **kwargs)/3
+ B = -2*ctx.pi
+ if which == 1:
+ A *= 2
+ B *= -1
+ ctx.prec += extraprec
+ w = z**3/9
+ ctx.prec -= extraprec
+ T1 = [A], [1], [], [], [], [], 0
+ T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w
+ return T1, T2
+ return ctx.hypercomb(h, [], **kwargs)
+
+@defun
+def scorergi(ctx, z, **kwargs):
+ return _scorer(ctx, z, 0, kwargs)
+
+@defun
+def scorerhi(ctx, z, **kwargs):
+ return _scorer(ctx, z, 1, kwargs)
+
+@defun_wrapped
+def coulombc(ctx, l, eta, _cache={}):
+ if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
+ return +_cache[l,eta][1]
+ G3 = ctx.loggamma(2*l+2)
+ G1 = ctx.loggamma(1+l+ctx.j*eta)
+ G2 = ctx.loggamma(1+l-ctx.j*eta)
+ v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3)
+ if not (ctx.im(l) or ctx.im(eta)):
+ v = ctx.re(v)
+ _cache[l,eta] = (ctx.prec, v)
+ return v
+
+@defun_wrapped
+def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs):
+ # Regular Coulomb wave function
+ # Note: w can be either 1 or -1; the other may be better in some cases
+ # TODO: check that chop=True chops when and only when it should
+ #ctx.prec += 10
+ def h(l, eta):
+ try:
+ jw = ctx.j*w
+ jwz = ctx.fmul(jw, z, exact=True)
+ jwz2 = ctx.fmul(jwz, -2, exact=True)
+ C = ctx.coulombc(l, eta)
+ T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \
+ [2*l+2], jwz2
+ except ValueError:
+ T1 = [0], [-1], [], [], [], [], 0
+ return (T1,)
+ v = ctx.hypercomb(h, [l,eta], **kwargs)
+ if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \
+ (ctx.re(z) >= 0):
+ v = ctx.re(v)
+ return v
+
+@defun_wrapped
+def _coulomb_chi(ctx, l, eta, _cache={}):
+ if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
+ return _cache[l,eta][1]
+ def terms():
+ l2 = -l-1
+ jeta = ctx.j*eta
+ return [ctx.loggamma(1+l+jeta) * (-0.5j),
+ ctx.loggamma(1+l-jeta) * (0.5j),
+ ctx.loggamma(1+l2+jeta) * (0.5j),
+ ctx.loggamma(1+l2-jeta) * (-0.5j),
+ -(l+0.5)*ctx.pi]
+ v = ctx.sum_accurately(terms, 1)
+ _cache[l,eta] = (ctx.prec, v)
+ return v
+
+@defun_wrapped
+def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs):
+ # Irregular Coulomb wave function
+ # Note: w can be either 1 or -1; the other may be better in some cases
+ # TODO: check that chop=True chops when and only when it should
+ if not ctx._im(l):
+ l = ctx._re(l) # XXX: for isint
+ def h(l, eta):
+ # Force perturbation for integers and half-integers
+ if ctx.isint(l*2):
+ T1 = [0], [-1], [], [], [], [], 0
+ return (T1,)
+ l2 = -l-1
+ try:
+ chi = ctx._coulomb_chi(l, eta)
+ jw = ctx.j*w
+ s = ctx.sin(chi); c = ctx.cos(chi)
+ C1 = ctx.coulombc(l,eta)
+ C2 = ctx.coulombc(l2,eta)
+ u = ctx.exp(jw*z)
+ x = -2*jw*z
+ T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \
+ [1+l+jw*eta], [2*l+2], x
+ T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \
+ [1+l2+jw*eta], [2*l2+2], x
+ return T1, T2
+ except ValueError:
+ T1 = [0], [-1], [], [], [], [], 0
+ return (T1,)
+ v = ctx.hypercomb(h, [l,eta], **kwargs)
+ if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \
+ (ctx._re(z) >= 0):
+ v = ctx._re(v)
+ return v
+
+def mcmahon(ctx,kind,prime,v,m):
+ """
+ Computes an estimate for the location of the Bessel function zero
+ j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic
+ expansion (Abramowitz & Stegun 9.5.12-13, DLMF 10.21(vi)).
+
+ Returns (r,err) where r is the estimated location of the root
+ and err is a positive number estimating the error of the
+ asymptotic expansion.
+ """
+ u = 4*v**2
+ if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4
+ if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4
+ if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4
+ if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4
+ if not prime:
+ s1 = b
+ s2 = -(u-1)/(8*b)
+ s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3)
+ s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5)
+ s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7)
+ if prime:
+ s1 = b
+ s2 = -(u+3)/(8*b)
+ s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3)
+ s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5)
+ s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7)
+ terms = [s1,s2,s3,s4,s5]
+ s = s1
+ err = 0.0
+ for i in range(1,len(terms)):
+ if abs(terms[i]) < abs(terms[i-1]):
+ s += terms[i]
+ else:
+ err = abs(terms[i])
+ if i == len(terms)-1:
+ err = abs(terms[-1])
+ return s, err
+
+def generalized_bisection(ctx,f,a,b,n):
+ """
+ Given f known to have exactly n simple roots within [a,b],
+ return a list of n intervals isolating the roots
+ and having opposite signs at the endpoints.
+
+ TODO: this can be optimized, e.g. by reusing evaluation points.
+ """
+ if n < 1:
+ raise ValueError("n cannot be less than 1")
+ N = n+1
+ points = []
+ signs = []
+ while 1:
+ points = ctx.linspace(a,b,N)
+ signs = [ctx.sign(f(x)) for x in points]
+ ok_intervals = [(points[i],points[i+1]) for i in range(N-1) \
+ if signs[i]*signs[i+1] == -1]
+ if len(ok_intervals) == n:
+ return ok_intervals
+ N = N*2
+
+def find_in_interval(ctx, f, ab):
+ return ctx.findroot(f, ab, solver='illinois', verify=False)
+
+def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}):
+ prec = ctx.prec
+ workprec = max(prec, ctx.mag(v), ctx.mag(m))+10
+ try:
+ ctx.prec = workprec
+ v = ctx.mpf(v)
+ m = int(m)
+ prime = int(prime)
+ if v < 0:
+ raise ValueError("v cannot be negative")
+ if m < 1:
+ raise ValueError("m cannot be less than 1")
+ if not prime in (0,1):
+ raise ValueError("prime should lie between 0 and 1")
+ if kind == 1:
+ if prime: f = lambda x: ctx.besselj(v,x,derivative=1)
+ else: f = lambda x: ctx.besselj(v,x)
+ if kind == 2:
+ if prime: f = lambda x: ctx.bessely(v,x,derivative=1)
+ else: f = lambda x: ctx.bessely(v,x)
+ # The first root of J' is very close to 0 for small
+ # orders, and this needs to be special-cased
+ if kind == 1 and prime and m == 1:
+ if v == 0:
+ return ctx.zero
+ if v <= 1:
+ # TODO: use v <= j'_{v,1} < y_{v,1}?
+ r = 2*ctx.sqrt(v*(1+v)/(v+2))
+ return find_in_interval(ctx, f, (r/10, 2*r))
+ if (kind,prime,v,m) in _interval_cache:
+ return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m])
+ r, err = mcmahon(ctx, kind, prime, v, m)
+ if err < isoltol:
+ return find_in_interval(ctx, f, (r-isoltol, r+isoltol))
+ # An x such that 0 < x < r_{v,1}
+ if kind == 1 and not prime: low = 2.4
+ if kind == 1 and prime: low = 1.8
+ if kind == 2 and not prime: low = 0.8
+ if kind == 2 and prime: low = 2.0
+ n = m+1
+ while 1:
+ r1, err = mcmahon(ctx, kind, prime, v, n)
+ if err < isoltol:
+ r2, err2 = mcmahon(ctx, kind, prime, v, n+1)
+ intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n)
+ for k, ab in enumerate(intervals):
+ _interval_cache[kind,prime,v,k+1] = ab
+ return find_in_interval(ctx, f, intervals[m-1])
+ else:
+ n = n*2
+ finally:
+ ctx.prec = prec
+
+@defun
+def besseljzero(ctx, v, m, derivative=0):
+ r"""
+ For a real order `\nu \ge 0` and a positive integer `m`, returns
+ `j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
+ first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively,
+ with *derivative=1*, gives the first nonnegative simple zero
+ `j'_{\nu,m}` of `J'_{\nu}(z)`.
+
+ The indexing convention is that used by Abramowitz & Stegun
+ and the DLMF. Note the special case `j'_{0,1} = 0`, while all other
+ zeros are positive. In effect, only simple zeros are counted
+ (all zeros of Bessel functions are simple except possibly `z = 0`)
+ and `j_{\nu,m}` becomes a monotonic function of both `\nu`
+ and `m`.
+
+ The zeros are interlaced according to the inequalities
+
+ .. math ::
+
+ j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1}
+
+ j_{\nu,1} < j_{\nu+1,1} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots
+
+ **Examples**
+
+ Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3)
+ 2.404825557695772768621632
+ 5.520078110286310649596604
+ 8.653727912911012216954199
+ >>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3)
+ 3.831705970207512315614436
+ 7.01558666981561875353705
+ 10.17346813506272207718571
+ >>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3)
+ 5.135622301840682556301402
+ 8.417244140399864857783614
+ 11.61984117214905942709415
+
+ Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`::
+
+ >>> besseljzero(0,1,1); besseljzero(0,2,1); besseljzero(0,3,1)
+ 0.0
+ 3.831705970207512315614436
+ 7.01558666981561875353705
+ >>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1)
+ 1.84118378134065930264363
+ 5.331442773525032636884016
+ 8.536316366346285834358961
+ >>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1)
+ 3.054236928227140322755932
+ 6.706133194158459146634394
+ 9.969467823087595793179143
+
+ Zeros with large index::
+
+ >>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000)
+ 313.3742660775278447196902
+ 3140.807295225078628895545
+ 31415.14114171350798533666
+ >>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000)
+ 321.1893195676003157339222
+ 3148.657306813047523500494
+ 31422.9947255486291798943
+ >>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1)
+ 311.8018681873704508125112
+ 3139.236339643802482833973
+ 31413.57032947022399485808
+
+ Zeros of functions with large order::
+
+ >>> besseljzero(50,1)
+ 57.11689916011917411936228
+ >>> besseljzero(50,2)
+ 62.80769876483536093435393
+ >>> besseljzero(50,100)
+ 388.6936600656058834640981
+ >>> besseljzero(50,1,1)
+ 52.99764038731665010944037
+ >>> besseljzero(50,2,1)
+ 60.02631933279942589882363
+ >>> besseljzero(50,100,1)
+ 387.1083151608726181086283
+
+ Zeros of functions with fractional order::
+
+ >>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4)
+ 3.141592653589793238462643
+ 4.493409457909064175307881
+ 15.15657692957458622921634
+
+ Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite
+ products over their zeros::
+
+ >>> v,z = 2, mpf(1)
+ >>> (z/2)**v/gamma(v+1) * \
+ ... nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf])
+ ...
+ 0.1149034849319004804696469
+ >>> besselj(v,z)
+ 0.1149034849319004804696469
+ >>> (z/2)**(v-1)/2/gamma(v) * \
+ ... nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf])
+ ...
+ 0.2102436158811325550203884
+ >>> besselj(v,z,1)
+ 0.2102436158811325550203884
+
+ """
+ return +bessel_zero(ctx, 1, derivative, v, m)
+
+@defun
+def besselyzero(ctx, v, m, derivative=0):
+ r"""
+ For a real order `\nu \ge 0` and a positive integer `m`, returns
+ `y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
+ second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively,
+ with *derivative=1*, gives the first positive zero `y'_{\nu,m}` of
+ `Y'_{\nu}(z)`.
+
+ The zeros are interlaced according to the inequalities
+
+ .. math ::
+
+ y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1}
+
+ y_{\nu,1} < y_{\nu+1,1} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots
+
+ **Examples**
+
+ Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3)
+ 0.8935769662791675215848871
+ 3.957678419314857868375677
+ 7.086051060301772697623625
+ >>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3)
+ 2.197141326031017035149034
+ 5.429681040794135132772005
+ 8.596005868331168926429606
+ >>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3)
+ 3.384241767149593472701426
+ 6.793807513268267538291167
+ 10.02347797936003797850539
+
+ Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`::
+
+ >>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1)
+ 2.197141326031017035149034
+ 5.429681040794135132772005
+ 8.596005868331168926429606
+ >>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1)
+ 3.683022856585177699898967
+ 6.941499953654175655751944
+ 10.12340465543661307978775
+ >>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1)
+ 5.002582931446063945200176
+ 8.350724701413079526349714
+ 11.57419546521764654624265
+
+ Zeros with large index::
+
+ >>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000)
+ 311.8034717601871549333419
+ 3139.236498918198006794026
+ 31413.57034538691205229188
+ >>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000)
+ 319.6183338562782156235062
+ 3147.086508524556404473186
+ 31421.42392920214673402828
+ >>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1)
+ 313.3726705426359345050449
+ 3140.807136030340213610065
+ 31415.14112579761578220175
+
+ Zeros of functions with large order::
+
+ >>> besselyzero(50,1)
+ 53.50285882040036394680237
+ >>> besselyzero(50,2)
+ 60.11244442774058114686022
+ >>> besselyzero(50,100)
+ 387.1096509824943957706835
+ >>> besselyzero(50,1,1)
+ 56.96290427516751320063605
+ >>> besselyzero(50,2,1)
+ 62.74888166945933944036623
+ >>> besselyzero(50,100,1)
+ 388.6923300548309258355475
+
+ Zeros of functions with fractional order::
+
+ >>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4)
+ 1.570796326794896619231322
+ 2.798386045783887136720249
+ 13.56721208770735123376018
+
+ """
+ return +bessel_zero(ctx, 2, derivative, v, m)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/functions/elliptic.py b/pythonProject/.venv/Lib/site-packages/mpmath/functions/elliptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e198697fa042b7cc8bcba9e9e770f5c8106dad6
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/functions/elliptic.py
@@ -0,0 +1,1431 @@
+r"""
+Elliptic functions historically comprise the elliptic integrals
+and their inverses, and originate from the problem of computing the
+arc length of an ellipse. From a more modern point of view,
+an elliptic function is defined as a doubly periodic function, i.e.
+a function which satisfies
+
+.. math ::
+
+ f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
+
+for some half-periods `\omega_1, \omega_2` with
+`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
+functions are the Jacobi elliptic functions. More broadly, this section
+includes quasi-doubly periodic functions (such as the Jacobi theta
+functions) and other functions useful in the study of elliptic functions.
+
+Many different conventions for the arguments of
+elliptic functions are in use. It is even standard to use
+different parameterizations for different functions in the same
+text or software (and mpmath is no exception).
+The usual parameters are the elliptic nome `q`, which usually
+must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
+complex number); the elliptic modulus `k` (an arbitrary complex
+number); and the half-period ratio `\tau`, which usually must
+satisfy `\mathrm{Im}[\tau] > 0`.
+These quantities can be expressed in terms of each other
+using the following relations:
+
+.. math ::
+
+ m = k^2
+
+.. math ::
+
+ \tau = i \frac{K(1-m)}{K(m)}
+
+.. math ::
+
+ q = e^{i \pi \tau}
+
+.. math ::
+
+ k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
+
+In addition, an alternative definition is used for the nome in
+number theory, which we here denote by q-bar:
+
+.. math ::
+
+ \bar{q} = q^2 = e^{2 i \pi \tau}
+
+For convenience, mpmath provides functions to convert
+between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
+:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
+
+**References**
+
+1. [AbramowitzStegun]_
+
+2. [WhittakerWatson]_
+
+"""
+
+from .functions import defun, defun_wrapped
+
+@defun_wrapped
+def eta(ctx, tau):
+ r"""
+ Returns the Dedekind eta function of tau in the upper half-plane.
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> eta(1j); gamma(0.25) / (2*pi**0.75)
+ (0.7682254223260566590025942 + 0.0j)
+ 0.7682254223260566590025942
+ >>> tau = sqrt(2) + sqrt(5)*1j
+ >>> eta(-1/tau); sqrt(-1j*tau) * eta(tau)
+ (0.9022859908439376463573294 + 0.07985093673948098408048575j)
+ (0.9022859908439376463573295 + 0.07985093673948098408048575j)
+ >>> eta(tau+1); exp(pi*1j/12) * eta(tau)
+ (0.4493066139717553786223114 + 0.3290014793877986663915939j)
+ (0.4493066139717553786223114 + 0.3290014793877986663915939j)
+ >>> f = lambda z: diff(eta, z) / eta(z)
+ >>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3))
+ 0.0
+
+ """
+ if ctx.im(tau) <= 0.0:
+ raise ValueError("eta is only defined in the upper half-plane")
+ q = ctx.expjpi(tau/12)
+ return q * ctx.qp(q**24)
+
+def nome(ctx, m):
+ m = ctx.convert(m)
+ if not m:
+ return m
+ if m == ctx.one:
+ return m
+ if ctx.isnan(m):
+ return m
+ if ctx.isinf(m):
+ if m == ctx.ninf:
+ return type(m)(-1)
+ else:
+ return ctx.mpc(-1)
+ a = ctx.ellipk(ctx.one-m)
+ b = ctx.ellipk(m)
+ v = ctx.exp(-ctx.pi*a/b)
+ if not ctx._im(m) and ctx._re(m) < 1:
+ if ctx._is_real_type(m):
+ return v.real
+ else:
+ return v.real + 0j
+ elif m == 2:
+ v = ctx.mpc(0, v.imag)
+ return v
+
+@defun_wrapped
+def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
+ r"""
+ Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> qfrom(q=0.25)
+ 0.25
+ >>> qfrom(m=mfrom(q=0.25))
+ 0.25
+ >>> qfrom(k=kfrom(q=0.25))
+ 0.25
+ >>> qfrom(tau=taufrom(q=0.25))
+ (0.25 + 0.0j)
+ >>> qfrom(qbar=qbarfrom(q=0.25))
+ 0.25
+
+ """
+ if q is not None:
+ return ctx.convert(q)
+ if m is not None:
+ return nome(ctx, m)
+ if k is not None:
+ return nome(ctx, ctx.convert(k)**2)
+ if tau is not None:
+ return ctx.expjpi(tau)
+ if qbar is not None:
+ return ctx.sqrt(qbar)
+
+@defun_wrapped
+def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
+ r"""
+ Returns the number-theoretic nome `\bar q`, given any of
+ `q, m, k, \tau, \bar{q}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> qbarfrom(qbar=0.25)
+ 0.25
+ >>> qbarfrom(q=qfrom(qbar=0.25))
+ 0.25
+ >>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
+ 0.25
+ >>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
+ 0.25
+ >>> qbarfrom(tau=taufrom(qbar=0.25))
+ (0.25 + 0.0j)
+
+ """
+ if qbar is not None:
+ return ctx.convert(qbar)
+ if q is not None:
+ return ctx.convert(q) ** 2
+ if m is not None:
+ return nome(ctx, m) ** 2
+ if k is not None:
+ return nome(ctx, ctx.convert(k)**2) ** 2
+ if tau is not None:
+ return ctx.expjpi(2*tau)
+
+@defun_wrapped
+def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
+ r"""
+ Returns the elliptic half-period ratio `\tau`, given any of
+ `q, m, k, \tau, \bar{q}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> taufrom(tau=0.5j)
+ (0.0 + 0.5j)
+ >>> taufrom(q=qfrom(tau=0.5j))
+ (0.0 + 0.5j)
+ >>> taufrom(m=mfrom(tau=0.5j))
+ (0.0 + 0.5j)
+ >>> taufrom(k=kfrom(tau=0.5j))
+ (0.0 + 0.5j)
+ >>> taufrom(qbar=qbarfrom(tau=0.5j))
+ (0.0 + 0.5j)
+
+ """
+ if tau is not None:
+ return ctx.convert(tau)
+ if m is not None:
+ m = ctx.convert(m)
+ return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m)
+ if k is not None:
+ k = ctx.convert(k)
+ return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2)
+ if q is not None:
+ return ctx.log(q) / (ctx.pi*ctx.j)
+ if qbar is not None:
+ qbar = ctx.convert(qbar)
+ return ctx.log(qbar) / (2*ctx.pi*ctx.j)
+
+@defun_wrapped
+def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
+ r"""
+ Returns the elliptic modulus `k`, given any of
+ `q, m, k, \tau, \bar{q}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> kfrom(k=0.25)
+ 0.25
+ >>> kfrom(m=mfrom(k=0.25))
+ 0.25
+ >>> kfrom(q=qfrom(k=0.25))
+ 0.25
+ >>> kfrom(tau=taufrom(k=0.25))
+ (0.25 + 0.0j)
+ >>> kfrom(qbar=qbarfrom(k=0.25))
+ 0.25
+
+ As `q \to 1` and `q \to -1`, `k` rapidly approaches
+ `1` and `i \infty` respectively::
+
+ >>> kfrom(q=0.75)
+ 0.9999999999999899166471767
+ >>> kfrom(q=-0.75)
+ (0.0 + 7041781.096692038332790615j)
+ >>> kfrom(q=1)
+ 1
+ >>> kfrom(q=-1)
+ (0.0 + +infj)
+ """
+ if k is not None:
+ return ctx.convert(k)
+ if m is not None:
+ return ctx.sqrt(m)
+ if tau is not None:
+ q = ctx.expjpi(tau)
+ if qbar is not None:
+ q = ctx.sqrt(qbar)
+ if q == 1:
+ return q
+ if q == -1:
+ return ctx.mpc(0,'inf')
+ return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2
+
+@defun_wrapped
+def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
+ r"""
+ Returns the elliptic parameter `m`, given any of
+ `q, m, k, \tau, \bar{q}`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> mfrom(m=0.25)
+ 0.25
+ >>> mfrom(q=qfrom(m=0.25))
+ 0.25
+ >>> mfrom(k=kfrom(m=0.25))
+ 0.25
+ >>> mfrom(tau=taufrom(m=0.25))
+ (0.25 + 0.0j)
+ >>> mfrom(qbar=qbarfrom(m=0.25))
+ 0.25
+
+ As `q \to 1` and `q \to -1`, `m` rapidly approaches
+ `1` and `-\infty` respectively::
+
+ >>> mfrom(q=0.75)
+ 0.9999999999999798332943533
+ >>> mfrom(q=-0.75)
+ -49586681013729.32611558353
+ >>> mfrom(q=1)
+ 1.0
+ >>> mfrom(q=-1)
+ -inf
+
+ The inverse nome as a function of `q` has an integer
+ Taylor series expansion::
+
+ >>> taylor(lambda q: mfrom(q), 0, 7)
+ [0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0]
+
+ """
+ if m is not None:
+ return m
+ if k is not None:
+ return k**2
+ if tau is not None:
+ q = ctx.expjpi(tau)
+ if qbar is not None:
+ q = ctx.sqrt(qbar)
+ if q == 1:
+ return ctx.convert(q)
+ if q == -1:
+ return q*ctx.inf
+ v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
+ if ctx._is_real_type(q) and q < 0:
+ v = v.real
+ return v
+
+jacobi_spec = {
+ 'sn' : ([3],[2],[1],[4], 'sin', 'tanh'),
+ 'cn' : ([4],[2],[2],[4], 'cos', 'sech'),
+ 'dn' : ([4],[3],[3],[4], '1', 'sech'),
+ 'ns' : ([2],[3],[4],[1], 'csc', 'coth'),
+ 'nc' : ([2],[4],[4],[2], 'sec', 'cosh'),
+ 'nd' : ([3],[4],[4],[3], '1', 'cosh'),
+ 'sc' : ([3],[4],[1],[2], 'tan', 'sinh'),
+ 'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'),
+ 'cd' : ([3],[2],[2],[3], 'cos', '1'),
+ 'cs' : ([4],[3],[2],[1], 'cot', 'csch'),
+ 'dc' : ([2],[3],[3],[2], 'sec', '1'),
+ 'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'),
+ 'cc' : None,
+ 'ss' : None,
+ 'nn' : None,
+ 'dd' : None
+}
+
+@defun
+def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
+ try:
+ S = jacobi_spec[kind]
+ except KeyError:
+ raise ValueError("First argument must be a two-character string "
+ "containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
+ if u is None:
+ def f(*args, **kwargs):
+ return ctx.ellipfun(kind, *args, **kwargs)
+ f.__name__ = kind
+ return f
+ prec = ctx.prec
+ try:
+ ctx.prec += 10
+ u = ctx.convert(u)
+ q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
+ if S is None:
+ v = ctx.one + 0*q*u
+ elif q == ctx.zero:
+ if S[4] == '1': v = ctx.one
+ else: v = getattr(ctx, S[4])(u)
+ v += 0*q*u
+ elif q == ctx.one:
+ if S[5] == '1': v = ctx.one
+ else: v = getattr(ctx, S[5])(u)
+ v += 0*q*u
+ else:
+ t = u / ctx.jtheta(3, 0, q)**2
+ v = ctx.one
+ for a in S[0]: v *= ctx.jtheta(a, 0, q)
+ for b in S[1]: v /= ctx.jtheta(b, 0, q)
+ for c in S[2]: v *= ctx.jtheta(c, t, q)
+ for d in S[3]: v /= ctx.jtheta(d, t, q)
+ finally:
+ ctx.prec = prec
+ return +v
+
+@defun_wrapped
+def kleinj(ctx, tau=None, **kwargs):
+ r"""
+ Evaluates the Klein j-invariant, which is a modular function defined for
+ `\tau` in the upper half-plane as
+
+ .. math ::
+
+ J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}
+
+ where `g_2` and `g_3` are the modular invariants of the Weierstrass
+ elliptic function,
+
+ .. math ::
+
+ g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4}
+
+ g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}.
+
+ An alternative, common notation is that of the j-function
+ `j(\tau) = 1728 J(\tau)`.
+
+ **Plots**
+
+ .. literalinclude :: /plots/kleinj.py
+ .. image :: /plots/kleinj.png
+ .. literalinclude :: /plots/kleinj2.py
+ .. image :: /plots/kleinj2.png
+
+ **Examples**
+
+ Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> tau = 0.625+0.75*j
+ >>> tau = 0.625+0.75*j
+ >>> kleinj(tau)
+ (-0.1507492166511182267125242 + 0.07595948379084571927228948j)
+ >>> kleinj(tau+1)
+ (-0.1507492166511182267125242 + 0.07595948379084571927228948j)
+ >>> kleinj(-1/tau)
+ (-0.1507492166511182267125242 + 0.07595948379084571927228946j)
+
+ The j-function has a famous Laurent series expansion in terms of the nome
+ `\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::
+
+ >>> mp.dps = 15
+ >>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
+ [1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]
+
+ The j-function admits exact evaluation at special algebraic points
+ related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163::
+
+ >>> @extraprec(10)
+ ... def h(n):
+ ... v = (1+sqrt(n)*j)
+ ... if n > 2:
+ ... v *= 0.5
+ ... return v
+ ...
+ >>> mp.dps = 25
+ >>> for n in [1,2,3,7,11,19,43,67,163]:
+ ... n, chop(1728*kleinj(h(n)))
+ ...
+ (1, 1728.0)
+ (2, 8000.0)
+ (3, 0.0)
+ (7, -3375.0)
+ (11, -32768.0)
+ (19, -884736.0)
+ (43, -884736000.0)
+ (67, -147197952000.0)
+ (163, -262537412640768000.0)
+
+ Also at other special points, the j-function assumes explicit
+ algebraic values, e.g.::
+
+ >>> chop(1728*kleinj(j*sqrt(5)))
+ 1264538.909475140509320227
+ >>> identify(cbrt(_)) # note: not simplified
+ '((100+sqrt(13520))/2)'
+ >>> (50+26*sqrt(5))**3
+ 1264538.909475140509320227
+
+ """
+ q = ctx.qfrom(tau=tau, **kwargs)
+ t2 = ctx.jtheta(2,0,q)
+ t3 = ctx.jtheta(3,0,q)
+ t4 = ctx.jtheta(4,0,q)
+ P = (t2**8 + t3**8 + t4**8)**3
+ Q = 54*(t2*t3*t4)**8
+ return P/Q
+
+
+def RF_calc(ctx, x, y, z, r):
+ if y == z: return RC_calc(ctx, x, y, r)
+ if x == z: return RC_calc(ctx, y, x, r)
+ if x == y: return RC_calc(ctx, z, x, r)
+ if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
+ if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
+ return x*y*z
+ if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
+ return ctx.zero
+ xm,ym,zm = x,y,z
+ A0 = Am = (x+y+z)/3
+ Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
+ g = ctx.mpf(0.25)
+ pow4 = ctx.one
+ while 1:
+ xs = ctx.sqrt(xm)
+ ys = ctx.sqrt(ym)
+ zs = ctx.sqrt(zm)
+ lm = xs*ys + xs*zs + ys*zs
+ Am1 = (Am+lm)*g
+ xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
+ if pow4 * Q < abs(Am):
+ break
+ Am = Am1
+ pow4 *= g
+ t = pow4/Am
+ X = (A0-x)*t
+ Y = (A0-y)*t
+ Z = -X-Y
+ E2 = X*Y-Z**2
+ E3 = X*Y*Z
+ return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
+
+def RC_calc(ctx, x, y, r, pv=True):
+ if not (ctx.isnormal(x) and ctx.isnormal(y)):
+ if ctx.isinf(x) or ctx.isinf(y):
+ return 1/(x*y)
+ if y == 0:
+ return ctx.inf
+ if x == 0:
+ return ctx.pi / ctx.sqrt(y) / 2
+ raise ValueError
+ # Cauchy principal value
+ if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
+ return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
+ if x == y:
+ return 1/ctx.sqrt(x)
+ extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
+ ctx.prec += extraprec
+ if ctx._is_real_type(x) and ctx._is_real_type(y):
+ x = ctx._re(x)
+ y = ctx._re(y)
+ a = ctx.sqrt(x/y)
+ if x < y:
+ b = ctx.sqrt(y-x)
+ v = ctx.acos(a)/b
+ else:
+ b = ctx.sqrt(x-y)
+ v = ctx.acosh(a)/b
+ else:
+ sx = ctx.sqrt(x)
+ sy = ctx.sqrt(y)
+ v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
+ ctx.prec -= extraprec
+ return v
+
+def RJ_calc(ctx, x, y, z, p, r, integration):
+ """
+ With integration == 0, computes RJ only using Carlson's algorithm
+ (may be wrong for some values).
+ With integration == 1, uses an initial integration to make sure
+ Carlson's algorithm is correct.
+ With integration == 2, uses only integration.
+ """
+ if not (ctx.isnormal(x) and ctx.isnormal(y) and \
+ ctx.isnormal(z) and ctx.isnormal(p)):
+ if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
+ return x*y*z
+ if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
+ return ctx.zero
+ if not p:
+ return ctx.inf
+ if (not x) + (not y) + (not z) > 1:
+ return ctx.inf
+ # Check conditions and fall back on integration for argument
+ # reduction if needed. The following conditions might be needlessly
+ # restrictive.
+ initial_integral = ctx.zero
+ if integration >= 1:
+ ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0)
+ if not ok:
+ if x == p or y == p or z == p:
+ ok = True
+ if not ok:
+ if p.imag != 0 or p.real >= 0:
+ if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z):
+ ok = True
+ if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z):
+ ok = True
+ if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y):
+ ok = True
+ if not ok or (integration == 2):
+ N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1
+ # Integrate around any singularities
+ if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]):
+ margin = ctx.j
+ elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]):
+ margin = -ctx.j
+ else:
+ margin = 1
+ # Go through the upper half-plane, but low enough that any
+ # parameter starting in the lower plane doesn't cross the
+ # branch cut
+ for t in [x, y, z, p]:
+ if t.imag >= 0 or t.real > 0:
+ continue
+ margin = min(margin, abs(t.imag) * 0.5)
+ margin *= ctx.j
+ N += margin
+ F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p))
+ if integration == 2:
+ return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf])
+ initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N])
+ x += N; y += N; z += N; p += N
+ xm,ym,zm,pm = x,y,z,p
+ A0 = Am = (x + y + z + 2*p)/5
+ delta = (p-x)*(p-y)*(p-z)
+ Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
+ g = ctx.mpf(0.25)
+ pow4 = ctx.one
+ S = 0
+ while 1:
+ sx = ctx.sqrt(xm)
+ sy = ctx.sqrt(ym)
+ sz = ctx.sqrt(zm)
+ sp = ctx.sqrt(pm)
+ lm = sx*sy + sx*sz + sy*sz
+ Am1 = (Am+lm)*g
+ xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
+ dm = (sp+sx) * (sp+sy) * (sp+sz)
+ em = delta * pow4**3 / dm**2
+ if pow4 * Q < abs(Am):
+ break
+ T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
+ S += T
+ pow4 *= g
+ Am = Am1
+ t = pow4 / Am
+ X = (A0-x)*t
+ Y = (A0-y)*t
+ Z = (A0-z)*t
+ P = (-X-Y-Z)/2
+ E2 = X*Y + X*Z + Y*Z - 3*P**2
+ E3 = X*Y*Z + 2*E2*P + 4*P**3
+ E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
+ E5 = X*Y*Z*P**2
+ P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
+ Q = 24024
+ v1 = pow4 * ctx.power(Am, -1.5) * P/Q
+ v2 = 6*S
+ return initial_integral + v1 + v2
+
+@defun
+def elliprf(ctx, x, y, z):
+ r"""
+ Evaluates the Carlson symmetric elliptic integral of the first kind
+
+ .. math ::
+
+ R_F(x,y,z) = \frac{1}{2}
+ \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}
+
+ which is defined for `x,y,z \notin (-\infty,0)`, and with
+ at most one of `x,y,z` being zero.
+
+ For real `x,y,z \ge 0`, the principal square root is taken in the integrand.
+ For complex `x,y,z`, the principal square root is taken as `t \to \infty`
+ and as `t \to 0` non-principal branches are chosen as necessary so as to
+ make the integrand continuous.
+
+ **Examples**
+
+ Some basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> elliprf(0,1,1); pi/2
+ 1.570796326794896619231322
+ 1.570796326794896619231322
+ >>> elliprf(0,1,inf)
+ 0.0
+ >>> elliprf(1,1,1)
+ 1.0
+ >>> elliprf(2,2,2)**2
+ 0.5
+ >>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0)
+ +inf
+ +inf
+ +inf
+ +inf
+
+ Representing complete elliptic integrals in terms of `R_F`::
+
+ >>> m = mpf(0.75)
+ >>> ellipk(m); elliprf(0,1-m,1)
+ 2.156515647499643235438675
+ 2.156515647499643235438675
+ >>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3
+ 1.211056027568459524803563
+ 1.211056027568459524803563
+
+ Some symmetries and argument transformations::
+
+ >>> x,y,z = 2,3,4
+ >>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x)
+ 0.5840828416771517066928492
+ 0.5840828416771517066928492
+ 0.5840828416771517066928492
+ >>> k = mpf(100000)
+ >>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z)
+ 0.001847032121923321253219284
+ 0.001847032121923321253219284
+ >>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x)
+ >>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l)
+ 0.5840828416771517066928492
+ 0.5840828416771517066928492
+ >>> elliprf((x+l)/4,(y+l)/4,(z+l)/4)
+ 0.5840828416771517066928492
+
+ Comparing with numerical integration::
+
+ >>> x,y,z = 2,3,4
+ >>> elliprf(x,y,z)
+ 0.5840828416771517066928492
+ >>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5)
+ >>> q = extradps(25)(quad)
+ >>> q(f, [0,inf])
+ 0.5840828416771517066928492
+
+ With the following arguments, the square root in the integrand becomes
+ discontinuous at `t = 1/2` if the principal branch is used. To obtain
+ the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}`
+ on `t \in (0, 1/2)`::
+
+ >>> x,y,z = j-1,j,0
+ >>> elliprf(x,y,z)
+ (0.7961258658423391329305694 - 1.213856669836495986430094j)
+ >>> -q(f, [0,0.5]) + q(f, [0.5,inf])
+ (0.7961258658423391329305694 - 1.213856669836495986430094j)
+
+ The so-called *first lemniscate constant*, a transcendental number::
+
+ >>> elliprf(0,1,2)
+ 1.31102877714605990523242
+ >>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1])
+ 1.31102877714605990523242
+ >>> gamma('1/4')**2/(4*sqrt(2*pi))
+ 1.31102877714605990523242
+
+ **References**
+
+ 1. [Carlson]_
+ 2. [DLMF]_ Chapter 19. Elliptic Integrals
+
+ """
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ z = ctx.convert(z)
+ prec = ctx.prec
+ try:
+ ctx.prec += 20
+ tol = ctx.eps * 2**10
+ v = RF_calc(ctx, x, y, z, tol)
+ finally:
+ ctx.prec = prec
+ return +v
+
+@defun
+def elliprc(ctx, x, y, pv=True):
+ r"""
+ Evaluates the degenerate Carlson symmetric elliptic integral
+ of the first kind
+
+ .. math ::
+
+ R_C(x,y) = R_F(x,y,y) =
+ \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.
+
+ If `y \in (-\infty,0)`, either a value defined by continuity,
+ or with *pv=True* the Cauchy principal value, can be computed.
+
+ If `x \ge 0, y > 0`, the value can be expressed in terms of
+ elementary functions as
+
+ .. math ::
+
+ R_C(x,y) =
+ \begin{cases}
+ \dfrac{1}{\sqrt{y-x}}
+ \cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\
+ \dfrac{1}{\sqrt{y}}, & x = y \\
+ \dfrac{1}{\sqrt{x-y}}
+ \cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\
+ \end{cases}.
+
+ **Examples**
+
+ Some special values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> elliprc(1,2)*4; elliprc(0,1)*2; +pi
+ 3.141592653589793238462643
+ 3.141592653589793238462643
+ 3.141592653589793238462643
+ >>> elliprc(1,0)
+ +inf
+ >>> elliprc(5,5)**2
+ 0.2
+ >>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf)
+ 0.0
+ 0.0
+ 0.0
+
+ Comparing with the elementary closed-form solution::
+
+ >>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3'))
+ 2.041630778983498390751238
+ 2.041630778983498390751238
+ >>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5'))
+ 1.875180765206547065111085
+ 1.875180765206547065111085
+
+ Comparing with numerical integration::
+
+ >>> q = extradps(25)(quad)
+ >>> elliprc(2, -3, pv=True)
+ 0.3333969101113672670749334
+ >>> elliprc(2, -3, pv=False)
+ (0.3333969101113672670749334 + 0.7024814731040726393156375j)
+ >>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf])
+ (0.3333969101113672670749334 + 0.7024814731040726393156375j)
+
+ """
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ prec = ctx.prec
+ try:
+ ctx.prec += 20
+ tol = ctx.eps * 2**10
+ v = RC_calc(ctx, x, y, tol, pv)
+ finally:
+ ctx.prec = prec
+ return +v
+
+@defun
+def elliprj(ctx, x, y, z, p, integration=1):
+ r"""
+ Evaluates the Carlson symmetric elliptic integral of the third kind
+
+ .. math ::
+
+ R_J(x,y,z,p) = \frac{3}{2}
+ \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.
+
+ Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand
+ is defined so as to be continuous along the path of integration for
+ complex values of the arguments.
+
+ **Examples**
+
+ Some values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> elliprj(1,1,1,1)
+ 1.0
+ >>> elliprj(2,2,2,2); 1/(2*sqrt(2))
+ 0.3535533905932737622004222
+ 0.3535533905932737622004222
+ >>> elliprj(0,1,2,2)
+ 1.067937989667395702268688
+ >>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi))
+ 1.067937989667395702268688
+ >>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4
+ 1.380226776765915172432054
+ 1.380226776765915172432054
+ >>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0)
+ +inf
+ +inf
+ +inf
+ >>> elliprj(1,inf,1,0); elliprj(1,1,1,inf)
+ 0.0
+ 0.0
+ >>> chop(elliprj(1+j, 1-j, 1, 1))
+ 0.8505007163686739432927844
+
+ Scale transformation::
+
+ >>> x,y,z,p = 2,3,4,5
+ >>> k = mpf(100000)
+ >>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p)
+ 4.521291677592745527851168e-9
+ 4.521291677592745527851168e-9
+
+ Comparing with numerical integration::
+
+ >>> elliprj(1,2,3,4)
+ 0.2398480997495677621758617
+ >>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3)))
+ >>> 1.5*quad(f, [0,inf])
+ 0.2398480997495677621758617
+ >>> elliprj(1,2+1j,3,4-2j)
+ (0.216888906014633498739952 + 0.04081912627366673332369512j)
+ >>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3)))
+ >>> 1.5*quad(f, [0,inf])
+ (0.216888906014633498739952 + 0.04081912627366673332369511j)
+
+ """
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ z = ctx.convert(z)
+ p = ctx.convert(p)
+ prec = ctx.prec
+ try:
+ ctx.prec += 20
+ tol = ctx.eps * 2**10
+ v = RJ_calc(ctx, x, y, z, p, tol, integration)
+ finally:
+ ctx.prec = prec
+ return +v
+
+@defun
+def elliprd(ctx, x, y, z):
+ r"""
+ Evaluates the degenerate Carlson symmetric elliptic integral
+ of the third kind or Carlson elliptic integral of the
+ second kind `R_D(x,y,z) = R_J(x,y,z,z)`.
+
+ See :func:`~mpmath.elliprj` for additional information.
+
+ **Examples**
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> elliprd(1,2,3)
+ 0.2904602810289906442326534
+ >>> elliprj(1,2,3,3)
+ 0.2904602810289906442326534
+
+ The so-called *second lemniscate constant*, a transcendental number::
+
+ >>> elliprd(0,2,1)/3
+ 0.5990701173677961037199612
+ >>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1])
+ 0.5990701173677961037199612
+ >>> gamma('3/4')**2/sqrt(2*pi)
+ 0.5990701173677961037199612
+
+ """
+ return ctx.elliprj(x,y,z,z)
+
+@defun
+def elliprg(ctx, x, y, z):
+ r"""
+ Evaluates the Carlson completely symmetric elliptic integral
+ of the second kind
+
+ .. math ::
+
+ R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
+ \frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
+ \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.
+
+ **Examples**
+
+ Evaluation for real and complex arguments::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> elliprg(0,1,1)*4; +pi
+ 3.141592653589793238462643
+ 3.141592653589793238462643
+ >>> elliprg(0,0.5,1)
+ 0.6753219405238377512600874
+ >>> chop(elliprg(1+j, 1-j, 2))
+ 1.172431327676416604532822
+
+ A double integral that can be evaluated in terms of `R_G`::
+
+ >>> x,y,z = 2,3,4
+ >>> def f(t,u):
+ ... st = fp.sin(t); ct = fp.cos(t)
+ ... su = fp.sin(u); cu = fp.cos(u)
+ ... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st
+ ...
+ >>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13)
+ 1.725503028069
+ >>> nprint(elliprg(x,y,z), 13)
+ 1.725503028069
+
+ """
+ x = ctx.convert(x)
+ y = ctx.convert(y)
+ z = ctx.convert(z)
+ zeros = (not x) + (not y) + (not z)
+ if zeros == 3:
+ return (x+y+z)*0
+ if zeros == 2:
+ if x: return 0.5*ctx.sqrt(x)
+ if y: return 0.5*ctx.sqrt(y)
+ return 0.5*ctx.sqrt(z)
+ if zeros == 1:
+ if not z:
+ x, z = z, x
+ def terms():
+ T1 = 0.5*z*ctx.elliprf(x,y,z)
+ T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
+ T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z)
+ return T1,T2,T3
+ return ctx.sum_accurately(terms)
+
+
+@defun_wrapped
+def ellipf(ctx, phi, m):
+ r"""
+ Evaluates the Legendre incomplete elliptic integral of the first kind
+
+ .. math ::
+
+ F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}
+
+ or equivalently
+
+ .. math ::
+
+ F(\phi,m) = \int_0^{\sin \phi}
+ \frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}.
+
+ The function reduces to a complete elliptic integral of the first kind
+ (see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is,
+
+ .. math ::
+
+ F\left(\frac{\pi}{2}, m\right) = K(m).
+
+ In the defining integral, it is assumed that the principal branch
+ of the square root is taken and that the path of integration avoids
+ crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
+ the function extends quasi-periodically as
+
+ .. math ::
+
+ F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.
+
+ **Plots**
+
+ .. literalinclude :: /plots/ellipf.py
+ .. image :: /plots/ellipf.png
+
+ **Examples**
+
+ Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> ellipf(0,1)
+ 0.0
+ >>> ellipf(0,0)
+ 0.0
+ >>> ellipf(1,0); ellipf(2+3j,0)
+ 1.0
+ (2.0 + 3.0j)
+ >>> ellipf(1,1); log(sec(1)+tan(1))
+ 1.226191170883517070813061
+ 1.226191170883517070813061
+ >>> ellipf(pi/2, -0.5); ellipk(-0.5)
+ 1.415737208425956198892166
+ 1.415737208425956198892166
+ >>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1)
+ +inf
+ +inf
+ >>> ellipf(1.5, 1)
+ 3.340677542798311003320813
+
+ Comparing with numerical integration::
+
+ >>> z,m = 0.5, 1.25
+ >>> ellipf(z,m)
+ 0.5287219202206327872978255
+ >>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z])
+ 0.5287219202206327872978255
+
+ The arguments may be complex numbers::
+
+ >>> ellipf(3j, 0.5)
+ (0.0 + 1.713602407841590234804143j)
+ >>> ellipf(3+4j, 5-6j)
+ (1.269131241950351323305741 - 0.3561052815014558335412538j)
+ >>> z,m = 2+3j, 1.25
+ >>> k = 1011
+ >>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m)
+ (4086.184383622179764082821 - 3003.003538923749396546871j)
+ (4086.184383622179764082821 - 3003.003538923749396546871j)
+
+ For `|\Re(z)| < \pi/2`, the function can be expressed as a
+ hypergeometric series of two variables
+ (see :func:`~mpmath.appellf1`)::
+
+ >>> z,m = 0.5, 0.25
+ >>> ellipf(z,m)
+ 0.5050887275786480788831083
+ >>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2)
+ 0.5050887275786480788831083
+
+ """
+ z = phi
+ if not (ctx.isnormal(z) and ctx.isnormal(m)):
+ if m == 0:
+ return z + m
+ if z == 0:
+ return z * m
+ if m == ctx.inf or m == ctx.ninf: return z/m
+ raise ValueError
+ x = z.real
+ ctx.prec += max(0, ctx.mag(x))
+ pi = +ctx.pi
+ away = abs(x) > pi/2
+ if m == 1:
+ if away:
+ return ctx.inf
+ if away:
+ d = ctx.nint(x/pi)
+ z = z-pi*d
+ P = 2*d*ctx.ellipk(m)
+ else:
+ P = 0
+ c, s = ctx.cos_sin(z)
+ return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P
+
+@defun_wrapped
+def ellipe(ctx, *args):
+ r"""
+ Called with a single argument `m`, evaluates the Legendre complete
+ elliptic integral of the second kind, `E(m)`, defined by
+
+ .. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\,
+ \frac{\pi}{2}
+ \,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right).
+
+ Called with two arguments `\phi, m`, evaluates the incomplete elliptic
+ integral of the second kind
+
+ .. math ::
+
+ E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt =
+ \int_0^{\sin z}
+ \frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt.
+
+ The incomplete integral reduces to a complete integral when
+ `\phi = \frac{\pi}{2}`; that is,
+
+ .. math ::
+
+ E\left(\frac{\pi}{2}, m\right) = E(m).
+
+ In the defining integral, it is assumed that the principal branch
+ of the square root is taken and that the path of integration avoids
+ crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`,
+ the function extends quasi-periodically as
+
+ .. math ::
+
+ E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}.
+
+ **Plots**
+
+ .. literalinclude :: /plots/ellipe.py
+ .. image :: /plots/ellipe.png
+
+ **Examples for the complete integral**
+
+ Basic values and limits::
+
+ >>> from mpmath import *
+ >>> mp.dps = 25; mp.pretty = True
+ >>> ellipe(0)
+ 1.570796326794896619231322
+ >>> ellipe(1)
+ 1.0
+ >>> ellipe(-1)
+ 1.910098894513856008952381
+ >>> ellipe(2)
+ (0.5990701173677961037199612 + 0.5990701173677961037199612j)
+ >>> ellipe(inf)
+ (0.0 + +infj)
+ >>> ellipe(-inf)
+ +inf
+
+ Verifying the defining integral and hypergeometric
+ representation::
+
+ >>> ellipe(0.5)
+ 1.350643881047675502520175
+ >>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
+ 1.350643881047675502520175
+ >>> pi/2*hyp2f1(0.5,-0.5,1,0.5)
+ 1.350643881047675502520175
+
+ Evaluation is supported for arbitrary complex `m`::
+
+ >>> ellipe(0.5+0.25j)
+ (1.360868682163129682716687 - 0.1238733442561786843557315j)
+ >>> ellipe(3+4j)
+ (1.499553520933346954333612 - 1.577879007912758274533309j)
+
+ A definite integral::
+
+ >>> quad(ellipe, [0,1])
+ 1.333333333333333333333333
+
+ **Examples for the incomplete integral**
+
+ Basic values and limits::
+
+ >>> ellipe(0,1)
+ 0.0
+ >>> ellipe(0,0)
+ 0.0
+ >>> ellipe(1,0)
+ 1.0
+ >>> ellipe(2+3j,0)
+ (2.0 + 3.0j)
+ >>> ellipe(1,1); sin(1)
+ 0.8414709848078965066525023
+ 0.8414709848078965066525023
+ >>> ellipe(pi/2, -0.5); ellipe(-0.5)
+ 1.751771275694817862026502
+ 1.751771275694817862026502
+ >>> ellipe(pi/2, 1); ellipe(-pi/2, 1)
+ 1.0
+ -1.0
+ >>> ellipe(1.5, 1)
+ 0.9974949866040544309417234
+
+ Comparing with numerical integration::
+
+ >>> z,m = 0.5, 1.25
+ >>> ellipe(z,m)
+ 0.4740152182652628394264449
+ >>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z])
+ 0.4740152182652628394264449
+
+ The arguments may be complex numbers::
+
+ >>> ellipe(3j, 0.5)
+ (0.0 + 7.551991234890371873502105j)
+ >>> ellipe(3+4j, 5-6j)
+ (24.15299022574220502424466 + 75.2503670480325997418156j)
+ >>> k = 35
+ >>> z,m = 2+3j, 1.25
+ >>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m)
+ (48.30138799412005235090766 + 17.47255216721987688224357j)
+ (48.30138799412005235090766 + 17.47255216721987688224357j)
+
+ For `|\Re(z)| < \pi/2`, the function can be expressed as a
+ hypergeometric series of two variables
+ (see :func:`~mpmath.appellf1`)::
+
+ >>> z,m = 0.5, 0.25
+ >>> ellipe(z,m)
+ 0.4950017030164151928870375
+ >>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
+ 0.4950017030164151928870376
+
+ """
+ if len(args) == 1:
+ return ctx._ellipe(args[0])
+ else:
+ phi, m = args
+ z = phi
+ if not (ctx.isnormal(z) and ctx.isnormal(m)):
+ if m == 0:
+ return z + m
+ if z == 0:
+ return z * m
+ if m == ctx.inf or m == ctx.ninf:
+ return ctx.inf
+ raise ValueError
+ x = z.real
+ ctx.prec += max(0, ctx.mag(x))
+ pi = +ctx.pi
+ away = abs(x) > pi/2
+ if away:
+ d = ctx.nint(x/pi)
+ z = z-pi*d
+ P = 2*d*ctx.ellipe(m)
+ else:
+ P = 0
+ def terms():
+ c, s = ctx.cos_sin(z)
+ x = c**2
+ y = 1-m*s**2
+ RF = ctx.elliprf(x, y, 1)
+ RD = ctx.elliprd(x, y, 1)
+ return s*RF, -m*s**3*RD/3
+ return ctx.sum_accurately(terms) + P
+
+@defun_wrapped
+def ellippi(ctx, *args):
+    r"""
+    Called with three arguments `n, \phi, m`, evaluates the Legendre
+    incomplete elliptic integral of the third kind
+
+    .. math ::
+
+        \Pi(n; \phi, m) = \int_0^{\phi}
+        \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
+        \int_0^{\sin \phi}
+        \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.
+
+    Called with two arguments `n, m`, evaluates the complete
+    elliptic integral of the third kind
+    `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.
+
+    In the defining integral, it is assumed that the principal branch
+    of the square root is taken and that the path of integration avoids
+    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
+    the function extends quasi-periodically as
+
+    .. math ::
+
+        \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}.
+
+    **Plots**
+
+    .. literalinclude :: /plots/ellippi.py
+    .. image :: /plots/ellippi.png
+
+    **Examples for the complete integral**
+
+    Some basic values and limits::
+
+        >>> from mpmath import *
+        >>> mp.dps = 25; mp.pretty = True
+        >>> ellippi(0,-5); ellipk(-5)
+        0.9555039270640439337379334
+        0.9555039270640439337379334
+        >>> ellippi(inf,2)
+        0.0
+        >>> ellippi(2,inf)
+        0.0
+        >>> abs(ellippi(1,5))
+        +inf
+        >>> abs(ellippi(0.25,1))
+        +inf
+
+    Evaluation in terms of simpler functions::
+
+        >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
+        1.956616279119236207279727
+        1.956616279119236207279727
+        >>> ellippi(3,0); pi/(2*sqrt(-2))
+        (0.0 - 1.11072073453959156175397j)
+        (0.0 - 1.11072073453959156175397j)
+        >>> ellippi(-3,0); pi/(2*sqrt(4))
+        0.7853981633974483096156609
+        0.7853981633974483096156609
+
+    **Examples for the incomplete integral**
+
+    Basic values and limits::
+
+        >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
+        1.622944760954741603710555
+        1.622944760954741603710555
+        >>> ellippi(1,0,1)
+        0.0
+        >>> ellippi(inf,0,1)
+        0.0
+        >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
+        0.2513040086544925794134591
+        0.2513040086544925794134591
+        >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
+        2.054332933256248668692452
+        2.054332933256248668692452
+        >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
+        135.240868757890840755058
+        135.240868757890840755058
+        >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
+        0.9190227391656969903987269
+        0.9190227391656969903987269
+
+    Complex arguments are supported::
+
+        >>> ellippi(0.5, 5+6j-2*pi, -7-8j)
+        (-0.3612856620076747660410167 + 0.5217735339984807829755815j)
+
+    Some degenerate cases::
+
+        >>> ellippi(1,1)
+        +inf
+        >>> ellippi(1,0)
+        +inf
+        >>> ellippi(1,2,0)
+        +inf
+        >>> ellippi(1,2,1)
+        +inf
+        >>> ellippi(1,0,1)
+        0.0
+
+    """
+    if len(args) == 2:
+        # Complete integral: Pi(n, m) = Pi(n; pi/2, m)
+        n, m = args
+        complete = True
+        z = phi = ctx.pi/2
+    else:
+        n, phi, m = args
+        complete = False
+        z = phi
+    # Handle zeros, infinities and nans among the arguments first
+    if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
+        if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
+            raise ValueError
+        if complete:
+            if m == 0:
+                if n == 1:
+                    return ctx.inf
+                # Pi(n, 0) = pi/(2*sqrt(1-n))
+                return ctx.pi/(2*ctx.sqrt(1-n))
+            if n == 0: return ctx.ellipk(m)
+            if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
+        else:
+            if z == 0: return z
+            if ctx.isinf(n): return ctx.zero
+            if ctx.isinf(m): return ctx.zero
+        if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
+            raise ValueError
+    if complete:
+        if m == 1:
+            if n == 1:
+                return ctx.inf
+            # Sign of the divergence depends on which side of n = 1 we are
+            return -ctx.inf/ctx.sign(n-1)
+        away = False
+    else:
+        x = z.real
+        # Guard digits for the argument reduction below, which subtracts
+        # a possibly large multiple of pi from z
+        ctx.prec += max(0, ctx.mag(x))
+        pi = +ctx.pi
+        away = abs(x) > pi/2
+    if away:
+        # Quasi-periodicity: Pi(n, z + d*pi, m) = Pi(n, z, m) + 2*d*Pi(n, m)
+        d = ctx.nint(x/pi)
+        z = z-pi*d
+        P = 2*d*ctx.ellippi(n,m)
+        if ctx.isinf(P):
+            return ctx.inf
+    else:
+        P = 0
+    def terms():
+        # Carlson symmetric-form reduction:
+        # Pi(n; z, m) = sin(z)*R_F(x, y, 1) + (n/3)*sin(z)^3*R_J(x, y, 1, p)
+        if complete:
+            c, s = ctx.zero, ctx.one
+        else:
+            c, s = ctx.cos_sin(z)
+        x = c**2
+        y = 1-m*s**2
+        RF = ctx.elliprf(x, y, 1)
+        RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
+        return s*RF, n*s**3*RJ/3
+    return ctx.sum_accurately(terms) + P
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/functions/expintegrals.py b/pythonProject/.venv/Lib/site-packages/mpmath/functions/expintegrals.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dee8356c0386819d8f0421fded476ee77229359
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/functions/expintegrals.py
@@ -0,0 +1,425 @@
+from .functions import defun, defun_wrapped
+
+@defun_wrapped
+def _erf_complex(ctx, z):
+ z2 = ctx.square_exp_arg(z, -1)
+ #z2 = -z**2
+ v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2)
+ if not ctx._re(z):
+ v = ctx._im(v)*ctx.j
+ return v
+
+@defun_wrapped
+def _erfc_complex(ctx, z):
+ if ctx.re(z) > 2:
+ z2 = ctx.square_exp_arg(z)
+ nz2 = ctx.fneg(z2, exact=True)
+ v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2)
+ else:
+ v = 1 - ctx._erf_complex(z)
+ if not ctx._re(z):
+ v = 1+ctx._im(v)*ctx.j
+ return v
+
+@defun
+def erf(ctx, z):
+ z = ctx.convert(z)
+ if ctx._is_real_type(z):
+ try:
+ return ctx._erf(z)
+ except NotImplementedError:
+ pass
+ if ctx._is_complex_type(z) and not z.imag:
+ try:
+ return type(z)(ctx._erf(z.real))
+ except NotImplementedError:
+ pass
+ return ctx._erf_complex(z)
+
+@defun
+def erfc(ctx, z):
+ z = ctx.convert(z)
+ if ctx._is_real_type(z):
+ try:
+ return ctx._erfc(z)
+ except NotImplementedError:
+ pass
+ if ctx._is_complex_type(z) and not z.imag:
+ try:
+ return type(z)(ctx._erfc(z.real))
+ except NotImplementedError:
+ pass
+ return ctx._erfc_complex(z)
+
+@defun
+def square_exp_arg(ctx, z, mult=1, reciprocal=False):
+ prec = ctx.prec*4+20
+ if reciprocal:
+ z2 = ctx.fmul(z, z, prec=prec)
+ z2 = ctx.fdiv(ctx.one, z2, prec=prec)
+ else:
+ z2 = ctx.fmul(z, z, prec=prec)
+ if mult != 1:
+ z2 = ctx.fmul(z2, mult, exact=True)
+ return z2
+
+@defun_wrapped
+def erfi(ctx, z):
+ if not z:
+ return z
+ z2 = ctx.square_exp_arg(z)
+ v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2)
+ if not ctx._re(z):
+ v = ctx._im(v)*ctx.j
+ return v
+
+@defun_wrapped
+def erfinv(ctx, x):
+    """Inverse error function on [-1, 1].
+
+    Builds a rough initial approximation (polynomial seed for moderate x,
+    asymptotic formula near the endpoints) and refines it with the
+    context's root finder applied to erf(t) - x.
+    """
+    xre = ctx._re(x)
+    # Reject complex input and anything outside the closed interval [-1, 1]
+    if (xre != x) or (xre < -1) or (xre > 1):
+        return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1")
+    x = xre
+    #if ctx.isnan(x): return x
+    if not x: return x
+    # Exact endpoints map to the infinities
+    if x == 1: return ctx.inf
+    if x == -1: return ctx.ninf
+    if abs(x) < 0.9:
+        # Low-order polynomial seed; accurate enough for findroot to converge
+        a = 0.53728*x**3 + 0.813198*x
+    else:
+        # An asymptotic formula
+        u = ctx.ln(2/ctx.pi/(abs(x)-1)**2)
+        a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2)
+    ctx.prec += 10
+    return ctx.findroot(lambda t: ctx.erf(t)-x, a)
+
+@defun_wrapped
+def npdf(ctx, x, mu=0, sigma=1):
+ sigma = ctx.convert(sigma)
+ return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi))
+
+@defun_wrapped
+def ncdf(ctx, x, mu=0, sigma=1):
+ a = (x-mu)/(sigma*ctx.sqrt(2))
+ if a < 0:
+ return ctx.erfc(-a)/2
+ else:
+ return (1+ctx.erf(a))/2
+
+@defun_wrapped
+def betainc(ctx, a, b, x1=0, x2=1, regularized=False):
+    """Generalized incomplete beta function B(a, b; x1, x2), computed from
+    the 2F1 representation of the incomplete beta integral.
+
+    With ``regularized=True`` the result is divided by beta(a, b).
+    """
+    if x1 == x2:
+        v = 0
+    elif not x1:
+        if x1 == 0 and x2 == 1:
+            # Complete beta function
+            v = ctx.beta(a, b)
+        else:
+            # Lower incomplete beta: x2**a/a * 2F1(a, 1-b; a+1; x2)
+            v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a
+    else:
+        m, d = ctx.nint_distance(a)
+        if m <= 0:
+            # a is at (or extremely close to) a nonpositive integer, where
+            # the division by a below is singular; perturb a or raise the
+            # working precision to compensate for the cancellation.
+            if d < -ctx.prec:
+                h = +ctx.eps
+                ctx.prec *= 2
+                a += h
+            elif d < -4:
+                ctx.prec -= d
+        s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2)
+        s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1)
+        v = (s1 - s2) / a
+    if regularized:
+        v /= ctx.beta(a,b)
+    return v
+
+@defun
+def gammainc(ctx, z, a=0, b=None, regularized=False):
+    """Generalized incomplete gamma function: the integral of
+    t**(z-1)*exp(-t) from a to b, optionally divided by gamma(z)
+    (``regularized=True``).
+
+    Dispatches to the complete gamma, the lower/upper incomplete gamma,
+    or the two-endpoint generalized form depending on (a, b).
+    NOTE: a=None is accepted and treated like the default a=0.
+    """
+    regularized = bool(regularized)
+    z = ctx.convert(z)
+    if a is None:
+        a = ctx.zero
+        lower_modified = False
+    else:
+        a = ctx.convert(a)
+        lower_modified = a != ctx.zero
+    if b is None:
+        b = ctx.inf
+        upper_modified = False
+    else:
+        b = ctx.convert(b)
+        upper_modified = b != ctx.inf
+    # Complete gamma function
+    if not (upper_modified or lower_modified):
+        if regularized:
+            # gamma(z)/gamma(z): 1 for Re(z) > 0; the regularizing factor
+            # diverges at/left of the imaginary axis
+            if ctx.re(z) < 0:
+                return ctx.inf
+            elif ctx.re(z) > 0:
+                return ctx.one
+            else:
+                return ctx.nan
+        return ctx.gamma(z)
+    if a == b:
+        return ctx.zero
+    # Standardize so that Re(a) <= Re(b), flipping the sign of the integral
+    if ctx.re(a) > ctx.re(b):
+        return -ctx.gammainc(z, b, a, regularized)
+    # Generalized gamma
+    if upper_modified and lower_modified:
+        return +ctx._gamma3(z, a, b, regularized)
+    # Upper gamma
+    elif lower_modified:
+        return ctx._upper_gamma(z, a, regularized)
+    # Lower gamma
+    elif upper_modified:
+        return ctx._lower_gamma(z, b, regularized)
+
+@defun
+def _lower_gamma(ctx, z, b, regularized=False):
+    """Lower incomplete gamma function gamma(z, b) via a hypercomb series."""
+    # Pole
+    if ctx.isnpint(z):
+        return type(z)(ctx.inf)
+    # A nonempty G makes hypercomb divide by gamma(z) (regularization)
+    G = [z] * regularized
+    negb = ctx.fneg(b, exact=True)
+    def h(z):
+        # gamma(z, b) = b**z * exp(-b) / z * 1F1(1; 1+z; b)
+        T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
+        return (T1,)
+    return ctx.hypercomb(h, [z])
+
+@defun
+def _upper_gamma(ctx, z, a, regularized=False):
+    """Upper incomplete gamma function Gamma(z, a)."""
+    # Fast integer case, when available
+    if ctx.isint(z):
+        try:
+            if regularized:
+                # Gamma pole
+                if ctx.isnpint(z):
+                    return type(z)(ctx.zero)
+                orig = ctx.prec
+                try:
+                    ctx.prec += 10
+                    return ctx._gamma_upper_int(z, a) / ctx.gamma(z)
+                finally:
+                    ctx.prec = orig
+            else:
+                return ctx._gamma_upper_int(z, a)
+        except NotImplementedError:
+            pass
+    # hypercomb is unable to detect the exact zeros, so handle them here
+    if z == 2 and a == -1:
+        return (z+a)*0
+    if z == 3 and (a == -1-1j or a == -1+1j):
+        return (z+a)*0
+    nega = ctx.fneg(a, exact=True)
+    G = [z] * regularized
+    # Use 2F0 series when possible; fall back to lower gamma representation
+    try:
+        def h(z):
+            # Divergent asymptotic series, summed as far as it converges
+            r = z-1
+            return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
+        return ctx.hypercomb(h, [z], force_series=True)
+    except ctx.NoConvergence:
+        def h(z):
+            # Gamma(z, a) = gamma(z) - gamma_lower(z, a), as two hypercomb terms
+            T1 = [], [1, z-1], [z], G, [], [], 0
+            T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
+            return T1, T2
+        return ctx.hypercomb(h, [z])
+
+@defun
+def _gamma3(ctx, z, a, b, regularized=False):
+    """Generalized incomplete gamma with both endpoints modified:
+    the integral of t**(z-1)*exp(-t) from a to b.
+    """
+    pole = ctx.isnpint(z)
+    if regularized and pole:
+        return ctx.zero
+    try:
+        ctx.prec += 15
+        # We don't know in advance whether it's better to write as a difference
+        # of lower or upper gamma functions, so try both
+        T1 = ctx.gammainc(z, a, regularized=regularized)
+        T2 = ctx.gammainc(z, b, regularized=regularized)
+        R = T1 - T2
+        # Accept the difference only if cancellation lost at most ~10 bits
+        if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
+            return R
+        if not pole:
+            T1 = ctx.gammainc(z, 0, b, regularized=regularized)
+            T2 = ctx.gammainc(z, 0, a, regularized=regularized)
+            R = T1 - T2
+            # May be ok, but should probably at least print a warning
+            # about possible cancellation
+            if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
+                return R
+    finally:
+        ctx.prec -= 15
+    raise NotImplementedError
+
+@defun_wrapped
+def expint(ctx, n, z):
+    """Generalized exponential integral E_n(z)."""
+    # Fast path for integer n with real z, when the context provides one
+    if ctx.isint(n) and ctx._is_real_type(z):
+        try:
+            return ctx._expint_int(n, z)
+        except NotImplementedError:
+            pass
+    if ctx.isnan(n) or ctx.isnan(z):
+        return z*n
+    if z == ctx.inf:
+        return 1/z
+    if z == 0:
+        # integral from 1 to infinity of t^n
+        if ctx.re(n) <= 1:
+            # TODO: reasonable sign of infinity
+            return type(z)(ctx.inf)
+        else:
+            return ctx.one/(n-1)
+    if n == 0:
+        return ctx.exp(-z)/z
+    if n == -1:
+        return ctx.exp(-z)*(z+1)/z**2
+    # General case via the upper incomplete gamma function
+    return z**(n-1) * ctx.gammainc(1-n, z)
+
+@defun_wrapped
+def li(ctx, z, offset=False):
+    """Logarithmic integral li(z) = Ei(ln z); with ``offset=True`` the
+    offset logarithmic integral Li(z) = li(z) - li(2) is returned.
+    """
+    if offset:
+        if z == 2:
+            return ctx.zero
+        return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2)
+    if not z:
+        return z
+    # li has a logarithmic singularity at z = 1
+    if z == 1:
+        return ctx.ninf
+    return ctx.ei(ctx.ln(z))
+
+@defun
+def ei(ctx, z):
+ try:
+ return ctx._ei(z)
+ except NotImplementedError:
+ return ctx._ei_generic(z)
+
+@defun_wrapped
+def _ei_generic(ctx, z):
+    """Generic Ei(z): asymptotic series for large |z|, otherwise the
+    convergent 2F2 series plus Euler's constant and a logarithmic term.
+    """
+    # Note: the following is currently untested because mp and fp
+    # both use special-case ei code
+    if z == ctx.inf:
+        return z
+    if z == ctx.ninf:
+        return ctx.zero
+    if ctx.mag(z) > 1:
+        try:
+            # Asymptotic expansion Ei(z) ~ exp(z)/z * 2F0(1,1;;1/z),
+            # with a +-pi*i correction off the real axis
+            r = ctx.one/z
+            v = ctx.exp(z)*ctx.hyper([1,1],[],r,
+                maxterms=ctx.prec, force_series=True)/z
+            im = ctx._im(z)
+            if im > 0:
+                v += ctx.pi*ctx.j
+            if im < 0:
+                v -= ctx.pi*ctx.j
+            return v
+        except ctx.NoConvergence:
+            pass
+    v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler
+    if ctx._im(z):
+        # 0.5*(log z - log(1/z)) equals log z with the branch treated
+        # consistently on both sides of the cut
+        v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z))
+    else:
+        v += ctx.log(abs(z))
+    return v
+
+@defun
+def e1(ctx, z):
+ try:
+ return ctx._e1(z)
+ except NotImplementedError:
+ return ctx.expint(1, z)
+
+@defun
+def ci(ctx, z):
+ try:
+ return ctx._ci(z)
+ except NotImplementedError:
+ return ctx._ci_generic(z)
+
+@defun_wrapped
+def _ci_generic(ctx, z):
+    """Generic Ci(z) as (Ei(iz) + Ei(-iz))/2, with branch corrections."""
+    if ctx.isinf(z):
+        if z == ctx.inf: return ctx.zero
+        if z == ctx.ninf: return ctx.pi*1j
+    jz = ctx.fmul(ctx.j,z,exact=True)
+    njz = ctx.fneg(jz,exact=True)
+    v = 0.5*(ctx.ei(jz) + ctx.ei(njz))
+    zreal = ctx._re(z)
+    zimag = ctx._im(z)
+    # Constant offsets for the imaginary axis and the left half-plane,
+    # chosen per the standard branch cut along the negative real axis
+    if zreal == 0:
+        if zimag > 0: v += ctx.pi*0.5j
+        if zimag < 0: v -= ctx.pi*0.5j
+    if zreal < 0:
+        if zimag >= 0: v += ctx.pi*1j
+        if zimag < 0: v -= ctx.pi*1j
+    if ctx._is_real_type(z) and zreal > 0:
+        # Real positive z gives a real result; drop rounding noise
+        v = ctx._re(v)
+    return v
+
+@defun
+def si(ctx, z):
+ try:
+ return ctx._si(z)
+ except NotImplementedError:
+ return ctx._si_generic(z)
+
+@defun_wrapped
+def _si_generic(ctx, z):
+    """Generic Si(z): Ei combination away from 0, 1F2 series near 0."""
+    if ctx.isinf(z):
+        if z == ctx.inf: return 0.5*ctx.pi
+        if z == ctx.ninf: return -0.5*ctx.pi
+    # Suffers from cancellation near 0
+    if ctx.mag(z) >= -1:
+        # Si(z) = -i/2*(Ei(iz) - Ei(-iz)) -+ pi/2 depending on half-plane
+        jz = ctx.fmul(ctx.j,z,exact=True)
+        njz = ctx.fneg(jz,exact=True)
+        v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz))
+        zreal = ctx._re(z)
+        if zreal > 0:
+            v -= 0.5*ctx.pi
+        if zreal < 0:
+            v += 0.5*ctx.pi
+        if ctx._is_real_type(z):
+            v = ctx._re(v)
+        return v
+    else:
+        # Convergent hypergeometric series, safe for small |z|
+        return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z)
+
+@defun_wrapped
+def chi(ctx, z):
+    """Hyperbolic cosine integral Chi(z) = (Ei(z) + Ei(-z))/2 plus
+    branch-dependent imaginary constants.
+    """
+    nz = ctx.fneg(z, exact=True)
+    v = 0.5*(ctx.ei(z) + ctx.ei(nz))
+    zreal = ctx._re(z)
+    zimag = ctx._im(z)
+    # Corrections for the upper/lower half-plane and the negative real axis
+    if zimag > 0:
+        v += ctx.pi*0.5j
+    elif zimag < 0:
+        v -= ctx.pi*0.5j
+    elif zreal < 0:
+        v += ctx.pi*1j
+    return v
+
+@defun_wrapped
+def shi(ctx, z):
+    """Hyperbolic sine integral Shi(z)."""
+    # Suffers from cancellation near 0
+    if ctx.mag(z) >= -1:
+        # Shi(z) = (Ei(z) - Ei(-z))/2, with half-plane corrections
+        nz = ctx.fneg(z, exact=True)
+        v = 0.5*(ctx.ei(z) - ctx.ei(nz))
+        zimag = ctx._im(z)
+        if zimag > 0: v -= 0.5j*ctx.pi
+        if zimag < 0: v += 0.5j*ctx.pi
+        return v
+    else:
+        # Convergent series representation near the origin
+        return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)
+
+@defun_wrapped
+def fresnels(ctx, z):
+ if z == ctx.inf:
+ return ctx.mpf(0.5)
+ if z == ctx.ninf:
+ return ctx.mpf(-0.5)
+ return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16)
+
+@defun_wrapped
+def fresnelc(ctx, z):
+ if z == ctx.inf:
+ return ctx.mpf(0.5)
+ if z == ctx.ninf:
+ return ctx.mpf(-0.5)
+ return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/functions/factorials.py b/pythonProject/.venv/Lib/site-packages/mpmath/functions/factorials.py
new file mode 100644
index 0000000000000000000000000000000000000000..9259e40b95bf1c908a7ad98b59bbb33528606b07
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/functions/factorials.py
@@ -0,0 +1,187 @@
+from ..libmp.backend import xrange
+from .functions import defun, defun_wrapped
+
+@defun
+def gammaprod(ctx, a, b, _infsign=False):
+    """Product of gamma(x) over a, divided by the product over b, with
+    pairwise cancellation of gamma poles at nonpositive integers.
+    """
+    a = [ctx.convert(x) for x in a]
+    b = [ctx.convert(x) for x in b]
+    # Split arguments into regular points and gamma poles (npints)
+    poles_num = []
+    poles_den = []
+    regular_num = []
+    regular_den = []
+    for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x)
+    for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x)
+    # One more pole in numerator or denominator gives 0 or inf
+    if len(poles_num) < len(poles_den): return ctx.zero
+    if len(poles_num) > len(poles_den):
+        # Get correct sign of infinity for x+h, h -> 0 from above
+        # XXX: hack, this should be done properly
+        if _infsign:
+            a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num]
+            b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den]
+            return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf
+        else:
+            return ctx.inf
+    # All poles cancel
+    # lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i)
+    p = ctx.one
+    orig = ctx.prec
+    try:
+        # Extra precision guards against cancellation in the products
+        ctx.prec = orig + 15
+        while poles_num:
+            i = poles_num.pop()
+            j = poles_den.pop()
+            p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i)
+        for x in regular_num: p *= ctx.gamma(x)
+        for x in regular_den: p /= ctx.gamma(x)
+    finally:
+        ctx.prec = orig
+    return +p
+
+@defun
+def beta(ctx, x, y):
+    """Beta function B(x, y) = gamma(x)*gamma(y)/gamma(x+y)."""
+    x = ctx.convert(x)
+    y = ctx.convert(y)
+    # Put any infinity in x so only one case analysis is needed
+    if ctx.isinf(y):
+        x, y = y, x
+    if ctx.isinf(x):
+        # Limits of B(+inf, y) along the real y axis
+        if x == ctx.inf and not ctx._im(y):
+            if y == ctx.ninf:
+                return ctx.nan
+            if y > 0:
+                return ctx.zero
+            if ctx.isint(y):
+                return ctx.nan
+            if y < 0:
+                return ctx.sign(ctx.gamma(y)) * ctx.inf
+        return ctx.nan
+    # Extended-precision sum guards against cancellation in x + y
+    xy = ctx.fadd(x, y, prec=2*ctx.prec)
+    return ctx.gammaprod([x, y], [xy])
+
+@defun
+def binomial(ctx, n, k):
+ n1 = ctx.fadd(n, 1, prec=2*ctx.prec)
+ k1 = ctx.fadd(k, 1, prec=2*ctx.prec)
+ nk1 = ctx.fsub(n1, k, prec=2*ctx.prec)
+ return ctx.gammaprod([n1], [k1, nk1])
+
+@defun
+def rf(ctx, x, n):
+ xn = ctx.fadd(x, n, prec=2*ctx.prec)
+ return ctx.gammaprod([xn], [x])
+
+@defun
+def ff(ctx, x, n):
+ x1 = ctx.fadd(x, 1, prec=2*ctx.prec)
+ xn1 = ctx.fadd(ctx.fsub(x, n, prec=2*ctx.prec), 1, prec=2*ctx.prec)
+ return ctx.gammaprod([x1], [xn1])
+
+@defun_wrapped
+def fac2(ctx, x):
+ if ctx.isinf(x):
+ if x == ctx.inf:
+ return x
+ return ctx.nan
+ return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1)
+
+@defun_wrapped
+def barnesg(ctx, z):
+    """Barnes G-function G(z).
+
+    Uses a reflection formula for far-left arguments; otherwise shifts z
+    to the right via G(z+1) = gamma(z)*G(z) and sums an asymptotic
+    (Stirling-like) series for log G.
+    """
+    if ctx.isinf(z):
+        if z == ctx.inf:
+            return z
+        return ctx.nan
+    if ctx.isnan(z):
+        return z
+    # G vanishes at the nonpositive integers
+    if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)):
+        return z*0
+    # Account for size (would not be needed if computing log(G))
+    if abs(z) > 5:
+        ctx.dps += 2*ctx.log(abs(z),2)
+    # Reflection formula
+    if ctx.re(z) < -ctx.dps:
+        w = 1-z
+        pi2 = 2*ctx.pi
+        u = ctx.expjpi(2*w)
+        v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \
+            ctx.j*ctx.polylog(2, u)/pi2
+        v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w
+        if ctx._is_real_type(z):
+            v = ctx._re(v)
+        return v
+    # Estimate terms for asymptotic expansion
+    # TODO: fixme, obviously
+    N = ctx.dps // 2 + 5
+    G = 1
+    # Shift z rightward until the asymptotic series applies, collecting
+    # the compensating gamma factors in G
+    while abs(z) < N or ctx.re(z) < 1:
+        G /= ctx.gamma(z)
+        z += 1
+    z -= 1
+    # Asymptotic series for log G(z+1)
+    s = ctx.mpf(1)/12
+    s -= ctx.log(ctx.glaisher)
+    s += z*ctx.log(2*ctx.pi)/2
+    s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z)
+    s -= 3*z**2/4
+    z2k = z2 = z**2
+    for k in xrange(1, N+1):
+        t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k)
+        if abs(t) < ctx.eps:
+            #print k, N # check how many terms were needed
+            break
+        z2k *= z2
+        s += t
+    #if k == N:
+    #    print "warning: series for barnesg failed to converge", ctx.dps
+    return G*ctx.exp(s)
+
+@defun
+def superfac(ctx, z):
+    """Superfactorial sf(z) = 1!*2!*...*z!, i.e. the Barnes G-function G(z+2)."""
+    return ctx.barnesg(z+2)
+
+@defun_wrapped
+def hyperfac(ctx, z):
+    """Hyperfactorial H(z) = 1^1*2^2*...*z^z, continued analytically as
+    exp(z*loggamma(z+1))/G(z+1) with the Barnes G-function.
+    """
+    # XXX: estimate needed extra bits accurately
+    if z == ctx.inf:
+        return z
+    if abs(z) > 5:
+        extra = 4*int(ctx.log(abs(z),2))
+    else:
+        extra = 0
+    ctx.prec += extra
+    # Negative integers: closed form via H(-n-1) with an alternating sign
+    if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)):
+        n = int(ctx.re(z))
+        h = ctx.hyperfac(-n-1)
+        if ((n+1)//2) & 1:
+            h = -h
+        if ctx._is_complex_type(z):
+            return h + 0j
+        return h
+    zp1 = z+1
+    # Wrong branch cut
+    #v = ctx.gamma(zp1)**z
+    #ctx.prec -= extra
+    #return v / ctx.barnesg(zp1)
+    # z*loggamma(z+1) keeps a continuous branch, unlike gamma(z+1)**z
+    v = ctx.exp(z*ctx.loggamma(zp1))
+    ctx.prec -= extra
+    return v / ctx.barnesg(zp1)
+
+'''
+@defun
+def psi0(ctx, z):
+ """Shortcut for psi(0,z) (the digamma function)"""
+ return ctx.psi(0, z)
+
+@defun
+def psi1(ctx, z):
+ """Shortcut for psi(1,z) (the trigamma function)"""
+ return ctx.psi(1, z)
+
+@defun
+def psi2(ctx, z):
+ """Shortcut for psi(2,z) (the tetragamma function)"""
+ return ctx.psi(2, z)
+
+@defun
+def psi3(ctx, z):
+ """Shortcut for psi(3,z) (the pentagamma function)"""
+ return ctx.psi(3, z)
+'''
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/functions/functions.py b/pythonProject/.venv/Lib/site-packages/mpmath/functions/functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cdf5dd921418db10847ea75b32f8e6dfacdba64
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/functions/functions.py
@@ -0,0 +1,645 @@
+from ..libmp.backend import xrange
+
+class SpecialFunctions(object):
+ """
+ This class implements special functions using high-level code.
+
+ Elementary and some other functions (e.g. gamma function, basecase
+ hypergeometric series) are assumed to be predefined by the context as
+ "builtins" or "low-level" functions.
+ """
+ defined_functions = {}
+
+ # The series for the Jacobi theta functions converge for |q| < 1;
+ # in the current implementation they throw a ValueError for
+ # abs(q) > THETA_Q_LIM
+ THETA_Q_LIM = 1 - 10**-7
+
+ def __init__(self):
+ cls = self.__class__
+ for name in cls.defined_functions:
+ f, wrap = cls.defined_functions[name]
+ cls._wrap_specfun(name, f, wrap)
+
+ self.mpq_1 = self._mpq((1,1))
+ self.mpq_0 = self._mpq((0,1))
+ self.mpq_1_2 = self._mpq((1,2))
+ self.mpq_3_2 = self._mpq((3,2))
+ self.mpq_1_4 = self._mpq((1,4))
+ self.mpq_1_16 = self._mpq((1,16))
+ self.mpq_3_16 = self._mpq((3,16))
+ self.mpq_5_2 = self._mpq((5,2))
+ self.mpq_3_4 = self._mpq((3,4))
+ self.mpq_7_4 = self._mpq((7,4))
+ self.mpq_5_4 = self._mpq((5,4))
+ self.mpq_1_3 = self._mpq((1,3))
+ self.mpq_2_3 = self._mpq((2,3))
+ self.mpq_4_3 = self._mpq((4,3))
+ self.mpq_1_6 = self._mpq((1,6))
+ self.mpq_5_6 = self._mpq((5,6))
+ self.mpq_5_3 = self._mpq((5,3))
+
+ self._misc_const_cache = {}
+
+ self._aliases.update({
+ 'phase' : 'arg',
+ 'conjugate' : 'conj',
+ 'nthroot' : 'root',
+ 'polygamma' : 'psi',
+ 'hurwitz' : 'zeta',
+ #'digamma' : 'psi0',
+ #'trigamma' : 'psi1',
+ #'tetragamma' : 'psi2',
+ #'pentagamma' : 'psi3',
+ 'fibonacci' : 'fib',
+ 'factorial' : 'fac',
+ })
+
+ self.zetazero_memoized = self.memoize(self.zetazero)
+
+ # Default -- do nothing
+ @classmethod
+ def _wrap_specfun(cls, name, f, wrap):
+ setattr(cls, name, f)
+
+ # Optional fast versions of common functions in common cases.
+ # If not overridden, default (generic hypergeometric series)
+ # implementations will be used
+ def _besselj(ctx, n, z): raise NotImplementedError
+ def _erf(ctx, z): raise NotImplementedError
+ def _erfc(ctx, z): raise NotImplementedError
+ def _gamma_upper_int(ctx, z, a): raise NotImplementedError
+ def _expint_int(ctx, n, z): raise NotImplementedError
+ def _zeta(ctx, s): raise NotImplementedError
+ def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError
+ def _ei(ctx, z): raise NotImplementedError
+ def _e1(ctx, z): raise NotImplementedError
+ def _ci(ctx, z): raise NotImplementedError
+ def _si(ctx, z): raise NotImplementedError
+ def _altzeta(ctx, s): raise NotImplementedError
+
+def defun_wrapped(f):
+ SpecialFunctions.defined_functions[f.__name__] = f, True
+ return f
+
+def defun(f):
+ SpecialFunctions.defined_functions[f.__name__] = f, False
+ return f
+
+def defun_static(f):
+ setattr(SpecialFunctions, f.__name__, f)
+ return f
+
+@defun_wrapped
+def cot(ctx, z): return ctx.one / ctx.tan(z)
+
+@defun_wrapped
+def sec(ctx, z): return ctx.one / ctx.cos(z)
+
+@defun_wrapped
+def csc(ctx, z): return ctx.one / ctx.sin(z)
+
+@defun_wrapped
+def coth(ctx, z): return ctx.one / ctx.tanh(z)
+
+@defun_wrapped
+def sech(ctx, z): return ctx.one / ctx.cosh(z)
+
+@defun_wrapped
+def csch(ctx, z): return ctx.one / ctx.sinh(z)
+
+@defun_wrapped
+def acot(ctx, z):
+ if not z:
+ return ctx.pi * 0.5
+ else:
+ return ctx.atan(ctx.one / z)
+
+@defun_wrapped
+def asec(ctx, z): return ctx.acos(ctx.one / z)
+
+@defun_wrapped
+def acsc(ctx, z): return ctx.asin(ctx.one / z)
+
+@defun_wrapped
+def acoth(ctx, z):
+ if not z:
+ return ctx.pi * 0.5j
+ else:
+ return ctx.atanh(ctx.one / z)
+
+
+@defun_wrapped
+def asech(ctx, z): return ctx.acosh(ctx.one / z)
+
+@defun_wrapped
+def acsch(ctx, z): return ctx.asinh(ctx.one / z)
+
+@defun
+def sign(ctx, x):
+ x = ctx.convert(x)
+ if not x or ctx.isnan(x):
+ return x
+ if ctx._is_real_type(x):
+ if x > 0:
+ return ctx.one
+ else:
+ return -ctx.one
+ return x / abs(x)
+
+@defun
+def agm(ctx, a, b=1):
+ if b == 1:
+ return ctx.agm1(a)
+ a = ctx.convert(a)
+ b = ctx.convert(b)
+ return ctx._agm(a, b)
+
+@defun_wrapped
+def sinc(ctx, x):
+ if ctx.isinf(x):
+ return 1/x
+ if not x:
+ return x+1
+ return ctx.sin(x)/x
+
+@defun_wrapped
+def sincpi(ctx, x):
+ if ctx.isinf(x):
+ return 1/x
+ if not x:
+ return x+1
+ return ctx.sinpi(x)/(ctx.pi*x)
+
+# TODO: tests; improve implementation
+@defun_wrapped
+def expm1(ctx, x):
+    """Accurate exp(x) - 1, avoiding cancellation for tiny |x|."""
+    if not x:
+        return ctx.zero
+    # exp(x) - 1 ~ x
+    if ctx.mag(x) < -ctx.prec:
+        # Two-term Taylor series is exact to working precision here
+        return x + 0.5*x**2
+    # TODO: accurately eval the smaller of the real/imag parts
+    return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
+
+@defun_wrapped
+def log1p(ctx, x):
+ if not x:
+ return ctx.zero
+ if ctx.mag(x) < -ctx.prec:
+ return x - 0.5*x**2
+ return ctx.log(ctx.fadd(1, x, prec=2*ctx.prec))
+
+@defun_wrapped
+def powm1(ctx, x, y):
+ mag = ctx.mag
+ one = ctx.one
+ w = x**y - one
+ M = mag(w)
+ # Only moderate cancellation
+ if M > -8:
+ return w
+ # Check for the only possible exact cases
+ if not w:
+ if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
+ return w
+ x1 = x - one
+ magy = mag(y)
+ lnx = ctx.ln(x)
+ # Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
+ if magy + mag(lnx) < -ctx.prec:
+ return lnx*y + (lnx*y)**2/2
+ # TODO: accurately eval the smaller of the real/imag part
+ return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
+
+@defun
+def _rootof1(ctx, k, n):
+ k = int(k)
+ n = int(n)
+ k %= n
+ if not k:
+ return ctx.one
+ elif 2*k == n:
+ return -ctx.one
+ elif 4*k == n:
+ return ctx.j
+ elif 4*k == 3*n:
+ return -ctx.j
+ return ctx.expjpi(2*ctx.mpf(k)/n)
+
+@defun
+def root(ctx, x, n, k=0):
+    """Principal n-th root of x; with k > 0, the k-th of the n branches,
+    obtained by multiplying the principal root by a root of unity.
+    """
+    n = int(n)
+    x = ctx.convert(x)
+    if k:
+        # Special case: there is an exact real root
+        if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0):
+            return -ctx.root(-x, n)
+        # Multiply by root of unity
+        prec = ctx.prec
+        try:
+            ctx.prec += 10
+            v = ctx.root(x, n, 0) * ctx._rootof1(k, n)
+        finally:
+            ctx.prec = prec
+        # Unary + rounds to the restored precision
+        return +v
+    return ctx._nthroot(x, n)
+
+@defun
+def unitroots(ctx, n, primitive=False):
+ gcd = ctx._gcd
+ prec = ctx.prec
+ try:
+ ctx.prec += 10
+ if primitive:
+ v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1]
+ else:
+ # TODO: this can be done *much* faster
+ v = [ctx._rootof1(k,n) for k in range(n)]
+ finally:
+ ctx.prec = prec
+ return [+x for x in v]
+
+@defun
+def arg(ctx, x):
+ x = ctx.convert(x)
+ re = ctx._re(x)
+ im = ctx._im(x)
+ return ctx.atan2(im, re)
+
+@defun
+def fabs(ctx, x):
+ return abs(ctx.convert(x))
+
+@defun
+def re(ctx, x):
+ x = ctx.convert(x)
+ if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers
+ return x.real
+ return x
+
+@defun
+def im(ctx, x):
+ x = ctx.convert(x)
+ if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers
+ return x.imag
+ return ctx.zero
+
+@defun
+def conj(ctx, x):
+ x = ctx.convert(x)
+ try:
+ return x.conjugate()
+ except AttributeError:
+ return x
+
+@defun
+def polar(ctx, z):
+ return (ctx.fabs(z), ctx.arg(z))
+
+@defun_wrapped
+def rect(ctx, r, phi):
+ return r * ctx.mpc(*ctx.cos_sin(phi))
+
+@defun
+def log(ctx, x, b=None):
+ if b is None:
+ return ctx.ln(x)
+ wp = ctx.prec + 20
+ return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp)
+
+@defun
+def log10(ctx, x):
+ return ctx.log(x, 10)
+
+@defun
+def fmod(ctx, x, y):
+ return ctx.convert(x) % ctx.convert(y)
+
+@defun
+def degrees(ctx, x):
+ return x / ctx.degree
+
+@defun
+def radians(ctx, x):
+ return x * ctx.degree
+
+def _lambertw_special(ctx, z, k):
+    """Handle zero, infinite and nan arguments for lambertw."""
+    # W(0,0) = 0; all other branches are singular
+    if not z:
+        if not k:
+            return z
+        # Logarithmic singularity of the nonprincipal branches at 0
+        return ctx.ninf + z
+    if z == ctx.inf:
+        if k == 0:
+            return z
+        else:
+            # Nonprincipal branches diverge with a 2*pi*k*i offset
+            return z + 2*k*ctx.pi*ctx.j
+    if z == ctx.ninf:
+        return (-z) + (2*k+1)*ctx.pi*ctx.j
+    # Some kind of nan or complex inf/nan?
+    return ctx.ln(z)
+
+import math
+import cmath
+
+def _lambertw_approx_hybrid(z, k):
+ imag_sign = 0
+ if hasattr(z, "imag"):
+ x = float(z.real)
+ y = z.imag
+ if y:
+ imag_sign = (-1) ** (y < 0)
+ y = float(y)
+ else:
+ x = float(z)
+ y = 0.0
+ imag_sign = 0
+ # hack to work regardless of whether Python supports -0.0
+ if not y:
+ y = 0.0
+ z = complex(x,y)
+ if k == 0:
+ if -4.0 < y < 4.0 and -1.0 < x < 2.5:
+ if imag_sign:
+ # Taylor series in upper/lower half-plane
+ if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j))
+ if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j))
+ if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j))
+ if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j))
+ # Taylor series near -1
+ if x < -0.5:
+ if imag_sign >= 0:
+ return (-0.318+1.34j) + (-0.697-0.593j)*(z+1)
+ else:
+ return (-0.318-1.34j) + (-0.697+0.593j)*(z+1)
+ # return real type
+ r = -0.367879441171442
+ if (not imag_sign) and x > r:
+ z = x
+ # Singularity near -1/e
+ if x < -0.2:
+ return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
+ # Taylor series near 0
+ if x < 0.5: return z
+ # Simple linear approximation
+ return 0.2 + 0.3*z
+ if (not imag_sign) and x > 0.0:
+ L1 = math.log(x); L2 = math.log(L1)
+ else:
+ L1 = cmath.log(z); L2 = cmath.log(L1)
+ elif k == -1:
+ # return real type
+ r = -0.367879441171442
+ if (not imag_sign) and r < x < 0.0:
+ z = x
+ if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2:
+ return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r)
+ if (not imag_sign) and -0.2 <= x < 0.0:
+ L1 = math.log(-x)
+ return L1 - math.log(-L1)
+ else:
+ if imag_sign == -1 and (not y) and x < 0.0:
+ L1 = cmath.log(z) - 3.1415926535897932j
+ else:
+ L1 = cmath.log(z) - 6.2831853071795865j
+ L2 = cmath.log(L1)
+ return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2)
+
+def _lambertw_series(ctx, z, k, tol):
+ """
+ Return rough approximation for W_k(z) from an asymptotic series,
+ sufficiently accurate for the Halley iteration to converge to
+ the correct value.
+ """
+ magz = ctx.mag(z)
+ if (-10 < magz < 900) and (-1000 < k < 1000):
+ # Near the branch point at -1/e
+ if magz < 1 and abs(z+0.36787944117144) < 0.05:
+ if k == 0 or (k == -1 and ctx._im(z) >= 0) or \
+ (k == 1 and ctx._im(z) < 0):
+ delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)])
+ cancellation = -ctx.mag(delta)
+ ctx.prec += cancellation
+ # Use series given in Corless et al.
+ p = ctx.sqrt(2*(ctx.e*z+1))
+ ctx.prec -= cancellation
+ u = {0:ctx.mpf(-1), 1:ctx.mpf(1)}
+ a = {0:ctx.mpf(2), 1:ctx.mpf(-1)}
+ if k != 0:
+ p = -p
+ s = ctx.zero
+ # The series converges, so we could use it directly, but unless
+ # *extremely* close, it is better to just use the first few
+ # terms to get a good approximation for the iteration
+ for l in xrange(max(2,cancellation)):
+ if l not in u:
+ a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l))
+ u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1)
+ term = u[l] * p**l
+ s += term
+ if ctx.mag(term) < -tol:
+ return s, True
+ l += 1
+ ctx.prec += cancellation//2
+ return s, False
+ if k == 0 or k == -1:
+ return _lambertw_approx_hybrid(z, k), False
+ if k == 0:
+ if magz < -1:
+ return z*(1-z), False
+ L1 = ctx.ln(z)
+ L2 = ctx.ln(L1)
+ elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0):
+ L1 = ctx.ln(-z)
+ return L1 - ctx.ln(-L1), False
+ else:
+ # This holds both as z -> 0 and z -> inf.
+ # Relative error is O(1/log(z)).
+ L1 = ctx.ln(z) + 2j*ctx.pi*k
+ L2 = ctx.ln(L1)
+ return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False
+
@defun
def lambertw(ctx, z, k=0):
    """
    Evaluate branch k of the Lambert W function, i.e. solve
    w*exp(w) = z for w.

    Non-normal arguments (zero, infinity, NaN) are dispatched to
    _lambertw_special; otherwise an initial approximation from
    _lambertw_series is refined, when necessary, with Halley iteration.
    Working precision is raised temporarily and restored before return.
    """
    z = ctx.convert(z)
    k = int(k)
    if not ctx.isnormal(z):
        return _lambertw_special(ctx, z, k)
    prec = ctx.prec
    # Guard digits; large |k| shifts the branch and needs extra precision.
    ctx.prec += 20 + ctx.mag(k or 1)
    wp = ctx.prec
    tol = wp - 5
    w, done = _lambertw_series(ctx, z, k, tol)
    if not done:
        # Use Halley iteration to solve w*exp(w) = z
        two = ctx.mpf(2)
        for i in xrange(100):
            ew = ctx.exp(w)
            wew = w*ew
            wewz = wew-z
            wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
            if ctx.mag(wn-w) <= ctx.mag(wn) - tol:
                w = wn
                break
            else:
                w = wn
        else:
            # BUGFIX: the original checked `if i == 100` after the loop,
            # which can never be true since xrange(100) stops at i == 99,
            # so the warning was unreachable. The for-else clause runs
            # exactly when the loop exhausted without a convergent break.
            ctx.warn("Lambert W iteration failed to converge for z = %s" % z)
    ctx.prec = prec
    return +w
+
@defun_wrapped
def bell(ctx, n, x=1):
    """Evaluate the Bell polynomial B_n(x) (the Bell number for x = 1)."""
    x = ctx.convert(x)
    # Degree 0: B_0 = 1, except a NaN argument propagates.
    if not n:
        return x if ctx.isnan(x) else type(x)(1)
    # Infinities and NaNs follow the power rule x**n.
    if ctx.isnan(n) or ctx.isnan(x) or ctx.isinf(n) or ctx.isinf(x):
        return x**n
    # Small-degree closed forms.
    if n == 1:
        return x
    if n == 2:
        return x*(x+1)
    # At x = 0 the value degenerates to sinc(pi*n).
    if x == 0:
        return ctx.sincpi(n)
    # General case: sum_{k>=1} k**n x**k / k!  divided by exp(x).
    return _polyexp(ctx, n, x, True) / ctx.exp(x)
+
def _polyexp(ctx, n, x, extra=False):
    """
    Accurately sum the series  sum_{k>=1} k**n * x**k / k!.

    With extra=True a leading sincpi(n) term is prepended (used by
    bell() to handle degenerate cases correctly).
    """
    def _gen():
        if extra:
            yield ctx.sincpi(n)
        term = x      # invariant: term == x**index / index!
        index = 1
        while True:
            yield index**n * term
            index += 1
            term = term*x/index
    return ctx.sum_accurately(_gen, check_step=4)
+
@defun_wrapped
def polyexp(ctx, s, z):
    """Evaluate the polyexponential E_s(z) = sum_{k>=1} k**s z**k / k!."""
    # Infinities and NaNs follow the power rule.
    if ctx.isnan(s) or ctx.isnan(z) or ctx.isinf(s) or ctx.isinf(z):
        return z**s
    # Degenerate and small-order closed forms.
    if z == 0:
        return z*s
    if s == 0:
        return ctx.expm1(z)
    if s == 1:
        return z*ctx.exp(z)
    if s == 2:
        return z*(z+1)*ctx.exp(z)
    return _polyexp(ctx, s, z)
+
@defun_wrapped
def cyclotomic(ctx, n, z):
    """
    Evaluate the n-th cyclotomic polynomial Phi_n(z) via the Moebius
    divisor product over (1 - z**d)**mu(n/d) for d | n.

    Factors vanishing at roots of unity would make the product 0/0;
    matched zero/pole factors are cancelled pairwise using the limit
    (1-z**a)/(1-z**b) -> a/b as the factors tend to zero.
    """
    n = int(n)
    if n < 0:
        raise ValueError("n cannot be negative")
    result = ctx.one
    # Trivial small cases.
    if n == 0:
        return result
    if n == 1:
        return z - result
    if n == 2:
        return z + result
    zero_scale = 1        # product of exponents d of vanishing zero factors
    pole_scale = 1        # product of exponents d of vanishing pole factors
    vanishing_zeros = 0
    vanishing_poles = 0
    for d in range(1, n+1):
        if n % d:
            continue
        mu = ctx.moebius(n//d)
        # powm1 yields an exact zero only when z**d is exactly 1, which
        # is what makes singular-factor detection reliable.
        factor = -ctx.powm1(z, d)
        if factor:
            result *= factor**mu
        elif mu == 1:
            zero_scale *= d
            vanishing_zeros += 1
        elif mu == -1:
            pole_scale *= d
            vanishing_poles += 1
    if vanishing_zeros:
        if vanishing_zeros > vanishing_poles:
            # Unmatched zero factors: the value is exactly zero.
            result *= 0
        else:
            # Cancel matched zero/pole pairs via the a/b limit.
            result *= zero_scale
            result /= pole_scale
    return result
+
@defun
def mangoldt(ctx, n):
    r"""
    Evaluates the von Mangoldt function `\Lambda(n) = \log p`
    if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise.

    **Examples**

    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> [mangoldt(n) for n in range(-2,3)]
    [0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321]
    >>> mangoldt(6)
    0.0
    >>> mangoldt(7)
    1.945910149055313305105353
    >>> mangoldt(8)
    0.6931471805599453094172321
    >>> fsum(mangoldt(n) for n in range(101))
    94.04531122935739224600493
    >>> fsum(mangoldt(n) for n in range(10001))
    10013.39669326311478372032

    """
    n = int(n)
    # n < 2 (including negatives) is never a prime power.
    if n < 2:
        return ctx.zero
    if n % 2 == 0:
        # Must be a power of two
        # n & (n-1) clears the lowest set bit; zero iff n is 2**k.
        if n & (n-1) == 0:
            return +ctx.ln2
        else:
            return ctx.zero
    # TODO: the following could be generalized into a perfect
    # power testing function
    # ---
    # Look for a small factor
    # If a small prime p divides n, then n is a prime power iff n is a
    # pure power of p: divide out p repeatedly and check the remainders.
    for p in (3,5,7,11,13,17,19,23,29,31):
        if not n % p:
            q, r = n // p, 0
            while q > 1:
                q, r = divmod(q, p)
                if r:
                    return ctx.zero
            return ctx.ln(p)
    if ctx.isprime(n):
        return ctx.ln(n)
    # Obviously, we could use arbitrary-precision arithmetic for this...
    # NOTE(review): n**(1./k) uses ~15-digit floats; near the 10**30
    # guard the rounded candidate root could conceivably be off —
    # confirm precision margin before raising this bound.
    if n > 10**30:
        raise NotImplementedError
    # Trial k-th roots; terminates because n**(1/k) eventually drops
    # below 2, hitting the p < 2 exit.
    k = 2
    while 1:
        p = int(n**(1./k) + 0.5)
        if p < 2:
            return ctx.zero
        if p ** k == n:
            if ctx.isprime(p):
                return ctx.ln(p)
        k += 1
+
@defun
def stirling1(ctx, n, k, exact=False):
    """Stirling number of the first kind; with exact=True return an int
    instead of an mpf."""
    value = ctx._stirling1(int(n), int(k))
    if exact:
        return int(value)
    return ctx.mpf(value)
+
@defun
def stirling2(ctx, n, k, exact=False):
    """Stirling number of the second kind; with exact=True return an int
    instead of an mpf."""
    value = ctx._stirling2(int(n), int(k))
    if exact:
        return int(value)
    return ctx.mpf(value)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/identification.py b/pythonProject/.venv/Lib/site-packages/mpmath/identification.py
new file mode 100644
index 0000000000000000000000000000000000000000..226f62d3fe9cacedbd9ba2b1e66ff0ad017fa604
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/identification.py
@@ -0,0 +1,844 @@
+"""
+Implements the PSLQ algorithm for integer relation detection,
+and derivative algorithms for constant recognition.
+"""
+
+from .libmp.backend import xrange
+from .libmp import int_types, sqrt_fixed
+
+# round to nearest integer (can be done more elegantly...)
def round_fixed(x, prec):
    """Round the fixed-point value x (prec fractional bits) to the nearest
    integer multiple of 2**prec; ties round toward +infinity."""
    half = 1 << (prec - 1)
    return ((x + half) >> prec) << prec
+
class IdentificationMethods(object):
    # Empty mixin namespace: pslq, findpoly and identify are attached to
    # this class at the bottom of the module, exposing them as context
    # methods.
    pass
+
+
def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that

    .. math ::

        |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol}

    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.

    **Examples**

    Find rational approximations for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> pslq([-1, pi], tol=0.01)
        [22, 7]
        >>> pslq([-1, pi], tol=0.001)
        [355, 113]
        >>> mpf(22)/7; mpf(355)/113; +pi
        3.14285714285714
        3.14159292035398
        3.14159265358979

    Pi is not a rational number with denominator less than 1000::

        >>> pslq([-1, pi])
        >>>

    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::

        >>> p, q = pslq([-1, pi], maxcoeff=10**12)
        >>> print(p); print(q)
        238410049439
        75888275702
        >>> mpf(p)/q
        3.14159265358979

    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::

        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]

    **Machin formulas**

    A famous formula for `\pi` is Machin's,

    .. math ::

        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239

    There are actually infinitely many formulas of this type. Two
    others are

    .. math ::

        \frac{\pi}{4} = \operatorname{acot} 1

        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443

    We can easily verify the formulas using the PSLQ algorithm::

        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]

    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::

        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]

    We get better luck by removing linearly dependent terms::

        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]

    In other words, we found the following formula::

        >>> 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> +pi
        3.14159265358979323846264338328

    **Algorithm**

    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html

    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """

    n = len(x)
    if n < 2:
        raise ValueError("n cannot be less than 2")

    # At too low precision, the algorithm becomes meaningless
    prec = ctx.prec
    if prec < 53:
        raise ValueError("prec cannot be less than 53")

    if verbose and prec // max(2,n) < 5:
        print("Warning: precision for PSLQ may be too low")

    target = int(prec * 0.75)

    if tol is None:
        tol = ctx.mpf(2)**(-target)
    else:
        tol = ctx.convert(tol)

    extra = 60
    prec += extra

    if verbose:
        print("PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol)))

    tol = ctx.to_fixed(tol, prec)
    assert tol

    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x]

    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol//100:
        if verbose:
            print("STOPPING: (one number is too small)")
        return None

    # FIX: the following initialization block (gamma constant, A/B/H
    # matrices, partial-norm vector s) was corrupted/missing in the
    # checked-in copy; restored from the reference implementation.
    g = sqrt_fixed((4<<prec)//3, prec)
    A = {}
    B = {}
    H = {}
    # Initialization
    # step 1
    for i in xrange(1, n+1):
        for j in xrange(1, n+1):
            A[i,j] = B[i,j] = (i==j) << prec
            H[i,j] = 0
    # step 2
    s = [None] + [0] * n
    for k in xrange(1, n+1):
        t = 0
        for j in xrange(k, n+1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)
    t = s[1]
    y = x[:]
    for k in xrange(1, n+1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t
    # step 3
    for i in xrange(1, n+1):
        for j in xrange(i+1, n):
            H[i,j] = 0
        if i <= n-1:
            if s[i]:
                H[i,i] = (s[i+1] << prec) // s[i]
            else:
                H[i,i] = 0
        for j in range(1, i):
            sjj1 = s[j]*s[j+1]
            if sjj1:
                H[i,j] = ((-y[i]*y[j])<<prec)//sjj1
            else:
                H[i,j] = 0
    # step 4 (initial size reduction of H; restored span)
    for i in xrange(2, n+1):
        for j in xrange(i-1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j,j]:
                t = round_fixed((H[i,j] << prec)//H[j,j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + ((t*y[i]) >> prec)
            for k in xrange(1, j+1):
                H[i,k] = H[i,k] - (t*H[j,k] >> prec)
            for k in xrange(1, n+1):
                A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                B[k,j] = B[k,j] + (t*B[k,i] >> prec)
    # Main algorithm
    for REP in range(maxsteps):
        # Step 1: pick the row m maximizing gamma**i * |H[i,i]|
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i,i]
            sz = (g**i * abs(h)) >> (prec*(i-1))
            if sz > szmax:
                m = i
                szmax = sz
        # Step 2: swap rows/columns m and m+1
        y[m], y[m+1] = y[m+1], y[m]
        for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i]
        for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i]
        for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m]
        # Step 3: Givens-style rotation to restore lower-trapezoidal H
        if m <= n - 2:
            t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m,m] << prec) // t0
            t2 = (H[m,m+1] << prec) // t0
            for i in xrange(m, n+1):
                t3 = H[i,m]
                t4 = H[i,m+1]
                H[i,m] = (t1*t3+t2*t4) >> prec
                H[i,m+1] = (-t2*t3+t1*t4) >> prec
        # Step 4: size reduction
        for i in xrange(m+1, n+1):
            for j in xrange(min(i-1, m+1), 0, -1):
                try:
                    t = round_fixed((H[i,j] << prec)//H[j,j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t*y[i]) >> prec)
                for k in xrange(1, j+1):
                    H[i,k] = H[i,k] - (t*H[j,k] >> prec)
                for k in xrange(1, n+1):
                    A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                    B[k,j] = B[k,j] + (t*B[k,i] >> prec)
        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.
        best_err = maxcoeff<<prec
        for i in xrange(1, n+1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
                range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print("FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1)))
                    return vec
            best_err = min(err, best_err)
        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2*prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = ctx.inf
        if verbose:
            print("%i/%i:  Error: %8s   Norm: %s" % \
                (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm))
        if norm >= maxcoeff:
            break
    if verbose:
        print("CANCELLING after step %i/%i." % (REP, maxsteps))
        print("Could not find an integer relation. Norm bound: %s" % norm)
    return None
+
def findpoly(ctx, x, n=1, **kwargs):
    r"""
    ``findpoly(x, n)`` returns the coefficients of an integer
    polynomial `P` of degree at most `n` such that `P(x) \approx 0`.
    If no polynomial having `x` as a root can be found,
    :func:`~mpmath.findpoly` returns ``None``.

    :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with
    the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ...,
    `[1, x, x^2, .., x^n]` as input. Keyword arguments given to
    :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In
    particular, you can specify a tolerance for `P(x)` with ``tol``
    and a maximum permitted coefficient size with ``maxcoeff``.

    For large values of `n`, it is recommended to run :func:`~mpmath.findpoly`
    at high precision; preferably 50 digits or more.

    **Examples**

    By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear
    polynomial with a rational root::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> findpoly(0.7)
        [-10, 7]

    The generated coefficient list is valid input to ``polyval`` and
    ``polyroots``::

        >>> nprint(polyval(findpoly(phi, 2), phi), 1)
        -2.0e-16
        >>> for r in polyroots(findpoly(phi, 2)):
        ...     print(r)
        ...
        -0.618033988749895
        1.61803398874989

    Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are
    solutions to quadratic equations. As we find here, `1+\sqrt 2`
    is a root of the polynomial `x^2 - 2x - 1`::

        >>> findpoly(1+sqrt(2), 2)
        [1, -2, -1]
        >>> findroot(lambda x: x**2 - 2*x - 1, 1)
        2.4142135623731

    Despite only containing square roots, the following number results
    in a polynomial of degree 4::

        >>> findpoly(sqrt(2)+sqrt(3), 4)
        [1, 0, -10, 0, 1]

    In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of
    `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of
    lower degree having `r` as a root does not exist. Given sufficient
    precision, :func:`~mpmath.findpoly` will usually find the correct
    minimal polynomial of a given algebraic number.

    **Non-algebraic numbers**

    If :func:`~mpmath.findpoly` fails to find a polynomial with given
    coefficient size and tolerance constraints, that means no such
    polynomial exists.

    We can verify that `\pi` is not an algebraic number of degree 3 with
    coefficients less than 1000::

        >>> mp.dps = 15
        >>> findpoly(pi, 3)
        >>>

    It is always possible to find an algebraic approximation of a number
    using one (or several) of the following methods:

    1. Increasing the permitted degree
    2. Allowing larger coefficients
    3. Reducing the tolerance

    One example of each method is shown below::

        >>> mp.dps = 15
        >>> findpoly(pi, 4)
        [95, -545, 863, -183, -298]
        >>> findpoly(pi, 3, maxcoeff=10000)
        [836, -1734, -2658, -457]
        >>> findpoly(pi, 3, tol=1e-7)
        [-4, 22, -29, -2]

    It is unknown whether Euler's constant is transcendental (or even
    irrational). We can use :func:`~mpmath.findpoly` to check that if is
    an algebraic number, its minimal polynomial must have degree
    at least 7 and a coefficient of magnitude at least 1000000::

        >>> mp.dps = 200
        >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000)
        >>>

    Note that the high precision and strict tolerance is necessary
    for such high-degree runs, since otherwise unwanted low-accuracy
    approximations will be detected. It may also be necessary to set
    maxsteps high to prevent a premature exit (before the coefficient
    bound has been reached). Running with ``verbose=True`` to get an
    idea what is happening can be useful.
    """
    x = ctx.mpf(x)
    if n < 1:
        raise ValueError("n cannot be less than 1")
    # x = 0 is trivially the root of P(x) = x.
    if x == 0:
        return [1, 0]
    # Build [1, x, x**2, ..., x**n]; an integer relation among these
    # powers is exactly an integer polynomial with x as a root.
    xs = [ctx.mpf(1)]
    for i in range(1,n+1):
        xs.append(x**i)
    a = ctx.pslq(xs, **kwargs)
    if a is not None:
        # pslq returns constant-term-first; reverse to leading-term-first.
        return a[::-1]
    # Implicit None return when no relation is found.
+
def fracgcd(p, q):
    """Reduce p/q to lowest terms; return an int when the reduced
    denominator is 1, otherwise the tuple (p, q)."""
    # Euclidean algorithm for gcd(p, q).
    a, b = p, q
    while b:
        a, b = b, a % b
    if a != 1:
        p //= a
        q //= a
    if q == 1:
        return p
    return p, q
+
def pslqstring(r, constants):
    """Format a PSLQ relation r = [q, p1, p2, ...] as a string expressing
    the first vector entry as a rational linear combination of the named
    constants."""
    q, coeffs = r[0], r[1:]
    terms = []
    for i, p in enumerate(coeffs):
        if not p:
            continue
        z = fracgcd(-p, q)
        name = constants[i][1]
        # The dummy constant '1' contributes a bare rational term.
        suffix = '' if name == '1' else '*' + name
        if isinstance(z, int_types):
            # Integer coefficient; parenthesize negative values.
            term = (str(z) if z > 0 else "(%s)" % z) + suffix
        else:
            # Rational coefficient as a (numerator, denominator) pair.
            term = ("(%s/%s)" % z) + suffix
        terms.append(term)
    joined = ' + '.join(terms)
    if '+' in joined or '*' in joined:
        joined = '(' + joined + ')'
    return joined or '0'
+
def prodstring(r, constants):
    """Format a PSLQ relation among logarithms as a product/quotient of
    (possibly fractional) powers of the named constants."""
    q, exponents = r[0], r[1:]
    num = []
    den = []
    for i, p in enumerate(exponents):
        if not p:
            continue
        z = fracgcd(-p, q)
        name = constants[i][1]
        if isinstance(z, int_types):
            factor = name if abs(z) == 1 else '%s**%s' % (name, abs(z))
            # Negative exponent: factor belongs in the denominator.
            target = den if z < 0 else num
        else:
            factor = '%s**(%s/%s)' % (name, abs(z[0]), z[1])
            target = den if z[0] < 0 else num
        target.append(factor)
    num_s = '*'.join(num)
    den_s = '*'.join(den)
    if num_s and den_s:
        return "(%s)/(%s)" % (num_s, den_s)
    if num_s:
        return num_s
    if den_s:
        return "1/(%s)" % den_s
+
def quadraticstring(ctx, t, a, b, c):
    """Given that t approximates a root of a + b*x + c*x**2, return a
    closed-form string for whichever quadratic root lies nearest t."""
    # Normalize the leading coefficient to be positive.
    if c < 0:
        a, b, c = -a, -b, -c
    disc = b**2 - 4*a*c
    root = ctx.sqrt(disc)
    u1 = (-b + root)/(2*c)
    u2 = (-b - root)/(2*c)
    # Choose the sign of the radical matching the root closest to t.
    if abs(u1 - t) < abs(u2 - t):
        if b:
            return '((%s+sqrt(%s))/%s)' % (-b, disc, 2*c)
        return '(sqrt(%s)/%s)' % (-4*a*c, 2*c)
    if b:
        return '((%s-sqrt(%s))/%s)' % (-b, disc, 2*c)
    return '(-sqrt(%s)/%s)' % (-4*a*c, 2*c)
+
# Transformation y = f(x,c), with inverse function x = f(y,c)
# The third entry indicates whether the transformation is
# redundant when c = 1
# ('$y' and '$c' are placeholders that identify() later substitutes with
# the recognized formula and the constant's name, respectively; the
# string is the inverse expression recovering x from y.)
transforms = [
  (lambda ctx,x,c: x*c, '$y/$c', 0),
  (lambda ctx,x,c: x/c, '$c*$y', 1),
  (lambda ctx,x,c: c/x, '$c/$y', 0),
  (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0),
  (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1),
  (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0),
  (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1),
  (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1),
  (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1),
  (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0),
  (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1),
  (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0),
  (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1),
  (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1),
  (lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1),
  (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0),
  (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1),
  (lambda ctx,x,c: ctx.exp(c/x), '$c/log($y)', 0),
  (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1),
  (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1),
  (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0),
  (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0),
  (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1),
  (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0),
  (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1),
  (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1),
  (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0),
]
+
def identify(ctx, x, constants=[], tol=None, maxcoeff=1000, full=False,
    verbose=False):
    r"""
    Given a real number `x`, ``identify(x)`` attempts to find an exact
    formula for `x`. This formula is returned as a string. If no match
    is found, ``None`` is returned. With ``full=True``, a list of
    matching formulas is returned.

    As a simple example, :func:`~mpmath.identify` will find an algebraic
    formula for the golden ratio::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> identify(phi)
        '((1+sqrt(5))/2)'

    :func:`~mpmath.identify` can identify simple algebraic numbers and simple
    combinations of given base constants, as well as certain basic
    transformations thereof. More specifically, :func:`~mpmath.identify`
    looks for the following:

        1. Fractions
        2. Quadratic algebraic numbers
        3. Rational linear combinations of the base constants
        4. Any of the above after first transforming `x` into `f(x)` where
           `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either
           directly or with `x` or `f(x)` multiplied or divided by one of
           the base constants
        5. Products of fractional powers of the base constants and
           small integers

    Base constants can be given as a list of strings representing mpmath
    expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical
    values and use the original strings for the output), or as a dict of
    formula:value pairs.

    In order not to produce spurious results, :func:`~mpmath.identify` should
    be used with high precision; preferably 50 digits or more.

    **Examples**

    Simple identifications can be performed safely at standard
    precision. Here the default recognition of rational, algebraic,
    and exp/log of algebraic numbers is demonstrated::

        >>> mp.dps = 15
        >>> identify(0.22222222222222222)
        '(2/9)'
        >>> identify(1.9662210973805663)
        'sqrt(((24+sqrt(48))/8))'
        >>> identify(4.1132503787829275)
        'exp((sqrt(8)/2))'
        >>> identify(0.881373587019543)
        'log(((2+sqrt(8))/2))'

    By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard
    precision it finds a not too useful approximation. At slightly
    increased precision, this approximation is no longer accurate
    enough and :func:`~mpmath.identify` more correctly returns ``None``::

        >>> identify(pi)
        '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))'
        >>> mp.dps = 30
        >>> identify(pi)
        >>>

    Numbers such as `\pi`, and simple combinations of user-defined
    constants, can be identified if they are provided explicitly::

        >>> identify(3*pi-2*e, ['pi', 'e'])
        '(3*pi + (-2)*e)'

    Here is an example using a dict of constants. Note that the
    constants need not be "atomic"; :func:`~mpmath.identify` can just
    as well express the given number in terms of expressions
    given by formulas::

        >>> identify(pi+e, {'a':pi+2, 'b':2*e})
        '((-2) + 1*a + (1/2)*b)'

    Next, we attempt some identifications with a set of base constants.
    It is necessary to increase the precision a bit.

        >>> mp.dps = 50
        >>> base = ['sqrt(2)','pi','log(2)']
        >>> identify(0.25, base)
        '(1/4)'
        >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base)
        '(2*sqrt(2) + 3*pi + (5/7)*log(2))'
        >>> identify(exp(pi+2), base)
        'exp((2 + 1*pi))'
        >>> identify(1/(3+sqrt(2)), base)
        '((3/7) + (-1/7)*sqrt(2))'
        >>> identify(sqrt(2)/(3*pi+4), base)
        'sqrt(2)/(4 + 3*pi)'
        >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base)
        '5**(1/3)*pi*log(2)**2'

    An example of an erroneous solution being found when too low
    precision is used::

        >>> mp.dps = 15
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))'
        >>> mp.dps = 50
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '1/(3*pi + (-4)*e + 2*sqrt(2))'

    **Finding approximate solutions**

    The tolerance ``tol`` defaults to 3/4 of the working precision.
    Lowering the tolerance is useful for finding approximate matches.
    We can for example try to generate approximations for pi::

        >>> mp.dps = 15
        >>> identify(pi, tol=1e-2)
        '(22/7)'
        >>> identify(pi, tol=1e-3)
        '(355/113)'
        >>> identify(pi, tol=1e-10)
        '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))'

    With ``full=True``, and by supplying a few base constants,
    ``identify`` can generate almost endless lists of approximations
    for any number (the output below has been truncated to show only
    the first few)::

        >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True):
        ...     print(p)
        ...  # doctest: +ELLIPSIS
        e/log((6 + (-4/3)*e))
        (3**3*5*e*catalan**2)/(2*7**2)
        sqrt(((-13) + 1*e + 22*catalan))
        log(((-6) + 24*e + 4*catalan)/e)
        exp(catalan*((-1/5) + (8/15)*e))
        catalan*(6 + (-6)*e + 15*catalan)
        sqrt((5 + 26*e + (-3)*catalan))/e
        e*sqrt(((-27) + 2*e + 25*catalan))
        log(((-1) + (-11)*e + 59*catalan))
        ((3/20) + (21/20)*e + (3/20)*catalan)
        ...

    The numerical values are roughly as close to `\pi` as permitted by the
    specified tolerance:

        >>> e/log(6-4*e/3)
        3.14157719846001
        >>> 135*e*catalan**2/98
        3.14166950419369
        >>> sqrt(e-13+22*catalan)
        3.14158000062992
        >>> log(24*e-6+4*catalan)-1
        3.14158791577159

    **Symbolic processing**

    The output formula can be evaluated as a Python expression.
    Note however that if fractions (like '2/3') are present in
    the formula, Python's :func:`~mpmath.eval()` may erroneously perform
    integer division. Note also that the output is not necessarily
    in the algebraically simplest form::

        >>> identify(sqrt(2))
        '(sqrt(8)/2)'

    As a solution to both problems, consider using SymPy's
    :func:`~mpmath.sympify` to convert the formula into a symbolic expression.
    SymPy can be used to pretty-print or further simplify the formula
    symbolically::

        >>> from sympy import sympify # doctest: +SKIP
        >>> sympify(identify(sqrt(2))) # doctest: +SKIP
        2**(1/2)

    Sometimes :func:`~mpmath.identify` can simplify an expression further than
    a symbolic algorithm::

        >>> from sympy import simplify # doctest: +SKIP
        >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)') # doctest: +SKIP
        >>> x # doctest: +SKIP
        (3/2 - 5**(1/2)/2)**(-1/2)
        >>> x = simplify(x) # doctest: +SKIP
        >>> x # doctest: +SKIP
        2/(6 - 2*5**(1/2))**(1/2)
        >>> mp.dps = 30 # doctest: +SKIP
        >>> x = sympify(identify(x.evalf(30))) # doctest: +SKIP
        >>> x # doctest: +SKIP
        1/2 + 5**(1/2)/2

    (In fact, this functionality is available directly in SymPy as the
    function :func:`~mpmath.nsimplify`, which is essentially a wrapper for
    :func:`~mpmath.identify`.)

    **Miscellaneous issues and limitations**

    The input `x` must be a real number. All base constants must be
    positive real numbers and must not be rationals or rational linear
    combinations of each other.

    The worst-case computation time grows quickly with the number of
    base constants. Already with 3 or 4 base constants,
    :func:`~mpmath.identify` may require several seconds to finish. To search
    for relations among a large number of constants, you should
    consider using :func:`~mpmath.pslq` directly.

    The extended transformations are applied to x, not the constants
    separately. As a result, ``identify`` will for example be able to
    recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but
    not ``2*exp(pi)+3``. It will be able to recognize the latter if
    ``exp(pi)`` is given explicitly as a base constant.

    """

    # NOTE: the mutable default `constants=[]` is safe here because the
    # parameter is only ever rebound below, never mutated in place.
    solutions = []

    def addsolution(s):
        if verbose: print("Found: ", s)
        solutions.append(s)

    x = ctx.mpf(x)

    # Further along, x will be assumed positive
    if x == 0:
        if full: return ['0']
        else: return '0'
    if x < 0:
        # Recurse on -x and negate the resulting formula(s).
        sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose)
        if sol is None:
            return sol
        if full:
            return ["-(%s)"%s for s in sol]
        else:
            return "-(%s)" % sol

    if tol:
        tol = ctx.mpf(tol)
    else:
        # Default tolerance: a bit looser than full working precision.
        tol = ctx.eps**0.7
    M = maxcoeff

    if constants:
        if isinstance(constants, dict):
            constants = [(ctx.mpf(v), name) for (name, v) in sorted(constants.items())]
        else:
            # Strings are evaluated in a namespace of all ctx attributes.
            namespace = dict((name, getattr(ctx,name)) for name in dir(ctx))
            constants = [(eval(p, namespace), p) for p in constants]
    else:
        constants = []

    # We always want to find at least rational terms
    # NOTE(review): the loop unpacking `(name, value)` is swapped relative
    # to the (value, name) tuples built above, so this membership test
    # compares 1 against the *name* strings and the '1' constant is
    # effectively always prepended — confirm against upstream before
    # changing.
    if 1 not in [value for (name, value) in constants]:
        constants = [(ctx.mpf(1), '1')] + constants

    # PSLQ with simple algebraic and functional transformations
    for ft, ftn, red in transforms:
        for c, cn in constants:
            # Skip transforms marked redundant for the dummy constant 1.
            if red and cn == '1':
                continue
            t = ft(ctx,x,c)
            # Prevent exponential transforms from wreaking havoc
            if abs(t) > M**2 or abs(t) < tol:
                continue
            # Linear combination of base constants
            r = ctx.pslq([t] + [a[0] for a in constants], tol, M)
            s = None
            if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
                s = pslqstring(r, constants)
            # Quadratic algebraic numbers
            else:
                q = ctx.pslq([ctx.one, t, t**2], tol, M)
                if q is not None and len(q) == 3 and q[2]:
                    aa, bb, cc = q
                    if max(abs(aa),abs(bb),abs(cc)) <= M:
                        s = quadraticstring(ctx,t,aa,bb,cc)
            if s:
                # Substitute the found formula back through the inverse
                # transform template.
                if cn == '1' and ('/$c' in ftn):
                    s = ftn.replace('$y', s).replace('/$c', '')
                else:
                    s = ftn.replace('$y', s).replace('$c', cn)
                addsolution(s)
                if not full: return solutions[0]

    if verbose:
        print(".")

    # Check for a direct multiplicative formula
    if x != 1:
        # Allow fractional powers of fractions
        ilogs = [2,3,5,7]
        # Watch out for existing fractional powers of fractions
        logs = []
        for a, s in constants:
            if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs):
                logs.append((ctx.ln(a), s))
        logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs
        r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M)
        if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
            addsolution(prodstring(r, logs))
            if not full: return solutions[0]

    if full:
        return sorted(solutions, key=len)
    else:
        return None
+
# Attach the module-level functions to the mixin class so they become
# available as context methods (ctx.pslq, ctx.findpoly, ctx.identify).
IdentificationMethods.pslq = pslq
IdentificationMethods.findpoly = findpoly
IdentificationMethods.identify = identify
+
+
if __name__ == '__main__':
    # Run the docstring examples as a self-test when executed directly.
    import doctest
    doctest.testmod()
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb513df4efe8fcfee147411e427f9aece1f8e5f6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9423a0972148883d2d22f71bb2ec6a36390bfd7d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/linalg.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..018da60e59c670967412cd5fd8c471c0f8800331
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/matrices/__pycache__/matrices.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4fe51a0c25226e73a0ea4f28a5a3fa80ba7383d3
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1aafc4158dfab1911387770d38face8b29c4da2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_levin.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc4dc2fb5273137d46f6d148a13918bc9bf4fad1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d35480d9d793560e5f3d23a8f6d8f4a0a1ac4d5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..962e093a4a212865e1cc05639f26971934784472
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c540390d860c16f84636eebe01c322b4b8c98ccc
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_ode.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1aeb6dc824f88ef5ba054fbc7b57a875b6f0015b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8a28cc4302d49ec6830ea0b0d152962c9e2b49c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_power.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a966de27c1a52c611619930ff4e8b347e4ea224
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_quad.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e74a2d85df34f36e8b80a7120d459e17e89cc9f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09e01025f48274f40c62eb5756009fe5eff8f2f8
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_special.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6442fbbcea25c8af36ebcb37024487ea283e5655
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_str.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea8a2e0a722816ddbcf0472bfae346ca56487578
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_summation.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..443c31bc52e75871175ee2e34958e48abed70fa5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_trig.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fce9bfe186cf9dc4635851b6bd1b7e845469e72a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c55a25376069848cb538397571b67bdda42585c8
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/mpmath/tests/__pycache__/torture.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/extratest_gamma.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/extratest_gamma.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a27b61b19aba0abf6bdb8adc16fc1ec7689b67a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/extratest_gamma.py
@@ -0,0 +1,215 @@
+from mpmath import *
+from mpmath.libmp import ifac
+
+import sys
+if "-dps" in sys.argv:
+ maxdps = int(sys.argv[sys.argv.index("-dps")+1])
+else:
+ maxdps = 1000
+
+raise_ = "-raise" in sys.argv
+
+errcount = 0
+
+def check(name, func, z, y):
+ global errcount
+ try:
+ x = func(z)
+ except:
+ errcount += 1
+ if raise_:
+ raise
+ print()
+ print(name)
+ print("EXCEPTION")
+ import traceback
+ traceback.print_tb(sys.exc_info()[2])
+ print()
+ return
+ xre = x.real
+ xim = x.imag
+ yre = y.real
+ yim = y.imag
+ tol = eps*8
+ err = 0
+ if abs(xre-yre) > abs(yre)*tol:
+ err = 1
+ print()
+ print("Error! %s (re = %s, wanted %s, err=%s)" % (name, nstr(xre,10), nstr(yre,10), nstr(abs(xre-yre))))
+ errcount += 1
+ if raise_:
+ raise SystemExit
+ if abs(xim-yim) > abs(yim)*tol:
+ err = 1
+ print()
+ print("Error! %s (im = %s, wanted %s, err=%s)" % (name, nstr(xim,10), nstr(yim,10), nstr(abs(xim-yim))))
+ errcount += 1
+ if raise_:
+ raise SystemExit
+ if not err:
+ sys.stdout.write("%s ok; " % name)
+
+def testcase(case):
+ z, result = case
+ print("Testing z =", z)
+ mp.dps = 1010
+ z = eval(z)
+ mp.dps = maxdps + 50
+ if result is None:
+ gamma_val = gamma(z)
+ loggamma_val = loggamma(z)
+ factorial_val = factorial(z)
+ rgamma_val = rgamma(z)
+ else:
+ loggamma_val = eval(result)
+ gamma_val = exp(loggamma_val)
+ factorial_val = z * gamma_val
+ rgamma_val = 1/gamma_val
+ for dps in [5, 10, 15, 25, 40, 60, 90, 120, 250, 600, 1000, 1800, 3600]:
+ if dps > maxdps:
+ break
+ mp.dps = dps
+ print("dps = %s" % dps)
+ check("gamma", gamma, z, gamma_val)
+ check("rgamma", rgamma, z, rgamma_val)
+ check("loggamma", loggamma, z, loggamma_val)
+ check("factorial", factorial, z, factorial_val)
+ print()
+ mp.dps = 15
+
+testcases = []
+
+# Basic values
+for n in list(range(1,200)) + list(range(201,2000,17)):
+ testcases.append(["%s" % n, None])
+for n in range(-200,200):
+ testcases.append(["%s+0.5" % n, None])
+ testcases.append(["%s+0.37" % n, None])
+
+testcases += [\
+["(0.1+1j)", None],
+["(-0.1+1j)", None],
+["(0.1-1j)", None],
+["(-0.1-1j)", None],
+["10j", None],
+["-10j", None],
+["100j", None],
+["10000j", None],
+["-10000000j", None],
+["(10**100)*j", None],
+["125+(10**100)*j", None],
+["-125+(10**100)*j", None],
+["(10**10)*(1+j)", None],
+["(10**10)*(-1+j)", None],
+["(10**100)*(1+j)", None],
+["(10**100)*(-1+j)", None],
+["(1.5-1j)", None],
+["(6+4j)", None],
+["(4+1j)", None],
+["(3.5+2j)", None],
+["(1.5-1j)", None],
+["(-6-4j)", None],
+["(-2-3j)", None],
+["(-2.5-2j)", None],
+["(4+1j)", None],
+["(3+3j)", None],
+["(2-2j)", None],
+["1", "0"],
+["2", "0"],
+["3", "log(2)"],
+["4", "log(6)"],
+["5", "log(24)"],
+["0.5", "log(pi)/2"],
+["1.5", "log(sqrt(pi)/2)"],
+["2.5", "log(3*sqrt(pi)/4)"],
+["mpf('0.37')", None],
+["0.25", "log(sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))))"],
+["-0.4", None],
+["mpf('-1.9')", None],
+["mpf('12.8')", None],
+["mpf('33.7')", None],
+["mpf('95.2')", None],
+["mpf('160.3')", None],
+["mpf('2057.8')", None],
+["25", "log(ifac(24))"],
+["80", "log(ifac(79))"],
+["500", "log(ifac(500-1))"],
+["8000", "log(ifac(8000-1))"],
+["8000.5", None],
+["mpf('8000.1')", None],
+["mpf('1.37e10')", None],
+["mpf('1.37e10')*(1+j)", None],
+["mpf('1.37e10')*(-1+j)", None],
+["mpf('1.37e10')*(-1-j)", None],
+["mpf('1.37e10')*(-1+j)", None],
+["mpf('1.37e100')", None],
+["mpf('1.37e100')*(1+j)", None],
+["mpf('1.37e100')*(-1+j)", None],
+["mpf('1.37e100')*(-1-j)", None],
+["mpf('1.37e100')*(-1+j)", None],
+["3+4j",
+"mpc('"
+"-1.7566267846037841105306041816232757851567066070613445016197619371316057169"
+"4723618263960834804618463052988607348289672535780644470689771115236512106002"
+"5970873471563240537307638968509556191696167970488390423963867031934333890838"
+"8009531786948197210025029725361069435208930363494971027388382086721660805397"
+"9163230643216054580167976201709951509519218635460317367338612500626714783631"
+"7498317478048447525674016344322545858832610325861086336204591943822302971823"
+"5161814175530618223688296232894588415495615809337292518431903058265147109853"
+"1710568942184987827643886816200452860853873815413367529829631430146227470517"
+"6579967222200868632179482214312673161276976117132204633283806161971389519137"
+"1243359764435612951384238091232760634271570950240717650166551484551654327989"
+"9360285030081716934130446150245110557038117075172576825490035434069388648124"
+"6678152254554001586736120762641422590778766100376515737713938521275749049949"
+"1284143906816424244705094759339932733567910991920631339597278805393743140853"
+"391550313363278558195609260225928','"
+"4.74266443803465792819488940755002274088830335171164611359052405215840070271"
+"5906813009373171139767051863542508136875688550817670379002790304870822775498"
+"2809996675877564504192565392367259119610438951593128982646945990372179860613"
+"4294436498090428077839141927485901735557543641049637962003652638924845391650"
+"9546290137755550107224907606529385248390667634297183361902055842228798984200"
+"9591180450211798341715874477629099687609819466457990642030707080894518168924"
+"6805549314043258530272479246115112769957368212585759640878745385160943755234"
+"9398036774908108204370323896757543121853650025529763655312360354244898913463"
+"7115955702828838923393113618205074162812089732064414530813087483533203244056"
+"0546577484241423134079056537777170351934430586103623577814746004431994179990"
+"5318522939077992613855205801498201930221975721246498720895122345420698451980"
+"0051215797310305885845964334761831751370672996984756815410977750799748813563"
+"8784405288158432214886648743541773208808731479748217023665577802702269468013"
+"673719173759245720489020315779001')"],
+]
+
+for z in [4, 14, 34, 64]:
+ testcases.append(["(2+j)*%s/3" % z, None])
+ testcases.append(["(-2+j)*%s/3" % z, None])
+ testcases.append(["(1+2*j)*%s/3" % z, None])
+ testcases.append(["(2-j)*%s/3" % z, None])
+ testcases.append(["(20+j)*%s/3" % z, None])
+ testcases.append(["(-20+j)*%s/3" % z, None])
+ testcases.append(["(1+20*j)*%s/3" % z, None])
+ testcases.append(["(20-j)*%s/3" % z, None])
+ testcases.append(["(200+j)*%s/3" % z, None])
+ testcases.append(["(-200+j)*%s/3" % z, None])
+ testcases.append(["(1+200*j)*%s/3" % z, None])
+ testcases.append(["(200-j)*%s/3" % z, None])
+
+# Poles
+for n in [0,1,2,3,4,25,-1,-2,-3,-4,-20,-21,-50,-51,-200,-201,-20000,-20001]:
+ for t in ['1e-5', '1e-20', '1e-100', '1e-10000']:
+ testcases.append(["fadd(%s,'%s',exact=True)" % (n, t), None])
+ testcases.append(["fsub(%s,'%s',exact=True)" % (n, t), None])
+ testcases.append(["fadd(%s,'%sj',exact=True)" % (n, t), None])
+ testcases.append(["fsub(%s,'%sj',exact=True)" % (n, t), None])
+
+if __name__ == "__main__":
+ from timeit import default_timer as clock
+ tot_time = 0.0
+ for case in testcases:
+ t1 = clock()
+ testcase(case)
+ t2 = clock()
+ print("Test time:", t2-t1)
+ print()
+ tot_time += (t2-t1)
+ print("Total time:", tot_time)
+ print("Errors:", errcount)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/extratest_zeta.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/extratest_zeta.py
new file mode 100644
index 0000000000000000000000000000000000000000..582b3d9cbd956b9cdf94309e0e718371fe716101
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/extratest_zeta.py
@@ -0,0 +1,30 @@
+from mpmath import zetazero
+from timeit import default_timer as clock
+
+def test_zetazero():
+ cases = [\
+ (399999999, 156762524.6750591511),
+ (241389216, 97490234.2276711795),
+ (526196239, 202950727.691229534),
+ (542964976, 209039046.578535272),
+ (1048449112, 388858885.231056486),
+ (1048449113, 388858885.384337406),
+ (1048449114, 388858886.002285122),
+ (1048449115, 388858886.00239369),
+ (1048449116, 388858886.690745053)
+ ]
+ for n, v in cases:
+ print(n, v)
+ t1 = clock()
+ ok = zetazero(n).ae(complex(0.5,v))
+ t2 = clock()
+ print("ok =", ok, ("(time = %s)" % round(t2-t1,3)))
+ print("Now computing two huge zeros (this may take hours)")
+ print("Computing zetazero(8637740722917)")
+ ok = zetazero(8637740722917).ae(complex(0.5,2124447368584.39296466152))
+ print("ok =", ok)
+ ok = zetazero(8637740722918).ae(complex(0.5,2124447368584.39298170604))
+ print("ok =", ok)
+
+if __name__ == "__main__":
+ test_zetazero()
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/runtests.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/runtests.py
new file mode 100644
index 0000000000000000000000000000000000000000..70fde272fdc0e05e3d8951edddca380bd36139ab
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/runtests.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+
+"""
+python runtests.py -py
+ Use py.test to run tests (more useful for debugging)
+
+python runtests.py -coverage
+ Generate test coverage report. Statistics are written to /tmp
+
+python runtests.py -profile
+ Generate profile stats (this is much slower)
+
+python runtests.py -nogmpy
+ Run tests without using GMPY even if it exists
+
+python runtests.py -strict
+ Enforce extra tests in normalize()
+
+python runtests.py -local
+ Insert '../..' at the beginning of sys.path to use local mpmath
+
+python runtests.py -skip ...
+ Skip tests from the listed modules
+
+Additional arguments are used to filter the tests to run. Only files that have
+one of the arguments in their name are executed.
+
+"""
+
+import sys, os, traceback
+
+profile = False
+if "-profile" in sys.argv:
+ sys.argv.remove('-profile')
+ profile = True
+
+coverage = False
+if "-coverage" in sys.argv:
+ sys.argv.remove('-coverage')
+ coverage = True
+
+if "-nogmpy" in sys.argv:
+ sys.argv.remove('-nogmpy')
+ os.environ['MPMATH_NOGMPY'] = 'Y'
+
+if "-strict" in sys.argv:
+ sys.argv.remove('-strict')
+ os.environ['MPMATH_STRICT'] = 'Y'
+
+if "-local" in sys.argv:
+ sys.argv.remove('-local')
+ importdir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]),
+ '../..'))
+else:
+ importdir = ''
+
+# TODO: add a flag for this
+testdir = ''
+
+def testit(importdir='', testdir='', exit_on_fail=False):
+ """Run all tests in testdir while importing from importdir."""
+ if importdir:
+ sys.path.insert(1, importdir)
+ if testdir:
+ sys.path.insert(1, testdir)
+ import os.path
+ import mpmath
+ print("mpmath imported from %s" % os.path.dirname(mpmath.__file__))
+ print("mpmath backend: %s" % mpmath.libmp.backend.BACKEND)
+ print("mpmath mp class: %s" % repr(mpmath.mp))
+ print("mpmath version: %s" % mpmath.__version__)
+ print("Python version: %s" % sys.version)
+ print("")
+ if "-py" in sys.argv:
+ sys.argv.remove('-py')
+ import py
+ py.test.cmdline.main()
+ else:
+ import glob
+ from timeit import default_timer as clock
+ modules = []
+ args = sys.argv[1:]
+ excluded = []
+ if '-skip' in args:
+ excluded = args[args.index('-skip')+1:]
+ args = args[:args.index('-skip')]
+ # search for tests in directory of this file if not otherwise specified
+ if not testdir:
+ pattern = os.path.dirname(sys.argv[0])
+ else:
+ pattern = testdir
+ if pattern:
+ pattern += '/'
+ pattern += 'test*.py'
+ # look for tests (respecting specified filter)
+ for f in glob.glob(pattern):
+ name = os.path.splitext(os.path.basename(f))[0]
+ # If run as a script, only run tests given as args, if any are given
+ if args and __name__ == "__main__":
+ ok = False
+ for arg in args:
+ if arg in name:
+ ok = True
+ break
+ if not ok:
+ continue
+ elif name in excluded:
+ continue
+ module = __import__(name)
+ priority = module.__dict__.get('priority', 100)
+ if priority == 666:
+ modules = [[priority, name, module]]
+ break
+ modules.append([priority, name, module])
+ # execute tests
+ modules.sort()
+ tstart = clock()
+ for priority, name, module in modules:
+ print(name)
+ for f in sorted(module.__dict__.keys()):
+ if f.startswith('test_'):
+ if coverage and ('numpy' in f):
+ continue
+ sys.stdout.write(" " + f[5:].ljust(25) + " ")
+ t1 = clock()
+ try:
+ module.__dict__[f]()
+ except:
+ etype, evalue, trb = sys.exc_info()
+ if etype in (KeyboardInterrupt, SystemExit):
+ raise
+ print("")
+ print("TEST FAILED!")
+ print("")
+ traceback.print_exc()
+ if exit_on_fail:
+ return
+ t2 = clock()
+ print("ok " + " " + ("%.7f" % (t2-t1)) + " s")
+ tend = clock()
+ print("")
+ print("finished tests in " + ("%.2f" % (tend-tstart)) + " seconds")
+ # clean sys.path
+ if importdir:
+ sys.path.remove(importdir)
+ if testdir:
+ sys.path.remove(testdir)
+
+if __name__ == '__main__':
+ if profile:
+ import cProfile
+ cProfile.run("testit('%s', '%s')" % (importdir, testdir), sort=1)
+ elif coverage:
+ import trace
+ tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
+ trace=0, count=1)
+ tracer.run('testit(importdir, testdir)')
+ r = tracer.results()
+ r.write_results(show_missing=True, summary=True, coverdir="/tmp")
+ else:
+ testit(importdir, testdir)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_basic_ops.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_basic_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..f577c7fa9f9734876b6767f6cc21144df305d82f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_basic_ops.py
@@ -0,0 +1,451 @@
+import mpmath
+from mpmath import *
+from mpmath.libmp import *
+import random
+import sys
+
+try:
+ long = long
+except NameError:
+ long = int
+
+def test_type_compare():
+ assert mpf(2) == mpc(2,0)
+ assert mpf(0) == mpc(0)
+ assert mpf(2) != mpc(2, 0.00001)
+ assert mpf(2) == 2.0
+ assert mpf(2) != 3.0
+ assert mpf(2) == 2
+ assert mpf(2) != '2.0'
+ assert mpc(2) != '2.0'
+
+def test_add():
+ assert mpf(2.5) + mpf(3) == 5.5
+ assert mpf(2.5) + 3 == 5.5
+ assert mpf(2.5) + 3.0 == 5.5
+ assert 3 + mpf(2.5) == 5.5
+ assert 3.0 + mpf(2.5) == 5.5
+ assert (3+0j) + mpf(2.5) == 5.5
+ assert mpc(2.5) + mpf(3) == 5.5
+ assert mpc(2.5) + 3 == 5.5
+ assert mpc(2.5) + 3.0 == 5.5
+ assert mpc(2.5) + (3+0j) == 5.5
+ assert 3 + mpc(2.5) == 5.5
+ assert 3.0 + mpc(2.5) == 5.5
+ assert (3+0j) + mpc(2.5) == 5.5
+
+def test_sub():
+ assert mpf(2.5) - mpf(3) == -0.5
+ assert mpf(2.5) - 3 == -0.5
+ assert mpf(2.5) - 3.0 == -0.5
+ assert 3 - mpf(2.5) == 0.5
+ assert 3.0 - mpf(2.5) == 0.5
+ assert (3+0j) - mpf(2.5) == 0.5
+ assert mpc(2.5) - mpf(3) == -0.5
+ assert mpc(2.5) - 3 == -0.5
+ assert mpc(2.5) - 3.0 == -0.5
+ assert mpc(2.5) - (3+0j) == -0.5
+ assert 3 - mpc(2.5) == 0.5
+ assert 3.0 - mpc(2.5) == 0.5
+ assert (3+0j) - mpc(2.5) == 0.5
+
+def test_mul():
+ assert mpf(2.5) * mpf(3) == 7.5
+ assert mpf(2.5) * 3 == 7.5
+ assert mpf(2.5) * 3.0 == 7.5
+ assert 3 * mpf(2.5) == 7.5
+ assert 3.0 * mpf(2.5) == 7.5
+ assert (3+0j) * mpf(2.5) == 7.5
+ assert mpc(2.5) * mpf(3) == 7.5
+ assert mpc(2.5) * 3 == 7.5
+ assert mpc(2.5) * 3.0 == 7.5
+ assert mpc(2.5) * (3+0j) == 7.5
+ assert 3 * mpc(2.5) == 7.5
+ assert 3.0 * mpc(2.5) == 7.5
+ assert (3+0j) * mpc(2.5) == 7.5
+
+def test_div():
+ assert mpf(6) / mpf(3) == 2.0
+ assert mpf(6) / 3 == 2.0
+ assert mpf(6) / 3.0 == 2.0
+ assert 6 / mpf(3) == 2.0
+ assert 6.0 / mpf(3) == 2.0
+ assert (6+0j) / mpf(3.0) == 2.0
+ assert mpc(6) / mpf(3) == 2.0
+ assert mpc(6) / 3 == 2.0
+ assert mpc(6) / 3.0 == 2.0
+ assert mpc(6) / (3+0j) == 2.0
+ assert 6 / mpc(3) == 2.0
+ assert 6.0 / mpc(3) == 2.0
+ assert (6+0j) / mpc(3) == 2.0
+
+def test_pow():
+ assert mpf(6) ** mpf(3) == 216.0
+ assert mpf(6) ** 3 == 216.0
+ assert mpf(6) ** 3.0 == 216.0
+ assert 6 ** mpf(3) == 216.0
+ assert 6.0 ** mpf(3) == 216.0
+ assert (6+0j) ** mpf(3.0) == 216.0
+ assert mpc(6) ** mpf(3) == 216.0
+ assert mpc(6) ** 3 == 216.0
+ assert mpc(6) ** 3.0 == 216.0
+ assert mpc(6) ** (3+0j) == 216.0
+ assert 6 ** mpc(3) == 216.0
+ assert 6.0 ** mpc(3) == 216.0
+ assert (6+0j) ** mpc(3) == 216.0
+
+def test_mixed_misc():
+ assert 1 + mpf(3) == mpf(3) + 1 == 4
+ assert 1 - mpf(3) == -(mpf(3) - 1) == -2
+ assert 3 * mpf(2) == mpf(2) * 3 == 6
+ assert 6 / mpf(2) == mpf(6) / 2 == 3
+ assert 1.0 + mpf(3) == mpf(3) + 1.0 == 4
+ assert 1.0 - mpf(3) == -(mpf(3) - 1.0) == -2
+ assert 3.0 * mpf(2) == mpf(2) * 3.0 == 6
+ assert 6.0 / mpf(2) == mpf(6) / 2.0 == 3
+
+def test_add_misc():
+ mp.dps = 15
+ assert mpf(4) + mpf(-70) == -66
+ assert mpf(1) + mpf(1.1)/80 == 1 + 1.1/80
+ assert mpf((1, 10000000000)) + mpf(3) == mpf((1, 10000000000))
+ assert mpf(3) + mpf((1, 10000000000)) == mpf((1, 10000000000))
+ assert mpf((1, -10000000000)) + mpf(3) == mpf(3)
+ assert mpf(3) + mpf((1, -10000000000)) == mpf(3)
+ assert mpf(1) + 1e-15 != 1
+ assert mpf(1) + 1e-20 == 1
+ assert mpf(1.07e-22) + 0 == mpf(1.07e-22)
+ assert mpf(0) + mpf(1.07e-22) == mpf(1.07e-22)
+
+def test_complex_misc():
+ # many more tests needed
+ assert 1 + mpc(2) == 3
+ assert not mpc(2).ae(2 + 1e-13)
+ assert mpc(2+1e-15j).ae(2)
+
+def test_complex_zeros():
+ for a in [0,2]:
+ for b in [0,3]:
+ for c in [0,4]:
+ for d in [0,5]:
+ assert mpc(a,b)*mpc(c,d) == complex(a,b)*complex(c,d)
+
+def test_hash():
+ for i in range(-256, 256):
+ assert hash(mpf(i)) == hash(i)
+ assert hash(mpf(0.5)) == hash(0.5)
+ assert hash(mpc(2,3)) == hash(2+3j)
+ # Check that this doesn't fail
+ assert hash(inf)
+ # Check that overflow doesn't assign equal hashes to large numbers
+ assert hash(mpf('1e1000')) != hash('1e10000')
+ assert hash(mpc(100,'1e1000')) != hash(mpc(200,'1e1000'))
+ from mpmath.rational import mpq
+ assert hash(mp.mpq(1,3))
+ assert hash(mp.mpq(0,1)) == 0
+ assert hash(mp.mpq(-1,1)) == hash(-1)
+ assert hash(mp.mpq(1,1)) == hash(1)
+ assert hash(mp.mpq(5,1)) == hash(5)
+ assert hash(mp.mpq(1,2)) == hash(0.5)
+ if sys.version_info >= (3, 2):
+ assert hash(mpf(1)*2**2000) == hash(2**2000)
+ assert hash(mpf(1)/2**2000) == hash(mpq(1,2**2000))
+
+# Advanced rounding test
+def test_add_rounding():
+ mp.dps = 15
+ a = from_float(1e-50)
+ assert mpf_sub(mpf_add(fone, a, 53, round_up), fone, 53, round_up) == from_float(2.2204460492503131e-16)
+ assert mpf_sub(fone, a, 53, round_up) == fone
+ assert mpf_sub(fone, mpf_sub(fone, a, 53, round_down), 53, round_down) == from_float(1.1102230246251565e-16)
+ assert mpf_add(fone, a, 53, round_down) == fone
+
+def test_almost_equal():
+ assert mpf(1.2).ae(mpf(1.20000001), 1e-7)
+ assert not mpf(1.2).ae(mpf(1.20000001), 1e-9)
+ assert not mpf(-0.7818314824680298).ae(mpf(-0.774695868667929))
+
+def test_arithmetic_functions():
+ import operator
+ ops = [(operator.add, fadd), (operator.sub, fsub), (operator.mul, fmul),
+ (operator.truediv, fdiv)]
+ a = mpf(0.27)
+ b = mpf(1.13)
+ c = mpc(0.51+2.16j)
+ d = mpc(1.08-0.99j)
+ for x in [a,b,c,d]:
+ for y in [a,b,c,d]:
+ for op, fop in ops:
+ if fop is not fdiv:
+ mp.prec = 200
+ z0 = op(x,y)
+ mp.prec = 60
+ z1 = op(x,y)
+ mp.prec = 53
+ z2 = op(x,y)
+ assert fop(x, y, prec=60) == z1
+ assert fop(x, y) == z2
+ if fop is not fdiv:
+ assert fop(x, y, prec=inf) == z0
+ assert fop(x, y, dps=inf) == z0
+ assert fop(x, y, exact=True) == z0
+ assert fneg(fneg(z1, exact=True), prec=inf) == z1
+ assert fneg(z1) == -(+z1)
+ mp.dps = 15
+
+def test_exact_integer_arithmetic():
+ # XXX: re-fix this so that all operations are tested with all rounding modes
+ random.seed(0)
+ for prec in [6, 10, 25, 40, 100, 250, 725]:
+ for rounding in ['d', 'u', 'f', 'c', 'n']:
+ mp.dps = prec
+ M = 10**(prec-2)
+ M2 = 10**(prec//2-2)
+ for i in range(10):
+ a = random.randint(-M, M)
+ b = random.randint(-M, M)
+ assert mpf(a, rounding=rounding) == a
+ assert int(mpf(a, rounding=rounding)) == a
+ assert int(mpf(str(a), rounding=rounding)) == a
+ assert mpf(a) + mpf(b) == a + b
+ assert mpf(a) - mpf(b) == a - b
+ assert -mpf(a) == -a
+ a = random.randint(-M2, M2)
+ b = random.randint(-M2, M2)
+ assert mpf(a) * mpf(b) == a*b
+ assert mpf_mul(from_int(a), from_int(b), mp.prec, rounding) == from_int(a*b)
+ mp.dps = 15
+
+def test_odd_int_bug():
+ assert to_int(from_int(3), round_nearest) == 3
+
+def test_str_1000_digits():
+ mp.dps = 1001
+ # last digit may be wrong
+ assert str(mpf(2)**0.5)[-10:-1] == '9518488472'[:9]
+ assert str(pi)[-10:-1] == '2164201989'[:9]
+ mp.dps = 15
+
+def test_str_10000_digits():
+ mp.dps = 10001
+ # last digit may be wrong
+ assert str(mpf(2)**0.5)[-10:-1] == '5873258351'[:9]
+ assert str(pi)[-10:-1] == '5256375678'[:9]
+ mp.dps = 15
+
+def test_monitor():
+ f = lambda x: x**2
+ a = []
+ b = []
+ g = monitor(f, a.append, b.append)
+ assert g(3) == 9
+ assert g(4) == 16
+ assert a[0] == ((3,), {})
+ assert b[0] == 9
+
+def test_nint_distance():
+ assert nint_distance(mpf(-3)) == (-3, -inf)
+ assert nint_distance(mpc(-3)) == (-3, -inf)
+ assert nint_distance(mpf(-3.1)) == (-3, -3)
+ assert nint_distance(mpf(-3.01)) == (-3, -6)
+ assert nint_distance(mpf(-3.001)) == (-3, -9)
+ assert nint_distance(mpf(-3.0001)) == (-3, -13)
+ assert nint_distance(mpf(-2.9)) == (-3, -3)
+ assert nint_distance(mpf(-2.99)) == (-3, -6)
+ assert nint_distance(mpf(-2.999)) == (-3, -9)
+ assert nint_distance(mpf(-2.9999)) == (-3, -13)
+ assert nint_distance(mpc(-3+0.1j)) == (-3, -3)
+ assert nint_distance(mpc(-3+0.01j)) == (-3, -6)
+ assert nint_distance(mpc(-3.1+0.1j)) == (-3, -3)
+ assert nint_distance(mpc(-3.01+0.01j)) == (-3, -6)
+ assert nint_distance(mpc(-3.001+0.001j)) == (-3, -9)
+ assert nint_distance(mpf(0)) == (0, -inf)
+ assert nint_distance(mpf(0.01)) == (0, -6)
+ assert nint_distance(mpf('1e-100')) == (0, -332)
+
+def test_floor_ceil_nint_frac():
+ mp.dps = 15
+ for n in range(-10,10):
+ assert floor(n) == n
+ assert floor(n+0.5) == n
+ assert ceil(n) == n
+ assert ceil(n+0.5) == n+1
+ assert nint(n) == n
+ # nint rounds to even
+ if n % 2 == 1:
+ assert nint(n+0.5) == n+1
+ else:
+ assert nint(n+0.5) == n
+ assert floor(inf) == inf
+ assert floor(ninf) == ninf
+ assert isnan(floor(nan))
+ assert ceil(inf) == inf
+ assert ceil(ninf) == ninf
+ assert isnan(ceil(nan))
+ assert nint(inf) == inf
+ assert nint(ninf) == ninf
+ assert isnan(nint(nan))
+ assert floor(0.1) == 0
+ assert floor(0.9) == 0
+ assert floor(-0.1) == -1
+ assert floor(-0.9) == -1
+ assert floor(10000000000.1) == 10000000000
+ assert floor(10000000000.9) == 10000000000
+ assert floor(-10000000000.1) == -10000000000-1
+ assert floor(-10000000000.9) == -10000000000-1
+ assert floor(1e-100) == 0
+ assert floor(-1e-100) == -1
+ assert floor(1e100) == 1e100
+ assert floor(-1e100) == -1e100
+ assert ceil(0.1) == 1
+ assert ceil(0.9) == 1
+ assert ceil(-0.1) == 0
+ assert ceil(-0.9) == 0
+ assert ceil(10000000000.1) == 10000000000+1
+ assert ceil(10000000000.9) == 10000000000+1
+ assert ceil(-10000000000.1) == -10000000000
+ assert ceil(-10000000000.9) == -10000000000
+ assert ceil(1e-100) == 1
+ assert ceil(-1e-100) == 0
+ assert ceil(1e100) == 1e100
+ assert ceil(-1e100) == -1e100
+ assert nint(0.1) == 0
+ assert nint(0.9) == 1
+ assert nint(-0.1) == 0
+ assert nint(-0.9) == -1
+ assert nint(10000000000.1) == 10000000000
+ assert nint(10000000000.9) == 10000000000+1
+ assert nint(-10000000000.1) == -10000000000
+ assert nint(-10000000000.9) == -10000000000-1
+ assert nint(1e-100) == 0
+ assert nint(-1e-100) == 0
+ assert nint(1e100) == 1e100
+ assert nint(-1e100) == -1e100
+ assert floor(3.2+4.6j) == 3+4j
+ assert ceil(3.2+4.6j) == 4+5j
+ assert nint(3.2+4.6j) == 3+5j
+ for n in range(-10,10):
+ assert frac(n) == 0
+ assert frac(0.25) == 0.25
+ assert frac(1.25) == 0.25
+ assert frac(2.25) == 0.25
+ assert frac(-0.25) == 0.75
+ assert frac(-1.25) == 0.75
+ assert frac(-2.25) == 0.75
+ assert frac('1e100000000000000') == 0
+ u = mpf('1e-100000000000000')
+ assert frac(u) == u
+ assert frac(-u) == 1 # rounding!
+ u = mpf('1e-400')
+ assert frac(-u, prec=0) == fsub(1, u, exact=True)
+ assert frac(3.25+4.75j) == 0.25+0.75j
+
+def test_isnan_etc():
+ from mpmath.rational import mpq
+ assert isnan(nan) == True
+ assert isnan(3) == False
+ assert isnan(mpf(3)) == False
+ assert isnan(inf) == False
+ assert isnan(mpc(2,nan)) == True
+ assert isnan(mpc(2,nan)) == True
+ assert isnan(mpc(nan,nan)) == True
+ assert isnan(mpc(2,2)) == False
+ assert isnan(mpc(nan,inf)) == True
+ assert isnan(mpc(inf,inf)) == False
+ assert isnan(mpq((3,2))) == False
+ assert isnan(mpq((0,1))) == False
+ assert isinf(inf) == True
+ assert isinf(-inf) == True
+ assert isinf(3) == False
+ assert isinf(nan) == False
+ assert isinf(3+4j) == False
+ assert isinf(mpc(inf)) == True
+ assert isinf(mpc(3,inf)) == True
+ assert isinf(mpc(inf,3)) == True
+ assert isinf(mpc(inf,inf)) == True
+ assert isinf(mpc(nan,inf)) == True
+ assert isinf(mpc(inf,nan)) == True
+ assert isinf(mpc(nan,nan)) == False
+ assert isinf(mpq((3,2))) == False
+ assert isinf(mpq((0,1))) == False
+ assert isnormal(3) == True
+ assert isnormal(3.5) == True
+ assert isnormal(mpf(3.5)) == True
+ assert isnormal(0) == False
+ assert isnormal(mpf(0)) == False
+ assert isnormal(0.0) == False
+ assert isnormal(inf) == False
+ assert isnormal(-inf) == False
+ assert isnormal(nan) == False
+ assert isnormal(float(inf)) == False
+ assert isnormal(mpc(0,0)) == False
+ assert isnormal(mpc(3,0)) == True
+ assert isnormal(mpc(0,3)) == True
+ assert isnormal(mpc(3,3)) == True
+ assert isnormal(mpc(0,nan)) == False
+ assert isnormal(mpc(0,inf)) == False
+ assert isnormal(mpc(3,nan)) == False
+ assert isnormal(mpc(3,inf)) == False
+ assert isnormal(mpc(3,-inf)) == False
+ assert isnormal(mpc(nan,0)) == False
+ assert isnormal(mpc(inf,0)) == False
+ assert isnormal(mpc(nan,3)) == False
+ assert isnormal(mpc(inf,3)) == False
+ assert isnormal(mpc(inf,nan)) == False
+ assert isnormal(mpc(nan,inf)) == False
+ assert isnormal(mpc(nan,nan)) == False
+ assert isnormal(mpc(inf,inf)) == False
+ assert isnormal(mpq((3,2))) == True
+ assert isnormal(mpq((0,1))) == False
+ assert isint(3) == True
+ assert isint(0) == True
+ assert isint(long(3)) == True
+ assert isint(long(0)) == True
+ assert isint(mpf(3)) == True
+ assert isint(mpf(0)) == True
+ assert isint(mpf(-3)) == True
+ assert isint(mpf(3.2)) == False
+ assert isint(3.2) == False
+ assert isint(nan) == False
+ assert isint(inf) == False
+ assert isint(-inf) == False
+ assert isint(mpc(0)) == True
+ assert isint(mpc(3)) == True
+ assert isint(mpc(3.2)) == False
+ assert isint(mpc(3,inf)) == False
+ assert isint(mpc(inf)) == False
+ assert isint(mpc(3,2)) == False
+ assert isint(mpc(0,2)) == False
+ assert isint(mpc(3,2),gaussian=True) == True
+ assert isint(mpc(3,0),gaussian=True) == True
+ assert isint(mpc(0,3),gaussian=True) == True
+ assert isint(3+4j) == False
+ assert isint(3+4j, gaussian=True) == True
+ assert isint(3+0j) == True
+ assert isint(mpq((3,2))) == False
+ assert isint(mpq((3,9))) == False
+ assert isint(mpq((9,3))) == True
+ assert isint(mpq((0,4))) == True
+ assert isint(mpq((1,1))) == True
+ assert isint(mpq((-1,1))) == True
+ assert mp.isnpint(0) == True
+ assert mp.isnpint(1) == False
+ assert mp.isnpint(-1) == True
+ assert mp.isnpint(-1.1) == False
+ assert mp.isnpint(-1.0) == True
+ assert mp.isnpint(mp.mpq(1,2)) == False
+ assert mp.isnpint(mp.mpq(-1,2)) == False
+ assert mp.isnpint(mp.mpq(-3,1)) == True
+ assert mp.isnpint(mp.mpq(0,1)) == True
+ assert mp.isnpint(mp.mpq(1,1)) == False
+ assert mp.isnpint(0+0j) == True
+ assert mp.isnpint(-1+0j) == True
+ assert mp.isnpint(-1.1+0j) == False
+ assert mp.isnpint(-1+0.1j) == False
+ assert mp.isnpint(0+0.1j) == False
+
+
+def test_issue_438():
+ assert mpf(finf) == mpf('inf')
+ assert mpf(fninf) == mpf('-inf')
+ assert mpf(fnan)._mpf_ == mpf('nan')._mpf_
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_gammazeta.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_gammazeta.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a18a7964d746561dfd5f81177cd78ccc46d2a5d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_gammazeta.py
@@ -0,0 +1,698 @@
+from mpmath import *
+from mpmath.libmp import round_up, from_float, mpf_zeta_int
+
+def test_zeta_int_bug():
+ assert mpf_zeta_int(0, 10) == from_float(-0.5)
+
+def test_bernoulli():
+ assert bernfrac(0) == (1,1)
+ assert bernfrac(1) == (-1,2)
+ assert bernfrac(2) == (1,6)
+ assert bernfrac(3) == (0,1)
+ assert bernfrac(4) == (-1,30)
+ assert bernfrac(5) == (0,1)
+ assert bernfrac(6) == (1,42)
+ assert bernfrac(8) == (-1,30)
+ assert bernfrac(10) == (5,66)
+ assert bernfrac(12) == (-691,2730)
+ assert bernfrac(18) == (43867,798)
+ p, q = bernfrac(228)
+ assert p % 10**10 == 164918161
+ assert q == 625170
+ p, q = bernfrac(1000)
+ assert p % 10**10 == 7950421099
+ assert q == 342999030
+ mp.dps = 15
+ assert bernoulli(0) == 1
+ assert bernoulli(1) == -0.5
+ assert bernoulli(2).ae(1./6)
+ assert bernoulli(3) == 0
+ assert bernoulli(4).ae(-1./30)
+ assert bernoulli(5) == 0
+ assert bernoulli(6).ae(1./42)
+ assert str(bernoulli(10)) == '0.0757575757575758'
+ assert str(bernoulli(234)) == '7.62772793964344e+267'
+ assert str(bernoulli(10**5)) == '-5.82229431461335e+376755'
+ assert str(bernoulli(10**8+2)) == '1.19570355039953e+676752584'
+
+ mp.dps = 50
+ assert str(bernoulli(10)) == '0.075757575757575757575757575757575757575757575757576'
+ assert str(bernoulli(234)) == '7.6277279396434392486994969020496121553385863373331e+267'
+ assert str(bernoulli(10**5)) == '-5.8222943146133508236497045360612887555320691004308e+376755'
+ assert str(bernoulli(10**8+2)) == '1.1957035503995297272263047884604346914602088317782e+676752584'
+
+ mp.dps = 1000
+ assert bernoulli(10).ae(mpf(5)/66)
+
+ mp.dps = 50000
+ assert bernoulli(10).ae(mpf(5)/66)
+
+ mp.dps = 15
+
+def test_bernpoly_eulerpoly():
+ mp.dps = 15
+ assert bernpoly(0,-1).ae(1)
+ assert bernpoly(0,0).ae(1)
+ assert bernpoly(0,'1/2').ae(1)
+ assert bernpoly(0,'3/4').ae(1)
+ assert bernpoly(0,1).ae(1)
+ assert bernpoly(0,2).ae(1)
+ assert bernpoly(1,-1).ae('-3/2')
+ assert bernpoly(1,0).ae('-1/2')
+ assert bernpoly(1,'1/2').ae(0)
+ assert bernpoly(1,'3/4').ae('1/4')
+ assert bernpoly(1,1).ae('1/2')
+ assert bernpoly(1,2).ae('3/2')
+ assert bernpoly(2,-1).ae('13/6')
+ assert bernpoly(2,0).ae('1/6')
+ assert bernpoly(2,'1/2').ae('-1/12')
+ assert bernpoly(2,'3/4').ae('-1/48')
+ assert bernpoly(2,1).ae('1/6')
+ assert bernpoly(2,2).ae('13/6')
+ assert bernpoly(3,-1).ae(-3)
+ assert bernpoly(3,0).ae(0)
+ assert bernpoly(3,'1/2').ae(0)
+ assert bernpoly(3,'3/4').ae('-3/64')
+ assert bernpoly(3,1).ae(0)
+ assert bernpoly(3,2).ae(3)
+ assert bernpoly(4,-1).ae('119/30')
+ assert bernpoly(4,0).ae('-1/30')
+ assert bernpoly(4,'1/2').ae('7/240')
+ assert bernpoly(4,'3/4').ae('7/3840')
+ assert bernpoly(4,1).ae('-1/30')
+ assert bernpoly(4,2).ae('119/30')
+ assert bernpoly(5,-1).ae(-5)
+ assert bernpoly(5,0).ae(0)
+ assert bernpoly(5,'1/2').ae(0)
+ assert bernpoly(5,'3/4').ae('25/1024')
+ assert bernpoly(5,1).ae(0)
+ assert bernpoly(5,2).ae(5)
+ assert bernpoly(10,-1).ae('665/66')
+ assert bernpoly(10,0).ae('5/66')
+ assert bernpoly(10,'1/2').ae('-2555/33792')
+ assert bernpoly(10,'3/4').ae('-2555/34603008')
+ assert bernpoly(10,1).ae('5/66')
+ assert bernpoly(10,2).ae('665/66')
+ assert bernpoly(11,-1).ae(-11)
+ assert bernpoly(11,0).ae(0)
+ assert bernpoly(11,'1/2').ae(0)
+ assert bernpoly(11,'3/4').ae('-555731/4194304')
+ assert bernpoly(11,1).ae(0)
+ assert bernpoly(11,2).ae(11)
+ assert eulerpoly(0,-1).ae(1)
+ assert eulerpoly(0,0).ae(1)
+ assert eulerpoly(0,'1/2').ae(1)
+ assert eulerpoly(0,'3/4').ae(1)
+ assert eulerpoly(0,1).ae(1)
+ assert eulerpoly(0,2).ae(1)
+ assert eulerpoly(1,-1).ae('-3/2')
+ assert eulerpoly(1,0).ae('-1/2')
+ assert eulerpoly(1,'1/2').ae(0)
+ assert eulerpoly(1,'3/4').ae('1/4')
+ assert eulerpoly(1,1).ae('1/2')
+ assert eulerpoly(1,2).ae('3/2')
+ assert eulerpoly(2,-1).ae(2)
+ assert eulerpoly(2,0).ae(0)
+ assert eulerpoly(2,'1/2').ae('-1/4')
+ assert eulerpoly(2,'3/4').ae('-3/16')
+ assert eulerpoly(2,1).ae(0)
+ assert eulerpoly(2,2).ae(2)
+ assert eulerpoly(3,-1).ae('-9/4')
+ assert eulerpoly(3,0).ae('1/4')
+ assert eulerpoly(3,'1/2').ae(0)
+ assert eulerpoly(3,'3/4').ae('-11/64')
+ assert eulerpoly(3,1).ae('-1/4')
+ assert eulerpoly(3,2).ae('9/4')
+ assert eulerpoly(4,-1).ae(2)
+ assert eulerpoly(4,0).ae(0)
+ assert eulerpoly(4,'1/2').ae('5/16')
+ assert eulerpoly(4,'3/4').ae('57/256')
+ assert eulerpoly(4,1).ae(0)
+ assert eulerpoly(4,2).ae(2)
+ assert eulerpoly(5,-1).ae('-3/2')
+ assert eulerpoly(5,0).ae('-1/2')
+ assert eulerpoly(5,'1/2').ae(0)
+ assert eulerpoly(5,'3/4').ae('361/1024')
+ assert eulerpoly(5,1).ae('1/2')
+ assert eulerpoly(5,2).ae('3/2')
+ assert eulerpoly(10,-1).ae(2)
+ assert eulerpoly(10,0).ae(0)
+ assert eulerpoly(10,'1/2').ae('-50521/1024')
+ assert eulerpoly(10,'3/4').ae('-36581523/1048576')
+ assert eulerpoly(10,1).ae(0)
+ assert eulerpoly(10,2).ae(2)
+ assert eulerpoly(11,-1).ae('-699/4')
+ assert eulerpoly(11,0).ae('691/4')
+ assert eulerpoly(11,'1/2').ae(0)
+ assert eulerpoly(11,'3/4').ae('-512343611/4194304')
+ assert eulerpoly(11,1).ae('-691/4')
+ assert eulerpoly(11,2).ae('699/4')
+ # Potential accuracy issues
+ assert bernpoly(10000,10000).ae('5.8196915936323387117e+39999')
+ assert bernpoly(200,17.5).ae(3.8048418524583064909e244)
+ assert eulerpoly(200,17.5).ae(-3.7309911582655785929e275)
+
+def test_gamma():
+ mp.dps = 15
+ assert gamma(0.25).ae(3.6256099082219083119)
+ assert gamma(0.0001).ae(9999.4228832316241908)
+ assert gamma(300).ae('1.0201917073881354535e612')
+ assert gamma(-0.5).ae(-3.5449077018110320546)
+ assert gamma(-7.43).ae(0.00026524416464197007186)
+ #assert gamma(Rational(1,2)) == gamma(0.5)
+ #assert gamma(Rational(-7,3)).ae(gamma(mpf(-7)/3))
+ assert gamma(1+1j).ae(0.49801566811835604271 - 0.15494982830181068512j)
+ assert gamma(-1+0.01j).ae(-0.422733904013474115 + 99.985883082635367436j)
+ assert gamma(20+30j).ae(-1453876687.5534810 + 1163777777.8031573j)
+ # Should always give exact factorials when they can
+ # be represented as mpfs under the current working precision
+ fact = 1
+ for i in range(1, 18):
+ assert gamma(i) == fact
+ fact *= i
+ for dps in [170, 600]:
+ fact = 1
+ mp.dps = dps
+ for i in range(1, 105):
+ assert gamma(i) == fact
+ fact *= i
+ mp.dps = 100
+ assert gamma(0.5).ae(sqrt(pi))
+ mp.dps = 15
+ assert factorial(0) == fac(0) == 1
+ assert factorial(3) == 6
+ assert isnan(gamma(nan))
+ assert gamma(1100).ae('4.8579168073569433667e2866')
+ assert rgamma(0) == 0
+ assert rgamma(-1) == 0
+ assert rgamma(2) == 1.0
+ assert rgamma(3) == 0.5
+ assert loggamma(2+8j).ae(-8.5205176753667636926 + 10.8569497125597429366j)
+ assert loggamma('1e10000').ae('2.302485092994045684017991e10004')
+ assert loggamma('1e10000j').ae(mpc('-1.570796326794896619231322e10000','2.302485092994045684017991e10004'))
+
+def test_fac2():
+ mp.dps = 15
+ assert [fac2(n) for n in range(10)] == [1,1,2,3,8,15,48,105,384,945]
+ assert fac2(-5).ae(1./3)
+ assert fac2(-11).ae(-1./945)
+ assert fac2(50).ae(5.20469842636666623e32)
+ assert fac2(0.5+0.75j).ae(0.81546769394688069176-0.34901016085573266889j)
+ assert fac2(inf) == inf
+ assert isnan(fac2(-inf))
+
+def test_gamma_quotients():
+ mp.dps = 15
+ h = 1e-8
+ ep = 1e-4
+ G = gamma
+ assert gammaprod([-1],[-3,-4]) == 0
+ assert gammaprod([-1,0],[-5]) == inf
+ assert abs(gammaprod([-1],[-2]) - G(-1+h)/G(-2+h)) < 1e-4
+ assert abs(gammaprod([-4,-3],[-2,0]) - G(-4+h)*G(-3+h)/G(-2+h)/G(0+h)) < 1e-4
+ assert rf(3,0) == 1
+ assert rf(2.5,1) == 2.5
+ assert rf(-5,2) == 20
+ assert rf(j,j).ae(gamma(2*j)/gamma(j))
+ assert rf('-255.5815971722918','-0.5119253100282322').ae('-0.1952720278805729485') # issue 421
+ assert ff(-2,0) == 1
+ assert ff(-2,1) == -2
+ assert ff(4,3) == 24
+ assert ff(3,4) == 0
+ assert binomial(0,0) == 1
+ assert binomial(1,0) == 1
+ assert binomial(0,-1) == 0
+ assert binomial(3,2) == 3
+ assert binomial(5,2) == 10
+ assert binomial(5,3) == 10
+ assert binomial(5,5) == 1
+ assert binomial(-1,0) == 1
+ assert binomial(-2,-4) == 3
+ assert binomial(4.5, 1.5) == 6.5625
+ assert binomial(1100,1) == 1100
+ assert binomial(1100,2) == 604450
+ assert beta(1,1) == 1
+ assert beta(0,0) == inf
+ assert beta(3,0) == inf
+ assert beta(-1,-1) == inf
+ assert beta(1.5,1).ae(2/3.)
+ assert beta(1.5,2.5).ae(pi/16)
+ assert (10**15*beta(10,100)).ae(2.3455339739604649879)
+ assert beta(inf,inf) == 0
+ assert isnan(beta(-inf,inf))
+ assert isnan(beta(-3,inf))
+ assert isnan(beta(0,inf))
+ assert beta(inf,0.5) == beta(0.5,inf) == 0
+ assert beta(inf,-1.5) == inf
+ assert beta(inf,-0.5) == -inf
+ assert beta(1+2j,-1-j/2).ae(1.16396542451069943086+0.08511695947832914640j)
+ assert beta(-0.5,0.5) == 0
+ assert beta(-3,3).ae(-1/3.)
+ assert beta('-255.5815971722918','-0.5119253100282322').ae('18.157330562703710339') # issue 421
+
+def test_zeta():
+ mp.dps = 15
+ assert zeta(2).ae(pi**2 / 6)
+ assert zeta(2.0).ae(pi**2 / 6)
+ assert zeta(mpc(2)).ae(pi**2 / 6)
+ assert zeta(100).ae(1)
+ assert zeta(0).ae(-0.5)
+ assert zeta(0.5).ae(-1.46035450880958681)
+ assert zeta(-1).ae(-mpf(1)/12)
+ assert zeta(-2) == 0
+ assert zeta(-3).ae(mpf(1)/120)
+ assert zeta(-4) == 0
+ assert zeta(-100) == 0
+ assert isnan(zeta(nan))
+ assert zeta(1e-30).ae(-0.5)
+ assert zeta(-1e-30).ae(-0.5)
+ # Zeros in the critical strip
+ assert zeta(mpc(0.5, 14.1347251417346937904)).ae(0)
+ assert zeta(mpc(0.5, 21.0220396387715549926)).ae(0)
+ assert zeta(mpc(0.5, 25.0108575801456887632)).ae(0)
+ assert zeta(mpc(1e-30,1e-40)).ae(-0.5)
+ assert zeta(mpc(-1e-30,1e-40)).ae(-0.5)
+ mp.dps = 50
+ im = '236.5242296658162058024755079556629786895294952121891237'
+ assert zeta(mpc(0.5, im)).ae(0, 1e-46)
+ mp.dps = 15
+ # Complex reflection formula
+ assert (zeta(-60+3j) / 10**34).ae(8.6270183987866146+15.337398548226238j)
+ # issue #358
+ assert zeta(0,0.5) == 0
+ assert zeta(0,0) == 0.5
+ assert zeta(0,0.5,1).ae(-0.34657359027997265)
+ # see issue #390
+ assert zeta(-1.5,0.5j).ae(-0.13671400162512768475 + 0.11411333638426559139j)
+
+def test_altzeta():
+ mp.dps = 15
+ assert altzeta(-2) == 0
+ assert altzeta(-4) == 0
+ assert altzeta(-100) == 0
+ assert altzeta(0) == 0.5
+ assert altzeta(-1) == 0.25
+ assert altzeta(-3) == -0.125
+ assert altzeta(-5) == 0.25
+ assert altzeta(-21) == 1180529130.25
+ assert altzeta(1).ae(log(2))
+ assert altzeta(2).ae(pi**2/12)
+ assert altzeta(10).ae(73*pi**10/6842880)
+ assert altzeta(50) < 1
+ assert altzeta(60, rounding='d') < 1
+ assert altzeta(60, rounding='u') == 1
+ assert altzeta(10000, rounding='d') < 1
+ assert altzeta(10000, rounding='u') == 1
+ assert altzeta(3+0j) == altzeta(3)
+ s = 3+4j
+ assert altzeta(s).ae((1-2**(1-s))*zeta(s))
+ s = -3+4j
+ assert altzeta(s).ae((1-2**(1-s))*zeta(s))
+ assert altzeta(-100.5).ae(4.58595480083585913e+108)
+ assert altzeta(1.3).ae(0.73821404216623045)
+ assert altzeta(1e-30).ae(0.5)
+ assert altzeta(-1e-30).ae(0.5)
+ assert altzeta(mpc(1e-30,1e-40)).ae(0.5)
+ assert altzeta(mpc(-1e-30,1e-40)).ae(0.5)
+
+def test_zeta_huge():
+ mp.dps = 15
+ assert zeta(inf) == 1
+ mp.dps = 50
+ assert zeta(100).ae('1.0000000000000000000000000000007888609052210118073522')
+ assert zeta(40*pi).ae('1.0000000000000000000000000000000000000148407238666182')
+ mp.dps = 10000
+ v = zeta(33000)
+ mp.dps = 15
+ assert str(v-1) == '1.02363019598118e-9934'
+ assert zeta(pi*1000, rounding=round_up) > 1
+ assert zeta(3000, rounding=round_up) > 1
+ assert zeta(pi*1000) == 1
+ assert zeta(3000) == 1
+
+def test_zeta_negative():
+ mp.dps = 150
+ a = -pi*10**40
+ mp.dps = 15
+ assert str(zeta(a)) == '2.55880492708712e+1233536161668617575553892558646631323374078'
+ mp.dps = 50
+ assert str(zeta(a)) == '2.5588049270871154960875033337384432038436330847333e+1233536161668617575553892558646631323374078'
+ mp.dps = 15
+
+def test_polygamma():
+ mp.dps = 15
+ psi0 = lambda z: psi(0,z)
+ psi1 = lambda z: psi(1,z)
+ assert psi0(3) == psi(0,3) == digamma(3)
+ #assert psi2(3) == psi(2,3) == tetragamma(3)
+ #assert psi3(3) == psi(3,3) == pentagamma(3)
+ assert psi0(pi).ae(0.97721330794200673)
+ assert psi0(-pi).ae(7.8859523853854902)
+ assert psi0(-pi+1).ae(7.5676424992016996)
+ assert psi0(pi+j).ae(1.04224048313859376 + 0.35853686544063749j)
+ assert psi0(-pi-j).ae(1.3404026194821986 - 2.8824392476809402j)
+ assert findroot(psi0, 1).ae(1.4616321449683622)
+ assert psi0(1e-10).ae(-10000000000.57722)
+ assert psi0(1e-40).ae(-1.000000000000000e+40)
+ assert psi0(1e-10+1e-10j).ae(-5000000000.577215 + 5000000000.000000j)
+ assert psi0(1e-40+1e-40j).ae(-5.000000000000000e+39 + 5.000000000000000e+39j)
+ assert psi0(inf) == inf
+ assert psi1(inf) == 0
+ assert psi(2,inf) == 0
+ assert psi1(pi).ae(0.37424376965420049)
+ assert psi1(-pi).ae(53.030438740085385)
+ assert psi1(pi+j).ae(0.32935710377142464 - 0.12222163911221135j)
+ assert psi1(-pi-j).ae(-0.30065008356019703 + 0.01149892486928227j)
+ assert (10**6*psi(4,1+10*pi*j)).ae(-6.1491803479004446 - 0.3921316371664063j)
+ assert psi0(1+10*pi*j).ae(3.4473994217222650 + 1.5548808324857071j)
+ assert isnan(psi0(nan))
+ assert isnan(psi0(-inf))
+ assert psi0(-100.5).ae(4.615124601338064)
+ assert psi0(3+0j).ae(psi0(3))
+ assert psi0(-100+3j).ae(4.6106071768714086321+3.1117510556817394626j)
+ assert isnan(psi(2,mpc(0,inf)))
+ assert isnan(psi(2,mpc(0,nan)))
+ assert isnan(psi(2,mpc(0,-inf)))
+ assert isnan(psi(2,mpc(1,inf)))
+ assert isnan(psi(2,mpc(1,nan)))
+ assert isnan(psi(2,mpc(1,-inf)))
+ assert isnan(psi(2,mpc(inf,inf)))
+ assert isnan(psi(2,mpc(nan,nan)))
+ assert isnan(psi(2,mpc(-inf,-inf)))
+ mp.dps = 30
+ # issue #534
+ assert digamma(-0.75+1j).ae(mpc('0.46317279488182026118963809283042317', '2.4821070143037957102007677817351115'))
+ mp.dps = 15
+
+def test_polygamma_high_prec():
+ mp.dps = 100
+ assert str(psi(0,pi)) == "0.9772133079420067332920694864061823436408346099943256380095232865318105924777141317302075654362928734"
+ assert str(psi(10,pi)) == "-12.98876181434889529310283769414222588307175962213707170773803550518307617769657562747174101900659238"
+
+def test_polygamma_identities():
+ mp.dps = 15
+ psi0 = lambda z: psi(0,z)
+ psi1 = lambda z: psi(1,z)
+ psi2 = lambda z: psi(2,z)
+ assert psi0(0.5).ae(-euler-2*log(2))
+ assert psi0(1).ae(-euler)
+ assert psi1(0.5).ae(0.5*pi**2)
+ assert psi1(1).ae(pi**2/6)
+ assert psi1(0.25).ae(pi**2 + 8*catalan)
+ assert psi2(1).ae(-2*apery)
+ mp.dps = 20
+ u = -182*apery+4*sqrt(3)*pi**3
+ mp.dps = 15
+ assert psi(2,5/6.).ae(u)
+ assert psi(3,0.5).ae(pi**4)
+
+def test_foxtrot_identity():
+ # A test of the complex digamma function.
+ # See http://mathworld.wolfram.com/FoxTrotSeries.html and
+ # http://mathworld.wolfram.com/DigammaFunction.html
+ psi0 = lambda z: psi(0,z)
+ mp.dps = 50
+ a = (-1)**fraction(1,3)
+ b = (-1)**fraction(2,3)
+ x = -psi0(0.5*a) - psi0(-0.5*b) + psi0(0.5*(1+a)) + psi0(0.5*(1-b))
+ y = 2*pi*sech(0.5*sqrt(3)*pi)
+ assert x.ae(y)
+ mp.dps = 15
+
+def test_polygamma_high_order():
+ mp.dps = 100
+ assert str(psi(50, pi)) == "-1344100348958402765749252447726432491812.641985273160531055707095989227897753035823152397679626136483"
+ assert str(psi(50, pi + 14*e)) == "-0.00000000000000000189793739550804321623512073101895801993019919886375952881053090844591920308111549337295143780341396"
+ assert str(psi(50, pi + 14*e*j)) == ("(-0.0000000000000000522516941152169248975225472155683565752375889510631513244785"
+ "9377385233700094871256507814151956624433 - 0.00000000000000001813157041407010184"
+ "702414110218205348527862196327980417757665282244728963891298080199341480881811613j)")
+ mp.dps = 15
+ assert str(psi(50, pi)) == "-1.34410034895841e+39"
+ assert str(psi(50, pi + 14*e)) == "-1.89793739550804e-18"
+ assert str(psi(50, pi + 14*e*j)) == "(-5.2251694115217e-17 - 1.81315704140701e-17j)"
+
+def test_harmonic():
+ mp.dps = 15
+ assert harmonic(0) == 0
+ assert harmonic(1) == 1
+ assert harmonic(2) == 1.5
+ assert harmonic(3).ae(1. + 1./2 + 1./3)
+ assert harmonic(10**10).ae(23.603066594891989701)
+ assert harmonic(10**1000).ae(2303.162308658947)
+ assert harmonic(0.5).ae(2-2*log(2))
+ assert harmonic(inf) == inf
+ assert harmonic(2+0j) == 1.5+0j
+ assert harmonic(1+2j).ae(1.4918071802755104+0.92080728264223022j)
+
+def test_gamma_huge_1():
+ mp.dps = 500
+ x = mpf(10**10) / 7
+ mp.dps = 15
+ assert str(gamma(x)) == "6.26075321389519e+12458010678"
+ mp.dps = 50
+ assert str(gamma(x)) == "6.2607532138951929201303779291707455874010420783933e+12458010678"
+ mp.dps = 15
+
+def test_gamma_huge_2():
+ mp.dps = 500
+ x = mpf(10**100) / 19
+ mp.dps = 15
+ assert str(gamma(x)) == (\
+ "1.82341134776679e+5172997469323364168990133558175077136829182824042201886051511"
+ "9656908623426021308685461258226190190661")
+ mp.dps = 50
+ assert str(gamma(x)) == (\
+ "1.82341134776678875374414910350027596939980412984e+5172997469323364168990133558"
+ "1750771368291828240422018860515119656908623426021308685461258226190190661")
+
+def test_gamma_huge_3():
+ mp.dps = 500
+ x = 10**80 // 3 + 10**70*j / 7
+ mp.dps = 15
+ y = gamma(x)
+ assert str(y.real) == (\
+ "-6.82925203918106e+2636286142112569524501781477865238132302397236429627932441916"
+ "056964386399485392600")
+ assert str(y.imag) == (\
+ "8.54647143678418e+26362861421125695245017814778652381323023972364296279324419160"
+ "56964386399485392600")
+ mp.dps = 50
+ y = gamma(x)
+ assert str(y.real) == (\
+ "-6.8292520391810548460682736226799637356016538421817e+26362861421125695245017814"
+ "77865238132302397236429627932441916056964386399485392600")
+ assert str(y.imag) == (\
+ "8.5464714367841748507479306948130687511711420234015e+263628614211256952450178147"
+ "7865238132302397236429627932441916056964386399485392600")
+
+def test_gamma_huge_4():
+ x = 3200+11500j
+ mp.dps = 15
+ assert str(gamma(x)) == \
+ "(8.95783268539713e+5164 - 1.94678798329735e+5164j)"
+ mp.dps = 50
+ assert str(gamma(x)) == (\
+ "(8.9578326853971339570292952697675570822206567327092e+5164"
+ " - 1.9467879832973509568895402139429643650329524144794e+51"
+ "64j)")
+ mp.dps = 15
+
+def test_gamma_huge_5():
+ mp.dps = 500
+ x = 10**60 * j / 3
+ mp.dps = 15
+ y = gamma(x)
+ assert str(y.real) == "-3.27753899634941e-227396058973640224580963937571892628368354580620654233316839"
+ assert str(y.imag) == "-7.1519888950416e-227396058973640224580963937571892628368354580620654233316841"
+ mp.dps = 50
+ y = gamma(x)
+ assert str(y.real) == (\
+ "-3.2775389963494132168950056995974690946983219123935e-22739605897364022458096393"
+ "7571892628368354580620654233316839")
+ assert str(y.imag) == (\
+ "-7.1519888950415979749736749222530209713136588885897e-22739605897364022458096393"
+ "7571892628368354580620654233316841")
+ mp.dps = 15
+
+def test_gamma_huge_7():
+ mp.dps = 100
+ a = 3 + j/mpf(10)**1000
+ mp.dps = 15
+ y = gamma(a)
+ assert str(y.real) == "2.0"
+ # wrong
+ #assert str(y.imag) == "2.16735365342606e-1000"
+ assert str(y.imag) == "1.84556867019693e-1000"
+ mp.dps = 50
+ y = gamma(a)
+ assert str(y.real) == "2.0"
+ #assert str(y.imag) == "2.1673536534260596065418805612488708028522563689298e-1000"
+ assert str(y.imag) == "1.8455686701969342787869758198351951379156813281202e-1000"
+
+def test_stieltjes():
+ mp.dps = 15
+ assert stieltjes(0).ae(+euler)
+ mp.dps = 25
+ assert stieltjes(1).ae('-0.07281584548367672486058637587')
+ assert stieltjes(2).ae('-0.009690363192872318484530386035')
+ assert stieltjes(3).ae('0.002053834420303345866160046543')
+ assert stieltjes(4).ae('0.002325370065467300057468170178')
+ mp.dps = 15
+ assert stieltjes(1).ae(-0.07281584548367672486058637587)
+ assert stieltjes(2).ae(-0.009690363192872318484530386035)
+ assert stieltjes(3).ae(0.002053834420303345866160046543)
+ assert stieltjes(4).ae(0.0023253700654673000574681701775)
+
+def test_barnesg():
+ mp.dps = 15
+ assert barnesg(0) == barnesg(-1) == 0
+ assert [superfac(i) for i in range(8)] == [1, 1, 2, 12, 288, 34560, 24883200, 125411328000]
+ assert str(superfac(1000)) == '3.24570818422368e+1177245'
+ assert isnan(barnesg(nan))
+ assert isnan(superfac(nan))
+ assert isnan(hyperfac(nan))
+ assert barnesg(inf) == inf
+ assert superfac(inf) == inf
+ assert hyperfac(inf) == inf
+ assert isnan(superfac(-inf))
+ assert barnesg(0.7).ae(0.8068722730141471)
+ assert barnesg(2+3j).ae(-0.17810213864082169+0.04504542715447838j)
+ assert [hyperfac(n) for n in range(7)] == [1, 1, 4, 108, 27648, 86400000, 4031078400000]
+ assert [hyperfac(n) for n in range(0,-7,-1)] == [1,1,-1,-4,108,27648,-86400000]
+ a = barnesg(-3+0j)
+ assert a == 0 and isinstance(a, mpc)
+ a = hyperfac(-3+0j)
+ assert a == -4 and isinstance(a, mpc)
+
+def test_polylog():
+ mp.dps = 15
+ zs = [mpmathify(z) for z in [0, 0.5, 0.99, 4, -0.5, -4, 1j, 3+4j]]
+ for z in zs: assert polylog(1, z).ae(-log(1-z))
+ for z in zs: assert polylog(0, z).ae(z/(1-z))
+ for z in zs: assert polylog(-1, z).ae(z/(1-z)**2)
+ for z in zs: assert polylog(-2, z).ae(z*(1+z)/(1-z)**3)
+ for z in zs: assert polylog(-3, z).ae(z*(1+4*z+z**2)/(1-z)**4)
+ assert polylog(3, 7).ae(5.3192579921456754382-5.9479244480803301023j)
+ assert polylog(3, -7).ae(-4.5693548977219423182)
+ assert polylog(2, 0.9).ae(1.2997147230049587252)
+ assert polylog(2, -0.9).ae(-0.75216317921726162037)
+ assert polylog(2, 0.9j).ae(-0.17177943786580149299+0.83598828572550503226j)
+ assert polylog(2, 1.1).ae(1.9619991013055685931-0.2994257606855892575j)
+ assert polylog(2, -1.1).ae(-0.89083809026228260587)
+ assert polylog(2, 1.1*sqrt(j)).ae(0.58841571107611387722+1.09962542118827026011j)
+ assert polylog(-2, 0.9).ae(1710)
+ assert polylog(-2, -0.9).ae(-90/6859.)
+ assert polylog(3, 0.9).ae(1.0496589501864398696)
+ assert polylog(-3, 0.9).ae(48690)
+ assert polylog(-3, -4).ae(-0.0064)
+ assert polylog(0.5+j/3, 0.5+j/2).ae(0.31739144796565650535 + 0.99255390416556261437j)
+ assert polylog(3+4j,1).ae(zeta(3+4j))
+ assert polylog(3+4j,-1).ae(-altzeta(3+4j))
+ # issue 390
+ assert polylog(1.5, -48.910886523731889).ae(-6.272992229311817)
+ assert polylog(1.5, 200).ae(-8.349608319033686529 - 8.159694826434266042j)
+ assert polylog(-2+0j, -2).ae(mpf(1)/13.5)
+ assert polylog(-2+0j, 1.25).ae(-180)
+
+def test_bell_polyexp():
+ mp.dps = 15
+ # TODO: more tests for polyexp
+ assert (polyexp(0,1e-10)*10**10).ae(1.00000000005)
+ assert (polyexp(1,1e-10)*10**10).ae(1.0000000001)
+ assert polyexp(5,3j).ae(-607.7044517476176454+519.962786482001476087j)
+ assert polyexp(-1,3.5).ae(12.09537536175543444)
+ # bell(0,x) = 1
+ assert bell(0,0) == 1
+ assert bell(0,1) == 1
+ assert bell(0,2) == 1
+ assert bell(0,inf) == 1
+ assert bell(0,-inf) == 1
+ assert isnan(bell(0,nan))
+ # bell(1,x) = x
+ assert bell(1,4) == 4
+ assert bell(1,0) == 0
+ assert bell(1,inf) == inf
+ assert bell(1,-inf) == -inf
+ assert isnan(bell(1,nan))
+ # bell(2,x) = x*(1+x)
+ assert bell(2,-1) == 0
+ assert bell(2,0) == 0
+ # large orders / arguments
+ assert bell(10) == 115975
+ assert bell(10,1) == 115975
+ assert bell(10, -8) == 11054008
+ assert bell(5,-50) == -253087550
+ assert bell(50,-50).ae('3.4746902914629720259e74')
+ mp.dps = 80
+ assert bell(50,-50) == 347469029146297202586097646631767227177164818163463279814268368579055777450
+ assert bell(40,50) == 5575520134721105844739265207408344706846955281965031698187656176321717550
+ assert bell(74) == 5006908024247925379707076470957722220463116781409659160159536981161298714301202
+ mp.dps = 15
+ assert bell(10,20j) == 7504528595600+15649605360020j
+ # continuity of the generalization
+ assert bell(0.5,0).ae(sinc(pi*0.5))
+
+def test_primezeta():
+ mp.dps = 15
+ assert primezeta(0.9).ae(1.8388316154446882243 + 3.1415926535897932385j)
+ assert primezeta(4).ae(0.076993139764246844943)
+ assert primezeta(1) == inf
+ assert primezeta(inf) == 0
+ assert isnan(primezeta(nan))
+
+def test_rs_zeta():
+ mp.dps = 15
+ assert zeta(0.5+100000j).ae(1.0730320148577531321 + 5.7808485443635039843j)
+ assert zeta(0.75+100000j).ae(1.837852337251873704 + 1.9988492668661145358j)
+ assert zeta(0.5+1000000j, derivative=3).ae(1647.7744105852674733 - 1423.1270943036622097j)
+ assert zeta(1+1000000j, derivative=3).ae(3.4085866124523582894 - 18.179184721525947301j)
+ assert zeta(1+1000000j, derivative=1).ae(-0.10423479366985452134 - 0.74728992803359056244j)
+ assert zeta(0.5-1000000j, derivative=1).ae(11.636804066002521459 + 17.127254072212996004j)
+ # Additional sanity tests using fp arithmetic.
+ # Some more high-precision tests are found in the docstrings
+ def ae(x, y, tol=1e-6):
+ return abs(x-y) < tol*abs(y)
+ assert ae(fp.zeta(0.5-100000j), 1.0730320148577531321 - 5.7808485443635039843j)
+ assert ae(fp.zeta(0.75-100000j), 1.837852337251873704 - 1.9988492668661145358j)
+ assert ae(fp.zeta(0.5+1e6j), 0.076089069738227100006 + 2.8051021010192989554j)
+ assert ae(fp.zeta(0.5+1e6j, derivative=1), 11.636804066002521459 - 17.127254072212996004j)
+ assert ae(fp.zeta(1+1e6j), 0.94738726251047891048 + 0.59421999312091832833j)
+ assert ae(fp.zeta(1+1e6j, derivative=1), -0.10423479366985452134 - 0.74728992803359056244j)
+ assert ae(fp.zeta(0.5+100000j, derivative=1), 10.766962036817482375 - 30.92705282105996714j)
+ assert ae(fp.zeta(0.5+100000j, derivative=2), -119.40515625740538429 + 217.14780631141830251j)
+ assert ae(fp.zeta(0.5+100000j, derivative=3), 1129.7550282628460881 - 1685.4736895169690346j)
+ assert ae(fp.zeta(0.5+100000j, derivative=4), -10407.160819314958615 + 13777.786698628045085j)
+ assert ae(fp.zeta(0.75+100000j, derivative=1), -0.41742276699594321475 - 6.4453816275049955949j)
+ assert ae(fp.zeta(0.75+100000j, derivative=2), -9.214314279161977266 + 35.07290795337967899j)
+ assert ae(fp.zeta(0.75+100000j, derivative=3), 110.61331857820103469 - 236.87847130518129926j)
+ assert ae(fp.zeta(0.75+100000j, derivative=4), -1054.334275898559401 + 1769.9177890161596383j)
+
+def test_siegelz():
+ mp.dps = 15
+ assert siegelz(100000).ae(5.87959246868176504171)
+ assert siegelz(100000, derivative=2).ae(-54.1172711010126452832)
+ assert siegelz(100000, derivative=3).ae(-278.930831343966552538)
+ assert siegelz(100000+j,derivative=1).ae(678.214511857070283307-379.742160779916375413j)
+
+
+
+def test_zeta_near_1():
+ # Test for a former bug in mpf_zeta and mpc_zeta
+ mp.dps = 15
+ s1 = fadd(1, '1e-10', exact=True)
+ s2 = fadd(1, '-1e-10', exact=True)
+ s3 = fadd(1, '1e-10j', exact=True)
+ assert zeta(s1).ae(1.000000000057721566490881444e10)
+ assert zeta(s2).ae(-9.99999999942278433510574872e9)
+ z = zeta(s3)
+ assert z.real.ae(0.57721566490153286060)
+ assert z.imag.ae(-9.9999999999999999999927184e9)
+ mp.dps = 30
+ s1 = fadd(1, '1e-50', exact=True)
+ s2 = fadd(1, '-1e-50', exact=True)
+ s3 = fadd(1, '1e-50j', exact=True)
+ assert zeta(s1).ae('1e50')
+ assert zeta(s2).ae('-1e50')
+ z = zeta(s3)
+ assert z.real.ae('0.57721566490153286060651209008240243104215933593992')
+ assert z.imag.ae('-1e50')
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_hp.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_hp.py
new file mode 100644
index 0000000000000000000000000000000000000000..9eba0af798f64ac3f8d464e2d3bf231567a48c9b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_hp.py
@@ -0,0 +1,291 @@
+"""
+Check that the output from irrational functions is accurate for
+high-precision input, from 5 to 200 digits. The reference values were
+verified with Mathematica.
+"""
+
+import time
+from mpmath import *
+
+precs = [5, 15, 28, 35, 57, 80, 100, 150, 200]
+
+# sqrt(3) + pi/2
+a = \
+"3.302847134363773912758768033145623809041389953497933538543279275605"\
+"841220051904536395163599428307109666700184672047856353516867399774243594"\
+"67433521615861420725323528325327484262075464241255915238845599752675"
+
+# e + 1/euler**2
+b = \
+"5.719681166601007617111261398629939965860873957353320734275716220045750"\
+"31474116300529519620938123730851145473473708966080207482581266469342214"\
+"824842256999042984813905047895479210702109260221361437411947323431"
+
+# sqrt(a)
+sqrt_a = \
+"1.817373691447021556327498239690365674922395036495564333152483422755"\
+"144321726165582817927383239308173567921345318453306994746434073691275094"\
+"484777905906961689902608644112196725896908619756404253109722911487"
+
+# sqrt(a+b*i).real
+sqrt_abi_real = \
+"2.225720098415113027729407777066107959851146508557282707197601407276"\
+"89160998185797504198062911768240808839104987021515555650875977724230130"\
+"3584116233925658621288393930286871862273400475179312570274423840384"
+
+# sqrt(a+b*i).imag
+sqrt_abi_imag = \
+"1.2849057639084690902371581529110949983261182430040898147672052833653668"\
+"0629534491275114877090834296831373498336559849050755848611854282001250"\
+"1924311019152914021365263161630765255610885489295778894976075186"
+
+# log(a)
+log_a = \
+"1.194784864491089550288313512105715261520511949410072046160598707069"\
+"4336653155025770546309137440687056366757650909754708302115204338077595203"\
+"83005773986664564927027147084436553262269459110211221152925732612"
+
+# log(a+b*i).real
+log_abi_real = \
+"1.8877985921697018111624077550443297276844736840853590212962006811663"\
+"04949387789489704203167470111267581371396245317618589339274243008242708"\
+"014251531496104028712866224020066439049377679709216784954509456421"
+
+# log(a+b*i).imag
+log_abi_imag = \
+"1.0471204952840802663567714297078763189256357109769672185219334169734948"\
+"4265809854092437285294686651806426649541504240470168212723133326542181"\
+"8300136462287639956713914482701017346851009323172531601894918640"
+
+# exp(a)
+exp_a = \
+"27.18994224087168661137253262213293847994194869430518354305430976149"\
+"382792035050358791398632888885200049857986258414049540376323785711941636"\
+"100358982497583832083513086941635049329804685212200507288797531143"
+
+# exp(a+b*i).real
+exp_abi_real = \
+"22.98606617170543596386921087657586890620262522816912505151109385026"\
+"40160179326569526152851983847133513990281518417211964710397233157168852"\
+"4963130831190142571659948419307628119985383887599493378056639916701"
+
+# exp(a+b*i).imag
+exp_abi_imag = \
+"-14.523557450291489727214750571590272774669907424478129280902375851196283"\
+"3377162379031724734050088565710975758824441845278120105728824497308303"\
+"6065619788140201636218705414429933685889542661364184694108251449"
+
+# a**b
+pow_a_b = \
+"928.7025342285568142947391505837660251004990092821305668257284426997"\
+"361966028275685583421197860603126498884545336686124793155581311527995550"\
+"580229264427202446131740932666832138634013168125809402143796691154"
+
+# (a**(a+b*i)).real
+pow_a_abi_real = \
+"44.09156071394489511956058111704382592976814280267142206420038656267"\
+"67707916510652790502399193109819563864568986234654864462095231138500505"\
+"8197456514795059492120303477512711977915544927440682508821426093455"
+
+# (a**(a+b*i)).imag
+pow_a_abi_imag = \
+"27.069371511573224750478105146737852141664955461266218367212527612279886"\
+"9322304536553254659049205414427707675802193810711302947536332040474573"\
+"8166261217563960235014674118610092944307893857862518964990092301"
+
+# ((a+b*i)**(a+b*i)).real
+pow_abi_abi_real = \
+"-0.15171310677859590091001057734676423076527145052787388589334350524"\
+"8084195882019497779202452975350579073716811284169068082670778986235179"\
+"0813026562962084477640470612184016755250592698408112493759742219150452"\
+
+# ((a+b*i)**(a+b*i)).imag
+pow_abi_abi_imag = \
+"1.2697592504953448936553147870155987153192995316950583150964099070426"\
+"4736837932577176947632535475040521749162383347758827307504526525647759"\
+"97547638617201824468382194146854367480471892602963428122896045019902"
+
+# sin(a)
+sin_a = \
+"-0.16055653857469062740274792907968048154164433772938156243509084009"\
+"38437090841460493108570147191289893388608611542655654723437248152535114"\
+"528368009465836614227575701220612124204622383149391870684288862269631"
+
+# sin(1000*a)
+sin_1000a = \
+"-0.85897040577443833776358106803777589664322997794126153477060795801"\
+"09151695416961724733492511852267067419573754315098042850381158563024337"\
+"216458577140500488715469780315833217177634490142748614625281171216863"
+
+# sin(a+b*i)
+sin_abi_real = \
+"-24.4696999681556977743346798696005278716053366404081910969773939630"\
+"7149215135459794473448465734589287491880563183624997435193637389884206"\
+"02151395451271809790360963144464736839412254746645151672423256977064"
+
+sin_abi_imag = \
+"-150.42505378241784671801405965872972765595073690984080160750785565810981"\
+"8314482499135443827055399655645954830931316357243750839088113122816583"\
+"7169201254329464271121058839499197583056427233866320456505060735"
+
+# cos
+cos_a = \
+"-0.98702664499035378399332439243967038895709261414476495730788864004"\
+"05406821549361039745258003422386169330787395654908532996287293003581554"\
+"257037193284199198069707141161341820684198547572456183525659969145501"
+
+cos_1000a = \
+"-0.51202523570982001856195696460663971099692261342827540426136215533"\
+"52686662667660613179619804463250686852463876088694806607652218586060613"\
+"951310588158830695735537073667299449753951774916401887657320950496820"
+
+# tan
+tan_a = \
+"0.162666873675188117341401059858835168007137819495998960250142156848"\
+"639654718809412181543343168174807985559916643549174530459883826451064966"\
+"7996119428949951351938178809444268785629011625179962457123195557310"
+
+tan_abi_real = \
+"6.822696615947538488826586186310162599974827139564433912601918442911"\
+"1026830824380070400102213741875804368044342309515353631134074491271890"\
+"467615882710035471686578162073677173148647065131872116479947620E-6"
+
+tan_abi_imag = \
+"0.9999795833048243692245661011298447587046967777739649018690797625964167"\
+"1446419978852235960862841608081413169601038230073129482874832053357571"\
+"62702259309150715669026865777947502665936317953101462202542168429"
+
+
+def test_hp():
+ for dps in precs:
+ mp.dps = dps + 8
+ aa = mpf(a)
+ bb = mpf(b)
+ a1000 = 1000*mpf(a)
+ abi = mpc(aa, bb)
+ mp.dps = dps
+ assert (sqrt(3) + pi/2).ae(aa)
+ assert (e + 1/euler**2).ae(bb)
+
+ assert sqrt(aa).ae(mpf(sqrt_a))
+ assert sqrt(abi).ae(mpc(sqrt_abi_real, sqrt_abi_imag))
+
+ assert log(aa).ae(mpf(log_a))
+ assert log(abi).ae(mpc(log_abi_real, log_abi_imag))
+
+ assert exp(aa).ae(mpf(exp_a))
+ assert exp(abi).ae(mpc(exp_abi_real, exp_abi_imag))
+
+ assert (aa**bb).ae(mpf(pow_a_b))
+ assert (aa**abi).ae(mpc(pow_a_abi_real, pow_a_abi_imag))
+ assert (abi**abi).ae(mpc(pow_abi_abi_real, pow_abi_abi_imag))
+
+ assert sin(a).ae(mpf(sin_a))
+ assert sin(a1000).ae(mpf(sin_1000a))
+ assert sin(abi).ae(mpc(sin_abi_real, sin_abi_imag))
+
+ assert cos(a).ae(mpf(cos_a))
+ assert cos(a1000).ae(mpf(cos_1000a))
+
+ assert tan(a).ae(mpf(tan_a))
+ assert tan(abi).ae(mpc(tan_abi_real, tan_abi_imag))
+
+ # check that complex cancellation is avoided so that both
+ # real and imaginary parts have high relative accuracy.
+ # abs_eps should be 0, but has to be set to 1e-205 to pass the
+ # 200-digit case, probably due to slight inaccuracy in the
+ # precomputed input
+ assert (tan(abi).real).ae(mpf(tan_abi_real), abs_eps=1e-205)
+ assert (tan(abi).imag).ae(mpf(tan_abi_imag), abs_eps=1e-205)
+ mp.dps = 460
+ assert str(log(3))[-20:] == '02166121184001409826'
+ mp.dps = 15
+
+# Since str(a) can differ in the last digit from rounded a, and I want
+# to compare the last digits of big numbers with the results in Mathematica,
+# I made this hack to get the last 20 digits of rounded a
+
+def last_digits(a):
+ r = repr(a)
+ s = str(a)
+ #dps = mp.dps
+ #mp.dps += 3
+ m = 10
+ r = r.replace(s[:-m],'')
+ r = r.replace("mpf('",'').replace("')",'')
+ num0 = 0
+ for c in r:
+ if c == '0':
+ num0 += 1
+ else:
+ break
+ b = float(int(r))/10**(len(r) - m)
+ if b >= 10**m - 0.5: # pragma: no cover
+ raise NotImplementedError
+ n = int(round(b))
+ sn = str(n)
+ s = s[:-m] + '0'*num0 + sn
+ return s[-20:]
+
+# values checked with Mathematica
+def test_log_hp():
+ mp.dps = 2000
+ a = mpf(10)**15000/3
+ r = log(a)
+ res = last_digits(r)
+ # Mathematica N[Log[10^15000/3], 2000]
+ # ...7443804441768333470331
+ assert res == '43804441768333470331'
+
+ # see issue 145
+ r = log(mpf(3)/2)
+ # Mathematica N[Log[3/2], 2000]
+ # ...69653749808140753263288
+ res = last_digits(r)
+ assert res == '53749808140753263288'
+
+ mp.dps = 10000
+ r = log(2)
+ res = last_digits(r)
+ # Mathematica N[Log[2], 10000]
+ # ...695615913401856601359655561
+ assert res == '13401856601359655561'
+ r = log(mpf(10)**10/3)
+ res = last_digits(r)
+ # Mathematica N[Log[10^10/3], 10000]
+ # ...587087654020631943060007154
+ assert res == '54020631943060007154', res
+ r = log(mpf(10)**100/3)
+ res = last_digits(r)
+ # Mathematica N[Log[10^100/3], 10000]
+ # ...59246336539088351652334666
+ assert res == '36539088351652334666', res
+ mp.dps += 10
+ a = 1 - mpf(1)/10**10
+ mp.dps -= 10
+ r = log(a)
+ res = last_digits(r)
+ # ...3310334360482956137216724048322957404
+ # 372167240483229574038733026370
+ # Mathematica N[Log[1 - 10^-10]*10^10, 10000]
+ # ...60482956137216724048322957404
+ assert res == '37216724048322957404', res
+ mp.dps = 10000
+ mp.dps += 100
+ a = 1 + mpf(1)/10**100
+ mp.dps -= 100
+
+ r = log(a)
+ res = last_digits(+r)
+ # Mathematica N[Log[1 + 10^-100]*10^10, 10030]
+ # ...3994733877377412241546890854692521568292338268273 10^-91
+ assert res == '39947338773774122415', res
+
+ mp.dps = 15
+
+def test_exp_hp():
+ mp.dps = 4000
+ r = exp(mpf(1)/10)
+ # IntegerPart[N[Exp[1/10] * 10^4000, 4000]]
+ # ...92167105162069688129
+ assert int(r * 10**mp.dps) % 10**20 == 92167105162069688129
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_identify.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_identify.py
new file mode 100644
index 0000000000000000000000000000000000000000..f75ab0bc4f04ecb614011e7f4599989465cab785
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_identify.py
@@ -0,0 +1,19 @@
+from mpmath import *
+
+def test_pslq():
+ mp.dps = 15
+ assert pslq([3*pi+4*e/7, pi, e, log(2)]) == [7, -21, -4, 0]
+ assert pslq([4.9999999999999991, 1]) == [1, -5]
+ assert pslq([2,1]) == [1, -2]
+
+def test_identify():
+ mp.dps = 20
+ assert identify(zeta(4), ['log(2)', 'pi**4']) == '((1/90)*pi**4)'
+ mp.dps = 15
+ assert identify(exp(5)) == 'exp(5)'
+ assert identify(exp(4)) == 'exp(4)'
+ assert identify(log(5)) == 'log(5)'
+ assert identify(exp(3*pi), ['pi']) == 'exp((3*pi))'
+ assert identify(3, full=True) == ['3', '3', '1/(1/3)', 'sqrt(9)',
+ '1/sqrt((1/9))', '(sqrt(12)/2)**2', '1/(sqrt(12)/6)**2']
+ assert identify(pi+1, {'a':+pi}) == '(1 + 1*a)'
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_interval.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_interval.py
new file mode 100644
index 0000000000000000000000000000000000000000..251fd8b7ddb00074e8ae27cce4a01d8f4f8fe151
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_interval.py
@@ -0,0 +1,453 @@
+from mpmath import *
+
+def test_interval_identity():
+ iv.dps = 15
+ assert mpi(2) == mpi(2, 2)
+ assert mpi(2) != mpi(-2, 2)
+ assert not (mpi(2) != mpi(2, 2))
+ assert mpi(-1, 1) == mpi(-1, 1)
+ assert str(mpi('0.1')) == "[0.099999999999999991673, 0.10000000000000000555]"
+ assert repr(mpi('0.1')) == "mpi('0.099999999999999992', '0.10000000000000001')"
+ u = mpi(-1, 3)
+ assert -1 in u
+ assert 2 in u
+ assert 3 in u
+ assert -1.1 not in u
+ assert 3.1 not in u
+ assert mpi(-1, 3) in u
+ assert mpi(0, 1) in u
+ assert mpi(-1.1, 2) not in u
+ assert mpi(2.5, 3.1) not in u
+ w = mpi(-inf, inf)
+ assert mpi(-5, 5) in w
+ assert mpi(2, inf) in w
+ assert mpi(0, 2) in mpi(0, 10)
+ assert not (3 in mpi(-inf, 0))
+
+def test_interval_hash():
+ assert hash(mpi(3)) == hash(3)
+ assert hash(mpi(3.25)) == hash(3.25)
+ assert hash(mpi(3,4)) == hash(mpi(3,4))
+ assert hash(iv.mpc(3)) == hash(3)
+ assert hash(iv.mpc(3,4)) == hash(3+4j)
+ assert hash(iv.mpc((1,3),(2,4))) == hash(iv.mpc((1,3),(2,4)))
+
+def test_interval_arithmetic():
+ iv.dps = 15
+ assert mpi(2) + mpi(3,4) == mpi(5,6)
+ assert mpi(1, 2)**2 == mpi(1, 4)
+ assert mpi(1) + mpi(0, 1e-50) == mpi(1, mpf('1.0000000000000002'))
+ x = 1 / (1 / mpi(3))
+ assert x.a < 3 < x.b
+ x = mpi(2) ** mpi(0.5)
+ iv.dps += 5
+ sq = iv.sqrt(2)
+ iv.dps -= 5
+ assert x.a < sq < x.b
+ assert mpi(1) / mpi(1, inf)
+ assert mpi(2, 3) / inf == mpi(0, 0)
+ assert mpi(0) / inf == 0
+ assert mpi(0) / 0 == mpi(-inf, inf)
+ assert mpi(inf) / 0 == mpi(-inf, inf)
+ assert mpi(0) * inf == mpi(-inf, inf)
+ assert 1 / mpi(2, inf) == mpi(0, 0.5)
+ assert str((mpi(50, 50) * mpi(-10, -10)) / 3) == \
+ '[-166.66666666666668561, -166.66666666666665719]'
+ assert mpi(0, 4) ** 3 == mpi(0, 64)
+ assert mpi(2,4).mid == 3
+ iv.dps = 30
+ a = mpi(iv.pi)
+ iv.dps = 15
+ b = +a
+ assert b.a < a.a
+ assert b.b > a.b
+ a = mpi(iv.pi)
+ assert a == +a
+ assert abs(mpi(-1,2)) == mpi(0,2)
+ assert abs(mpi(0.5,2)) == mpi(0.5,2)
+ assert abs(mpi(-3,2)) == mpi(0,3)
+ assert abs(mpi(-3,-0.5)) == mpi(0.5,3)
+ assert mpi(0) * mpi(2,3) == mpi(0)
+ assert mpi(2,3) * mpi(0) == mpi(0)
+ assert mpi(1,3).delta == 2
+ assert mpi(1,2) - mpi(3,4) == mpi(-3,-1)
+ assert mpi(-inf,0) - mpi(0,inf) == mpi(-inf,0)
+ assert mpi(-inf,0) - mpi(-inf,inf) == mpi(-inf,inf)
+ assert mpi(0,inf) - mpi(-inf,1) == mpi(-1,inf)
+
+def test_interval_mul():
+ assert mpi(-1, 0) * inf == mpi(-inf, 0)
+ assert mpi(-1, 0) * -inf == mpi(0, inf)
+ assert mpi(0, 1) * inf == mpi(0, inf)
+ assert mpi(0, 1) * mpi(0, inf) == mpi(0, inf)
+ assert mpi(-1, 1) * inf == mpi(-inf, inf)
+ assert mpi(-1, 1) * mpi(0, inf) == mpi(-inf, inf)
+ assert mpi(-1, 1) * mpi(-inf, inf) == mpi(-inf, inf)
+ assert mpi(-inf, 0) * mpi(0, 1) == mpi(-inf, 0)
+ assert mpi(-inf, 0) * mpi(0, 0) * mpi(-inf, 0)
+ assert mpi(-inf, 0) * mpi(-inf, inf) == mpi(-inf, inf)
+ assert mpi(-5,0)*mpi(-32,28) == mpi(-140,160)
+ assert mpi(2,3) * mpi(-1,2) == mpi(-3,6)
+ # Should be undefined?
+ assert mpi(inf, inf) * 0 == mpi(-inf, inf)
+ assert mpi(-inf, -inf) * 0 == mpi(-inf, inf)
+ assert mpi(0) * mpi(-inf,2) == mpi(-inf,inf)
+ assert mpi(0) * mpi(-2,inf) == mpi(-inf,inf)
+ assert mpi(-2,inf) * mpi(0) == mpi(-inf,inf)
+ assert mpi(-inf,2) * mpi(0) == mpi(-inf,inf)
+
+def test_interval_pow():
+ assert mpi(3)**2 == mpi(9, 9)
+ assert mpi(-3)**2 == mpi(9, 9)
+ assert mpi(-3, 1)**2 == mpi(0, 9)
+ assert mpi(-3, -1)**2 == mpi(1, 9)
+ assert mpi(-3, -1)**3 == mpi(-27, -1)
+ assert mpi(-3, 1)**3 == mpi(-27, 1)
+ assert mpi(-2, 3)**2 == mpi(0, 9)
+ assert mpi(-3, 2)**2 == mpi(0, 9)
+ assert mpi(4) ** -1 == mpi(0.25, 0.25)
+ assert mpi(-4) ** -1 == mpi(-0.25, -0.25)
+ assert mpi(4) ** -2 == mpi(0.0625, 0.0625)
+ assert mpi(-4) ** -2 == mpi(0.0625, 0.0625)
+ assert mpi(0, 1) ** inf == mpi(0, 1)
+ assert mpi(0, 1) ** -inf == mpi(1, inf)
+ assert mpi(0, inf) ** inf == mpi(0, inf)
+ assert mpi(0, inf) ** -inf == mpi(0, inf)
+ assert mpi(1, inf) ** inf == mpi(1, inf)
+ assert mpi(1, inf) ** -inf == mpi(0, 1)
+ assert mpi(2, 3) ** 1 == mpi(2, 3)
+ assert mpi(2, 3) ** 0 == 1
+ assert mpi(1,3) ** mpi(2) == mpi(1,9)
+
+def test_interval_sqrt():
+ assert mpi(4) ** 0.5 == mpi(2)
+
+def test_interval_div():
+ assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5)
+ assert mpi(0, 1) / mpi(0, 1) == mpi(0, inf)
+ assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf)
+ assert mpi(inf, inf) / mpi(2, inf) == mpi(0, inf)
+ assert mpi(inf, inf) / mpi(2, 2) == mpi(inf, inf)
+ assert mpi(0, inf) / mpi(2, inf) == mpi(0, inf)
+ assert mpi(0, inf) / mpi(2, 2) == mpi(0, inf)
+ assert mpi(2, inf) / mpi(2, 2) == mpi(1, inf)
+ assert mpi(2, inf) / mpi(2, inf) == mpi(0, inf)
+ assert mpi(-4, 8) / mpi(1, inf) == mpi(-4, 8)
+ assert mpi(-4, 8) / mpi(0.5, inf) == mpi(-8, 16)
+ assert mpi(-inf, 8) / mpi(0.5, inf) == mpi(-inf, 16)
+ assert mpi(-inf, inf) / mpi(0.5, inf) == mpi(-inf, inf)
+ assert mpi(8, inf) / mpi(0.5, inf) == mpi(0, inf)
+ assert mpi(-8, inf) / mpi(0.5, inf) == mpi(-16, inf)
+ assert mpi(-4, 8) / mpi(inf, inf) == mpi(0, 0)
+ assert mpi(0, 8) / mpi(inf, inf) == mpi(0, 0)
+ assert mpi(0, 0) / mpi(inf, inf) == mpi(0, 0)
+ assert mpi(-inf, 0) / mpi(inf, inf) == mpi(-inf, 0)
+ assert mpi(-inf, 8) / mpi(inf, inf) == mpi(-inf, 0)
+ assert mpi(-inf, inf) / mpi(inf, inf) == mpi(-inf, inf)
+ assert mpi(-8, inf) / mpi(inf, inf) == mpi(0, inf)
+ assert mpi(0, inf) / mpi(inf, inf) == mpi(0, inf)
+ assert mpi(8, inf) / mpi(inf, inf) == mpi(0, inf)
+ assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf)
+ assert mpi(-1, 2) / mpi(0, 1) == mpi(-inf, +inf)
+ assert mpi(0, 1) / mpi(0, 1) == mpi(0.0, +inf)
+ assert mpi(-1, 0) / mpi(0, 1) == mpi(-inf, 0.0)
+ assert mpi(-0.5, -0.25) / mpi(0, 1) == mpi(-inf, -0.25)
+ assert mpi(0.5, 1) / mpi(0, 1) == mpi(0.5, +inf)
+ assert mpi(0.5, 4) / mpi(0, 1) == mpi(0.5, +inf)
+ assert mpi(-1, -0.5) / mpi(0, 1) == mpi(-inf, -0.5)
+ assert mpi(-4, -0.5) / mpi(0, 1) == mpi(-inf, -0.5)
+ assert mpi(-1, 2) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(0, 1) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(-1, 0) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(-0.5, -0.25) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(0.5, 1) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(0.5, 4) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(-1, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(-4, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf)
+ assert mpi(-1, 2) / mpi(-1, 0) == mpi(-inf, +inf)
+ assert mpi(0, 1) / mpi(-1, 0) == mpi(-inf, 0.0)
+ assert mpi(-1, 0) / mpi(-1, 0) == mpi(0.0, +inf)
+ assert mpi(-0.5, -0.25) / mpi(-1, 0) == mpi(0.25, +inf)
+ assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5)
+ assert mpi(0.5, 4) / mpi(-1, 0) == mpi(-inf, -0.5)
+ assert mpi(-1, -0.5) / mpi(-1, 0) == mpi(0.5, +inf)
+ assert mpi(-4, -0.5) / mpi(-1, 0) == mpi(0.5, +inf)
+ assert mpi(-1, 2) / mpi(0.5, 1) == mpi(-2.0, 4.0)
+ assert mpi(0, 1) / mpi(0.5, 1) == mpi(0.0, 2.0)
+ assert mpi(-1, 0) / mpi(0.5, 1) == mpi(-2.0, 0.0)
+ assert mpi(-0.5, -0.25) / mpi(0.5, 1) == mpi(-1.0, -0.25)
+ assert mpi(0.5, 1) / mpi(0.5, 1) == mpi(0.5, 2.0)
+ assert mpi(0.5, 4) / mpi(0.5, 1) == mpi(0.5, 8.0)
+ assert mpi(-1, -0.5) / mpi(0.5, 1) == mpi(-2.0, -0.5)
+ assert mpi(-4, -0.5) / mpi(0.5, 1) == mpi(-8.0, -0.5)
+ assert mpi(-1, 2) / mpi(-2, -0.5) == mpi(-4.0, 2.0)
+ assert mpi(0, 1) / mpi(-2, -0.5) == mpi(-2.0, 0.0)
+ assert mpi(-1, 0) / mpi(-2, -0.5) == mpi(0.0, 2.0)
+ assert mpi(-0.5, -0.25) / mpi(-2, -0.5) == mpi(0.125, 1.0)
+ assert mpi(0.5, 1) / mpi(-2, -0.5) == mpi(-2.0, -0.25)
+ assert mpi(0.5, 4) / mpi(-2, -0.5) == mpi(-8.0, -0.25)
+ assert mpi(-1, -0.5) / mpi(-2, -0.5) == mpi(0.25, 2.0)
+ assert mpi(-4, -0.5) / mpi(-2, -0.5) == mpi(0.25, 8.0)
+ # Should be undefined?
+ assert mpi(0, 0) / mpi(0, 0) == mpi(-inf, inf)
+ assert mpi(0, 0) / mpi(0, 1) == mpi(-inf, inf)
+
+def test_interval_cos_sin():
+ iv.dps = 15
+ cos = iv.cos
+ sin = iv.sin
+ tan = iv.tan
+ pi = iv.pi
+ # Around 0
+ assert cos(mpi(0)) == 1
+ assert sin(mpi(0)) == 0
+ assert cos(mpi(0,1)) == mpi(0.54030230586813965399, 1.0)
+ assert sin(mpi(0,1)) == mpi(0, 0.8414709848078966159)
+ assert cos(mpi(1,2)) == mpi(-0.4161468365471424069, 0.54030230586813976501)
+ assert sin(mpi(1,2)) == mpi(0.84147098480789650488, 1.0)
+ assert sin(mpi(1,2.5)) == mpi(0.59847214410395643824, 1.0)
+ assert cos(mpi(-1, 1)) == mpi(0.54030230586813965399, 1.0)
+ assert cos(mpi(-1, 0.5)) == mpi(0.54030230586813965399, 1.0)
+ assert cos(mpi(-1, 1.5)) == mpi(0.070737201667702906405, 1.0)
+ assert sin(mpi(-1,1)) == mpi(-0.8414709848078966159, 0.8414709848078966159)
+ assert sin(mpi(-1,0.5)) == mpi(-0.8414709848078966159, 0.47942553860420300538)
+ assert mpi(-0.8414709848078966159, 1.00000000000000002e-100) in sin(mpi(-1,1e-100))
+ assert mpi(-2.00000000000000004e-100, 1.00000000000000002e-100) in sin(mpi(-2e-100,1e-100))
+ # Same interval
+ assert cos(mpi(2, 2.5))
+ assert cos(mpi(3.5, 4)) == mpi(-0.93645668729079634129, -0.65364362086361182946)
+ assert cos(mpi(5, 5.5)) == mpi(0.28366218546322624627, 0.70866977429126010168)
+ assert mpi(0.59847214410395654927, 0.90929742682568170942) in sin(mpi(2, 2.5))
+ assert sin(mpi(3.5, 4)) == mpi(-0.75680249530792831347, -0.35078322768961983646)
+ assert sin(mpi(5, 5.5)) == mpi(-0.95892427466313856499, -0.70554032557039181306)
+ # Higher roots
+ iv.dps = 55
+ w = 4*10**50 + mpi(0.5)
+ for p in [15, 40, 80]:
+ iv.dps = p
+ assert 0 in sin(4*mpi(pi))
+ assert 0 in sin(4*10**50*mpi(pi))
+ assert 0 in cos((4+0.5)*mpi(pi))
+ assert 0 in cos(w*mpi(pi))
+ assert 1 in cos(4*mpi(pi))
+ assert 1 in cos(4*10**50*mpi(pi))
+ iv.dps = 15
+ assert cos(mpi(2,inf)) == mpi(-1,1)
+ assert sin(mpi(2,inf)) == mpi(-1,1)
+ assert cos(mpi(-inf,2)) == mpi(-1,1)
+ assert sin(mpi(-inf,2)) == mpi(-1,1)
+ u = tan(mpi(0.5,1))
+ assert mpf(u.a).ae(mp.tan(0.5))
+ assert mpf(u.b).ae(mp.tan(1))
+ v = iv.cot(mpi(0.5,1))
+ assert mpf(v.a).ae(mp.cot(1))
+ assert mpf(v.b).ae(mp.cot(0.5))
+ # Sanity check of evaluation at n*pi and (n+1/2)*pi
+ for n in range(-5,7,2):
+ x = iv.cos(n*iv.pi)
+ assert -1 in x
+ assert x >= -1
+ assert x != -1
+ x = iv.sin((n+0.5)*iv.pi)
+ assert -1 in x
+ assert x >= -1
+ assert x != -1
+ for n in range(-6,8,2):
+ x = iv.cos(n*iv.pi)
+ assert 1 in x
+ assert x <= 1
+ if n:
+ assert x != 1
+ x = iv.sin((n+0.5)*iv.pi)
+ assert 1 in x
+ assert x <= 1
+ assert x != 1
+ for n in range(-6,7):
+ x = iv.cos((n+0.5)*iv.pi)
+ assert x.a < 0 < x.b
+ x = iv.sin(n*iv.pi)
+ if n:
+ assert x.a < 0 < x.b
+
+def test_interval_complex():
+ # TODO: many more tests
+ iv.dps = 15
+ mp.dps = 15
+ assert iv.mpc(2,3) == 2+3j
+ assert iv.mpc(2,3) != 2+4j
+ assert iv.mpc(2,3) != 1+3j
+ assert 1+3j in iv.mpc([1,2],[3,4])
+ assert 2+5j not in iv.mpc([1,2],[3,4])
+ assert iv.mpc(1,2) + 1j == 1+3j
+ assert iv.mpc([1,2],[2,3]) + 2+3j == iv.mpc([3,4],[5,6])
+ assert iv.mpc([2,4],[4,8]) / 2 == iv.mpc([1,2],[2,4])
+ assert iv.mpc([1,2],[2,4]) * 2j == iv.mpc([-8,-4],[2,4])
+ assert iv.mpc([2,4],[4,8]) / 2j == iv.mpc([2,4],[-2,-1])
+ assert iv.exp(2+3j).ae(mp.exp(2+3j))
+ assert iv.log(2+3j).ae(mp.log(2+3j))
+ assert (iv.mpc(2,3) ** iv.mpc(0.5,2)).ae(mp.mpc(2,3) ** mp.mpc(0.5,2))
+ assert 1j in (iv.mpf(-1) ** 0.5)
+ assert 1j in (iv.mpc(-1) ** 0.5)
+ assert abs(iv.mpc(0)) == 0
+ assert abs(iv.mpc(inf)) == inf
+ assert abs(iv.mpc(3,4)) == 5
+ assert abs(iv.mpc(4)) == 4
+ assert abs(iv.mpc(0,4)) == 4
+ assert abs(iv.mpc(0,[2,3])) == iv.mpf([2,3])
+ assert abs(iv.mpc(0,[-3,2])) == iv.mpf([0,3])
+ assert abs(iv.mpc([3,5],[4,12])) == iv.mpf([5,13])
+ assert abs(iv.mpc([3,5],[-4,12])) == iv.mpf([3,13])
+ assert iv.mpc(2,3) ** 0 == 1
+ assert iv.mpc(2,3) ** 1 == (2+3j)
+ assert iv.mpc(2,3) ** 2 == (2+3j)**2
+ assert iv.mpc(2,3) ** 3 == (2+3j)**3
+ assert iv.mpc(2,3) ** 4 == (2+3j)**4
+ assert iv.mpc(2,3) ** 5 == (2+3j)**5
+ assert iv.mpc(2,2) ** (-1) == (2+2j) ** (-1)
+ assert iv.mpc(2,2) ** (-2) == (2+2j) ** (-2)
+ assert iv.cos(2).ae(mp.cos(2))
+ assert iv.sin(2).ae(mp.sin(2))
+ assert iv.cos(2+3j).ae(mp.cos(2+3j))
+ assert iv.sin(2+3j).ae(mp.sin(2+3j))
+
+def test_interval_complex_arg():
+ mp.dps = 15
+ iv.dps = 15
+ assert iv.arg(3) == 0
+ assert iv.arg(0) == 0
+ assert iv.arg([0,3]) == 0
+ assert iv.arg(-3).ae(pi)
+ assert iv.arg(2+3j).ae(iv.arg(2+3j))
+ z = iv.mpc([-2,-1],[3,4])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(-1+4j))
+ assert t.b.ae(mp.arg(-2+3j))
+ z = iv.mpc([-2,1],[3,4])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(1+3j))
+ assert t.b.ae(mp.arg(-2+3j))
+ z = iv.mpc([1,2],[3,4])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(2+3j))
+ assert t.b.ae(mp.arg(1+4j))
+ z = iv.mpc([1,2],[-2,3])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(1-2j))
+ assert t.b.ae(mp.arg(1+3j))
+ z = iv.mpc([1,2],[-4,-3])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(1-4j))
+ assert t.b.ae(mp.arg(2-3j))
+ z = iv.mpc([-1,2],[-4,-3])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(-1-3j))
+ assert t.b.ae(mp.arg(2-3j))
+ z = iv.mpc([-2,-1],[-4,-3])
+ t = iv.arg(z)
+ assert t.a.ae(mp.arg(-2-3j))
+ assert t.b.ae(mp.arg(-1-4j))
+ z = iv.mpc([-2,-1],[-3,3])
+ t = iv.arg(z)
+ assert t.a.ae(-mp.pi)
+ assert t.b.ae(mp.pi)
+ z = iv.mpc([-2,2],[-3,3])
+ t = iv.arg(z)
+ assert t.a.ae(-mp.pi)
+ assert t.b.ae(mp.pi)
+
+def test_interval_ae():
+ iv.dps = 15
+ x = iv.mpf([1,2])
+ assert x.ae(1) is None
+ assert x.ae(1.5) is None
+ assert x.ae(2) is None
+ assert x.ae(2.01) is False
+ assert x.ae(0.99) is False
+ x = iv.mpf(3.5)
+ assert x.ae(3.5) is True
+ assert x.ae(3.5+1e-15) is True
+ assert x.ae(3.5-1e-15) is True
+ assert x.ae(3.501) is False
+ assert x.ae(3.499) is False
+ assert x.ae(iv.mpf([3.5,3.501])) is None
+ assert x.ae(iv.mpf([3.5,4.5+1e-15])) is None
+
+def test_interval_nstr():
+ iv.dps = n = 30
+ x = mpi(1, 2)
+ # FIXME: error_dps should not be necessary
+ assert iv.nstr(x, n, mode='plusminus', error_dps=6) == '1.5 +- 0.5'
+ assert iv.nstr(x, n, mode='plusminus', use_spaces=False, error_dps=6) == '1.5+-0.5'
+ assert iv.nstr(x, n, mode='percent') == '1.5 (33.33%)'
+ assert iv.nstr(x, n, mode='brackets', use_spaces=False) == '[1.0,2.0]'
+ assert iv.nstr(x, n, mode='brackets' , brackets=('<', '>')) == '<1.0, 2.0>'
+ x = mpi('5.2582327113062393041', '5.2582327113062749951')
+ assert iv.nstr(x, n, mode='diff') == '5.2582327113062[393041, 749951]'
+ assert iv.nstr(iv.cos(mpi(1)), n, mode='diff', use_spaces=False) == '0.54030230586813971740093660744[2955,3053]'
+ assert iv.nstr(mpi('1e123', '1e129'), n, mode='diff') == '[1.0e+123, 1.0e+129]'
+ exp = iv.exp
+ assert iv.nstr(iv.exp(mpi('5000.1')), n, mode='diff') == '3.2797365856787867069110487[0926, 1191]e+2171'
+ iv.dps = 15
+
+def test_mpi_from_str():
+ iv.dps = 15
+ assert iv.convert('1.5 +- 0.5') == mpi(mpf('1.0'), mpf('2.0'))
+ assert mpi(1, 2) in iv.convert('1.5 (33.33333333333333333333333333333%)')
+ assert iv.convert('[1, 2]') == mpi(1, 2)
+ assert iv.convert('1[2, 3]') == mpi(12, 13)
+ assert iv.convert('1.[23,46]e-8') == mpi('1.23e-8', '1.46e-8')
+ assert iv.convert('12[3.4,5.9]e4') == mpi('123.4e+4', '125.9e4')
+
+def test_interval_gamma():
+ mp.dps = 15
+ iv.dps = 15
+ # TODO: need many more tests
+ assert iv.rgamma(0) == 0
+ assert iv.fac(0) == 1
+ assert iv.fac(1) == 1
+ assert iv.fac(2) == 2
+ assert iv.fac(3) == 6
+ assert iv.gamma(0) == [-inf,inf]
+ assert iv.gamma(1) == 1
+ assert iv.gamma(2) == 1
+ assert iv.gamma(3) == 2
+ assert -3.5449077018110320546 in iv.gamma(-0.5)
+ assert iv.loggamma(1) == 0
+ assert iv.loggamma(2) == 0
+ assert 0.69314718055994530942 in iv.loggamma(3)
+ # Test tight log-gamma endpoints based on monotonicity
+ xs = [iv.mpc([2,3],[1,4]),
+ iv.mpc([2,3],[-4,-1]),
+ iv.mpc([2,3],[-1,4]),
+ iv.mpc([2,3],[-4,1]),
+ iv.mpc([2,3],[-4,4]),
+ iv.mpc([-3,-2],[2,4]),
+ iv.mpc([-3,-2],[-4,-2])]
+ for x in xs:
+ ys = [mp.loggamma(mp.mpc(x.a,x.c)),
+ mp.loggamma(mp.mpc(x.b,x.c)),
+ mp.loggamma(mp.mpc(x.a,x.d)),
+ mp.loggamma(mp.mpc(x.b,x.d))]
+ if 0 in x.imag:
+ ys += [mp.loggamma(x.a), mp.loggamma(x.b)]
+ min_real = min([y.real for y in ys])
+ max_real = max([y.real for y in ys])
+ min_imag = min([y.imag for y in ys])
+ max_imag = max([y.imag for y in ys])
+ z = iv.loggamma(x)
+ assert z.a.ae(min_real)
+ assert z.b.ae(max_real)
+ assert z.c.ae(min_imag)
+ assert z.d.ae(max_imag)
+
+def test_interval_conversions():
+ mp.dps = 15
+ iv.dps = 15
+ for a, b in ((-0.0, 0), (0.0, 0.5), (1.0, 1), \
+ ('-inf', 20.5), ('-inf', float(sqrt(2)))):
+ r = mpi(a, b)
+ assert int(r.b) == int(b)
+ assert float(r.a) == float(a)
+ assert float(r.b) == float(b)
+ assert complex(r.a) == complex(a)
+ assert complex(r.b) == complex(b)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_levin.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_levin.py
new file mode 100644
index 0000000000000000000000000000000000000000..b14855df4de1a45da27080dcd239267842a4ac7a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_levin.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+from mpmath import mp
+from mpmath import libmp
+
+xrange = libmp.backend.xrange
+
+# Attention:
+# These tests run with 15-20 decimal digits precision. For higher precision the
+# working precision must be raised.
+
+def test_levin_0():
+ mp.dps = 17
+ eps = mp.mpf(mp.eps)
+ with mp.extraprec(2 * mp.prec):
+ L = mp.levin(method = "levin", variant = "u")
+ S, s, n = [], 0, 1
+ while 1:
+ s += mp.one / (n * n)
+ n += 1
+ S.append(s)
+ v, e = L.update_psum(S)
+ if e < eps:
+ break
+ if n > 1000: raise RuntimeError("iteration limit exceeded")
+ eps = mp.exp(0.9 * mp.log(eps))
+ err = abs(v - mp.pi ** 2 / 6)
+ assert err < eps
+ w = mp.nsum(lambda n: 1/(n * n), [1, mp.inf], method = "levin", levin_variant = "u")
+ err = abs(v - w)
+ assert err < eps
+
+def test_levin_1():
+ mp.dps = 17
+ eps = mp.mpf(mp.eps)
+ with mp.extraprec(2 * mp.prec):
+ L = mp.levin(method = "levin", variant = "v")
+ A, n = [], 1
+ while 1:
+ s = mp.mpf(n) ** (2 + 3j)
+ n += 1
+ A.append(s)
+ v, e = L.update(A)
+ if e < eps:
+ break
+ if n > 1000: raise RuntimeError("iteration limit exceeded")
+ eps = mp.exp(0.9 * mp.log(eps))
+ err = abs(v - mp.zeta(-2-3j))
+ assert err < eps
+ w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
+ err = abs(v - w)
+ assert err < eps
+
+def test_levin_2():
+ # [2] A. Sidi - "Practical Extrapolation Methods" p.373
+ mp.dps = 17
+ z=mp.mpf(10)
+ eps = mp.mpf(mp.eps)
+ with mp.extraprec(2 * mp.prec):
+ L = mp.levin(method = "sidi", variant = "t")
+ n = 0
+ while 1:
+ s = (-1)**n * mp.fac(n) * z ** (-n)
+ v, e = L.step(s)
+ n += 1
+ if e < eps:
+ break
+ if n > 1000: raise RuntimeError("iteration limit exceeded")
+ eps = mp.exp(0.9 * mp.log(eps))
+ exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
+ # there is also a symbolic expression for the integral:
+ # exact = z * mp.exp(z) * mp.expint(1,z)
+ err = abs(v - exact)
+ assert err < eps
+ w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
+ assert err < eps
+
+def test_levin_3():
+ mp.dps = 17
+ z=mp.mpf(2)
+ eps = mp.mpf(mp.eps)
+ with mp.extraprec(7*mp.prec): # we need copious amount of precision to sum this highly divergent series
+ L = mp.levin(method = "levin", variant = "t")
+ n, s = 0, 0
+ while 1:
+ s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
+ n += 1
+ v, e = L.step_psum(s)
+ if e < eps:
+ break
+ if n > 1000: raise RuntimeError("iteration limit exceeded")
+ eps = mp.exp(0.8 * mp.log(eps))
+ exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
+ # there is also a symbolic expression for the integral:
+ # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi))
+ err = abs(v - exact)
+ assert err < eps
+ w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
+ err = abs(v - w)
+ assert err < eps
+
+def test_levin_nsum():
+ mp.dps = 17
+
+ with mp.extraprec(mp.prec):
+ z = mp.mpf(10) ** (-10)
+ a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
+ assert abs(a - mp.euler) < 1e-10
+
+ eps = mp.exp(0.8 * mp.log(mp.eps))
+
+ a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
+ assert abs(a - mp.log(2)) < eps
+
+ z = 2 + 1j
+ f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
+ v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
+ exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
+ assert abs(exact - v) < eps
+
+def test_cohen_alt_0():
+ mp.dps = 17
+ AC = mp.cohen_alt()
+ S, s, n = [], 0, 1
+ while 1:
+ s += -((-1) ** n) * mp.one / (n * n)
+ n += 1
+ S.append(s)
+ v, e = AC.update_psum(S)
+ if e < mp.eps:
+ break
+ if n > 1000: raise RuntimeError("iteration limit exceeded")
+ eps = mp.exp(0.9 * mp.log(mp.eps))
+ err = abs(v - mp.pi ** 2 / 12)
+ assert err < eps
+
+def test_cohen_alt_1():
+ mp.dps = 17
+ A = []
+ AC = mp.cohen_alt()
+ n = 1
+ while 1:
+ A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
+ A.append(-mp.loggamma(1 + mp.one / (2 * n)))
+ n += 1
+ v, e = AC.update(A)
+ if e < mp.eps:
+ break
+ if n > 1000: raise RuntimeError("iteration limit exceeded")
+ v = mp.exp(v)
+ err = abs(v - 1.06215090557106)
+ assert err < 1e-12
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_linalg.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_linalg.py
new file mode 100644
index 0000000000000000000000000000000000000000..14256a79f8953d3e4ef8b296258560d48204f547
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_linalg.py
@@ -0,0 +1,332 @@
+# TODO: don't use round
+
+from __future__ import division
+
+import pytest
+from mpmath import *
+xrange = libmp.backend.xrange
+
+# XXX: these shouldn't be visible(?)
+LU_decomp = mp.LU_decomp
+L_solve = mp.L_solve
+U_solve = mp.U_solve
+householder = mp.householder
+improve_solution = mp.improve_solution
+
+A1 = matrix([[3, 1, 6],
+ [2, 1, 3],
+ [1, 1, 1]])
+b1 = [2, 7, 4]
+
+A2 = matrix([[ 2, -1, -1, 2],
+ [ 6, -2, 3, -1],
+ [-4, 2, 3, -2],
+ [ 2, 0, 4, -3]])
+b2 = [3, -3, -2, -1]
+
+A3 = matrix([[ 1, 0, -1, -1, 0],
+ [ 0, 1, 1, 0, -1],
+ [ 4, -5, 2, 0, 0],
+ [ 0, 0, -2, 9,-12],
+ [ 0, 5, 0, 0, 12]])
+b3 = [0, 0, 0, 0, 50]
+
+A4 = matrix([[10.235, -4.56, 0., -0.035, 5.67],
+ [-2.463, 1.27, 3.97, -8.63, 1.08],
+ [-6.58, 0.86, -0.257, 9.32, -43.6 ],
+ [ 9.83, 7.39, -17.25, 0.036, 24.86],
+ [-9.31, 34.9, 78.56, 1.07, 65.8 ]])
+b4 = [8.95, 20.54, 7.42, 5.60, 58.43]
+
+A5 = matrix([[ 1, 2, -4],
+ [-2, -3, 5],
+ [ 3, 5, -8]])
+
+A6 = matrix([[ 1.377360, 2.481400, 5.359190],
+ [ 2.679280, -1.229560, 25.560210],
+ [-1.225280+1.e6, 9.910180, -35.049900-1.e6]])
+b6 = [23.500000, -15.760000, 2.340000]
+
+A7 = matrix([[1, -0.5],
+ [2, 1],
+ [-2, 6]])
+b7 = [3, 2, -4]
+
+A8 = matrix([[1, 2, 3],
+ [-1, 0, 1],
+ [-1, -2, -1],
+ [1, 0, -1]])
+b8 = [1, 2, 3, 4]
+
+A9 = matrix([[ 4, 2, -2],
+ [ 2, 5, -4],
+ [-2, -4, 5.5]])
+b9 = [10, 16, -15.5]
+
+A10 = matrix([[1.0 + 1.0j, 2.0, 2.0],
+ [4.0, 5.0, 6.0],
+ [7.0, 8.0, 9.0]])
+b10 = [1.0, 1.0 + 1.0j, 1.0]
+
+
+def test_LU_decomp():
+ A = A3.copy()
+ b = b3
+ A, p = LU_decomp(A)
+ y = L_solve(A, b, p)
+ x = U_solve(A, y)
+ assert p == [2, 1, 2, 3]
+ assert [round(i, 14) for i in x] == [3.78953107960742, 2.9989094874591098,
+ -0.081788440567070006, 3.8713195201744801, 2.9171210468920399]
+ A = A4.copy()
+ b = b4
+ A, p = LU_decomp(A)
+ y = L_solve(A, b, p)
+ x = U_solve(A, y)
+ assert p == [0, 3, 4, 3]
+ assert [round(i, 14) for i in x] == [2.6383625899619201, 2.6643834462368399,
+ 0.79208015947958998, -2.5088376454101899, -1.0567657691375001]
+ A = randmatrix(3)
+ bak = A.copy()
+ LU_decomp(A, overwrite=1)
+ assert A != bak
+
+def test_inverse():
+ for A in [A1, A2, A5]:
+ inv = inverse(A)
+ assert mnorm(A*inv - eye(A.rows), 1) < 1.e-14
+
+def test_householder():
+ mp.dps = 15
+ A, b = A8, b8
+ H, p, x, r = householder(extend(A, b))
+ assert H == matrix(
+ [[mpf('3.0'), mpf('-2.0'), mpf('-1.0'), 0],
+ [-1.0,mpf('3.333333333333333'),mpf('-2.9999999999999991'),mpf('2.0')],
+ [-1.0, mpf('-0.66666666666666674'),mpf('2.8142135623730948'),
+ mpf('-2.8284271247461898')],
+ [1.0, mpf('-1.3333333333333333'),mpf('-0.20000000000000018'),
+ mpf('4.2426406871192857')]])
+ assert p == [-2, -2, mpf('-1.4142135623730949')]
+ assert round(norm(r, 2), 10) == 4.2426406870999998
+
+ y = [102.102, 58.344, 36.463, 24.310, 17.017, 12.376, 9.282, 7.140, 5.610,
+ 4.488, 3.6465, 3.003]
+
+ def coeff(n):
+ # similiar to Hilbert matrix
+ A = []
+ for i in range(1, 13):
+ A.append([1. / (i + j - 1) for j in range(1, n + 1)])
+ return matrix(A)
+
+ residuals = []
+ refres = []
+ for n in range(2, 7):
+ A = coeff(n)
+ H, p, x, r = householder(extend(A, y))
+ x = matrix(x)
+ y = matrix(y)
+ residuals.append(norm(r, 2))
+ refres.append(norm(residual(A, x, y), 2))
+ assert [round(res, 10) for res in residuals] == [15.1733888877,
+ 0.82378073210000002, 0.302645887, 0.0260109244,
+ 0.00058653999999999998]
+ assert norm(matrix(residuals) - matrix(refres), inf) < 1.e-13
+
+ def hilbert_cmplx(n):
+ # Complexified Hilbert matrix
+ A = hilbert(2*n,n)
+ v = randmatrix(2*n, 2, min=-1, max=1)
+ v = v.apply(lambda x: exp(1J*pi()*x))
+ A = diag(v[:,0])*A*diag(v[:n,1])
+ return A
+
+ residuals_cmplx = []
+ refres_cmplx = []
+ for n in range(2, 10):
+ A = hilbert_cmplx(n)
+ H, p, x, r = householder(A.copy())
+ residuals_cmplx.append(norm(r, 2))
+ refres_cmplx.append(norm(residual(A[:,:n-1], x, A[:,n-1]), 2))
+ assert norm(matrix(residuals_cmplx) - matrix(refres_cmplx), inf) < 1.e-13
+
+def test_factorization():
+ A = randmatrix(5)
+ P, L, U = lu(A)
+ assert mnorm(P*A - L*U, 1) < 1.e-15
+
+def test_solve():
+ assert norm(residual(A6, lu_solve(A6, b6), b6), inf) < 1.e-10
+ assert norm(residual(A7, lu_solve(A7, b7), b7), inf) < 1.5
+ assert norm(residual(A8, lu_solve(A8, b8), b8), inf) <= 3 + 1.e-10
+ assert norm(residual(A6, qr_solve(A6, b6)[0], b6), inf) < 1.e-10
+ assert norm(residual(A7, qr_solve(A7, b7)[0], b7), inf) < 1.5
+ assert norm(residual(A8, qr_solve(A8, b8)[0], b8), 2) <= 4.3
+ assert norm(residual(A10, lu_solve(A10, b10), b10), 2) < 1.e-10
+ assert norm(residual(A10, qr_solve(A10, b10)[0], b10), 2) < 1.e-10
+
+def test_solve_overdet_complex():
+ A = matrix([[1, 2j], [3, 4j], [5, 6]])
+ b = matrix([1 + j, 2, -j])
+ assert norm(residual(A, lu_solve(A, b), b)) < 1.0208
+
+def test_singular():
+ mp.dps = 15
+ A = [[5.6, 1.2], [7./15, .1]]
+ B = repr(zeros(2))
+ b = [1, 2]
+ for i in ['lu_solve(%s, %s)' % (A, b), 'lu_solve(%s, %s)' % (B, b),
+ 'qr_solve(%s, %s)' % (A, b), 'qr_solve(%s, %s)' % (B, b)]:
+ pytest.raises((ZeroDivisionError, ValueError), lambda: eval(i))
+
+def test_cholesky():
+ assert fp.cholesky(fp.matrix(A9)) == fp.matrix([[2, 0, 0], [1, 2, 0], [-1, -3/2, 3/2]])
+ x = fp.cholesky_solve(A9, b9)
+ assert fp.norm(fp.residual(A9, x, b9), fp.inf) == 0
+
+def test_det():
+ assert det(A1) == 1
+ assert round(det(A2), 14) == 8
+ assert round(det(A3)) == 1834
+ assert round(det(A4)) == 4443376
+ assert det(A5) == 1
+ assert round(det(A6)) == 78356463
+ assert det(zeros(3)) == 0
+
+def test_cond():
+ mp.dps = 15
+ A = matrix([[1.2969, 0.8648], [0.2161, 0.1441]])
+ assert cond(A, lambda x: mnorm(x,1)) == mpf('327065209.73817754')
+ assert cond(A, lambda x: mnorm(x,inf)) == mpf('327065209.73817754')
+ assert cond(A, lambda x: mnorm(x,'F')) == mpf('249729266.80008656')
+
+@extradps(50)
+def test_precision():
+ A = randmatrix(10, 10)
+ assert mnorm(inverse(inverse(A)) - A, 1) < 1.e-45
+
+def test_interval_matrix():
+ mp.dps = 15
+ iv.dps = 15
+ a = iv.matrix([['0.1','0.3','1.0'],['7.1','5.5','4.8'],['3.2','4.4','5.6']])
+ b = iv.matrix(['4','0.6','0.5'])
+ c = iv.lu_solve(a, b)
+ assert c[0].delta < 1e-13
+ assert c[1].delta < 1e-13
+ assert c[2].delta < 1e-13
+ assert 5.25823271130625686059275 in c[0]
+ assert -13.155049396267837541163 in c[1]
+ assert 7.42069154774972557628979 in c[2]
+
+def test_LU_cache():
+ A = randmatrix(3)
+ LU = LU_decomp(A)
+ assert A._LU == LU_decomp(A)
+ A[0,0] = -1000
+ assert A._LU is None
+
+def test_improve_solution():
+ A = randmatrix(5, min=1e-20, max=1e20)
+ b = randmatrix(5, 1, min=-1000, max=1000)
+ x1 = lu_solve(A, b) + randmatrix(5, 1, min=-1e-5, max=1.e-5)
+ x2 = improve_solution(A, x1, b)
+ assert norm(residual(A, x2, b), 2) < norm(residual(A, x1, b), 2)
+
+def test_exp_pade():
+ for i in range(3):
+ dps = 15
+ extra = 15
+ mp.dps = dps + extra
+ dm = 0
+ N = 3
+ dg = range(1,N+1)
+ a = diag(dg)
+ expa = diag([exp(x) for x in dg])
+ # choose a random matrix not close to be singular
+ # to avoid adding too much extra precision in computing
+ # m**-1 * M * m
+ while abs(dm) < 0.01:
+ m = randmatrix(N)
+ dm = det(m)
+ m = m/dm
+ a1 = m**-1 * a * m
+ e2 = m**-1 * expa * m
+ mp.dps = dps
+ e1 = expm(a1, method='pade')
+ mp.dps = dps + extra
+ d = e2 - e1
+ #print d
+ mp.dps = dps
+ assert norm(d, inf).ae(0)
+ mp.dps = 15
+
+def test_qr():
+ mp.dps = 15 # used default value for dps
+ lowlimit = -9 # lower limit of matrix element value
+ uplimit = 9 # uppter limit of matrix element value
+ maxm = 4 # max matrix size
+ flg = False # toggle to create real vs complex matrix
+ zero = mpf('0.0')
+
+ for k in xrange(0,10):
+ exdps = 0
+ mode = 'full'
+ flg = bool(k % 2)
+
+ # generate arbitrary matrix size (2 to maxm)
+ num1 = nint(maxm*rand())
+ num2 = nint(maxm*rand())
+ m = int(max(num1, num2))
+ n = int(min(num1, num2))
+
+ # create matrix
+ A = mp.matrix(m,n)
+
+ # populate matrix values with arbitrary integers
+ if flg:
+ flg = False
+ dtype = 'complex'
+ for j in xrange(0,n):
+ for i in xrange(0,m):
+ val = nint(lowlimit + (uplimit-lowlimit)*rand())
+ val2 = nint(lowlimit + (uplimit-lowlimit)*rand())
+ A[i,j] = mpc(val, val2)
+ else:
+ flg = True
+ dtype = 'real'
+ for j in xrange(0,n):
+ for i in xrange(0,m):
+ val = nint(lowlimit + (uplimit-lowlimit)*rand())
+ A[i,j] = mpf(val)
+
+ # perform A -> QR decomposition
+ Q, R = qr(A, mode, edps = exdps)
+
+ #print('\n\n A = \n', nstr(A, 4))
+ #print('\n Q = \n', nstr(Q, 4))
+ #print('\n R = \n', nstr(R, 4))
+ #print('\n Q*R = \n', nstr(Q*R, 4))
+
+ maxnorm = mpf('1.0E-11')
+ n1 = norm(A - Q * R)
+ #print '\n Norm of A - Q * R = ', n1
+ assert n1 <= maxnorm
+
+ if dtype == 'real':
+ n1 = norm(eye(m) - Q.T * Q)
+ #print ' Norm of I - Q.T * Q = ', n1
+ assert n1 <= maxnorm
+
+ n1 = norm(eye(m) - Q * Q.T)
+ #print ' Norm of I - Q * Q.T = ', n1
+ assert n1 <= maxnorm
+
+ if dtype == 'complex':
+ n1 = norm(eye(m) - Q.T * Q.conjugate())
+ #print ' Norm of I - Q.T * Q.conjugate() = ', n1
+ assert n1 <= maxnorm
+
+ n1 = norm(eye(m) - Q.conjugate() * Q.T)
+ #print ' Norm of I - Q.conjugate() * Q.T = ', n1
+ assert n1 <= maxnorm
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_matrices.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_matrices.py
new file mode 100644
index 0000000000000000000000000000000000000000..1547b90664dba66a98a7f026a04a4ed1aa1ed3b4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_matrices.py
@@ -0,0 +1,253 @@
+import pytest
+import sys
+from mpmath import *
+
+def test_matrix_basic():
+ A1 = matrix(3)
+ for i in range(3):
+ A1[i,i] = 1
+ assert A1 == eye(3)
+ assert A1 == matrix(A1)
+ A2 = matrix(3, 2)
+ assert not A2._matrix__data
+ A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+ assert list(A3) == list(range(1, 10))
+ A3[1,1] = 0
+ assert not (1, 1) in A3._matrix__data
+ A4 = matrix([[1, 2, 3], [4, 5, 6]])
+ A5 = matrix([[6, -1], [3, 2], [0, -3]])
+ assert A4 * A5 == matrix([[12, -6], [39, -12]])
+ assert A1 * A3 == A3 * A1 == A3
+ pytest.raises(ValueError, lambda: A2*A2)
+ l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]]
+ A6 = matrix(l)
+ assert A6.tolist() == l
+ assert A6 == eval(repr(A6))
+ A6 = fp.matrix(A6)
+ assert A6 == eval(repr(A6))
+ assert A6*1j == eval(repr(A6*1j))
+ assert A3 * 10 == 10 * A3 == A6
+ assert A2.rows == 3
+ assert A2.cols == 2
+ A3.rows = 2
+ A3.cols = 2
+ assert len(A3._matrix__data) == 3
+ assert A4 + A4 == 2*A4
+ pytest.raises(ValueError, lambda: A4 + A2)
+ assert sum(A1 - A1) == 0
+ A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]])
+ x = matrix([10, -10])
+ assert A7*x == matrix([-10, -10, -10, -10])
+ A8 = ones(5)
+ assert sum((A8 + 1) - (2 - zeros(5))) == 0
+ assert (1 + ones(4)) / 2 - 1 == zeros(4)
+ assert eye(3)**10 == eye(3)
+ pytest.raises(ValueError, lambda: A7**2)
+ A9 = randmatrix(3)
+ A10 = matrix(A9)
+ A9[0,0] = -100
+ assert A9 != A10
+ assert nstr(A9)
+
+def test_matmul():
+ """
+ Test the PEP465 "@" matrix multiplication syntax.
+ To avoid syntax errors when importing this file in Python 3.5 and below, we have to use exec() - sorry for that.
+ """
+ # TODO remove exec() wrapper as soon as we drop support for Python <= 3.5
+ if sys.hexversion < 0x30500f0:
+ # we are on Python < 3.5
+ pytest.skip("'@' (__matmul__) is only supported in Python 3.5 or newer")
+ A4 = matrix([[1, 2, 3], [4, 5, 6]])
+ A5 = matrix([[6, -1], [3, 2], [0, -3]])
+ exec("assert A4 @ A5 == A4 * A5")
+
+def test_matrix_slices():
+ A = matrix([ [1, 2, 3],
+ [4, 5 ,6],
+ [7, 8 ,9]])
+ V = matrix([1,2,3,4,5])
+
+ # Get slice
+ assert A[:,:] == A
+ assert A[:,1] == matrix([[2],[5],[8]])
+ assert A[2,:] == matrix([[7, 8 ,9]])
+ assert A[1:3,1:3] == matrix([[5,6],[8,9]])
+ assert V[2:4] == matrix([3,4])
+ pytest.raises(IndexError, lambda: A[:,1:6])
+
+ # Assign slice with matrix
+ A1 = matrix(3)
+ A1[:,:] = A
+ assert A1[:,:] == matrix([[1, 2, 3],
+ [4, 5 ,6],
+ [7, 8 ,9]])
+ A1[0,:] = matrix([[10, 11, 12]])
+ assert A1 == matrix([ [10, 11, 12],
+ [4, 5 ,6],
+ [7, 8 ,9]])
+ A1[:,2] = matrix([[13], [14], [15]])
+ assert A1 == matrix([ [10, 11, 13],
+ [4, 5 ,14],
+ [7, 8 ,15]])
+ A1[:2,:2] = matrix([[16, 17], [18 , 19]])
+ assert A1 == matrix([ [16, 17, 13],
+ [18, 19 ,14],
+ [7, 8 ,15]])
+ V[1:3] = 10
+ assert V == matrix([1,10,10,4,5])
+ with pytest.raises(ValueError):
+ A1[2,:] = A[:,1]
+
+ with pytest.raises(IndexError):
+ A1[2,1:20] = A[:,:]
+
+ # Assign slice with scalar
+ A1[:,2] = 10
+ assert A1 == matrix([ [16, 17, 10],
+ [18, 19 ,10],
+ [7, 8 ,10]])
+ A1[:,:] = 40
+ for x in A1:
+ assert x == 40
+
+
+def test_matrix_power():
+ A = matrix([[1, 2], [3, 4]])
+ assert A**2 == A*A
+ assert A**3 == A*A*A
+ assert A**-1 == inverse(A)
+ assert A**-2 == inverse(A*A)
+
+def test_matrix_transform():
+ A = matrix([[1, 2], [3, 4], [5, 6]])
+ assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]])
+ swap_row(A, 1, 2)
+ assert A == matrix([[1, 2], [5, 6], [3, 4]])
+ l = [1, 2]
+ swap_row(l, 0, 1)
+ assert l == [2, 1]
+ assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]])
+
+def test_matrix_conjugate():
+ A = matrix([[1 + j, 0], [2, j]])
+ assert A.conjugate() == matrix([[mpc(1, -1), 0], [2, mpc(0, -1)]])
+ assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2],
+ [0, mpc(0, -1)]])
+
+def test_matrix_creation():
+ assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
+ A1 = ones(2, 3)
+ assert A1.rows == 2 and A1.cols == 3
+ for a in A1:
+ assert a == 1
+ A2 = zeros(3, 2)
+ assert A2.rows == 3 and A2.cols == 2
+ for a in A2:
+ assert a == 0
+ assert randmatrix(10) != randmatrix(10)
+ one = mpf(1)
+ assert hilbert(3) == matrix([[one, one/2, one/3],
+ [one/2, one/3, one/4],
+ [one/3, one/4, one/5]])
+
+def test_norms():
+ # matrix norms
+ A = matrix([[1, -2], [-3, -1], [2, 1]])
+ assert mnorm(A,1) == 6
+ assert mnorm(A,inf) == 4
+ assert mnorm(A,'F') == sqrt(20)
+ # vector norms
+ assert norm(-3) == 3
+ x = [1, -2, 7, -12]
+ assert norm(x, 1) == 22
+ assert round(norm(x, 2), 10) == 14.0712472795
+ assert round(norm(x, 10), 10) == 12.0054633727
+ assert norm(x, inf) == 12
+
+def test_vector():
+ x = matrix([0, 1, 2, 3, 4])
+ assert x == matrix([[0], [1], [2], [3], [4]])
+ assert x[3] == 3
+ assert len(x._matrix__data) == 4
+ assert list(x) == list(range(5))
+ x[0] = -10
+ x[4] = 0
+ assert x[0] == -10
+ assert len(x) == len(x.T) == 5
+ assert x.T*x == matrix([[114]])
+
+def test_matrix_copy():
+ A = ones(6)
+ B = A.copy()
+ C = +A
+ assert A == B
+ assert A == C
+ B[0,0] = 0
+ assert A != B
+ C[0,0] = 42
+ assert A != C
+
+def test_matrix_numpy():
+ try:
+ import numpy
+ except ImportError:
+ return
+ l = [[1, 2], [3, 4], [5, 6]]
+ a = numpy.array(l)
+ assert matrix(l) == matrix(a)
+
+def test_interval_matrix_scalar_mult():
+ """Multiplication of iv.matrix and any scalar type"""
+ a = mpi(-1, 1)
+ b = a + a * 2j
+ c = mpf(42)
+ d = c + c * 2j
+ e = 1.234
+ f = fp.convert(e)
+ g = e + e * 3j
+ h = fp.convert(g)
+ M = iv.ones(1)
+ for x in [a, b, c, d, e, f, g, h]:
+ assert x * M == iv.matrix([x])
+ assert M * x == iv.matrix([x])
+
+@pytest.mark.xfail()
+def test_interval_matrix_matrix_mult():
+ """Multiplication of iv.matrix and other matrix types"""
+ A = ones(1)
+ B = fp.ones(1)
+ M = iv.ones(1)
+ for X in [A, B, M]:
+ assert X * M == iv.matrix(X)
+ assert X * M == X
+ assert M * X == iv.matrix(X)
+ assert M * X == X
+
+def test_matrix_conversion_to_iv():
+ # Test that matrices with foreign datatypes are properly converted
+ for other_type_eye in [eye(3), fp.eye(3), iv.eye(3)]:
+ A = iv.matrix(other_type_eye)
+ B = iv.eye(3)
+ assert type(A[0,0]) == type(B[0,0])
+ assert A.tolist() == B.tolist()
+
+def test_interval_matrix_mult_bug():
+ # regression test for interval matrix multiplication:
+ # result must be nonzero-width and contain the exact result
+ x = convert('1.00000000000001') # note: this is implicitly rounded to some near mpf float value
+ A = matrix([[x]])
+ B = iv.matrix(A)
+ C = iv.matrix([[x]])
+ assert B == C
+ B = B * B
+ C = C * C
+ assert B == C
+ assert B[0, 0].delta > 1e-16
+ assert B[0, 0].delta < 3e-16
+ assert C[0, 0].delta > 1e-16
+ assert C[0, 0].delta < 3e-16
+ assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in B[0, 0]
+ assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in C[0, 0]
+ # the following caused an error before the bug was fixed
+ assert iv.matrix(mp.eye(2)) * (iv.ones(2) + mpi(1, 2)) == iv.matrix([[mpi(2, 3), mpi(2, 3)], [mpi(2, 3), mpi(2, 3)]])
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_mpmath.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_mpmath.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f1fe36ae9b1b0feca4677eeb90396bfa7ed8f7a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_mpmath.py
@@ -0,0 +1,7 @@
+from mpmath.libmp import *
+from mpmath import *
+
+def test_newstyle_classes():
+ for cls in [mp, fp, iv, mpf, mpc]:
+ for s in cls.__class__.__mro__:
+ assert isinstance(s, type)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_ode.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_ode.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b6dbffa79cfd4ca6dbf14f8591296ee48b16682
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_ode.py
@@ -0,0 +1,73 @@
+#from mpmath.calculus import ODE_step_euler, ODE_step_rk4, odeint, arange
+from mpmath import odefun, cos, sin, mpf, sinc, mp
+
+'''
+solvers = [ODE_step_euler, ODE_step_rk4]
+
+def test_ode1():
+ """
+ Let's solve:
+
+ x'' + w**2 * x = 0
+
+ i.e. x1 = x, x2 = x1':
+
+ x1' = x2
+ x2' = -x1
+ """
+ def derivs((x1, x2), t):
+ return x2, -x1
+
+ for solver in solvers:
+ t = arange(0, 3.1415926, 0.005)
+ sol = odeint(derivs, (0., 1.), t, solver)
+ x1 = [a[0] for a in sol]
+ x2 = [a[1] for a in sol]
+ # the result is x1 = sin(t), x2 = cos(t)
+ # let's just check the end points for t = pi
+ assert abs(x1[-1]) < 1e-2
+ assert abs(x2[-1] - (-1)) < 1e-2
+
+def test_ode2():
+ """
+ Let's solve:
+
+ x' - x = 0
+
+ i.e. x = exp(x)
+
+ """
+ def derivs((x), t):
+ return x
+
+ for solver in solvers:
+ t = arange(0, 1, 1e-3)
+ sol = odeint(derivs, (1.,), t, solver)
+ x = [a[0] for a in sol]
+ # the result is x = exp(t)
+ # let's just check the end point for t = 1, i.e. x = e
+ assert abs(x[-1] - 2.718281828) < 1e-2
+'''
+
+def test_odefun_rational():
+ mp.dps = 15
+ # A rational function
+ f = lambda t: 1/(1+mpf(t)**2)
+ g = odefun(lambda x, y: [-2*x*y[0]**2], 0, [f(0)])
+ assert f(2).ae(g(2)[0])
+
+def test_odefun_sinc_large():
+ mp.dps = 15
+ # Sinc function; test for large x
+ f = sinc
+ g = odefun(lambda x, y: [(cos(x)-y[0])/x], 1, [f(1)], tol=0.01, degree=5)
+ assert abs(f(100) - g(100)[0])/f(100) < 0.01
+
+def test_odefun_harmonic():
+ mp.dps = 15
+ # Harmonic oscillator
+ f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
+ for x in [0, 1, 2.5, 8, 3.7]: # we go back to 3.7 to check caching
+ c, s = f(x)
+ assert c.ae(cos(x))
+ assert s.ae(sin(x))
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_pickle.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3d96e73a53603e0fa3f9525c5c0059725bdffb7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_pickle.py
@@ -0,0 +1,27 @@
+import os
+import tempfile
+import pickle
+
+from mpmath import *
+
+def pickler(obj):
+ fn = tempfile.mktemp()
+
+ f = open(fn, 'wb')
+ pickle.dump(obj, f)
+ f.close()
+
+ f = open(fn, 'rb')
+ obj2 = pickle.load(f)
+ f.close()
+ os.remove(fn)
+
+ return obj2
+
+def test_pickle():
+
+ obj = mpf('0.5')
+ assert obj == pickler(obj)
+
+ obj = mpc('0.5','0.2')
+ assert obj == pickler(obj)
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_power.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_power.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a2447a62c36f9e02df79b9a40a8603f8a69b1d8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_power.py
@@ -0,0 +1,156 @@
+from mpmath import *
+from mpmath.libmp import *
+
+import random
+
+def test_fractional_pow():
+ mp.dps = 15
+ assert mpf(16) ** 2.5 == 1024
+ assert mpf(64) ** 0.5 == 8
+ assert mpf(64) ** -0.5 == 0.125
+ assert mpf(16) ** -2.5 == 0.0009765625
+ assert (mpf(10) ** 0.5).ae(3.1622776601683791)
+ assert (mpf(10) ** 2.5).ae(316.2277660168379)
+ assert (mpf(10) ** -0.5).ae(0.31622776601683794)
+ assert (mpf(10) ** -2.5).ae(0.0031622776601683794)
+ assert (mpf(10) ** 0.3).ae(1.9952623149688795)
+ assert (mpf(10) ** -0.3).ae(0.50118723362727224)
+
+def test_pow_integer_direction():
+ """
+ Test that inexact integer powers are rounded in the right
+ direction.
+ """
+ random.seed(1234)
+ for prec in [10, 53, 200]:
+ for i in range(50):
+ a = random.randint(1<<(prec-1), 1< ab
+
+
+def test_pow_epsilon_rounding():
+ """
+ Stress test directed rounding for powers with integer exponents.
+ Basically, we look at the following cases:
+
+ >>> 1.0001 ** -5 # doctest: +SKIP
+ 0.99950014996500702
+ >>> 0.9999 ** -5 # doctest: +SKIP
+ 1.000500150035007
+ >>> (-1.0001) ** -5 # doctest: +SKIP
+ -0.99950014996500702
+ >>> (-0.9999) ** -5 # doctest: +SKIP
+ -1.000500150035007
+
+ >>> 1.0001 ** -6 # doctest: +SKIP
+ 0.99940020994401269
+ >>> 0.9999 ** -6 # doctest: +SKIP
+ 1.0006002100560125
+ >>> (-1.0001) ** -6 # doctest: +SKIP
+ 0.99940020994401269
+ >>> (-0.9999) ** -6 # doctest: +SKIP
+ 1.0006002100560125
+
+ etc.
+
+ We run the tests with values a very small epsilon away from 1:
+ small enough that the result is indistinguishable from 1 when
+ rounded to nearest at the output precision. We check that the
+ result is not erroneously rounded to 1 in cases where the
+ rounding should be done strictly away from 1.
+ """
+
+ def powr(x, n, r):
+ return make_mpf(mpf_pow_int(x._mpf_, n, mp.prec, r))
+
+ for (inprec, outprec) in [(100, 20), (5000, 3000)]:
+
+ mp.prec = inprec
+
+ pos10001 = mpf(1) + mpf(2)**(-inprec+5)
+ pos09999 = mpf(1) - mpf(2)**(-inprec+5)
+ neg10001 = -pos10001
+ neg09999 = -pos09999
+
+ mp.prec = outprec
+ r = round_up
+ assert powr(pos10001, 5, r) > 1
+ assert powr(pos09999, 5, r) == 1
+ assert powr(neg10001, 5, r) < -1
+ assert powr(neg09999, 5, r) == -1
+ assert powr(pos10001, 6, r) > 1
+ assert powr(pos09999, 6, r) == 1
+ assert powr(neg10001, 6, r) > 1
+ assert powr(neg09999, 6, r) == 1
+
+ assert powr(pos10001, -5, r) == 1
+ assert powr(pos09999, -5, r) > 1
+ assert powr(neg10001, -5, r) == -1
+ assert powr(neg09999, -5, r) < -1
+ assert powr(pos10001, -6, r) == 1
+ assert powr(pos09999, -6, r) > 1
+ assert powr(neg10001, -6, r) == 1
+ assert powr(neg09999, -6, r) > 1
+
+ r = round_down
+ assert powr(pos10001, 5, r) == 1
+ assert powr(pos09999, 5, r) < 1
+ assert powr(neg10001, 5, r) == -1
+ assert powr(neg09999, 5, r) > -1
+ assert powr(pos10001, 6, r) == 1
+ assert powr(pos09999, 6, r) < 1
+ assert powr(neg10001, 6, r) == 1
+ assert powr(neg09999, 6, r) < 1
+
+ assert powr(pos10001, -5, r) < 1
+ assert powr(pos09999, -5, r) == 1
+ assert powr(neg10001, -5, r) > -1
+ assert powr(neg09999, -5, r) == -1
+ assert powr(pos10001, -6, r) < 1
+ assert powr(pos09999, -6, r) == 1
+ assert powr(neg10001, -6, r) < 1
+ assert powr(neg09999, -6, r) == 1
+
+ r = round_ceiling
+ assert powr(pos10001, 5, r) > 1
+ assert powr(pos09999, 5, r) == 1
+ assert powr(neg10001, 5, r) == -1
+ assert powr(neg09999, 5, r) > -1
+ assert powr(pos10001, 6, r) > 1
+ assert powr(pos09999, 6, r) == 1
+ assert powr(neg10001, 6, r) > 1
+ assert powr(neg09999, 6, r) == 1
+
+ assert powr(pos10001, -5, r) == 1
+ assert powr(pos09999, -5, r) > 1
+ assert powr(neg10001, -5, r) > -1
+ assert powr(neg09999, -5, r) == -1
+ assert powr(pos10001, -6, r) == 1
+ assert powr(pos09999, -6, r) > 1
+ assert powr(neg10001, -6, r) == 1
+ assert powr(neg09999, -6, r) > 1
+
+ r = round_floor
+ assert powr(pos10001, 5, r) == 1
+ assert powr(pos09999, 5, r) < 1
+ assert powr(neg10001, 5, r) < -1
+ assert powr(neg09999, 5, r) == -1
+ assert powr(pos10001, 6, r) == 1
+ assert powr(pos09999, 6, r) < 1
+ assert powr(neg10001, 6, r) == 1
+ assert powr(neg09999, 6, r) < 1
+
+ assert powr(pos10001, -5, r) < 1
+ assert powr(pos09999, -5, r) == 1
+ assert powr(neg10001, -5, r) == -1
+ assert powr(neg09999, -5, r) < -1
+ assert powr(pos10001, -6, r) < 1
+ assert powr(pos09999, -6, r) == 1
+ assert powr(neg10001, -6, r) < 1
+ assert powr(neg09999, -6, r) == 1
+
+ mp.dps = 15
diff --git a/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_quad.py b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_quad.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc71c5f5ef9c0ecd876c988e7d033b321f065cdc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/mpmath/tests/test_quad.py
@@ -0,0 +1,95 @@
+import pytest
+from mpmath import *
+
+def ae(a, b):
+ return abs(a-b) < 10**(-mp.dps+5)
+
+def test_basic_integrals():
+ for prec in [15, 30, 100]:
+ mp.dps = prec
+ assert ae(quadts(lambda x: x**3 - 3*x**2, [-2, 4]), -12)
+ assert ae(quadgl(lambda x: x**3 - 3*x**2, [-2, 4]), -12)
+ assert ae(quadts(sin, [0, pi]), 2)
+ assert ae(quadts(sin, [0, 2*pi]), 0)
+ assert ae(quadts(exp, [-inf, -1]), 1/e)
+ assert ae(quadts(lambda x: exp(-x), [0, inf]), 1)
+ assert ae(quadts(lambda x: exp(-x*x), [-inf, inf]), sqrt(pi))
+ assert ae(quadts(lambda x: 1/(1+x*x), [-1, 1]), pi/2)
+ assert ae(quadts(lambda x: 1/(1+x*x), [-inf, inf]), pi)
+ assert ae(quadts(lambda x: 2*sqrt(1-x*x), [-1, 1]), pi)
+ mp.dps = 15
+
+def test_multiple_intervals():
+ y,err = quad(lambda x: sign(x), [-0.5, 0.9, 1], maxdegree=2, error=True)
+ assert abs(y-0.5) < 2*err
+
+def test_quad_symmetry():
+ assert quadts(sin, [-1, 1]) == 0
+ assert quadgl(sin, [-1, 1]) == 0
+
+def test_quad_infinite_mirror():
+ # Check mirrored infinite interval
+ assert ae(quad(lambda x: exp(-x*x), [inf,-inf]), -sqrt(pi))
+ assert ae(quad(lambda x: exp(x), [0,-inf]), -1)
+
+def test_quadgl_linear():
+ assert quadgl(lambda x: x, [0, 1], maxdegree=1).ae(0.5)
+
+def test_complex_integration():
+ assert quadts(lambda x: x, [0, 1+j]).ae(j)
+
+def test_quadosc():
+ mp.dps = 15
+ assert quadosc(lambda x: sin(x)/x, [0, inf], period=2*pi).ae(pi/2)
+
+# Double integrals
+def test_double_trivial():
+ assert ae(quadts(lambda x, y: x, [0, 1], [0, 1]), 0.5)
+ assert ae(quadts(lambda x, y: x, [-1, 1], [-1, 1]), 0.0)
+
+def test_double_1():
+ assert ae(quadts(lambda x, y: cos(x+y/2), [-pi/2, pi/2], [0, pi]), 4)
+
+def test_double_2():
+ assert ae(quadts(lambda x, y: (x-1)/((1-x*y)*log(x*y)), [0, 1], [0, 1]), euler)
+
+def test_double_3():
+ assert ae(quadts(lambda x, y: 1/sqrt(1+x*x+y*y), [-1, 1], [-1, 1]), 4*log(2+sqrt(3))-2*pi/3)
+
+def test_double_4():
+ assert ae(quadts(lambda x, y: 1/(1-x*x * y*y), [0, 1], [0, 1]), pi**2 / 8)
+
+def test_double_5():
+ assert ae(quadts(lambda x, y: 1/(1-x*y), [0, 1], [0, 1]), pi**2 / 6)
+
+def test_double_6():
+ assert ae(quadts(lambda x, y: exp(-(x+y)), [0, inf], [0, inf]), 1)
+
+def test_double_7():
+ assert ae(quadts(lambda x, y: exp(-x*x-y*y), [-inf, inf], [-inf, inf]), pi)
+
+
+# Test integrals from "Experimentation in Mathematics" by Borwein,
+# Bailey & Girgensohn
+def test_expmath_integrals():
+ for prec in [15, 30, 50]:
+ mp.dps = prec
+ assert ae(quadts(lambda x: x/sinh(x), [0, inf]), pi**2 / 4)
+ assert ae(quadts(lambda x: log(x)**2 / (1+x**2), [0, inf]), pi**3 / 8)
+ assert ae(quadts(lambda x: (1+x**2)/(1+x**4), [0, inf]), pi/sqrt(2))
+ assert ae(quadts(lambda x: log(x)/cosh(x)**2, [0, inf]), log(pi)-2*log(2)-euler)
+ assert ae(quadts(lambda x: log(1+x**3)/(1-x+x**2), [0, inf]), 2*pi*log(3)/sqrt(3))
+ assert ae(quadts(lambda x: log(x)**2 / (x**2+x+1), [0, 1]), 8*pi**3 / (81*sqrt(3)))
+ assert ae(quadts(lambda x: log(cos(x))**2, [0, pi/2]), pi/2 * (log(2)**2+pi**2/12))
+ assert ae(quadts(lambda x: x**2 / sin(x)**2, [0, pi/2]), pi*log(2))
+ assert ae(quadts(lambda x: x**2/sqrt(exp(x)-1), [0, inf]), 4*pi*(log(2)**2 + pi**2/12))
+ assert ae(quadts(lambda x: x*exp(-x)*sqrt(1-exp(-2*x)), [0, inf]), pi*(1+2*log(2))/8)
+ mp.dps = 15
+
+# Do not reach full accuracy
+@pytest.mark.xfail
+def test_expmath_fail():
+ assert ae(quadts(lambda x: sqrt(tan(x)), [0, pi/2]), pi*sqrt(2)/2)
+ assert ae(quadts(lambda x: atan(x)/(x*sqrt(1-x**2)), [0, 1]), pi*log(1+sqrt(2))/2)
+ assert ae(quadts(lambda x: log(1+x**2)/x**2, [0, 1]), pi/2-log(2))
+ assert ae(quadts(lambda x: x**2/((1+x**4)*sqrt(1-x**4)), [0, 1]), pi/8)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..54fdbd54b55ffd1314ea0a791ade64a8b01a97c4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/__init__.py
@@ -0,0 +1,49 @@
+"""
+NetworkX
+========
+
+NetworkX is a Python package for the creation, manipulation, and study of the
+structure, dynamics, and functions of complex networks.
+
+See https://networkx.org for complete documentation.
+"""
+
+__version__ = "3.3"
+
+
+# These are imported in order as listed
+from networkx.lazy_imports import _lazy_import
+
+from networkx.exception import *
+
+from networkx import utils
+from networkx.utils import _clear_cache, _dispatchable, config
+
+from networkx import classes
+from networkx.classes import filters
+from networkx.classes import *
+
+from networkx import convert
+from networkx.convert import *
+
+from networkx import convert_matrix
+from networkx.convert_matrix import *
+
+from networkx import relabel
+from networkx.relabel import *
+
+from networkx import generators
+from networkx.generators import *
+
+from networkx import readwrite
+from networkx.readwrite import *
+
+# Need to test with SciPy, when available
+from networkx import algorithms
+from networkx.algorithms import *
+
+from networkx import linalg
+from networkx.linalg import *
+
+from networkx import drawing
+from networkx.drawing import *
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..56bfb14afdfba168ba2e230c41406799841f6a07
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/__init__.py
@@ -0,0 +1,133 @@
+from networkx.algorithms.assortativity import *
+from networkx.algorithms.asteroidal import *
+from networkx.algorithms.boundary import *
+from networkx.algorithms.broadcasting import *
+from networkx.algorithms.bridges import *
+from networkx.algorithms.chains import *
+from networkx.algorithms.centrality import *
+from networkx.algorithms.chordal import *
+from networkx.algorithms.cluster import *
+from networkx.algorithms.clique import *
+from networkx.algorithms.communicability_alg import *
+from networkx.algorithms.components import *
+from networkx.algorithms.coloring import *
+from networkx.algorithms.core import *
+from networkx.algorithms.covering import *
+from networkx.algorithms.cycles import *
+from networkx.algorithms.cuts import *
+from networkx.algorithms.d_separation import *
+from networkx.algorithms.dag import *
+from networkx.algorithms.distance_measures import *
+from networkx.algorithms.distance_regular import *
+from networkx.algorithms.dominance import *
+from networkx.algorithms.dominating import *
+from networkx.algorithms.efficiency_measures import *
+from networkx.algorithms.euler import *
+from networkx.algorithms.graphical import *
+from networkx.algorithms.hierarchy import *
+from networkx.algorithms.hybrid import *
+from networkx.algorithms.link_analysis import *
+from networkx.algorithms.link_prediction import *
+from networkx.algorithms.lowest_common_ancestors import *
+from networkx.algorithms.isolate import *
+from networkx.algorithms.matching import *
+from networkx.algorithms.minors import *
+from networkx.algorithms.mis import *
+from networkx.algorithms.moral import *
+from networkx.algorithms.non_randomness import *
+from networkx.algorithms.operators import *
+from networkx.algorithms.planarity import *
+from networkx.algorithms.planar_drawing import *
+from networkx.algorithms.polynomials import *
+from networkx.algorithms.reciprocity import *
+from networkx.algorithms.regular import *
+from networkx.algorithms.richclub import *
+from networkx.algorithms.shortest_paths import *
+from networkx.algorithms.similarity import *
+from networkx.algorithms.graph_hashing import *
+from networkx.algorithms.simple_paths import *
+from networkx.algorithms.smallworld import *
+from networkx.algorithms.smetric import *
+from networkx.algorithms.structuralholes import *
+from networkx.algorithms.sparsifiers import *
+from networkx.algorithms.summarization import *
+from networkx.algorithms.swap import *
+from networkx.algorithms.time_dependent import *
+from networkx.algorithms.traversal import *
+from networkx.algorithms.triads import *
+from networkx.algorithms.vitality import *
+from networkx.algorithms.voronoi import *
+from networkx.algorithms.walks import *
+from networkx.algorithms.wiener import *
+
+# Make certain subpackages available to the user as direct imports from
+# the `networkx` namespace.
+from networkx.algorithms import approximation
+from networkx.algorithms import assortativity
+from networkx.algorithms import bipartite
+from networkx.algorithms import node_classification
+from networkx.algorithms import centrality
+from networkx.algorithms import chordal
+from networkx.algorithms import cluster
+from networkx.algorithms import clique
+from networkx.algorithms import components
+from networkx.algorithms import connectivity
+from networkx.algorithms import community
+from networkx.algorithms import coloring
+from networkx.algorithms import flow
+from networkx.algorithms import isomorphism
+from networkx.algorithms import link_analysis
+from networkx.algorithms import lowest_common_ancestors
+from networkx.algorithms import operators
+from networkx.algorithms import shortest_paths
+from networkx.algorithms import tournament
+from networkx.algorithms import traversal
+from networkx.algorithms import tree
+
+# Make certain functions from some of the previous subpackages available
+# to the user as direct imports from the `networkx` namespace.
+from networkx.algorithms.bipartite import complete_bipartite_graph
+from networkx.algorithms.bipartite import is_bipartite
+from networkx.algorithms.bipartite import projected_graph
+from networkx.algorithms.connectivity import all_pairs_node_connectivity
+from networkx.algorithms.connectivity import all_node_cuts
+from networkx.algorithms.connectivity import average_node_connectivity
+from networkx.algorithms.connectivity import edge_connectivity
+from networkx.algorithms.connectivity import edge_disjoint_paths
+from networkx.algorithms.connectivity import k_components
+from networkx.algorithms.connectivity import k_edge_components
+from networkx.algorithms.connectivity import k_edge_subgraphs
+from networkx.algorithms.connectivity import k_edge_augmentation
+from networkx.algorithms.connectivity import is_k_edge_connected
+from networkx.algorithms.connectivity import minimum_edge_cut
+from networkx.algorithms.connectivity import minimum_node_cut
+from networkx.algorithms.connectivity import node_connectivity
+from networkx.algorithms.connectivity import node_disjoint_paths
+from networkx.algorithms.connectivity import stoer_wagner
+from networkx.algorithms.flow import capacity_scaling
+from networkx.algorithms.flow import cost_of_flow
+from networkx.algorithms.flow import gomory_hu_tree
+from networkx.algorithms.flow import max_flow_min_cost
+from networkx.algorithms.flow import maximum_flow
+from networkx.algorithms.flow import maximum_flow_value
+from networkx.algorithms.flow import min_cost_flow
+from networkx.algorithms.flow import min_cost_flow_cost
+from networkx.algorithms.flow import minimum_cut
+from networkx.algorithms.flow import minimum_cut_value
+from networkx.algorithms.flow import network_simplex
+from networkx.algorithms.isomorphism import could_be_isomorphic
+from networkx.algorithms.isomorphism import fast_could_be_isomorphic
+from networkx.algorithms.isomorphism import faster_could_be_isomorphic
+from networkx.algorithms.isomorphism import is_isomorphic
+from networkx.algorithms.isomorphism.vf2pp import *
+from networkx.algorithms.tree.branchings import maximum_branching
+from networkx.algorithms.tree.branchings import maximum_spanning_arborescence
+from networkx.algorithms.tree.branchings import minimum_branching
+from networkx.algorithms.tree.branchings import minimum_spanning_arborescence
+from networkx.algorithms.tree.branchings import ArborescenceIterator
+from networkx.algorithms.tree.coding import *
+from networkx.algorithms.tree.decomposition import *
+from networkx.algorithms.tree.mst import *
+from networkx.algorithms.tree.operations import *
+from networkx.algorithms.tree.recognition import *
+from networkx.algorithms.tournament import is_tournament
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e39dc00aa250b05cbd8f0ce9b38cf32ecc752946
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__init__.py
@@ -0,0 +1,24 @@
+"""Approximations of graph properties and Heuristic methods for optimization.
+
+The functions in this class are not imported into the top-level ``networkx``
+namespace so the easiest way to use them is with::
+
+ >>> from networkx.algorithms import approximation
+
+Another option is to import the specific function with
+``from networkx.algorithms.approximation import function_name``.
+
+"""
+from networkx.algorithms.approximation.clustering_coefficient import *
+from networkx.algorithms.approximation.clique import *
+from networkx.algorithms.approximation.connectivity import *
+from networkx.algorithms.approximation.distance_measures import *
+from networkx.algorithms.approximation.dominating_set import *
+from networkx.algorithms.approximation.kcomponents import *
+from networkx.algorithms.approximation.matching import *
+from networkx.algorithms.approximation.ramsey import *
+from networkx.algorithms.approximation.steinertree import *
+from networkx.algorithms.approximation.traveling_salesman import *
+from networkx.algorithms.approximation.treewidth import *
+from networkx.algorithms.approximation.vertex_cover import *
+from networkx.algorithms.approximation.maxcut import *
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e05058dc70d0449d58c3172dd234102709b9bff
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38a9ee41b744d92f5145aa6d2005f79b80fe1b52
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clique.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9498b9a9d321bb81198a49873cd76bfbbc73349b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f206b38f17a8308e964e16aea8418b9cc9eec4f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/connectivity.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19ab3bfe23461904bab9b9da663e7869f08d16d2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/distance_measures.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45ad43a5e8500762c54149f4a39a20540bd0dccf
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/dominating_set.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a06757cd46fb5073776d3a673cc4f81aa8a931b1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/kcomponents.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47764c6d28f8b50eb694449f452bd8c0a98a4043
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/matching.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb88a22b3c74dfafdf9f231311bdaa6c06a48830
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/maxcut.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa13f157fc2dad93d1795baa75194f6b41fa82f6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/ramsey.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4632d08fd360a7f19a93268142091c2909f66a5
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/steinertree.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..265493d63763f4e8ffdc4b2bab6b387cb8ab8894
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8576876592ce4c8f67c700a2e353ca59777550d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/treewidth.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..308e9bd620621953d703577c2f59fc0358e217b6
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/clique.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/clique.py
new file mode 100644
index 0000000000000000000000000000000000000000..56443068633f80fa4223afa9ceb88f64c1614faa
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/clique.py
@@ -0,0 +1,258 @@
+"""Functions for computing large cliques and maximum independent sets."""
+import networkx as nx
+from networkx.algorithms.approximation import ramsey
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "clique_removal",
+ "max_clique",
+ "large_clique_size",
+ "maximum_independent_set",
+]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def maximum_independent_set(G):
    """Return an approximate maximum independent set of `G`.

    An independent set is a set of nodes no two of which are joined by an
    edge; a maximum independent set is one of largest cardinality, and
    finding it is NP-hard.  This approximation follows Boppana and
    Halldorsson [2]_ and achieves a worst-case ratio of
    $O(|V|/(\\log|V|)^2)$ by repeatedly excising cliques from the graph
    (see :func:`clique_removal`).

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph.

    Returns
    -------
    set
        The approximately maximum independent set found.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is directed or is a multigraph.

    Examples
    --------
    >>> G = nx.path_graph(10)
    >>> nx.approximation.maximum_independent_set(G)
    {0, 2, 4, 6, 9}

    References
    ----------
    .. [1] `Wikipedia: Independent set
       `_
    .. [2] Boppana, R., & Halldórsson, M. M. (1992).
       Approximating maximum independent sets by excluding subgraphs.
       BIT Numerical Mathematics, 32(2), 180–196. Springer.
    """
    # clique_removal tracks the best independent set it encounters; the
    # cliques it also returns are irrelevant for this function.
    independent, _cliques = clique_removal(G)
    return independent
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def max_clique(G):
    r"""Return an approximate maximum clique of `G`.

    A clique is a node set `C \subseteq V` in which every pair of nodes is
    adjacent (the induced subgraph is complete); a maximum clique is one of
    largest possible size, and its size `\omega(G)` is the clique number of
    `G`.  See https://en.wikipedia.org/wiki/Maximum_clique.

    This heuristic attains the worst-case approximation ratio
    $O(|V|/(\log|V|)^2)$ of Boppana and Halldorsson [1]_.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph.

    Returns
    -------
    set
        The approximately maximum clique found.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is directed or is a multigraph.

    Examples
    --------
    >>> G = nx.path_graph(10)
    >>> nx.approximation.max_clique(G)
    {8, 9}

    References
    ----------
    .. [1] Boppana, R., & Halldórsson, M. M. (1992).
        Approximating maximum independent sets by excluding subgraphs.
        BIT Numerical Mathematics, 32(2), 180–196. Springer.
        doi:10.1007/BF01994876
    """
    # A clique of G is exactly an independent set of the complement graph,
    # so run the independent-set approximation there.
    complement_graph = nx.complement(G)
    clique, _cliques = clique_removal(complement_graph)
    return clique
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def clique_removal(G):
    r"""Repeatedly remove cliques from the graph.

    Applies Ramsey-style decomposition: at each step a maximal clique and
    an independent set are extracted, the clique's nodes are deleted, and
    the process repeats until the graph is exhausted.  This yields a
    $O(|V|/(\log |V|)^2)$ approximation of both maximum clique and maximum
    independent set [1]_.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph.

    Returns
    -------
    (set, list) tuple
        The largest independent set found, and the list of maximal cliques
        (as sets) removed along the way.

    Raises
    ------
    NetworkXNotImplemented
        If `G` is directed or is a multigraph.

    Examples
    --------
    >>> G = nx.path_graph(10)
    >>> nx.approximation.clique_removal(G)
    ({0, 2, 4, 6, 9}, [{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}])

    References
    ----------
    .. [1] Boppana, R., & Halldórsson, M. M. (1992).
       Approximating maximum independent sets by excluding subgraphs.
       BIT Numerical Mathematics, 32(2), 180–196. Springer.
    """
    # Work on a copy so the caller's graph is never mutated.
    work = G.copy()
    clique, indep = ramsey.ramsey_R2(work)
    cliques = [clique]
    indep_sets = [indep]
    while work:
        work.remove_nodes_from(clique)
        clique, indep = ramsey.ramsey_R2(work)
        # After the first round, skip empty results from an exhausted graph.
        if clique:
            cliques.append(clique)
        if indep:
            indep_sets.append(indep)
    # Report the independent set of greatest cardinality seen overall.
    return max(indep_sets, key=len), cliques
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def large_clique_size(G):
    """Find the size of a large clique in a graph.

    A *clique* is a subset of nodes in which each pair of nodes is
    adjacent.  This greedy heuristic from [1]_ grows a clique from each
    candidate node by always adding the highest-degree compatible
    neighbor, pruning nodes whose degree cannot beat the best clique
    found so far.  Worst-case time is :math:`O(n d^2)` for *n* nodes and
    maximum degree *d*.

    Being a heuristic, it carries no mathematical guarantee on the ratio
    between the returned number and the true maximum clique size.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    int
        The size of a large clique in the graph.

    Examples
    --------
    >>> G = nx.path_graph(10)
    >>> nx.approximation.large_clique_size(G)
    2

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed or is a multigraph.

    References
    ----------
    .. [1] Pattabiraman, Bharath, et al.
       "Fast Algorithms for the Maximum Clique Problem on Massive Graphs
       with Applications to Overlapping Community Detection."
       *Internet Mathematics* 11.4-5 (2015): 421--448.

    See also
    --------
    :func:`networkx.algorithms.approximation.clique.max_clique`
        A function that returns an approximate maximum clique with a
        guarantee on the approximation ratio.

    :mod:`networkx.algorithms.clique`
        Functions for finding the exact maximum clique in a graph.
    """
    degrees = G.degree

    def _grow(G, candidates, size, best):
        # Extend the current clique with the highest-degree candidate,
        # restricting candidates to its still-viable neighbors.
        if not candidates:
            return max(best, size)
        pivot = max(candidates, key=degrees)
        candidates.remove(pivot)
        viable = {w for w in G[pivot] if degrees[w] >= best}
        return _grow(G, candidates & viable, size + 1, best)

    best = 0
    # NOTE: the generator re-evaluates `best` lazily, so nodes whose degree
    # falls below the improving bound are skipped as the search proceeds.
    for node in (u for u in G if degrees[u] >= best):
        seeds = {w for w in G[node] if degrees[w] >= best}
        best = _grow(G, seeds, 1, best)
    return best
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py
new file mode 100644
index 0000000000000000000000000000000000000000..545fc65533b8d8f44b35498aa7129c97efc0bc52
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/clustering_coefficient.py
@@ -0,0 +1,71 @@
+import networkx as nx
+from networkx.utils import not_implemented_for, py_random_state
+
+__all__ = ["average_clustering"]
+
+
@not_implemented_for("directed")
@py_random_state(2)
@nx._dispatchable(name="approximate_average_clustering")
def average_clustering(G, trials=1000, seed=None):
    r"""Estimates the average clustering coefficient of G.

    The local clustering of each node in `G` is the fraction of triangles
    that actually exist over all possible triangles in its neighborhood.
    The average clustering coefficient of a graph `G` is the mean of
    local clusterings.

    The estimate repeats the following experiment `trials` times: pick a
    node uniformly at random, pick two of its neighbors at random, and
    test whether they are adjacent.  The fraction of successful trials
    approximates the coefficient [1]_.

    Parameters
    ----------
    G : NetworkX graph

    trials : integer
        Number of trials to perform (default 1000).

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    float
        Approximated average clustering coefficient.

    Examples
    --------
    >>> from networkx.algorithms import approximation
    >>> G = nx.erdos_renyi_graph(10, 0.2, seed=10)
    >>> approximation.average_clustering(G, trials=1000, seed=10)
    0.214

    Raises
    ------
    NetworkXNotImplemented
        If G is directed.

    References
    ----------
    .. [1] Schank, Thomas, and Dorothea Wagner. Approximating clustering
       coefficient and transitivity. Universität Karlsruhe, Fakultät für
       Informatik, 2004.
       https://doi.org/10.5445/IR/1000001239
    """
    node_list = list(G)
    num_nodes = len(G)
    # Draw every node index up front so the RNG call sequence (and hence
    # the result for a given seed) matches the reference implementation.
    picks = [int(seed.random() * num_nodes) for _ in range(trials)]
    hits = 0
    for idx in picks:
        neighbors = list(G[node_list[idx]])
        if len(neighbors) < 2:
            # A node with fewer than two neighbors closes no triangle.
            continue
        a, b = seed.sample(neighbors, 2)
        hits += a in G[b]
    return hits / trials
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/connectivity.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2214ed128b808196f2700435f03679fc16a8aac
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/connectivity.py
@@ -0,0 +1,412 @@
+""" Fast approximation for node connectivity
+"""
+import itertools
+from operator import itemgetter
+
+import networkx as nx
+
+__all__ = [
+ "local_node_connectivity",
+ "node_connectivity",
+ "all_pairs_node_connectivity",
+]
+
+
+@nx._dispatchable(name="approximate_local_node_connectivity")
+def local_node_connectivity(G, source, target, cutoff=None):
+ """Compute node connectivity between source and target.
+
+ Pairwise or local node connectivity between two distinct and nonadjacent
+ nodes is the minimum number of nodes that must be removed (minimum
+ separating cutset) to disconnect them. By Menger's theorem, this is equal
+ to the number of node independent paths (paths that share no nodes other
+ than source and target). Which is what we compute in this function.
+
+ This algorithm is a fast approximation that gives an strict lower
+ bound on the actual number of node independent paths between two nodes [1]_.
+ It works for both directed and undirected graphs.
+
+ Parameters
+ ----------
+
+ G : NetworkX graph
+
+ source : node
+ Starting node for node connectivity
+
+ target : node
+ Ending node for node connectivity
+
+ cutoff : integer
+ Maximum node connectivity to consider. If None, the minimum degree
+ of source or target is used as a cutoff. Default value None.
+
+ Returns
+ -------
+ k: integer
+ pairwise node connectivity
+
+ Examples
+ --------
+ >>> # Platonic octahedral graph has node connectivity 4
+ >>> # for each non adjacent node pair
+ >>> from networkx.algorithms import approximation as approx
+ >>> G = nx.octahedral_graph()
+ >>> approx.local_node_connectivity(G, 0, 5)
+ 4
+
+ Notes
+ -----
+ This algorithm [1]_ finds node independents paths between two nodes by
+ computing their shortest path using BFS, marking the nodes of the path
+ found as 'used' and then searching other shortest paths excluding the
+ nodes marked as used until no more paths exist. It is not exact because
+ a shortest path could use nodes that, if the path were longer, may belong
+ to two different node independent paths. Thus it only guarantees an
+ strict lower bound on node connectivity.
+
+ Note that the authors propose a further refinement, losing accuracy and
+ gaining speed, which is not implemented yet.
+
+ See also
+ --------
+ all_pairs_node_connectivity
+ node_connectivity
+
+ References
+ ----------
+ .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+ Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+ http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+ """
+ if target == source:
+ raise nx.NetworkXError("source and target have to be different nodes.")
+
+ # Maximum possible node independent paths
+ if G.is_directed():
+ possible = min(G.out_degree(source), G.in_degree(target))
+ else:
+ possible = min(G.degree(source), G.degree(target))
+
+ K = 0
+ if not possible:
+ return K
+
+ if cutoff is None:
+ cutoff = float("inf")
+
+ exclude = set()
+ for i in range(min(possible, cutoff)):
+ try:
+ path = _bidirectional_shortest_path(G, source, target, exclude)
+ exclude.update(set(path))
+ K += 1
+ except nx.NetworkXNoPath:
+ break
+
+ return K
+
+
+@nx._dispatchable(name="approximate_node_connectivity")
+def node_connectivity(G, s=None, t=None):
+ r"""Returns an approximation for node connectivity for a graph or digraph G.
+
+ Node connectivity is equal to the minimum number of nodes that
+ must be removed to disconnect G or render it trivial. By Menger's theorem,
+ this is equal to the number of node independent paths (paths that
+ share no nodes other than source and target).
+
+ If source and target nodes are provided, this function returns the
+ local node connectivity: the minimum number of nodes that must be
+ removed to break all paths from source to target in G.
+
+ This algorithm is based on a fast approximation that gives an strict lower
+ bound on the actual number of node independent paths between two nodes [1]_.
+ It works for both directed and undirected graphs.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ s : node
+ Source node. Optional. Default value: None.
+
+ t : node
+ Target node. Optional. Default value: None.
+
+ Returns
+ -------
+ K : integer
+ Node connectivity of G, or local node connectivity if source
+ and target are provided.
+
+ Examples
+ --------
+ >>> # Platonic octahedral graph is 4-node-connected
+ >>> from networkx.algorithms import approximation as approx
+ >>> G = nx.octahedral_graph()
+ >>> approx.node_connectivity(G)
+ 4
+
+ Notes
+ -----
+ This algorithm [1]_ finds node independents paths between two nodes by
+ computing their shortest path using BFS, marking the nodes of the path
+ found as 'used' and then searching other shortest paths excluding the
+ nodes marked as used until no more paths exist. It is not exact because
+ a shortest path could use nodes that, if the path were longer, may belong
+ to two different node independent paths. Thus it only guarantees an
+ strict lower bound on node connectivity.
+
+ See also
+ --------
+ all_pairs_node_connectivity
+ local_node_connectivity
+
+ References
+ ----------
+ .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+ Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+ http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+ """
+ if (s is not None and t is None) or (s is None and t is not None):
+ raise nx.NetworkXError("Both source and target must be specified.")
+
+ # Local node connectivity
+ if s is not None and t is not None:
+ if s not in G:
+ raise nx.NetworkXError(f"node {s} not in graph")
+ if t not in G:
+ raise nx.NetworkXError(f"node {t} not in graph")
+ return local_node_connectivity(G, s, t)
+
+ # Global node connectivity
+ if G.is_directed():
+ connected_func = nx.is_weakly_connected
+ iter_func = itertools.permutations
+
+ def neighbors(v):
+ return itertools.chain(G.predecessors(v), G.successors(v))
+
+ else:
+ connected_func = nx.is_connected
+ iter_func = itertools.combinations
+ neighbors = G.neighbors
+
+ if not connected_func(G):
+ return 0
+
+ # Choose a node with minimum degree
+ v, minimum_degree = min(G.degree(), key=itemgetter(1))
+ # Node connectivity is bounded by minimum degree
+ K = minimum_degree
+ # compute local node connectivity with all non-neighbors nodes
+ # and store the minimum
+ for w in set(G) - set(neighbors(v)) - {v}:
+ K = min(K, local_node_connectivity(G, v, w, cutoff=K))
+ # Same for non adjacent pairs of neighbors of v
+ for x, y in iter_func(neighbors(v), 2):
+ if y not in G[x] and x != y:
+ K = min(K, local_node_connectivity(G, x, y, cutoff=K))
+ return K
+
+
+@nx._dispatchable(name="approximate_all_pairs_node_connectivity")
+def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
+ """Compute node connectivity between all pairs of nodes.
+
+ Pairwise or local node connectivity between two distinct and nonadjacent
+ nodes is the minimum number of nodes that must be removed (minimum
+ separating cutset) to disconnect them. By Menger's theorem, this is equal
+ to the number of node independent paths (paths that share no nodes other
+ than source and target). Which is what we compute in this function.
+
+ This algorithm is a fast approximation that gives an strict lower
+ bound on the actual number of node independent paths between two nodes [1]_.
+ It works for both directed and undirected graphs.
+
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nbunch: container
+ Container of nodes. If provided node connectivity will be computed
+ only over pairs of nodes in nbunch.
+
+ cutoff : integer
+ Maximum node connectivity to consider. If None, the minimum degree
+ of source or target is used as a cutoff in each pair of nodes.
+ Default value None.
+
+ Returns
+ -------
+ K : dictionary
+ Dictionary, keyed by source and target, of pairwise node connectivity
+
+ Examples
+ --------
+ A 3 node cycle with one extra node attached has connectivity 2 between all
+ nodes in the cycle and connectivity 1 between the extra node and the rest:
+
+ >>> G = nx.cycle_graph(3)
+ >>> G.add_edge(2, 3)
+ >>> import pprint # for nice dictionary formatting
+ >>> pprint.pprint(nx.all_pairs_node_connectivity(G))
+ {0: {1: 2, 2: 2, 3: 1},
+ 1: {0: 2, 2: 2, 3: 1},
+ 2: {0: 2, 1: 2, 3: 1},
+ 3: {0: 1, 1: 1, 2: 1}}
+
+ See Also
+ --------
+ local_node_connectivity
+ node_connectivity
+
+ References
+ ----------
+ .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+ Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+ http://eclectic.ss.uci.edu/~drwhite/working.pdf
+ """
+ if nbunch is None:
+ nbunch = G
+ else:
+ nbunch = set(nbunch)
+
+ directed = G.is_directed()
+ if directed:
+ iter_func = itertools.permutations
+ else:
+ iter_func = itertools.combinations
+
+ all_pairs = {n: {} for n in nbunch}
+
+ for u, v in iter_func(nbunch, 2):
+ k = local_node_connectivity(G, u, v, cutoff=cutoff)
+ all_pairs[u][v] = k
+ if not directed:
+ all_pairs[v][u] = k
+
+ return all_pairs
+
+
+def _bidirectional_shortest_path(G, source, target, exclude):
+ """Returns shortest path between source and target ignoring nodes in the
+ container 'exclude'.
+
+ Parameters
+ ----------
+
+ G : NetworkX graph
+
+ source : node
+ Starting node for path
+
+ target : node
+ Ending node for path
+
+ exclude: container
+ Container for nodes to exclude from the search for shortest paths
+
+ Returns
+ -------
+ path: list
+ Shortest path between source and target ignoring nodes in 'exclude'
+
+ Raises
+ ------
+ NetworkXNoPath
+ If there is no path or if nodes are adjacent and have only one path
+ between them
+
+ Notes
+ -----
+ This function and its helper are originally from
+ networkx.algorithms.shortest_paths.unweighted and are modified to
+ accept the extra parameter 'exclude', which is a container for nodes
+ already used in other paths that should be ignored.
+
+ References
+ ----------
+ .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for
+ Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+ http://eclectic.ss.uci.edu/~drwhite/working.pdf
+
+ """
+ # call helper to do the real work
+ results = _bidirectional_pred_succ(G, source, target, exclude)
+ pred, succ, w = results
+
+ # build path from pred+w+succ
+ path = []
+ # from source to w
+ while w is not None:
+ path.append(w)
+ w = pred[w]
+ path.reverse()
+ # from w to target
+ w = succ[path[-1]]
+ while w is not None:
+ path.append(w)
+ w = succ[w]
+
+ return path
+
+
+def _bidirectional_pred_succ(G, source, target, exclude):
+ # does BFS from both source and target and meets in the middle
+ # excludes nodes in the container "exclude" from the search
+
+ # handle either directed or undirected
+ if G.is_directed():
+ Gpred = G.predecessors
+ Gsucc = G.successors
+ else:
+ Gpred = G.neighbors
+ Gsucc = G.neighbors
+
+ # predecessor and successors in search
+ pred = {source: None}
+ succ = {target: None}
+
+ # initialize fringes, start with forward
+ forward_fringe = [source]
+ reverse_fringe = [target]
+
+ level = 0
+
+ while forward_fringe and reverse_fringe:
+ # Make sure that we iterate one step forward and one step backwards
+ # thus source and target will only trigger "found path" when they are
+ # adjacent and then they can be safely included in the container 'exclude'
+ level += 1
+ if level % 2 != 0:
+ this_level = forward_fringe
+ forward_fringe = []
+ for v in this_level:
+ for w in Gsucc(v):
+ if w in exclude:
+ continue
+ if w not in pred:
+ forward_fringe.append(w)
+ pred[w] = v
+ if w in succ:
+ return pred, succ, w # found path
+ else:
+ this_level = reverse_fringe
+ reverse_fringe = []
+ for v in this_level:
+ for w in Gpred(v):
+ if w in exclude:
+ continue
+ if w not in succ:
+ succ[w] = v
+ reverse_fringe.append(w)
+ if w in pred:
+ return pred, succ, w # found path
+
+ raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5847e65a2a401cd607436297fe4c1bbc81db3d9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/distance_measures.py
@@ -0,0 +1,150 @@
+"""Distance measures approximated metrics."""
+
+import networkx as nx
+from networkx.utils.decorators import py_random_state
+
+__all__ = ["diameter"]
+
+
+@py_random_state(1)
+@nx._dispatchable(name="approximate_diameter")
+def diameter(G, seed=None):
+ """Returns a lower bound on the diameter of the graph G.
+
+ The function computes a lower bound on the diameter (i.e., the maximum eccentricity)
+ of a directed or undirected graph G. The procedure used varies depending on the graph
+ being directed or not.
+
+ If G is an `undirected` graph, then the function uses the `2-sweep` algorithm [1]_.
+ The main idea is to pick the farthest node from a random node and return its eccentricity.
+
+ Otherwise, if G is a `directed` graph, the function uses the `2-dSweep` algorithm [2]_,
+ The procedure starts by selecting a random source node $s$ from which it performs a
+ forward and a backward BFS. Let $a_1$ and $a_2$ be the farthest nodes in the forward and
+ backward cases, respectively. Then, it computes the backward eccentricity of $a_1$ using
+ a backward BFS and the forward eccentricity of $a_2$ using a forward BFS.
+ Finally, it returns the best lower bound between the two.
+
+ In both cases, the time complexity is linear with respect to the size of G.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ seed : integer, random_state, or None (default)
+ Indicator of random number generation state.
+ See :ref:`Randomness`.
+
+ Returns
+ -------
+ d : integer
+ Lower Bound on the Diameter of G
+
+ Examples
+ --------
+ >>> G = nx.path_graph(10) # undirected graph
+ >>> nx.diameter(G)
+ 9
+ >>> G = nx.cycle_graph(3, create_using=nx.DiGraph) # directed graph
+ >>> nx.diameter(G)
+ 2
+
+ Raises
+ ------
+ NetworkXError
+ If the graph is empty or
+ If the graph is undirected and not connected or
+ If the graph is directed and not strongly connected.
+
+ See Also
+ --------
+ networkx.algorithms.distance_measures.diameter
+
+ References
+ ----------
+ .. [1] Magnien, Clémence, Matthieu Latapy, and Michel Habib.
+ *Fast computation of empirically tight bounds for the diameter of massive graphs.*
+ Journal of Experimental Algorithmics (JEA), 2009.
+ https://arxiv.org/pdf/0904.2728.pdf
+ .. [2] Crescenzi, Pierluigi, Roberto Grossi, Leonardo Lanzi, and Andrea Marino.
+ *On computing the diameter of real-world directed (weighted) graphs.*
+ International Symposium on Experimental Algorithms. Springer, Berlin, Heidelberg, 2012.
+ https://courses.cs.ut.ee/MTAT.03.238/2014_fall/uploads/Main/diameter.pdf
+ """
+ # if G is empty
+ if not G:
+ raise nx.NetworkXError("Expected non-empty NetworkX graph!")
+ # if there's only a node
+ if G.number_of_nodes() == 1:
+ return 0
+ # if G is directed
+ if G.is_directed():
+ return _two_sweep_directed(G, seed)
+ # else if G is undirected
+ return _two_sweep_undirected(G, seed)
+
+
+def _two_sweep_undirected(G, seed):
+ """Helper function for finding a lower bound on the diameter
+ for undirected Graphs.
+
+ The idea is to pick the farthest node from a random node
+ and return its eccentricity.
+
+ ``G`` is a NetworkX undirected graph.
+
+ .. note::
+
+ ``seed`` is a random.Random or numpy.random.RandomState instance
+ """
+ # select a random source node
+ source = seed.choice(list(G))
+ # get the distances to the other nodes
+ distances = nx.shortest_path_length(G, source)
+ # if some nodes have not been visited, then the graph is not connected
+ if len(distances) != len(G):
+ raise nx.NetworkXError("Graph not connected.")
+ # take a node that is (one of) the farthest nodes from the source
+ *_, node = distances
+ # return the eccentricity of the node
+ return nx.eccentricity(G, node)
+
+
+def _two_sweep_directed(G, seed):
+ """Helper function for finding a lower bound on the diameter
+ for directed Graphs.
+
+ It implements 2-dSweep, the directed version of the 2-sweep algorithm.
+ The algorithm follows the following steps.
+ 1. Select a source node $s$ at random.
+ 2. Perform a forward BFS from $s$ to select a node $a_1$ at the maximum
+ distance from the source, and compute $LB_1$, the backward eccentricity of $a_1$.
+ 3. Perform a backward BFS from $s$ to select a node $a_2$ at the maximum
+ distance from the source, and compute $LB_2$, the forward eccentricity of $a_2$.
+ 4. Return the maximum between $LB_1$ and $LB_2$.
+
+ ``G`` is a NetworkX directed graph.
+
+ .. note::
+
+ ``seed`` is a random.Random or numpy.random.RandomState instance
+ """
+ # get a new digraph G' with the edges reversed in the opposite direction
+ G_reversed = G.reverse()
+ # select a random source node
+ source = seed.choice(list(G))
+ # compute forward distances from source
+ forward_distances = nx.shortest_path_length(G, source)
+ # compute backward distances from source
+ backward_distances = nx.shortest_path_length(G_reversed, source)
+ # if either the source can't reach every node or not every node
+ # can reach the source, then the graph is not strongly connected
+ n = len(G)
+ if len(forward_distances) != n or len(backward_distances) != n:
+ raise nx.NetworkXError("DiGraph not strongly connected.")
+ # take a node a_1 at the maximum distance from the source in G
+ *_, a_1 = forward_distances
+ # take a node a_2 at the maximum distance from the source in G_reversed
+ *_, a_2 = backward_distances
+ # return the max between the backward eccentricity of a_1 and the forward eccentricity of a_2
+ return max(nx.eccentricity(G_reversed, a_1), nx.eccentricity(G, a_2))
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py
new file mode 100644
index 0000000000000000000000000000000000000000..06ab97d97612bf6ecf093661648f06db14ada539
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/dominating_set.py
@@ -0,0 +1,148 @@
+"""Functions for finding node and edge dominating sets.
+
+A `dominating set`_ for an undirected graph *G* with vertex set *V*
+and edge set *E* is a subset *D* of *V* such that every vertex not in
+*D* is adjacent to at least one member of *D*. An `edge dominating set`_
+is a subset *F* of *E* such that every edge not in *F* is
+incident to an endpoint of at least one edge in *F*.
+
+.. _dominating set: https://en.wikipedia.org/wiki/Dominating_set
+.. _edge dominating set: https://en.wikipedia.org/wiki/Edge_dominating_set
+
+"""
+import networkx as nx
+
+from ...utils import not_implemented_for
+from ..matching import maximal_matching
+
+__all__ = ["min_weighted_dominating_set", "min_edge_dominating_set"]
+
+
+# TODO Why doesn't this algorithm work for directed graphs?
+@not_implemented_for("directed")
+@nx._dispatchable(node_attrs="weight")
+def min_weighted_dominating_set(G, weight=None):
+ r"""Returns a dominating set that approximates the minimum weight node
+ dominating set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph.
+
+ weight : string
+ The node attribute storing the weight of an node. If provided,
+ the node attribute with this key must be a number for each
+ node. If not provided, each node is assumed to have weight one.
+
+ Returns
+ -------
+ min_weight_dominating_set : set
+ A set of nodes, the sum of whose weights is no more than `(\log
+ w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of
+ each node in the graph and `w(V^*)` denotes the sum of the
+ weights of each node in the minimum weight dominating set.
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0, 1), (0, 4), (1, 4), (1, 2), (2, 3), (3, 4), (2, 5)])
+ >>> nx.approximation.min_weighted_dominating_set(G)
+ {1, 2, 4}
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If G is directed.
+
+ Notes
+ -----
+ This algorithm computes an approximate minimum weighted dominating
+ set for the graph `G`. The returned solution has weight `(\log
+ w(V)) w(V^*)`, where `w(V)` denotes the sum of the weights of each
+ node in the graph and `w(V^*)` denotes the sum of the weights of
+ each node in the minimum weight dominating set for the graph.
+
+ This implementation of the algorithm runs in $O(m)$ time, where $m$
+ is the number of edges in the graph.
+
+ References
+ ----------
+ .. [1] Vazirani, Vijay V.
+ *Approximation Algorithms*.
+ Springer Science & Business Media, 2001.
+
+ """
+ # The unique dominating set for the null graph is the empty set.
+ if len(G) == 0:
+ return set()
+
+ # This is the dominating set that will eventually be returned.
+ dom_set = set()
+
+ def _cost(node_and_neighborhood):
+ """Returns the cost-effectiveness of greedily choosing the given
+ node.
+
+ `node_and_neighborhood` is a two-tuple comprising a node and its
+ closed neighborhood.
+
+ """
+ v, neighborhood = node_and_neighborhood
+ return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set)
+
+ # This is a set of all vertices not already covered by the
+ # dominating set.
+ vertices = set(G)
+ # This is a dictionary mapping each node to the closed neighborhood
+ # of that node.
+ neighborhoods = {v: {v} | set(G[v]) for v in G}
+
+ # Continue until all vertices are adjacent to some node in the
+ # dominating set.
+ while vertices:
+ # Find the most cost-effective node to add, along with its
+ # closed neighborhood.
+ dom_node, min_set = min(neighborhoods.items(), key=_cost)
+ # Add the node to the dominating set and reduce the remaining
+ # set of nodes to cover.
+ dom_set.add(dom_node)
+ del neighborhoods[dom_node]
+ vertices -= min_set
+
+ return dom_set
+
+
+@nx._dispatchable
+def min_edge_dominating_set(G):
+ r"""Returns minimum cardinality edge dominating set.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ Returns
+ -------
+ min_edge_dominating_set : set
+ Returns a set of dominating edges whose size is no more than 2 * OPT.
+
+ Examples
+ --------
+ >>> G = nx.petersen_graph()
+ >>> nx.approximation.min_edge_dominating_set(G)
+ {(0, 1), (4, 9), (6, 8), (5, 7), (2, 3)}
+
+ Raises
+ ------
+ ValueError
+ If the input graph `G` is empty.
+
+ Notes
+ -----
+ The algorithm computes an approximate solution to the edge dominating set
+ problem. The result is no more than 2 * OPT in terms of size of the set.
+ Runtime of the algorithm is $O(|E|)$.
+ """
+ if not G:
+ raise ValueError("Expected non-empty NetworkX graph!")
+ return maximal_matching(G)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py
new file mode 100644
index 0000000000000000000000000000000000000000..b540bd5f4a6fd8bc6e0b14744e562861731f5124
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/kcomponents.py
@@ -0,0 +1,369 @@
+""" Fast approximation for k-component structure
+"""
+import itertools
+from collections import defaultdict
+from collections.abc import Mapping
+from functools import cached_property
+
+import networkx as nx
+from networkx.algorithms.approximation import local_node_connectivity
+from networkx.exception import NetworkXError
+from networkx.utils import not_implemented_for
+
+__all__ = ["k_components"]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(name="approximate_k_components")
+def k_components(G, min_density=0.95):
+ r"""Returns the approximate k-component structure of a graph G.
+
+ A `k`-component is a maximal subgraph of a graph G that has, at least,
+ node connectivity `k`: we need to remove at least `k` nodes to break it
+ into more components. `k`-components have an inherent hierarchical
+ structure because they are nested in terms of connectivity: a connected
+ graph can contain several 2-components, each of which can contain
+ one or more 3-components, and so forth.
+
+ This implementation is based on the fast heuristics to approximate
+ the `k`-component structure of a graph [1]_. Which, in turn, it is based on
+ a fast approximation algorithm for finding good lower bounds of the number
+ of node independent paths between two nodes [2]_.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+ Undirected graph
+
+ min_density : Float
+ Density relaxation threshold. Default value 0.95
+
+ Returns
+ -------
+ k_components : dict
+ Dictionary with connectivity level `k` as key and a list of
+ sets of nodes that form a k-component of level `k` as values.
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If G is directed.
+
+ Examples
+ --------
+ >>> # Petersen graph has 10 nodes and it is triconnected, thus all
+ >>> # nodes are in a single component on all three connectivity levels
+ >>> from networkx.algorithms import approximation as apxa
+ >>> G = nx.petersen_graph()
+ >>> k_components = apxa.k_components(G)
+
+ Notes
+ -----
+ The logic of the approximation algorithm for computing the `k`-component
+ structure [1]_ is based on repeatedly applying simple and fast algorithms
+ for `k`-cores and biconnected components in order to narrow down the
+ number of pairs of nodes over which we have to compute White and Newman's
+ approximation algorithm for finding node independent paths [2]_. More
+ formally, this algorithm is based on Whitney's theorem, which states
+ an inclusion relation among node connectivity, edge connectivity, and
+ minimum degree for any graph G. This theorem implies that every
+ `k`-component is nested inside a `k`-edge-component, which in turn,
+ is contained in a `k`-core. Thus, this algorithm computes node independent
+ paths among pairs of nodes in each biconnected part of each `k`-core,
+ and repeats this procedure for each `k` from 3 to the maximal core number
+ of a node in the input graph.
+
+ Because, in practice, many nodes of the core of level `k` inside a
+ bicomponent actually are part of a component of level k, the auxiliary
+ graph needed for the algorithm is likely to be very dense. Thus, we use
+ a complement graph data structure (see `AntiGraph`) to save memory.
+ AntiGraph only stores information of the edges that are *not* present
+ in the actual auxiliary graph. When applying algorithms to this
+ complement graph data structure, it behaves as if it were the dense
+ version.
+
+ See also
+ --------
+ k_components
+
+ References
+ ----------
+ .. [1] Torrents, J. and F. Ferraro (2015) Structural Cohesion:
+ Visualization and Heuristics for Fast Computation.
+ https://arxiv.org/pdf/1503.04476v1
+
+ .. [2] White, Douglas R., and Mark Newman (2001) A Fast Algorithm for
+ Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035
+ https://www.santafe.edu/research/results/working-papers/fast-approximation-algorithms-for-finding-node-ind
+
+ .. [3] Moody, J. and D. White (2003). Social cohesion and embeddedness:
+ A hierarchical conception of social groups.
+ American Sociological Review 68(1), 103--28.
+ https://doi.org/10.2307/3088904
+
+ """
+ # Dictionary with connectivity level (k) as keys and a list of
+ # sets of nodes that form a k-component as values
+ k_components = defaultdict(list)
+ # make a few functions local for speed
+ node_connectivity = local_node_connectivity
+ k_core = nx.k_core
+ core_number = nx.core_number
+ biconnected_components = nx.biconnected_components
+ combinations = itertools.combinations
+ # Exact solution for k = {1,2}
+ # There is a linear time algorithm for triconnectivity, if we had an
+ # implementation available we could start from k = 4.
+ for component in nx.connected_components(G):
+ # isolated nodes have connectivity 0
+ comp = set(component)
+ if len(comp) > 1:
+ k_components[1].append(comp)
+ for bicomponent in nx.biconnected_components(G):
+ # avoid considering dyads as bicomponents
+ bicomp = set(bicomponent)
+ if len(bicomp) > 2:
+ k_components[2].append(bicomp)
+ # There is no k-component of k > maximum core number
+ # \kappa(G) <= \lambda(G) <= \delta(G)
+ g_cnumber = core_number(G)
+ max_core = max(g_cnumber.values())
+ for k in range(3, max_core + 1):
+ C = k_core(G, k, core_number=g_cnumber)
+ for nodes in biconnected_components(C):
+ # Build a subgraph SG induced by the nodes that are part of
+ # each biconnected component of the k-core subgraph C.
+ if len(nodes) < k:
+ continue
+ SG = G.subgraph(nodes)
+ # Build auxiliary graph
+ H = _AntiGraph()
+ H.add_nodes_from(SG.nodes())
+ for u, v in combinations(SG, 2):
+ K = node_connectivity(SG, u, v, cutoff=k)
+ if k > K:
+ H.add_edge(u, v)
+ for h_nodes in biconnected_components(H):
+ if len(h_nodes) <= k:
+ continue
+ SH = H.subgraph(h_nodes)
+ for Gc in _cliques_heuristic(SG, SH, k, min_density):
+ for k_nodes in biconnected_components(Gc):
+ Gk = nx.k_core(SG.subgraph(k_nodes), k)
+ if len(Gk) <= k:
+ continue
+ k_components[k].append(set(Gk))
+ return k_components
+
+
+def _cliques_heuristic(G, H, k, min_density):
+ h_cnumber = nx.core_number(H)
+ for i, c_value in enumerate(sorted(set(h_cnumber.values()), reverse=True)):
+ cands = {n for n, c in h_cnumber.items() if c == c_value}
+ # Skip checking for overlap for the highest core value
+ if i == 0:
+ overlap = False
+ else:
+ overlap = set.intersection(
+ *[{x for x in H[n] if x not in cands} for n in cands]
+ )
+ if overlap and len(overlap) < k:
+ SH = H.subgraph(cands | overlap)
+ else:
+ SH = H.subgraph(cands)
+ sh_cnumber = nx.core_number(SH)
+ SG = nx.k_core(G.subgraph(SH), k)
+ while not (_same(sh_cnumber) and nx.density(SH) >= min_density):
+ # This subgraph must be writable => .copy()
+ SH = H.subgraph(SG).copy()
+ if len(SH) <= k:
+ break
+ sh_cnumber = nx.core_number(SH)
+ sh_deg = dict(SH.degree())
+ min_deg = min(sh_deg.values())
+ SH.remove_nodes_from(n for n, d in sh_deg.items() if d == min_deg)
+ SG = nx.k_core(G.subgraph(SH), k)
+ else:
+ yield SG
+
+
+def _same(measure, tol=0):
+ vals = set(measure.values())
+ if (max(vals) - min(vals)) <= tol:
+ return True
+ return False
+
+
+class _AntiGraph(nx.Graph):
+ """
+ Class for complement graphs.
+
+ The main goal is to be able to work with big and dense graphs with
+ a low memory footprint.
+
+ In this class you add the edges that *do not exist* in the dense graph,
+ the report methods of the class return the neighbors, the edges and
+ the degree as if it was the dense graph. Thus it's possible to use
+ an instance of this class with some of NetworkX functions. In this
+ case we only use k-core, connected_components, and biconnected_components.
+ """
+
+ all_edge_dict = {"weight": 1}
+
+ def single_edge_dict(self):
+ return self.all_edge_dict
+
+ edge_attr_dict_factory = single_edge_dict # type: ignore[assignment]
+
+ def __getitem__(self, n):
+ """Returns a dict of neighbors of node n in the dense graph.
+
+ Parameters
+ ----------
+ n : node
+ A node in the graph.
+
+ Returns
+ -------
+ adj_dict : dictionary
+ The adjacency dictionary for nodes connected to n.
+
+ """
+ all_edge_dict = self.all_edge_dict
+ return {
+ node: all_edge_dict for node in set(self._adj) - set(self._adj[n]) - {n}
+ }
+
+ def neighbors(self, n):
+ """Returns an iterator over all neighbors of node n in the
+ dense graph.
+ """
+ try:
+ return iter(set(self._adj) - set(self._adj[n]) - {n})
+ except KeyError as err:
+ raise NetworkXError(f"The node {n} is not in the graph.") from err
+
+ class AntiAtlasView(Mapping):
+ """An adjacency inner dict for AntiGraph"""
+
+ def __init__(self, graph, node):
+ self._graph = graph
+ self._atlas = graph._adj[node]
+ self._node = node
+
+ def __len__(self):
+ return len(self._graph) - len(self._atlas) - 1
+
+ def __iter__(self):
+ return (n for n in self._graph if n not in self._atlas and n != self._node)
+
+ def __getitem__(self, nbr):
+ nbrs = set(self._graph._adj) - set(self._atlas) - {self._node}
+ if nbr in nbrs:
+ return self._graph.all_edge_dict
+ raise KeyError(nbr)
+
+ class AntiAdjacencyView(AntiAtlasView):
+ """An adjacency outer dict for AntiGraph"""
+
+ def __init__(self, graph):
+ self._graph = graph
+ self._atlas = graph._adj
+
+ def __len__(self):
+ return len(self._atlas)
+
+ def __iter__(self):
+ return iter(self._graph)
+
+ def __getitem__(self, node):
+ if node not in self._graph:
+ raise KeyError(node)
+ return self._graph.AntiAtlasView(self._graph, node)
+
+ @cached_property
+ def adj(self):
+ return self.AntiAdjacencyView(self)
+
+ def subgraph(self, nodes):
+ """This subgraph method returns a full AntiGraph. Not a View"""
+ nodes = set(nodes)
+ G = _AntiGraph()
+ G.add_nodes_from(nodes)
+ for n in G:
+ Gnbrs = G.adjlist_inner_dict_factory()
+ G._adj[n] = Gnbrs
+ for nbr, d in self._adj[n].items():
+ if nbr in G._adj:
+ Gnbrs[nbr] = d
+ G._adj[nbr][n] = d
+ G.graph = self.graph
+ return G
+
+ class AntiDegreeView(nx.reportviews.DegreeView):
+ def __iter__(self):
+ all_nodes = set(self._succ)
+ for n in self._nodes:
+ nbrs = all_nodes - set(self._succ[n]) - {n}
+ yield (n, len(nbrs))
+
+ def __getitem__(self, n):
+ nbrs = set(self._succ) - set(self._succ[n]) - {n}
+ # AntiGraph is a ThinGraph so all edges have weight 1
+ return len(nbrs) + (n in nbrs)
+
+ @cached_property
+ def degree(self):
+ """Returns an iterator for (node, degree) and degree for single node.
+
+ The node degree is the number of edges adjacent to the node.
+
+ Parameters
+ ----------
+ nbunch : iterable container, optional (default=all nodes)
+ A container of nodes. The container will be iterated
+ through once.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ Returns
+ -------
+ deg:
+ Degree of the node, if a single node is passed as argument.
+ nd_iter : an iterator
+ The iterator returns two-tuples of (node, degree).
+
+ See Also
+ --------
+ degree
+
+ Examples
+ --------
+ >>> G = nx.path_graph(4)
+ >>> G.degree(0) # node 0 with degree 1
+ 1
+ >>> list(G.degree([0, 1]))
+ [(0, 1), (1, 2)]
+
+ """
+ return self.AntiDegreeView(self)
+
+ def adjacency(self):
+ """Returns an iterator of (node, adjacency set) tuples for all nodes
+ in the dense graph.
+
+ This is the fastest way to look at every edge.
+ For directed graphs, only outgoing adjacencies are included.
+
+ Returns
+ -------
+ adj_iter : iterator
+ An iterator of (node, adjacency set) for all nodes in
+ the graph.
+
+ """
+ for n in self._adj:
+ yield (n, set(self._adj) - set(self._adj[n]) - {n})
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/matching.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a7c8a39b2e2884ac5a8f1ff8ca795e7c7bdb73c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/matching.py
@@ -0,0 +1,43 @@
+"""
+**************
+Graph Matching
+**************
+
+Given a graph G = (V,E), a matching M in G is a set of pairwise non-adjacent
+edges; that is, no two edges share a common vertex.
+
`Wikipedia: Matching <https://en.wikipedia.org/wiki/Matching_(graph_theory)>`_
+"""
+import networkx as nx
+
+__all__ = ["min_maximal_matching"]
+
+
@nx._dispatchable
def min_maximal_matching(G):
    r"""Return an approximation of the minimum maximal matching of `G`.

    Out of all maximal matchings of `G`, the smallest one is sought; this
    routine returns a maximal matching whose cardinality is at most twice
    the optimum.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    min_maximal_matching : set
        A set of edges such that no two edges share a common endpoint
        and every edge not in the set shares some common endpoint in the
        set.  Cardinality will be 2*OPT in the worst case.

    Notes
    -----
    Any maximal matching is a 2-approximation of the minimum maximal
    cardinality matching (Vazirani 2001), so greedily building one in
    $O(|E|)$ time suffices.

    References
    ----------
    .. [1] Vazirani, Vijay Approximation Algorithms (2001)
    """
    return nx.maximal_matching(G)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/maxcut.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/maxcut.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4e1da87c35ab821f4b3d0851bba19d599d8fa6a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/maxcut.py
@@ -0,0 +1,143 @@
+import networkx as nx
+from networkx.utils.decorators import not_implemented_for, py_random_state
+
+__all__ = ["randomized_partitioning", "one_exchange"]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@py_random_state(1)
@nx._dispatchable(edge_attrs="weight")
def randomized_partitioning(G, seed=None, p=0.5, weight=None):
    """Compute a random node partitioning of `G` and its cut value.

    Each node is assigned to the first side of the partition independently
    with probability `p`; the cut value is the total weight of the edges
    whose endpoints land on different sides.

    Parameters
    ----------
    G : NetworkX graph

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    p : scalar
        Probability for each node to be part of the first partition.
        Should be in [0,1]

    weight : object
        Edge attribute key to use as weight. If not specified, edges
        have weight one.

    Returns
    -------
    cut_size : scalar
        Weight of the edges crossing the random partition.

    partition : pair of node sets
        The two sides of the random partition.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> cut_size, partition = nx.approximation.randomized_partitioning(G, seed=1)
    >>> cut_size
    6
    >>> partition
    ({0, 3, 4}, {1, 2})

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed or is a multigraph.
    """
    # One RNG draw per node, in node order, decides its side.
    first_side = {node for node in G.nodes() if seed.random() < p}
    value = nx.algorithms.cut_size(G, first_side, weight=weight)
    return value, (first_side, G.nodes - first_side)
+
+
+def _swap_node_partition(cut, node):
+ return cut - {node} if node in cut else cut.union({node})
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@py_random_state(2)
@nx._dispatchable(edge_attrs="weight")
def one_exchange(G, initial_cut=None, seed=None, weight=None):
    """Compute a partitioning of the graphs nodes and the corresponding cut value.

    Use a greedy one exchange strategy to find a locally maximal cut
    and its value, it works by finding the best node (one that gives
    the highest gain to the cut value) to add to the current cut
    and repeats this process until no improvement can be made.

    Parameters
    ----------
    G : networkx Graph
        Graph to find a maximum cut for.

    initial_cut : set
        Cut to use as a starting point. If not supplied the algorithm
        starts with an empty cut.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    weight : object
        Edge attribute key to use as weight. If not specified, edges
        have weight one.

    Returns
    -------
    cut_value : scalar
        Value of the maximum cut.

    partition : pair of node sets
        A partitioning of the nodes that defines a maximum cut.

    Examples
    --------
    >>> G = nx.complete_graph(5)
    >>> curr_cut_size, partition = nx.approximation.one_exchange(G, seed=1)
    >>> curr_cut_size
    6
    >>> partition
    ({0, 2}, {1, 3, 4})

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed or is a multigraph.
    """
    if initial_cut is None:
        initial_cut = set()
    # Copy so the caller's `initial_cut` is never mutated.
    cut = set(initial_cut)
    current_cut_size = nx.algorithms.cut_size(G, cut, weight=weight)
    while True:
        nodes = list(G.nodes())
        # Shuffling the nodes ensures random tie-breaks in the following call to max
        seed.shuffle(nodes)
        # Evaluate, for every node, the cut obtained by moving that single
        # node across the partition, and keep the node with the largest
        # resulting cut.  (On an empty graph `max` yields None; the swap then
        # toggles None and the size comparison below terminates the loop.)
        best_node_to_swap = max(
            nodes,
            key=lambda v: nx.algorithms.cut_size(
                G, _swap_node_partition(cut, v), weight=weight
            ),
            default=None,
        )
        potential_cut = _swap_node_partition(cut, best_node_to_swap)
        potential_cut_size = nx.algorithms.cut_size(G, potential_cut, weight=weight)

        # Accept the swap only on strict improvement; otherwise we are at a
        # local maximum and stop.
        if potential_cut_size > current_cut_size:
            cut = potential_cut
            current_cut_size = potential_cut_size
        else:
            break

    partition = (cut, G.nodes - cut)
    return current_cut_size, partition
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/ramsey.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/ramsey.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cb9fda04494718e1bd9d908d77e08a3a9ec0495
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/ramsey.py
@@ -0,0 +1,52 @@
+"""
+Ramsey numbers.
+"""
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+from ...utils import arbitrary_element
+
+__all__ = ["ramsey_R2"]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def ramsey_R2(G):
    r"""Compute the largest clique and largest independent set in `G`.

    This can be used to estimate bounds for the 2-color
    Ramsey number `R(2;s,t)` for `G`.

    This is a recursive implementation which could run into trouble
    for large recursions. Note that self-loop edges are ignored.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    max_pair : (set, set) tuple
        Maximum clique, Maximum independent set.

    Raises
    ------
    NetworkXNotImplemented
        If the graph is directed or is a multigraph.
    """
    if not G:
        return set(), set()

    pivot = arbitrary_element(G)
    # Self-loops are ignored, hence the explicit pivot exclusion.
    neighbors = (v for v in nx.all_neighbors(G, pivot) if v != pivot)
    non_neighbors = nx.non_neighbors(G, pivot)

    # Recurse on the neighborhood (a clique there extends through the pivot)
    # and on the non-neighborhood (an independent set there extends likewise).
    clique_nbr, indep_nbr = ramsey_R2(G.subgraph(neighbors).copy())
    clique_non, indep_non = ramsey_R2(G.subgraph(non_neighbors).copy())

    clique_nbr.add(pivot)
    indep_non.add(pivot)
    # Keep the larger clique and the larger independent set (by cardinality).
    return max(clique_nbr, clique_non, key=len), max(indep_nbr, indep_non, key=len)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6c834f422c79ba252397ab939a5b06d73cbcb35
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/steinertree.py
@@ -0,0 +1,220 @@
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import not_implemented_for, pairwise
+
+__all__ = ["metric_closure", "steiner_tree"]
+
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight", returns_graph=True)
def metric_closure(G, weight="weight"):
    """Return the metric closure of a graph.

    The metric closure of a graph *G* is the complete graph in which each
    edge is weighted by the shortest path distance between the nodes in *G*;
    each edge also stores the corresponding shortest path.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    NetworkX graph
        Metric closure of the graph `G`.

    """
    closure = nx.Graph()
    remaining = set(G)

    shortest_paths = nx.all_pairs_dijkstra(G, weight=weight)

    # The first node doubles as a connectivity check: it must reach everyone.
    first, (dist, path) = next(shortest_paths)
    if remaining - set(dist):
        raise nx.NetworkXError(
            "G is not a connected graph. metric_closure is not defined."
        )
    remaining.remove(first)
    for other in remaining:
        closure.add_edge(first, other, distance=dist[other], path=path[other])

    # Remaining sources: only add each unordered pair once by shrinking
    # `remaining` as we go.
    for source, (dist, path) in shortest_paths:
        remaining.remove(source)
        for other in remaining:
            closure.add_edge(source, other, distance=dist[other], path=path[other])

    return closure
+
+
def _mehlhorn_steiner_tree(G, terminal_nodes, weight):
    """Approximate a minimum Steiner tree with Mehlhorn's (1988) algorithm.

    Returns the edge view of the pruned approximation subgraph.

    Fixes relative to the previous version: ``multi_source_dijkstra`` is
    called with ``weight=weight`` (it was omitted, silently ignoring a
    custom weight key), and ``d_1`` now holds the *weighted* distance from
    each node to its nearest terminal rather than the hop count
    (``len(path) - 1``), as the algorithm requires.
    """
    # Weighted shortest paths from the terminal set to every node.
    distances, paths = nx.multi_source_dijkstra(G, terminal_nodes, weight=weight)

    # s[v]: terminal closest to v; d_1[(v, s[v])]: weighted distance to it.
    d_1 = {}
    s = {}
    for v in G.nodes():
        s[v] = paths[v][0]
        d_1[(v, s[v])] = distances[v]

    # G1-G4 names match those from the Mehlhorn 1988 paper.
    G_1_prime = nx.Graph()
    for u, v, data in G.edges(data=True):
        su, sv = s[u], s[v]
        weight_here = d_1[(u, su)] + data.get(weight, 1) + d_1[(v, sv)]
        if not G_1_prime.has_edge(su, sv):
            G_1_prime.add_edge(su, sv, weight=weight_here)
        else:
            new_weight = min(weight_here, G_1_prime[su][sv]["weight"])
            G_1_prime.add_edge(su, sv, weight=new_weight)

    G_2 = nx.minimum_spanning_edges(G_1_prime, data=True)

    # Expand each MST edge of the terminal graph back into a shortest path.
    G_3 = nx.Graph()
    for u, v, d in G_2:
        path = nx.shortest_path(G, u, v, weight)
        for n1, n2 in pairwise(path):
            G_3.add_edge(n1, n2)

    G_3_mst = list(nx.minimum_spanning_edges(G_3, data=False))
    if G.is_multigraph():
        # Pick the minimum-weight parallel edge for each pair.
        G_3_mst = (
            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight])) for u, v in G_3_mst
        )
    G_4 = G.edge_subgraph(G_3_mst).copy()
    _remove_nonterminal_leaves(G_4, terminal_nodes)
    return G_4.edges()
+
+
def _kou_steiner_tree(G, terminal_nodes, weight):
    """Approximate a minimum Steiner tree with the Kou et al. (1981) algorithm.

    Returns the edge view of the pruned approximation subgraph.
    """
    # H is the subgraph induced by terminal_nodes in the metric closure M of G.
    M = metric_closure(G, weight=weight)
    H = M.subgraph(terminal_nodes)

    # Use the 'distance' attribute of each edge provided by M.
    mst_edges = nx.minimum_spanning_edges(H, weight="distance", data=True)

    # Create an iterator over each edge in each shortest path; repeats are okay
    mst_all_edges = chain.from_iterable(pairwise(d["path"]) for u, v, d in mst_edges)
    if G.is_multigraph():
        # For multigraphs, keep only the minimum-weight parallel edge per pair.
        mst_all_edges = (
            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight]))
            for u, v in mst_all_edges
        )

    # Find the MST again, over this new set of edges
    G_S = G.edge_subgraph(mst_all_edges)
    # NOTE(review): this MST uses the hardcoded attribute "weight" rather than
    # the `weight` parameter used everywhere else — confirm against upstream
    # before changing, as callers may rely on the current behavior.
    T_S = nx.minimum_spanning_edges(G_S, weight="weight", data=False)

    # Leaf nodes that are not terminal might still remain; remove them here
    T_H = G.edge_subgraph(T_S).copy()
    _remove_nonterminal_leaves(T_H, terminal_nodes)

    return T_H.edges()
+
+
+def _remove_nonterminal_leaves(G, terminals):
+ terminals_set = set(terminals)
+ for n in list(G.nodes):
+ if n not in terminals_set and G.degree(n) == 1:
+ G.remove_node(n)
+
+
# Dispatch table mapping the public `method` keyword of `steiner_tree`
# to its implementation.
ALGORITHMS = {
    "kou": _kou_steiner_tree,
    "mehlhorn": _mehlhorn_steiner_tree,
}
+
+
@not_implemented_for("directed")
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
def steiner_tree(G, terminal_nodes, weight="weight", method=None):
    r"""Return an approximation to the minimum Steiner tree of a graph.

    The minimum Steiner tree of `G` w.r.t a set of `terminal_nodes` (also *S*)
    is a tree within `G` that spans those nodes and has minimum size (sum of
    edge weights) among all such trees.

    Both available algorithms produce a tree whose weight is within a
    ``(2 - (2 / l))`` factor of the optimum, where ``l`` is the minimum
    number of leaf nodes across all possible Steiner trees:

    * ``"kou"`` [2]_ (runtime $O(|S| |V|^2)$) computes the minimum spanning
      tree of the subgraph of the metric closure of *G* induced by the
      terminal nodes, where the metric closure of *G* is the complete graph
      in which each edge is weighted by the shortest path distance between
      the nodes in *G*.

    * ``"mehlhorn"`` [3]_ (runtime $O(|E|+|V|\log|V|)$) modifies Kou et
      al.'s algorithm, first finding the closest terminal for each
      non-terminal and building a complete graph over the terminals only,
      weighted by shortest path distances, then proceeding as Kou et al..

    Parameters
    ----------
    G : NetworkX graph

    terminal_nodes : list
        A list of terminal nodes for which minimum steiner tree is
        to be found.

    weight : string (default = 'weight')
        Use the edge attribute specified by this string as the edge weight.
        Any edge attribute not present defaults to 1.

    method : string, optional (default = 'mehlhorn')
        The algorithm to use to approximate the Steiner tree.
        Supported options: 'kou', 'mehlhorn'.
        Other inputs produce a ValueError.

    Returns
    -------
    NetworkX graph
        Approximation to the minimum steiner tree of `G` induced by
        `terminal_nodes` .

    Raises
    ------
    NetworkXNotImplemented
        If `G` is directed.

    ValueError
        If the specified `method` is not supported.

    Notes
    -----
    For multigraphs, the edge between two nodes with minimum weight is the
    edge put into the Steiner tree.


    References
    ----------
    .. [1] Steiner_tree_problem on Wikipedia.
       https://en.wikipedia.org/wiki/Steiner_tree_problem
    .. [2] Kou, L., G. Markowsky, and L. Berman. 1981.
       ‘A Fast Algorithm for Steiner Trees’.
       Acta Informatica 15 (2): 141–45.
       https://doi.org/10.1007/BF00288961.
    .. [3] Mehlhorn, Kurt. 1988.
       ‘A Faster Approximation Algorithm for the Steiner Problem in Graphs’.
       Information Processing Letters 27 (3): 125–28.
       https://doi.org/10.1016/0020-0190(88)90066-X.
    """
    if method is None:
        method = "mehlhorn"

    try:
        algorithm = ALGORITHMS[method]
    except KeyError as err:
        raise ValueError(f"{method} is not a valid choice for an algorithm.") from err

    branches = algorithm(G, terminal_nodes, weight)
    if G.is_multigraph():
        # For multigraph we should add the minimal weight edge keys
        branches = (
            (u, v, min(G[u][v], key=lambda k: G[u][v][k][weight]))
            for u, v in branches
        )
    return G.edge_subgraph(branches)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95ac253600cc1daad25290865bc22767470c9da0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8dd4ca48f3b2183599342735622601e7ae8d937b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..755c608416155ae5698d419e839e88f6daeac143
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8dda7826c8b9d8c9f221a53776704684807af6ea
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3917e26501d2b6abfee90a9d4490c6b9675f270d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcf24aad2024fd1284cb96512ab0b627286930e4
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb05814e4a855cceb9320441a422b4996ee9fa28
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..49d8ea932748d071f942b15ed82dfe544f6c562c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c06bd76b7d16f6f3112b9728f6a1536b859f9fd
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec1f5d782e65f8084f9a07fbc7193cc920dc6eb3
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0261a246a4ce8fac7fec40b549c59e3ca41728e9
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cdbc16980c90e65578989e733aa037f6a21c2bea
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a896423a5001f5c1f38d31f64d7f361ce9767b38
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..166326d7b47c9c0d8e963a7d6d9835db4a2abda8
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py
new file mode 100644
index 0000000000000000000000000000000000000000..5eab5c1ee79408c9f90a1993415a6c3d7d957141
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_approx_clust_coeff.py
@@ -0,0 +1,41 @@
+import networkx as nx
+from networkx.algorithms.approximation import average_clustering
+
+# This approximation has to be exact in regular graphs
+# with no triangles or with all possible triangles.
+
+
def test_petersen():
    # The Petersen graph is triangle-free, so the true average clustering is
    # 0 and the sampled approximation must match it exactly.
    graph = nx.petersen_graph()
    exact = nx.average_clustering(graph)
    assert average_clustering(graph, trials=len(graph) // 2) == exact
+
+
def test_petersen_seed():
    # Seeded variant: still triangle-free, the exact answer is 0.
    graph = nx.petersen_graph()
    exact = nx.average_clustering(graph)
    assert average_clustering(graph, trials=len(graph) // 2, seed=1) == exact
+
+
def test_tetrahedral():
    # K4 realizes every possible triangle, so the coefficient is exactly 1.
    graph = nx.tetrahedral_graph()
    exact = nx.average_clustering(graph)
    assert average_clustering(graph, trials=len(graph) // 2) == exact
+
+
def test_dodecahedral():
    # The dodecahedral graph is triangle-free: the coefficient is 0.
    graph = nx.dodecahedral_graph()
    exact = nx.average_clustering(graph)
    assert average_clustering(graph, trials=len(graph) // 2) == exact
+
+
def test_empty():
    # No edges at all: every sampled node contributes 0.
    graph = nx.empty_graph(5)
    assert average_clustering(graph, trials=len(graph) // 2) == 0
+
+
def test_complete():
    # Complete graphs have clustering coefficient 1 regardless of size.
    for order in (5, 7):
        graph = nx.complete_graph(order)
        assert average_clustering(graph, trials=len(graph) // 2) == 1
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebda285b7d8c887a37cc7064cb41a10acdb074d5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_clique.py
@@ -0,0 +1,113 @@
+"""Unit tests for the :mod:`networkx.algorithms.approximation.clique` module."""
+
+
+import networkx as nx
+from networkx.algorithms.approximation import (
+ clique_removal,
+ large_clique_size,
+ max_clique,
+ maximum_independent_set,
+)
+
+
def is_independent_set(G, nodes):
    """Returns True if and only if `nodes` is an independent set in `G`.

    `G` is a NetworkX graph. `nodes` is an iterable of nodes in
    `G`.

    """
    # (The original docstring wrongly described this as a clique test;
    # the body checks that the induced subgraph has no edges.)
    return G.subgraph(nodes).number_of_edges() == 0
+
+
def is_clique(G, nodes):
    """Returns True if and only if `nodes` is a clique in `G`.

    `G` is an undirected simple graph. `nodes` is an iterable of
    nodes in `G`.

    """
    # (The original docstring wrongly described this as an independent-set
    # test; the body checks the induced subgraph has all n*(n-1)/2 edges.)
    H = G.subgraph(nodes)
    n = len(H)
    return H.number_of_edges() == n * (n - 1) // 2
+
+
class TestCliqueRemoval:
    """Unit tests for the
    :func:`~networkx.algorithms.approximation.clique_removal` function.

    """

    @staticmethod
    def _check(G):
        # Shared assertions: the returned node set must be independent and
        # every returned clique must actually be a clique of G.
        independent_set, cliques = clique_removal(G)
        assert is_independent_set(G, independent_set)
        assert all(is_clique(G, clique) for clique in cliques)
        return cliques

    def test_trivial_graph(self):
        cliques = self._check(nx.trivial_graph())
        # In fact, we should only have 1-cliques, that is, singleton nodes.
        assert all(len(clique) == 1 for clique in cliques)

    def test_complete_graph(self):
        self._check(nx.complete_graph(10))

    def test_barbell_graph(self):
        self._check(nx.barbell_graph(10, 5))
+ assert all(is_clique(G, clique) for clique in cliques)
+
+
class TestMaxClique:
    """Unit tests for the :func:`networkx.algorithms.approximation.max_clique`
    function.

    """

    def test_null_graph(self):
        # No nodes at all: the only clique is the empty one.
        assert not max_clique(nx.null_graph())

    def test_complete_graph(self):
        # The whole vertex set of K30 is the unique maximum clique.
        assert len(max_clique(nx.complete_graph(30))) == 30

    def test_maximal_by_cardinality(self):
        """Tests that the maximal clique is computed according to maximum
        cardinality of the sets.

        For more information, see pull request #1531.

        """
        graph = nx.complete_graph(5)
        graph.add_edge(4, 5)
        assert len(max_clique(graph)) > 1

        graph = nx.lollipop_graph(30, 2)
        assert len(max_clique(graph)) > 2
+
+
def test_large_clique_size():
    # K9 plus a pendant triangle, a pendant edge and an isolated node.
    graph = nx.complete_graph(9)
    nx.add_cycle(graph, [9, 10, 11])
    graph.add_edge(8, 9)
    graph.add_edge(1, 12)
    graph.add_node(13)
    assert large_clique_size(graph) == 9

    # Shrinking the K9 shrinks the answer accordingly.
    graph.remove_node(5)
    assert large_clique_size(graph) == 8
    graph.remove_edge(2, 3)
    assert large_clique_size(graph) == 7
+
+
def test_independent_set():
    # Smoke test: the empty graph has an empty maximum independent set.
    assert len(maximum_independent_set(nx.Graph())) == 0
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..887db20bcaef8dd2641c64e963c789234aecbb20
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_connectivity.py
@@ -0,0 +1,199 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms import approximation as approx
+
+
def test_global_node_connectivity():
    # Figure 1 chapter on Connectivity
    edges = [
        (1, 2), (1, 3), (1, 4), (1, 5), (2, 3), (2, 6), (3, 4),
        (3, 6), (4, 6), (4, 7), (5, 7), (6, 8), (6, 9), (7, 8),
        (7, 10), (8, 11), (9, 10), (9, 11), (10, 11),
    ]
    graph = nx.Graph(edges)
    assert approx.local_node_connectivity(graph, 1, 11) == 2
    assert approx.node_connectivity(graph) == 2
    assert approx.node_connectivity(graph, 1, 11) == 2
+
+
def test_white_harary1():
    # Figure 1b white and harary (2001)
    # A graph with high adhesion (edge connectivity) and low cohesion
    # (node connectivity)
    graph = nx.disjoint_union(nx.complete_graph(4), nx.complete_graph(4))
    graph.remove_node(7)
    graph.add_edges_from((0, i) for i in range(4, 7))
    graph = nx.disjoint_union(graph, nx.complete_graph(4))
    graph.remove_node(graph.order() - 1)
    graph.add_edges_from((0, i) for i in range(7, 10))
    assert approx.node_connectivity(graph) == 1
+
+
def test_complete_graphs():
    # K_n has node connectivity n - 1, globally and for any pair.
    for order in range(5, 25, 5):
        graph = nx.complete_graph(order)
        assert approx.node_connectivity(graph) == order - 1
        assert approx.node_connectivity(graph, 0, 3) == order - 1
+
+
def test_empty_graphs():
    # Edgeless graphs have zero connectivity everywhere.
    for order in range(5, 25, 5):
        graph = nx.empty_graph(order)
        assert approx.node_connectivity(graph) == 0
        assert approx.node_connectivity(graph, 0, 3) == 0
+
+
def test_petersen():
    # The Petersen graph is 3-connected.
    graph = nx.petersen_graph()
    assert approx.node_connectivity(graph) == 3
    assert approx.node_connectivity(graph, 0, 5) == 3
+
+
+# Approximation fails with tutte graph
+# def test_tutte():
+# G = nx.tutte_graph()
+# assert_equal(3, approx.node_connectivity(G))
+
+
def test_dodecahedral():
    # The dodecahedral graph is 3-connected.
    graph = nx.dodecahedral_graph()
    assert approx.node_connectivity(graph) == 3
    assert approx.node_connectivity(graph, 0, 5) == 3
+
+
def test_octahedral():
    # The octahedral graph is 4-connected.
    graph = nx.octahedral_graph()
    assert approx.node_connectivity(graph) == 4
    assert approx.node_connectivity(graph, 0, 5) == 4
+
+
+# Approximation can fail with icosahedral graph depending
+# on iteration order.
+# def test_icosahedral():
+# G=nx.icosahedral_graph()
+# assert_equal(5, approx.node_connectivity(G))
+# assert_equal(5, approx.node_connectivity(G, 0, 5))
+
+
def test_only_source():
    # Supplying `s` without `t` is rejected.
    with pytest.raises(nx.NetworkXError):
        approx.node_connectivity(nx.complete_graph(5), s=0)
+
+
def test_only_target():
    # Supplying `t` without `s` is rejected.
    with pytest.raises(nx.NetworkXError):
        approx.node_connectivity(nx.complete_graph(5), t=0)
+
+
def test_missing_source():
    # A source node absent from the graph is rejected.
    with pytest.raises(nx.NetworkXError):
        approx.node_connectivity(nx.path_graph(4), 10, 1)
+
+
def test_missing_target():
    # A target node absent from the graph is rejected.
    with pytest.raises(nx.NetworkXError):
        approx.node_connectivity(nx.path_graph(4), 1, 10)
+
+
def test_source_equals_target():
    # Local connectivity between a node and itself is undefined.
    with pytest.raises(nx.NetworkXError):
        approx.local_node_connectivity(nx.complete_graph(5), 0, 0)
+
+
def test_directed_node_connectivity():
    one_way = nx.cycle_graph(10, create_using=nx.DiGraph())  # only one direction
    two_way = nx.cycle_graph(10).to_directed()  # 2 reciprocal edges
    assert approx.node_connectivity(one_way) == 1
    assert approx.node_connectivity(one_way, 1, 4) == 1
    assert approx.node_connectivity(two_way) == 2
    assert approx.node_connectivity(two_way, 1, 4) == 2
+
+
class TestAllPairsNodeConnectivityApprox:
    @classmethod
    def setup_class(cls):
        cls.path = nx.path_graph(7)
        cls.directed_path = nx.path_graph(7, create_using=nx.DiGraph())
        cls.cycle = nx.cycle_graph(7)
        cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
        cls.gnp = nx.gnp_random_graph(30, 0.1)
        cls.directed_gnp = nx.gnp_random_graph(30, 0.1, directed=True)
        cls.K20 = nx.complete_graph(20)
        cls.K10 = nx.complete_graph(10)
        cls.K5 = nx.complete_graph(5)
        cls.G_list = [
            cls.path,
            cls.directed_path,
            cls.cycle,
            cls.directed_cycle,
            cls.gnp,
            cls.directed_gnp,
            cls.K10,
            cls.K5,
            cls.K20,
        ]

    @staticmethod
    def _assert_all_pairs(K, expected):
        # Every (source, target) entry of an all-pairs result must equal
        # the expected connectivity value.
        for source in K:
            for _, value in K[source].items():
                assert value == expected

    def test_cycles(self):
        # Undirected cycles are 2-connected; one-way cycles only 1-connected.
        self._assert_all_pairs(approx.all_pairs_node_connectivity(self.cycle), 2)
        self._assert_all_pairs(
            approx.all_pairs_node_connectivity(self.directed_cycle), 1
        )

    def test_complete(self):
        for G in (self.K10, self.K5, self.K20):
            self._assert_all_pairs(
                approx.all_pairs_node_connectivity(G), len(G) - 1
            )

    def test_paths(self):
        self._assert_all_pairs(approx.all_pairs_node_connectivity(self.path), 1)
        # Directed path: connectivity only in the forward direction.
        K_dir = approx.all_pairs_node_connectivity(self.directed_path)
        for source in K_dir:
            for target, value in K_dir[source].items():
                assert value == (1 if source < target else 0)

    def test_cutoff(self):
        # With a cutoff, reported values are capped at the cutoff.
        for G in (self.K10, self.K5, self.K20):
            for cutoff in (2, 3, 4):
                self._assert_all_pairs(
                    approx.all_pairs_node_connectivity(G, cutoff=cutoff), cutoff
                )

    def test_all_pairs_connectivity_nbunch(self):
        nbunch = [0, 2, 3]
        result = approx.all_pairs_node_connectivity(
            nx.complete_graph(5), nbunch=nbunch
        )
        assert len(result) == len(nbunch)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py
new file mode 100644
index 0000000000000000000000000000000000000000..81251503c5d55a6a2d50071414ecc6e1e8cc8a67
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_distance_measures.py
@@ -0,0 +1,60 @@
+"""Unit tests for the :mod:`networkx.algorithms.approximation.distance_measures` module.
+"""
+
+import pytest
+
+import networkx as nx
+from networkx.algorithms.approximation import diameter
+
+
+class TestDiameter:
+ """Unit tests for the approximate diameter function
+ :func:`~networkx.algorithms.approximation.distance_measures.diameter`.
+ """
+
+ def test_null_graph(self):
+ """Test empty graph."""
+ G = nx.null_graph()
+ with pytest.raises(
+ nx.NetworkXError, match="Expected non-empty NetworkX graph!"
+ ):
+ diameter(G)
+
+ def test_undirected_non_connected(self):
+ """Test an undirected disconnected graph."""
+ graph = nx.path_graph(10)
+ graph.remove_edge(3, 4)
+ with pytest.raises(nx.NetworkXError, match="Graph not connected."):
+ diameter(graph)
+
+ def test_directed_non_strongly_connected(self):
+ """Test a directed non strongly connected graph."""
+ graph = nx.path_graph(10, create_using=nx.DiGraph())
+ with pytest.raises(nx.NetworkXError, match="DiGraph not strongly connected."):
+ diameter(graph)
+
+ def test_complete_undirected_graph(self):
+ """Test a complete undirected graph."""
+ graph = nx.complete_graph(10)
+ assert diameter(graph) == 1
+
+ def test_complete_directed_graph(self):
+ """Test a complete directed graph."""
+ graph = nx.complete_graph(10, create_using=nx.DiGraph())
+ assert diameter(graph) == 1
+
+ def test_undirected_path_graph(self):
+ """Test an undirected path graph with 10 nodes."""
+ graph = nx.path_graph(10)
+ assert diameter(graph) == 9
+
+ def test_directed_path_graph(self):
+ """Test a directed path graph with 10 nodes."""
+ graph = nx.path_graph(10).to_directed()
+ assert diameter(graph) == 9
+
+ def test_single_node(self):
+ """Test a graph which contains just a node."""
+ graph = nx.Graph()
+ graph.add_node(1)
+ assert diameter(graph) == 0
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b90d85ecf73bb56370fd92fdec25e3bbbb91ce3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_dominating_set.py
@@ -0,0 +1,78 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms.approximation import (
+ min_edge_dominating_set,
+ min_weighted_dominating_set,
+)
+
+
+class TestMinWeightDominatingSet:
+ def test_min_weighted_dominating_set(self):
+ graph = nx.Graph()
+ graph.add_edge(1, 2)
+ graph.add_edge(1, 5)
+ graph.add_edge(2, 3)
+ graph.add_edge(2, 5)
+ graph.add_edge(3, 4)
+ graph.add_edge(3, 6)
+ graph.add_edge(5, 6)
+
+ vertices = {1, 2, 3, 4, 5, 6}
+ # due to ties, this might be hard to test tight bounds
+ dom_set = min_weighted_dominating_set(graph)
+ for vertex in vertices - dom_set:
+ neighbors = set(graph.neighbors(vertex))
+ assert len(neighbors & dom_set) > 0, "Non dominating set found!"
+
+ def test_star_graph(self):
+ """Tests that an approximate dominating set for the star graph,
+ even when the center node does not have the smallest integer
+ label, gives just the center node.
+
+ For more information, see #1527.
+
+ """
+ # Create a star graph in which the center node has the highest
+ # label instead of the lowest.
+ G = nx.star_graph(10)
+ G = nx.relabel_nodes(G, {0: 9, 9: 0})
+ assert min_weighted_dominating_set(G) == {9}
+
+ def test_null_graph(self):
+ """Tests that the unique dominating set for the null graph is an empty set"""
+ G = nx.Graph()
+ assert min_weighted_dominating_set(G) == set()
+
+ def test_min_edge_dominating_set(self):
+ graph = nx.path_graph(5)
+ dom_set = min_edge_dominating_set(graph)
+
+ # this is a crappy way to test, but good enough for now.
+ for edge in graph.edges():
+ if edge in dom_set:
+ continue
+ else:
+ u, v = edge
+ found = False
+ for dom_edge in dom_set:
+ found |= u == dom_edge[0] or u == dom_edge[1]
+ assert found, "Non adjacent edge found!"
+
+ graph = nx.complete_graph(10)
+ dom_set = min_edge_dominating_set(graph)
+
+ # this is a crappy way to test, but good enough for now.
+ for edge in graph.edges():
+ if edge in dom_set:
+ continue
+ else:
+ u, v = edge
+ found = False
+ for dom_edge in dom_set:
+ found |= u == dom_edge[0] or u == dom_edge[1]
+ assert found, "Non adjacent edge found!"
+
+ graph = nx.Graph() # empty Networkx graph
+ with pytest.raises(ValueError, match="Expected non-empty NetworkX graph!"):
+ min_edge_dominating_set(graph)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ba802171a6b43a5157f12010c8164e5e867eb8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_kcomponents.py
@@ -0,0 +1,303 @@
+# Test for approximation to k-components algorithm
+import pytest
+
+import networkx as nx
+from networkx.algorithms.approximation import k_components
+from networkx.algorithms.approximation.kcomponents import _AntiGraph, _same
+
+
+def build_k_number_dict(k_components):
+ k_num = {}
+ for k, comps in sorted(k_components.items()):
+ for comp in comps:
+ for node in comp:
+ k_num[node] = k
+ return k_num
+
+
+##
+# Some nice synthetic graphs
+##
+
+
+def graph_example_1():
+ G = nx.convert_node_labels_to_integers(
+ nx.grid_graph([5, 5]), label_attribute="labels"
+ )
+ rlabels = nx.get_node_attributes(G, "labels")
+ labels = {v: k for k, v in rlabels.items()}
+
+ for nodes in [
+ (labels[(0, 0)], labels[(1, 0)]),
+ (labels[(0, 4)], labels[(1, 4)]),
+ (labels[(3, 0)], labels[(4, 0)]),
+ (labels[(3, 4)], labels[(4, 4)]),
+ ]:
+ new_node = G.order() + 1
+ # Petersen graph is triconnected
+ P = nx.petersen_graph()
+ G = nx.disjoint_union(G, P)
+ # Add two edges between the grid and P
+ G.add_edge(new_node + 1, nodes[0])
+ G.add_edge(new_node, nodes[1])
+ # K5 is 4-connected
+ K = nx.complete_graph(5)
+ G = nx.disjoint_union(G, K)
+ # Add three edges between P and K5
+ G.add_edge(new_node + 2, new_node + 11)
+ G.add_edge(new_node + 3, new_node + 12)
+ G.add_edge(new_node + 4, new_node + 13)
+ # Add another K5 sharing a node
+ G = nx.disjoint_union(G, K)
+ nbrs = G[new_node + 10]
+ G.remove_node(new_node + 10)
+ for nbr in nbrs:
+ G.add_edge(new_node + 17, nbr)
+ G.add_edge(new_node + 16, new_node + 5)
+ return G
+
+
+def torrents_and_ferraro_graph():
+ G = nx.convert_node_labels_to_integers(
+ nx.grid_graph([5, 5]), label_attribute="labels"
+ )
+ rlabels = nx.get_node_attributes(G, "labels")
+ labels = {v: k for k, v in rlabels.items()}
+
+ for nodes in [(labels[(0, 4)], labels[(1, 4)]), (labels[(3, 4)], labels[(4, 4)])]:
+ new_node = G.order() + 1
+ # Petersen graph is triconnected
+ P = nx.petersen_graph()
+ G = nx.disjoint_union(G, P)
+ # Add two edges between the grid and P
+ G.add_edge(new_node + 1, nodes[0])
+ G.add_edge(new_node, nodes[1])
+ # K5 is 4-connected
+ K = nx.complete_graph(5)
+ G = nx.disjoint_union(G, K)
+ # Add three edges between P and K5
+ G.add_edge(new_node + 2, new_node + 11)
+ G.add_edge(new_node + 3, new_node + 12)
+ G.add_edge(new_node + 4, new_node + 13)
+ # Add another K5 sharing a node
+ G = nx.disjoint_union(G, K)
+ nbrs = G[new_node + 10]
+ G.remove_node(new_node + 10)
+ for nbr in nbrs:
+ G.add_edge(new_node + 17, nbr)
+ # Commenting this makes the graph not biconnected !!
+ # This stupid mistake make one reviewer very angry :P
+ G.add_edge(new_node + 16, new_node + 8)
+
+ for nodes in [(labels[(0, 0)], labels[(1, 0)]), (labels[(3, 0)], labels[(4, 0)])]:
+ new_node = G.order() + 1
+ # Petersen graph is triconnected
+ P = nx.petersen_graph()
+ G = nx.disjoint_union(G, P)
+ # Add two edges between the grid and P
+ G.add_edge(new_node + 1, nodes[0])
+ G.add_edge(new_node, nodes[1])
+ # K5 is 4-connected
+ K = nx.complete_graph(5)
+ G = nx.disjoint_union(G, K)
+ # Add three edges between P and K5
+ G.add_edge(new_node + 2, new_node + 11)
+ G.add_edge(new_node + 3, new_node + 12)
+ G.add_edge(new_node + 4, new_node + 13)
+ # Add another K5 sharing two nodes
+ G = nx.disjoint_union(G, K)
+ nbrs = G[new_node + 10]
+ G.remove_node(new_node + 10)
+ for nbr in nbrs:
+ G.add_edge(new_node + 17, nbr)
+ nbrs2 = G[new_node + 9]
+ G.remove_node(new_node + 9)
+ for nbr in nbrs2:
+ G.add_edge(new_node + 18, nbr)
+ return G
+
+
+# Helper function
+
+
+def _check_connectivity(G):
+ result = k_components(G)
+ for k, components in result.items():
+ if k < 3:
+ continue
+ for component in components:
+ C = G.subgraph(component)
+ K = nx.node_connectivity(C)
+ assert K >= k
+
+
+def test_torrents_and_ferraro_graph():
+ G = torrents_and_ferraro_graph()
+ _check_connectivity(G)
+
+
+def test_example_1():
+ G = graph_example_1()
+ _check_connectivity(G)
+
+
+def test_karate_0():
+ G = nx.karate_club_graph()
+ _check_connectivity(G)
+
+
+def test_karate_1():
+ karate_k_num = {
+ 0: 4,
+ 1: 4,
+ 2: 4,
+ 3: 4,
+ 4: 3,
+ 5: 3,
+ 6: 3,
+ 7: 4,
+ 8: 4,
+ 9: 2,
+ 10: 3,
+ 11: 1,
+ 12: 2,
+ 13: 4,
+ 14: 2,
+ 15: 2,
+ 16: 2,
+ 17: 2,
+ 18: 2,
+ 19: 3,
+ 20: 2,
+ 21: 2,
+ 22: 2,
+ 23: 3,
+ 24: 3,
+ 25: 3,
+ 26: 2,
+ 27: 3,
+ 28: 3,
+ 29: 3,
+ 30: 4,
+ 31: 3,
+ 32: 4,
+ 33: 4,
+ }
+ approx_karate_k_num = karate_k_num.copy()
+ approx_karate_k_num[24] = 2
+ approx_karate_k_num[25] = 2
+ G = nx.karate_club_graph()
+ k_comps = k_components(G)
+ k_num = build_k_number_dict(k_comps)
+ assert k_num in (karate_k_num, approx_karate_k_num)
+
+
+def test_example_1_detail_3_and_4():
+ G = graph_example_1()
+ result = k_components(G)
+ # In this example graph there are 8 3-components, 4 with 15 nodes
+ # and 4 with 5 nodes.
+ assert len(result[3]) == 8
+ assert len([c for c in result[3] if len(c) == 15]) == 4
+ assert len([c for c in result[3] if len(c) == 5]) == 4
+ # There are also 8 4-components all with 5 nodes.
+ assert len(result[4]) == 8
+ assert all(len(c) == 5 for c in result[4])
+ # Finally check that the k-components detected have actually node
+ # connectivity >= k.
+ for k, components in result.items():
+ if k < 3:
+ continue
+ for component in components:
+ K = nx.node_connectivity(G.subgraph(component))
+ assert K >= k
+
+
+def test_directed():
+ with pytest.raises(nx.NetworkXNotImplemented):
+ G = nx.gnp_random_graph(10, 0.4, directed=True)
+ kc = k_components(G)
+
+
+def test_same():
+ equal = {"A": 2, "B": 2, "C": 2}
+ slightly_different = {"A": 2, "B": 1, "C": 2}
+ different = {"A": 2, "B": 8, "C": 18}
+ assert _same(equal)
+ assert not _same(slightly_different)
+ assert _same(slightly_different, tol=1)
+ assert not _same(different)
+ assert not _same(different, tol=4)
+
+
+class TestAntiGraph:
+ @classmethod
+ def setup_class(cls):
+ cls.Gnp = nx.gnp_random_graph(20, 0.8, seed=42)
+ cls.Anp = _AntiGraph(nx.complement(cls.Gnp))
+ cls.Gd = nx.davis_southern_women_graph()
+ cls.Ad = _AntiGraph(nx.complement(cls.Gd))
+ cls.Gk = nx.karate_club_graph()
+ cls.Ak = _AntiGraph(nx.complement(cls.Gk))
+ cls.GA = [(cls.Gnp, cls.Anp), (cls.Gd, cls.Ad), (cls.Gk, cls.Ak)]
+
+ def test_size(self):
+ for G, A in self.GA:
+ n = G.order()
+ s = len(list(G.edges())) + len(list(A.edges()))
+ assert s == (n * (n - 1)) / 2
+
+ def test_degree(self):
+ for G, A in self.GA:
+ assert sorted(G.degree()) == sorted(A.degree())
+
+ def test_core_number(self):
+ for G, A in self.GA:
+ assert nx.core_number(G) == nx.core_number(A)
+
+ def test_connected_components(self):
+ # ccs are same unless isolated nodes or any node has degree=len(G)-1
+ # graphs in self.GA avoid this problem
+ for G, A in self.GA:
+ gc = [set(c) for c in nx.connected_components(G)]
+ ac = [set(c) for c in nx.connected_components(A)]
+ for comp in ac:
+ assert comp in gc
+
+ def test_adj(self):
+ for G, A in self.GA:
+ for n, nbrs in G.adj.items():
+ a_adj = sorted((n, sorted(ad)) for n, ad in A.adj.items())
+ g_adj = sorted((n, sorted(ad)) for n, ad in G.adj.items())
+ assert a_adj == g_adj
+
+ def test_adjacency(self):
+ for G, A in self.GA:
+ a_adj = list(A.adjacency())
+ for n, nbrs in G.adjacency():
+ assert (n, set(nbrs)) in a_adj
+
+ def test_neighbors(self):
+ for G, A in self.GA:
+ node = list(G.nodes())[0]
+ assert set(G.neighbors(node)) == set(A.neighbors(node))
+
+ def test_node_not_in_graph(self):
+ for G, A in self.GA:
+ node = "non_existent_node"
+ pytest.raises(nx.NetworkXError, A.neighbors, node)
+ pytest.raises(nx.NetworkXError, G.neighbors, node)
+
+ def test_degree_thingraph(self):
+ for G, A in self.GA:
+ node = list(G.nodes())[0]
+ nodes = list(G.nodes())[1:4]
+ assert G.degree(node) == A.degree(node)
+ assert sum(d for n, d in G.degree()) == sum(d for n, d in A.degree())
+ # AntiGraph is a ThinGraph, so all the weights are 1
+ assert sum(d for n, d in A.degree()) == sum(
+ d for n, d in A.degree(weight="weight")
+ )
+ assert sum(d for n, d in G.degree(nodes)) == sum(
+ d for n, d in A.degree(nodes)
+ )
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..f50da3d2e07310fc19e1db2bd18fdce23223771c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_matching.py
@@ -0,0 +1,8 @@
+import networkx as nx
+import networkx.algorithms.approximation as a
+
+
+def test_min_maximal_matching():
+ # smoke test
+ G = nx.Graph()
+ assert len(a.min_maximal_matching(G)) == 0
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef0424401e4b2a7e6d580c762f23cea240e55b3c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_maxcut.py
@@ -0,0 +1,94 @@
+import random
+
+import pytest
+
+import networkx as nx
+from networkx.algorithms.approximation import maxcut
+
+
+@pytest.mark.parametrize(
+ "f", (nx.approximation.randomized_partitioning, nx.approximation.one_exchange)
+)
+@pytest.mark.parametrize("graph_constructor", (nx.DiGraph, nx.MultiGraph))
+def test_raises_on_directed_and_multigraphs(f, graph_constructor):
+ G = graph_constructor([(0, 1), (1, 2)])
+ with pytest.raises(nx.NetworkXNotImplemented):
+ f(G)
+
+
+def _is_valid_cut(G, set1, set2):
+ union = set1.union(set2)
+ assert union == set(G.nodes)
+ assert len(set1) + len(set2) == G.number_of_nodes()
+
+
+def _cut_is_locally_optimal(G, cut_size, set1):
+ # test if cut can be locally improved
+ for i, node in enumerate(set1):
+ cut_size_without_node = nx.algorithms.cut_size(
+ G, set1 - {node}, weight="weight"
+ )
+ assert cut_size_without_node <= cut_size
+
+
+def test_random_partitioning():
+ G = nx.complete_graph(5)
+ _, (set1, set2) = maxcut.randomized_partitioning(G, seed=5)
+ _is_valid_cut(G, set1, set2)
+
+
+def test_random_partitioning_all_to_one():
+ G = nx.complete_graph(5)
+ _, (set1, set2) = maxcut.randomized_partitioning(G, p=1)
+ _is_valid_cut(G, set1, set2)
+ assert len(set1) == G.number_of_nodes()
+ assert len(set2) == 0
+
+
+def test_one_exchange_basic():
+ G = nx.complete_graph(5)
+ random.seed(5)
+ for u, v, w in G.edges(data=True):
+ w["weight"] = random.randrange(-100, 100, 1) / 10
+
+ initial_cut = set(random.sample(sorted(G.nodes()), k=5))
+ cut_size, (set1, set2) = maxcut.one_exchange(
+ G, initial_cut, weight="weight", seed=5
+ )
+
+ _is_valid_cut(G, set1, set2)
+ _cut_is_locally_optimal(G, cut_size, set1)
+
+
+def test_one_exchange_optimal():
+ # Greedy one exchange should find the optimal solution for this graph (14)
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=3)
+ G.add_edge(1, 3, weight=3)
+ G.add_edge(1, 4, weight=3)
+ G.add_edge(1, 5, weight=3)
+ G.add_edge(2, 3, weight=5)
+
+ cut_size, (set1, set2) = maxcut.one_exchange(G, weight="weight", seed=5)
+
+ _is_valid_cut(G, set1, set2)
+ _cut_is_locally_optimal(G, cut_size, set1)
+ # check global optimality
+ assert cut_size == 14
+
+
+def test_negative_weights():
+ G = nx.complete_graph(5)
+ random.seed(5)
+ for u, v, w in G.edges(data=True):
+ w["weight"] = -1 * random.random()
+
+ initial_cut = set(random.sample(sorted(G.nodes()), k=5))
+ cut_size, (set1, set2) = maxcut.one_exchange(G, initial_cut, weight="weight")
+
+ # make sure it is a valid cut
+ _is_valid_cut(G, set1, set2)
+ # check local optimality
+ _cut_is_locally_optimal(G, cut_size, set1)
+ # test that all nodes are in the same partition
+ assert len(set1) == len(G.nodes) or len(set2) == len(G.nodes)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py
new file mode 100644
index 0000000000000000000000000000000000000000..32fe1fb8fa917c557954d9da0d960895a6953a11
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_ramsey.py
@@ -0,0 +1,31 @@
+import networkx as nx
+import networkx.algorithms.approximation as apxa
+
+
+def test_ramsey():
+ # this should only find the complete graph
+ graph = nx.complete_graph(10)
+ c, i = apxa.ramsey_R2(graph)
+ cdens = nx.density(graph.subgraph(c))
+ assert cdens == 1.0, "clique not correctly found by ramsey!"
+ idens = nx.density(graph.subgraph(i))
+ assert idens == 0.0, "i-set not correctly found by ramsey!"
+
+ # this trivial graph has no cliques. should just find i-sets
+ graph = nx.trivial_graph()
+ c, i = apxa.ramsey_R2(graph)
+ assert c == {0}, "clique not correctly found by ramsey!"
+ assert i == {0}, "i-set not correctly found by ramsey!"
+
+ graph = nx.barbell_graph(10, 5, nx.Graph())
+ c, i = apxa.ramsey_R2(graph)
+ cdens = nx.density(graph.subgraph(c))
+ assert cdens == 1.0, "clique not correctly found by ramsey!"
+ idens = nx.density(graph.subgraph(i))
+ assert idens == 0.0, "i-set not correctly found by ramsey!"
+
+ # add self-loops and test again
+ graph.add_edges_from([(n, n) for n in range(0, len(graph), 2)])
+ cc, ii = apxa.ramsey_R2(graph)
+ assert cc == c
+ assert ii == i
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py
new file mode 100644
index 0000000000000000000000000000000000000000..23c3193e42efc83a201e6ee83a539b8a142c5964
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_steinertree.py
@@ -0,0 +1,226 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms.approximation.steinertree import metric_closure, steiner_tree
+from networkx.utils import edges_equal
+
+
+class TestSteinerTree:
+ @classmethod
+ def setup_class(cls):
+ G1 = nx.Graph()
+ G1.add_edge(1, 2, weight=10)
+ G1.add_edge(2, 3, weight=10)
+ G1.add_edge(3, 4, weight=10)
+ G1.add_edge(4, 5, weight=10)
+ G1.add_edge(5, 6, weight=10)
+ G1.add_edge(2, 7, weight=1)
+ G1.add_edge(7, 5, weight=1)
+
+ G2 = nx.Graph()
+ G2.add_edge(0, 5, weight=6)
+ G2.add_edge(1, 2, weight=2)
+ G2.add_edge(1, 5, weight=3)
+ G2.add_edge(2, 4, weight=4)
+ G2.add_edge(3, 5, weight=5)
+ G2.add_edge(4, 5, weight=1)
+
+ G3 = nx.Graph()
+ G3.add_edge(1, 2, weight=8)
+ G3.add_edge(1, 9, weight=3)
+ G3.add_edge(1, 8, weight=6)
+ G3.add_edge(1, 10, weight=2)
+ G3.add_edge(1, 14, weight=3)
+ G3.add_edge(2, 3, weight=6)
+ G3.add_edge(3, 4, weight=3)
+ G3.add_edge(3, 10, weight=2)
+ G3.add_edge(3, 11, weight=1)
+ G3.add_edge(4, 5, weight=1)
+ G3.add_edge(4, 11, weight=1)
+ G3.add_edge(5, 6, weight=4)
+ G3.add_edge(5, 11, weight=2)
+ G3.add_edge(5, 12, weight=1)
+ G3.add_edge(5, 13, weight=3)
+ G3.add_edge(6, 7, weight=2)
+ G3.add_edge(6, 12, weight=3)
+ G3.add_edge(6, 13, weight=1)
+ G3.add_edge(7, 8, weight=3)
+ G3.add_edge(7, 9, weight=3)
+ G3.add_edge(7, 11, weight=5)
+ G3.add_edge(7, 13, weight=2)
+ G3.add_edge(7, 14, weight=4)
+ G3.add_edge(8, 9, weight=2)
+ G3.add_edge(9, 14, weight=1)
+ G3.add_edge(10, 11, weight=2)
+ G3.add_edge(10, 14, weight=1)
+ G3.add_edge(11, 12, weight=1)
+ G3.add_edge(11, 14, weight=7)
+ G3.add_edge(12, 14, weight=3)
+ G3.add_edge(12, 15, weight=1)
+ G3.add_edge(13, 14, weight=4)
+ G3.add_edge(13, 15, weight=1)
+ G3.add_edge(14, 15, weight=2)
+
+ cls.G1 = G1
+ cls.G2 = G2
+ cls.G3 = G3
+ cls.G1_term_nodes = [1, 2, 3, 4, 5]
+ cls.G2_term_nodes = [0, 2, 3]
+ cls.G3_term_nodes = [1, 3, 5, 6, 8, 10, 11, 12, 13]
+
+ cls.methods = ["kou", "mehlhorn"]
+
+ def test_connected_metric_closure(self):
+ G = self.G1.copy()
+ G.add_node(100)
+ pytest.raises(nx.NetworkXError, metric_closure, G)
+
+ def test_metric_closure(self):
+ M = metric_closure(self.G1)
+ mc = [
+ (1, 2, {"distance": 10, "path": [1, 2]}),
+ (1, 3, {"distance": 20, "path": [1, 2, 3]}),
+ (1, 4, {"distance": 22, "path": [1, 2, 7, 5, 4]}),
+ (1, 5, {"distance": 12, "path": [1, 2, 7, 5]}),
+ (1, 6, {"distance": 22, "path": [1, 2, 7, 5, 6]}),
+ (1, 7, {"distance": 11, "path": [1, 2, 7]}),
+ (2, 3, {"distance": 10, "path": [2, 3]}),
+ (2, 4, {"distance": 12, "path": [2, 7, 5, 4]}),
+ (2, 5, {"distance": 2, "path": [2, 7, 5]}),
+ (2, 6, {"distance": 12, "path": [2, 7, 5, 6]}),
+ (2, 7, {"distance": 1, "path": [2, 7]}),
+ (3, 4, {"distance": 10, "path": [3, 4]}),
+ (3, 5, {"distance": 12, "path": [3, 2, 7, 5]}),
+ (3, 6, {"distance": 22, "path": [3, 2, 7, 5, 6]}),
+ (3, 7, {"distance": 11, "path": [3, 2, 7]}),
+ (4, 5, {"distance": 10, "path": [4, 5]}),
+ (4, 6, {"distance": 20, "path": [4, 5, 6]}),
+ (4, 7, {"distance": 11, "path": [4, 5, 7]}),
+ (5, 6, {"distance": 10, "path": [5, 6]}),
+ (5, 7, {"distance": 1, "path": [5, 7]}),
+ (6, 7, {"distance": 11, "path": [6, 5, 7]}),
+ ]
+ assert edges_equal(list(M.edges(data=True)), mc)
+
+ def test_steiner_tree(self):
+ valid_steiner_trees = [
+ [
+ [
+ (1, 2, {"weight": 10}),
+ (2, 3, {"weight": 10}),
+ (2, 7, {"weight": 1}),
+ (3, 4, {"weight": 10}),
+ (5, 7, {"weight": 1}),
+ ],
+ [
+ (1, 2, {"weight": 10}),
+ (2, 7, {"weight": 1}),
+ (3, 4, {"weight": 10}),
+ (4, 5, {"weight": 10}),
+ (5, 7, {"weight": 1}),
+ ],
+ [
+ (1, 2, {"weight": 10}),
+ (2, 3, {"weight": 10}),
+ (2, 7, {"weight": 1}),
+ (4, 5, {"weight": 10}),
+ (5, 7, {"weight": 1}),
+ ],
+ ],
+ [
+ [
+ (0, 5, {"weight": 6}),
+ (1, 2, {"weight": 2}),
+ (1, 5, {"weight": 3}),
+ (3, 5, {"weight": 5}),
+ ],
+ [
+ (0, 5, {"weight": 6}),
+ (4, 2, {"weight": 4}),
+ (4, 5, {"weight": 1}),
+ (3, 5, {"weight": 5}),
+ ],
+ ],
+ [
+ [
+ (1, 10, {"weight": 2}),
+ (3, 10, {"weight": 2}),
+ (3, 11, {"weight": 1}),
+ (5, 12, {"weight": 1}),
+ (6, 13, {"weight": 1}),
+ (8, 9, {"weight": 2}),
+ (9, 14, {"weight": 1}),
+ (10, 14, {"weight": 1}),
+ (11, 12, {"weight": 1}),
+ (12, 15, {"weight": 1}),
+ (13, 15, {"weight": 1}),
+ ]
+ ],
+ ]
+ for method in self.methods:
+ for G, term_nodes, valid_trees in zip(
+ [self.G1, self.G2, self.G3],
+ [self.G1_term_nodes, self.G2_term_nodes, self.G3_term_nodes],
+ valid_steiner_trees,
+ ):
+ S = steiner_tree(G, term_nodes, method=method)
+ assert any(
+ edges_equal(list(S.edges(data=True)), valid_tree)
+ for valid_tree in valid_trees
+ )
+
+ def test_multigraph_steiner_tree(self):
+ G = nx.MultiGraph()
+ G.add_edges_from(
+ [
+ (1, 2, 0, {"weight": 1}),
+ (2, 3, 0, {"weight": 999}),
+ (2, 3, 1, {"weight": 1}),
+ (3, 4, 0, {"weight": 1}),
+ (3, 5, 0, {"weight": 1}),
+ ]
+ )
+ terminal_nodes = [2, 4, 5]
+ expected_edges = [
+ (2, 3, 1, {"weight": 1}), # edge with key 1 has lower weight
+ (3, 4, 0, {"weight": 1}),
+ (3, 5, 0, {"weight": 1}),
+ ]
+ for method in self.methods:
+ S = steiner_tree(G, terminal_nodes, method=method)
+ assert edges_equal(S.edges(data=True, keys=True), expected_edges)
+
+
+@pytest.mark.parametrize("method", ("kou", "mehlhorn"))
+def test_steiner_tree_weight_attribute(method):
+ G = nx.star_graph(4)
+ # Add an edge attribute that is named something other than "weight"
+ nx.set_edge_attributes(G, {e: 10 for e in G.edges}, name="distance")
+ H = nx.approximation.steiner_tree(G, [1, 3], method=method, weight="distance")
+ assert nx.utils.edges_equal(H.edges, [(0, 1), (0, 3)])
+
+
+@pytest.mark.parametrize("method", ("kou", "mehlhorn"))
+def test_steiner_tree_multigraph_weight_attribute(method):
+ G = nx.cycle_graph(3, create_using=nx.MultiGraph)
+ nx.set_edge_attributes(G, {e: 10 for e in G.edges}, name="distance")
+ G.add_edge(2, 0, distance=5)
+ H = nx.approximation.steiner_tree(G, list(G), method=method, weight="distance")
+ assert len(H.edges) == 2 and H.has_edge(2, 0, key=1)
+ assert sum(dist for *_, dist in H.edges(data="distance")) == 15
+
+
+@pytest.mark.parametrize("method", (None, "mehlhorn", "kou"))
+def test_steiner_tree_methods(method):
+ G = nx.star_graph(4)
+ expected = nx.Graph([(0, 1), (0, 3)])
+ st = nx.approximation.steiner_tree(G, [1, 3], method=method)
+ assert nx.utils.edges_equal(st.edges, expected.edges)
+
+
+def test_steiner_tree_method_invalid():
+ G = nx.star_graph(4)
+ with pytest.raises(
+ ValueError, match="invalid_method is not a valid choice for an algorithm."
+ ):
+ nx.approximation.steiner_tree(G, terminal_nodes=[1, 3], method="invalid_method")
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py
new file mode 100644
index 0000000000000000000000000000000000000000..445fe913ac0538556babef811eb449faa4ae8a77
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_traveling_salesman.py
@@ -0,0 +1,979 @@
+"""Unit tests for the traveling_salesman module."""
+
+import random
+
+import pytest
+
+import networkx as nx
+import networkx.algorithms.approximation as nx_app
+
+pairwise = nx.utils.pairwise
+
+
+def test_christofides_hamiltonian():
+    """Christofides returns a Hamiltonian cycle, with or without a given MST."""
+    random.seed(42)
+    G = nx.complete_graph(20)
+    for u, v in G.edges():
+        G[u][v]["weight"] = random.randint(0, 10)
+
+    H = nx.Graph()
+    H.add_edges_from(pairwise(nx_app.christofides(G)))
+    # A Hamiltonian cycle is one single cycle covering all edges of H,
+    # so removing that cycle must leave no edges behind.
+    H.remove_edges_from(nx.find_cycle(H))
+    assert len(H.edges) == 0
+
+    # Same check when a precomputed minimum spanning tree is supplied
+    tree = nx.minimum_spanning_tree(G, weight="weight")
+    H = nx.Graph()
+    H.add_edges_from(pairwise(nx_app.christofides(G, tree)))
+    H.remove_edges_from(nx.find_cycle(H))
+    assert len(H.edges) == 0
+
+
+def test_christofides_incomplete_graph():
+    """Christofides requires a complete graph; a missing edge raises NetworkXError."""
+    G = nx.complete_graph(10)
+    G.remove_edge(0, 1)
+    pytest.raises(nx.NetworkXError, nx_app.christofides, G)
+
+
+def test_christofides_ignore_selfloops():
+    """Self-loops are ignored: the tour still visits each node exactly once."""
+    G = nx.complete_graph(5)
+    G.add_edge(3, 3)
+    cycle = nx_app.christofides(G)
+    # cycle repeats the start node at the end, hence the "- 1"
+    assert len(cycle) - 1 == len(G) == len(set(cycle))
+
+
+# set up graphs for other tests
+class TestBase:
+    """Shared graph fixtures for the TSP test classes.
+
+    Each fixture graph comes with its known optimal tour (``*_cycle``) and
+    tour cost (``*_cost``) so subclasses can validate solver output.
+    """
+
+    @classmethod
+    def setup_class(cls):
+        # Asymmetric directed graph; optimal tour D-C-B-A-D costs 31
+        cls.DG = nx.DiGraph()
+        cls.DG.add_weighted_edges_from(
+            {
+                ("A", "B", 3),
+                ("A", "C", 17),
+                ("A", "D", 14),
+                ("B", "A", 3),
+                ("B", "C", 12),
+                ("B", "D", 16),
+                ("C", "A", 13),
+                ("C", "B", 12),
+                ("C", "D", 4),
+                ("D", "A", 14),
+                ("D", "B", 15),
+                ("D", "C", 2),
+            }
+        )
+        cls.DG_cycle = ["D", "C", "B", "A", "D"]
+        cls.DG_cost = 31.0
+
+        # Second asymmetric digraph where greedy from "D" is suboptimal
+        cls.DG2 = nx.DiGraph()
+        cls.DG2.add_weighted_edges_from(
+            {
+                ("A", "B", 3),
+                ("A", "C", 17),
+                ("A", "D", 14),
+                ("B", "A", 30),
+                ("B", "C", 2),
+                ("B", "D", 16),
+                ("C", "A", 33),
+                ("C", "B", 32),
+                ("C", "D", 34),
+                ("D", "A", 14),
+                ("D", "B", 15),
+                ("D", "C", 2),
+            }
+        )
+        cls.DG2_cycle = ["D", "A", "B", "C", "D"]
+        cls.DG2_cost = 53.0
+
+        # Complete graphs without a "weight" attribute
+        cls.unweightedUG = nx.complete_graph(5, nx.Graph())
+        cls.unweightedDG = nx.complete_graph(5, nx.DiGraph())
+
+        # Non-complete graphs used to trigger NetworkXError
+        cls.incompleteUG = nx.Graph()
+        cls.incompleteUG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)})
+        cls.incompleteDG = nx.DiGraph()
+        cls.incompleteDG.add_weighted_edges_from({(0, 1, 1), (1, 2, 3)})
+
+        # Symmetric (undirected) weighted graphs with known optima
+        cls.UG = nx.Graph()
+        cls.UG.add_weighted_edges_from(
+            {
+                ("A", "B", 3),
+                ("A", "C", 17),
+                ("A", "D", 14),
+                ("B", "C", 12),
+                ("B", "D", 16),
+                ("C", "D", 4),
+            }
+        )
+        cls.UG_cycle = ["D", "C", "B", "A", "D"]
+        cls.UG_cost = 33.0
+
+        cls.UG2 = nx.Graph()
+        cls.UG2.add_weighted_edges_from(
+            {
+                ("A", "B", 1),
+                ("A", "C", 15),
+                ("A", "D", 5),
+                ("B", "C", 16),
+                ("B", "D", 8),
+                ("C", "D", 3),
+            }
+        )
+        cls.UG2_cycle = ["D", "C", "B", "A", "D"]
+        cls.UG2_cost = 25.0
+
+
+def validate_solution(soln, cost, exp_soln, exp_cost):
+    """Assert the tour and its cost match the expected tour exactly."""
+    assert soln == exp_soln
+    assert cost == exp_cost
+
+
+def validate_symmetric_solution(soln, cost, exp_soln, exp_cost):
+    """Like validate_solution, but a reversed tour is also accepted
+    (undirected graphs make both traversal directions equally optimal)."""
+    assert soln == exp_soln or soln == exp_soln[::-1]
+    assert cost == exp_cost
+
+
+class TestGreedyTSP(TestBase):
+    """Unit tests for :func:`~networkx.algorithms.approximation.greedy_tsp`."""
+
+    def test_greedy(self):
+        cycle = nx_app.greedy_tsp(self.DG, source="D")
+        cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 31.0)
+
+        # Greedy is a heuristic: on DG2 it returns 78, above the optimum of 53
+        cycle = nx_app.greedy_tsp(self.DG2, source="D")
+        cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 78.0)
+
+        cycle = nx_app.greedy_tsp(self.UG, source="D")
+        cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, ["D", "C", "B", "A", "D"], 33.0)
+
+        # Suboptimal on UG2 as well (27 vs optimum 25)
+        cycle = nx_app.greedy_tsp(self.UG2, source="D")
+        cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, ["D", "C", "A", "B", "D"], 27.0)
+
+    def test_not_complete_graph(self):
+        pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteUG)
+        pytest.raises(nx.NetworkXError, nx_app.greedy_tsp, self.incompleteDG)
+
+    def test_not_weighted_graph(self):
+        # Missing "weight" attributes must not raise
+        nx_app.greedy_tsp(self.unweightedUG)
+        nx_app.greedy_tsp(self.unweightedDG)
+
+    def test_two_nodes(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from({(1, 2, 1)})
+        cycle = nx_app.greedy_tsp(G)
+        cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, [1, 2, 1], 2)
+
+    def test_ignore_selfloops(self):
+        G = nx.complete_graph(5)
+        G.add_edge(3, 3)
+        cycle = nx_app.greedy_tsp(G)
+        assert len(cycle) - 1 == len(G) == len(set(cycle))
+
+
+class TestSimulatedAnnealingTSP(TestBase):
+    """Unit tests for simulated_annealing_tsp.
+
+    ``tsp`` is a staticmethod so TestThresholdAcceptingTSP can subclass this
+    and rerun the same tests against threshold_accepting_tsp.
+    """
+
+    tsp = staticmethod(nx_app.simulated_annealing_tsp)
+
+    def test_simulated_annealing_directed(self):
+        # "greedy" asks the solver to build its own initial cycle
+        cycle = self.tsp(self.DG, "greedy", source="D", seed=42)
+        cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, self.DG_cycle, self.DG_cost)
+
+        # An explicit (suboptimal) initial cycle should still converge
+        initial_sol = ["D", "B", "A", "C", "D"]
+        cycle = self.tsp(self.DG, initial_sol, source="D", seed=42)
+        cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, self.DG_cycle, self.DG_cost)
+
+        # "1-0" selects the node-relocation move instead of the default swap
+        initial_sol = ["D", "A", "C", "B", "D"]
+        cycle = self.tsp(self.DG, initial_sol, move="1-0", source="D", seed=42)
+        cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, self.DG_cycle, self.DG_cost)
+
+        cycle = self.tsp(self.DG2, "greedy", source="D", seed=42)
+        cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost)
+
+        cycle = self.tsp(self.DG2, "greedy", move="1-0", source="D", seed=42)
+        cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, self.DG2_cycle, self.DG2_cost)
+
+    def test_simulated_annealing_undirected(self):
+        cycle = self.tsp(self.UG, "greedy", source="D", seed=42)
+        cost = sum(self.UG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, self.UG_cycle, self.UG_cost)
+
+        cycle = self.tsp(self.UG2, "greedy", source="D", seed=42)
+        cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost)
+
+        cycle = self.tsp(self.UG2, "greedy", move="1-0", source="D", seed=42)
+        cost = sum(self.UG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_symmetric_solution(cycle, cost, self.UG2_cycle, self.UG2_cost)
+
+    def test_error_on_input_order_mistake(self):
+        # see issue #4846 https://github.com/networkx/networkx/issues/4846
+        # init_cycle is a required positional argument; passing the weight
+        # name in its place must fail loudly rather than silently misbehave.
+        pytest.raises(TypeError, self.tsp, self.UG, weight="weight")
+        pytest.raises(nx.NetworkXError, self.tsp, self.UG, "weight")
+
+    def test_not_complete_graph(self):
+        pytest.raises(nx.NetworkXError, self.tsp, self.incompleteUG, "greedy", source=0)
+        pytest.raises(nx.NetworkXError, self.tsp, self.incompleteDG, "greedy", source=0)
+
+    def test_ignore_selfloops(self):
+        G = nx.complete_graph(5)
+        G.add_edge(3, 3)
+        cycle = self.tsp(G, "greedy")
+        assert len(cycle) - 1 == len(G) == len(set(cycle))
+
+    def test_not_weighted_graph(self):
+        # Missing "weight" attributes must not raise
+        self.tsp(self.unweightedUG, "greedy")
+        self.tsp(self.unweightedDG, "greedy")
+
+    def test_two_nodes(self):
+        G = nx.Graph()
+        G.add_weighted_edges_from({(1, 2, 1)})
+
+        cycle = self.tsp(G, "greedy", source=1, seed=42)
+        cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, [1, 2, 1], 2)
+
+        cycle = self.tsp(G, [1, 2, 1], source=1, seed=42)
+        cost = sum(G[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        validate_solution(cycle, cost, [1, 2, 1], 2)
+
+    def test_failure_of_costs_too_high_when_iterations_low(self):
+        # Simulated Annealing Version:
+        # set number of moves low and alpha high
+        cycle = self.tsp(
+            self.DG2, "greedy", source="D", move="1-0", alpha=1, N_inner=1, seed=42
+        )
+        cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        print(cycle, cost)
+        assert cost > self.DG2_cost
+
+        # Try with an incorrect initial guess
+        initial_sol = ["D", "A", "B", "C", "D"]
+        cycle = self.tsp(
+            self.DG,
+            initial_sol,
+            source="D",
+            move="1-0",
+            alpha=0.1,
+            N_inner=1,
+            max_iterations=1,
+            seed=42,
+        )
+        cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        print(cycle, cost)
+        assert cost > self.DG_cost
+
+
+class TestThresholdAcceptingTSP(TestSimulatedAnnealingTSP):
+    """Rerun all TestSimulatedAnnealingTSP tests against
+    threshold_accepting_tsp, overriding only the failure-mode test whose
+    tuning parameters differ between the two algorithms."""
+
+    tsp = staticmethod(nx_app.threshold_accepting_tsp)
+
+    def test_failure_of_costs_too_high_when_iterations_low(self):
+        # Threshold Version:
+        # set number of moves low and number of iterations low
+        cycle = self.tsp(
+            self.DG2,
+            "greedy",
+            source="D",
+            move="1-0",
+            N_inner=1,
+            max_iterations=1,
+            seed=4,
+        )
+        cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        assert cost > self.DG2_cost
+
+        # set threshold too low
+        initial_sol = ["D", "A", "B", "C", "D"]
+        cycle = self.tsp(
+            self.DG, initial_sol, source="D", move="1-0", threshold=-3, seed=42
+        )
+        cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
+        assert cost > self.DG_cost
+
+
+# Tests for function traveling_salesman_problem
+def test_TSP_method():
+    """traveling_salesman_problem accepts a custom method callable
+    (here a curried simulated-annealing solver)."""
+    G = nx.cycle_graph(9)
+    G[4][5]["weight"] = 10
+
+    # Test using the old currying method
+    sa_tsp = lambda G, weight: nx_app.simulated_annealing_tsp(
+        G, "greedy", weight, source=4, seed=1
+    )
+
+    path = nx_app.traveling_salesman_problem(
+        G,
+        method=sa_tsp,
+        cycle=False,
+    )
+    print(path)
+    # The heavy (4, 5) edge is avoided by ending the path at 5
+    assert path == [4, 3, 2, 1, 0, 8, 7, 6, 5]
+
+
+def test_TSP_unweighted():
+    """On an unweighted cycle graph, visiting a node subset follows the
+    shortest arc between the requested nodes (either direction)."""
+    G = nx.cycle_graph(9)
+    path = nx_app.traveling_salesman_problem(G, nodes=[3, 6], cycle=False)
+    assert path in ([3, 4, 5, 6], [6, 5, 4, 3])
+
+    cycle = nx_app.traveling_salesman_problem(G, nodes=[3, 6])
+    assert cycle in ([3, 4, 5, 6, 5, 4, 3], [6, 5, 4, 3, 4, 5, 6])
+
+
+def test_TSP_weighted():
+    """All bundled TSP methods find the expected weighted tours/paths."""
+    G = nx.cycle_graph(9)
+    # Make the 3-4-5-6 arc expensive so tours route the other way round
+    G[0][1]["weight"] = 2
+    G[1][2]["weight"] = 2
+    G[2][3]["weight"] = 2
+    G[3][4]["weight"] = 4
+    G[4][5]["weight"] = 5
+    G[5][6]["weight"] = 4
+    G[6][7]["weight"] = 2
+    G[7][8]["weight"] = 2
+    G[8][0]["weight"] = 2
+    tsp = nx_app.traveling_salesman_problem
+
+    # path between 3 and 6
+    expected_paths = ([3, 2, 1, 0, 8, 7, 6], [6, 7, 8, 0, 1, 2, 3])
+    # cycle between 3 and 6
+    expected_cycles = (
+        [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3],
+        [6, 7, 8, 0, 1, 2, 3, 2, 1, 0, 8, 7, 6],
+    )
+    # path through all nodes
+    expected_tourpaths = ([5, 6, 7, 8, 0, 1, 2, 3, 4], [4, 3, 2, 1, 0, 8, 7, 6, 5])
+
+    # Check default method
+    cycle = tsp(G, nodes=[3, 6], weight="weight")
+    assert cycle in expected_cycles
+
+    path = tsp(G, nodes=[3, 6], weight="weight", cycle=False)
+    assert path in expected_paths
+
+    tourpath = tsp(G, weight="weight", cycle=False)
+    assert tourpath in expected_tourpaths
+
+    # Check all methods
+    methods = [
+        (nx_app.christofides, {}),
+        (nx_app.greedy_tsp, {}),
+        (
+            nx_app.simulated_annealing_tsp,
+            {"init_cycle": "greedy"},
+        ),
+        (
+            nx_app.threshold_accepting_tsp,
+            {"init_cycle": "greedy"},
+        ),
+    ]
+    for method, kwargs in methods:
+        cycle = tsp(G, nodes=[3, 6], weight="weight", method=method, **kwargs)
+        assert cycle in expected_cycles
+
+        path = tsp(
+            G, nodes=[3, 6], weight="weight", method=method, cycle=False, **kwargs
+        )
+        assert path in expected_paths
+
+        tourpath = tsp(G, weight="weight", method=method, cycle=False, **kwargs)
+        assert tourpath in expected_tourpaths
+
+
+def test_TSP_incomplete_graph_short_path():
+    """On an incomplete graph, missing edges are replaced with shortest
+    paths, so nodes may legitimately repeat in the returned tour."""
+    G = nx.cycle_graph(9)
+    G.add_edges_from([(4, 9), (9, 10), (10, 11), (11, 0)])
+    G[4][5]["weight"] = 5
+
+    cycle = nx_app.traveling_salesman_problem(G)
+    print(cycle)
+    # 12 distinct nodes, 17 tour entries: some nodes appear more than once
+    assert len(cycle) == 17 and len(set(cycle)) == 12
+
+    # make sure that cutting one edge out of complete graph formulation
+    # cuts out many edges out of the path of the TSP
+    path = nx_app.traveling_salesman_problem(G, cycle=False)
+    print(path)
+    assert len(path) == 13 and len(set(path)) == 12
+
+
+def test_held_karp_ascent():
+    """
+    Test the Held-Karp relaxation with the ascent method
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    # Adjacency matrix from page 1153 of the 1970 Held and Karp paper
+    # which have been edited to be directional, but also symmetric
+    G_array = np.array(
+        [
+            [0, 97, 60, 73, 17, 52],
+            [97, 0, 41, 52, 90, 30],
+            [60, 41, 0, 21, 35, 41],
+            [73, 52, 21, 0, 95, 46],
+            [17, 90, 35, 95, 0, 81],
+            [52, 30, 41, 46, 81, 0],
+        ]
+    )
+
+    # This instance has an integral optimum, so z_star is a tour (a graph)
+    solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 207.00
+    # Check that the z_stars are the same
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_ascent_fractional_solution():
+    """
+    Test the ascent method using a modified version of Figure 2 on page 1140
+    in 'The Traveling Salesman Problem and Minimum Spanning Trees' by Held and
+    Karp
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    # This version of Figure 2 has all of the edge weights multiplied by 100
+    # and is a complete directed graph with infinite edge weights for the
+    # edges not listed in the original graph
+    G_array = np.array(
+        [
+            [0, 100, 100, 100000, 100000, 1],
+            [100, 0, 100, 100000, 1, 100000],
+            [100, 100, 0, 1, 100000, 100000],
+            [100000, 100000, 1, 0, 100, 100],
+            [100000, 1, 100000, 100, 0, 100],
+            [1, 100000, 100000, 100, 100, 0],
+        ]
+    )
+
+    # Fractional optimum: z_star comes back as a dict of edge fractions
+    solution_z_star = {
+        (0, 1): 5 / 12,
+        (0, 2): 5 / 12,
+        (0, 5): 5 / 6,
+        (1, 0): 5 / 12,
+        (1, 2): 1 / 3,
+        (1, 4): 5 / 6,
+        (2, 0): 5 / 12,
+        (2, 1): 1 / 3,
+        (2, 3): 5 / 6,
+        (3, 2): 5 / 6,
+        (3, 4): 1 / 3,
+        (3, 5): 1 / 2,
+        (4, 1): 5 / 6,
+        (4, 3): 1 / 3,
+        (4, 5): 1 / 2,
+        (5, 0): 5 / 6,
+        (5, 3): 1 / 2,
+        (5, 4): 1 / 2,
+    }
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 303.00
+    # Check that the z_stars are the same
+    assert {key: round(z_star[key], 4) for key in z_star} == {
+        key: round(solution_z_star[key], 4) for key in solution_z_star
+    }
+
+
+def test_ascent_method_asymmetric():
+    """
+    Tests the ascent method using a truly asymmetric graph for which the
+    solution has been brute forced
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            [0, 26, 63, 59, 69, 31, 41],
+            [62, 0, 91, 53, 75, 87, 47],
+            [47, 82, 0, 90, 15, 9, 18],
+            [68, 19, 5, 0, 58, 34, 93],
+            [11, 58, 53, 55, 0, 61, 79],
+            [88, 75, 13, 76, 98, 0, 40],
+            [41, 61, 55, 88, 46, 45, 0],
+        ]
+    )
+
+    # Brute-forced optimal tour for the matrix above
+    solution_edges = [(0, 1), (1, 3), (3, 2), (2, 5), (5, 6), (4, 0), (6, 4)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 190.00
+    # Check that the z_stars match.
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_ascent_method_asymmetric_2():
+    """
+    Tests the ascent method using a truly asymmetric graph for which the
+    solution has been brute forced
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            [0, 45, 39, 92, 29, 31],
+            [72, 0, 4, 12, 21, 60],
+            [81, 6, 0, 98, 70, 53],
+            [49, 71, 59, 0, 98, 94],
+            [74, 95, 24, 43, 0, 47],
+            [56, 43, 3, 65, 22, 0],
+        ]
+    )
+
+    # Brute-forced optimal tour for the matrix above
+    solution_edges = [(0, 5), (5, 4), (1, 3), (3, 0), (2, 1), (4, 2)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 144.00
+    # Check that the z_stars match.
+    solution = nx.DiGraph()
+    solution.add_edges_from(solution_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution.edges)
+
+
+def test_held_karp_ascent_asymmetric_3():
+    """
+    Tests the ascent method using a truly asymmetric graph with a fractional
+    solution for which the solution has been brute forced.
+
+    In this graph there are two different optimal, integral solutions (which
+    are also the overall atsp solutions) to the Held Karp relaxation. However,
+    this particular graph has two different tours of optimal value and the
+    possible solutions in the held_karp_ascent function are not stored in an
+    ordered data structure.
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            [0, 1, 5, 2, 7, 4],
+            [7, 0, 7, 7, 1, 4],
+            [4, 7, 0, 9, 2, 1],
+            [7, 2, 7, 0, 4, 4],
+            [5, 5, 4, 4, 0, 3],
+            [3, 9, 1, 3, 4, 0],
+        ]
+    )
+
+    # Either of the two equally optimal tours may be returned
+    solution1_edges = [(0, 3), (1, 4), (2, 5), (3, 1), (4, 2), (5, 0)]
+
+    solution2_edges = [(0, 3), (3, 1), (1, 4), (4, 5), (2, 0), (5, 2)]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    assert round(opt_hk, 2) == 13.00
+    # Check that the z_stars are the same
+    solution1 = nx.DiGraph()
+    solution1.add_edges_from(solution1_edges)
+    solution2 = nx.DiGraph()
+    solution2.add_edges_from(solution2_edges)
+    assert nx.utils.edges_equal(z_star.edges, solution1.edges) or nx.utils.edges_equal(
+        z_star.edges, solution2.edges
+    )
+
+
+def test_held_karp_ascent_fractional_asymmetric():
+    """
+    Tests the ascent method using a truly asymmetric graph with a fractional
+    solution for which the solution has been brute forced
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            [0, 100, 150, 100000, 100000, 1],
+            [150, 0, 100, 100000, 1, 100000],
+            [100, 150, 0, 1, 100000, 100000],
+            [100000, 100000, 1, 0, 150, 100],
+            [100000, 2, 100000, 100, 0, 150],
+            [2, 100000, 100000, 150, 100, 0],
+        ]
+    )
+
+    # Fractional optimum: z_star is a dict of edge fractions
+    solution_z_star = {
+        (0, 1): 5 / 12,
+        (0, 2): 5 / 12,
+        (0, 5): 5 / 6,
+        (1, 0): 5 / 12,
+        (1, 2): 5 / 12,
+        (1, 4): 5 / 6,
+        (2, 0): 5 / 12,
+        (2, 1): 5 / 12,
+        (2, 3): 5 / 6,
+        (3, 2): 5 / 6,
+        (3, 4): 5 / 12,
+        (3, 5): 5 / 12,
+        (4, 1): 5 / 6,
+        (4, 3): 5 / 12,
+        (4, 5): 5 / 12,
+        (5, 0): 5 / 6,
+        (5, 3): 5 / 12,
+        (5, 4): 5 / 12,
+    }
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    opt_hk, z_star = tsp.held_karp_ascent(G)
+
+    # Check that the optimal weights are the same
+    assert round(opt_hk, 2) == 304.00
+    # Check that the z_stars are the same
+    assert {key: round(z_star[key], 4) for key in z_star} == {
+        key: round(solution_z_star[key], 4) for key in solution_z_star
+    }
+
+
+def test_spanning_tree_distribution():
+    """
+    Test that we can create an exponential distribution of spanning trees such
+    that the probability of each tree is proportional to the product of edge
+    weights.
+
+    Results of this test have been confirmed with hypothesis testing from the
+    created distribution.
+
+    This test uses the symmetric, fractional Held Karp solution.
+    """
+    import networkx.algorithms.approximation.traveling_salesman as tsp
+
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    # Fractional Held-Karp solution from test_ascent_fractional_solution
+    z_star = {
+        (0, 1): 5 / 12,
+        (0, 2): 5 / 12,
+        (0, 5): 5 / 6,
+        (1, 0): 5 / 12,
+        (1, 2): 1 / 3,
+        (1, 4): 5 / 6,
+        (2, 0): 5 / 12,
+        (2, 1): 1 / 3,
+        (2, 3): 5 / 6,
+        (3, 2): 5 / 6,
+        (3, 4): 1 / 3,
+        (3, 5): 1 / 2,
+        (4, 1): 5 / 6,
+        (4, 3): 1 / 3,
+        (4, 5): 1 / 2,
+        (5, 0): 5 / 6,
+        (5, 3): 1 / 2,
+        (5, 4): 1 / 2,
+    }
+
+    solution_gamma = {
+        (0, 1): -0.6383,
+        (0, 2): -0.6827,
+        (0, 5): 0,
+        (1, 2): -1.0781,
+        (1, 4): 0,
+        (2, 3): 0,
+        (5, 3): -0.2820,
+        (5, 4): -0.3327,
+        (4, 3): -0.9927,
+    }
+
+    # The undirected support of z_star
+    G = nx.MultiGraph()
+    for u, v in z_star:
+        if (u, v) in G.edges or (v, u) in G.edges:
+            continue
+        G.add_edge(u, v)
+
+    gamma = tsp.spanning_tree_distribution(G, z_star)
+
+    # Compare rounded to 4 decimal places to tolerate float noise
+    assert {key: round(gamma[key], 4) for key in gamma} == solution_gamma
+
+
+def test_asadpour_tsp():
+    """
+    Test the complete asadpour tsp algorithm with the fractional, symmetric
+    Held Karp solution. This test also uses an incomplete graph as input.
+    """
+    # This version of Figure 2 has all of the edge weights multiplied by 100
+    # and the 0 weight edges have a weight of 1.
+    pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    edge_list = [
+        (0, 1, 100),
+        (0, 2, 100),
+        (0, 5, 1),
+        (1, 2, 100),
+        (1, 4, 1),
+        (2, 3, 1),
+        (3, 4, 100),
+        (3, 5, 100),
+        (4, 5, 100),
+        (1, 0, 100),
+        (2, 0, 100),
+        (5, 0, 1),
+        (2, 1, 100),
+        (4, 1, 1),
+        (3, 2, 1),
+        (4, 3, 100),
+        (5, 3, 100),
+        (5, 4, 100),
+    ]
+
+    G = nx.DiGraph()
+    G.add_weighted_edges_from(edge_list)
+
+    tour = nx_app.traveling_salesman_problem(
+        G, weight="weight", method=nx_app.asadpour_atsp, seed=19
+    )
+
+    # Check that the returned list is a valid tour. Because this is an
+    # incomplete graph, the conditions are not as strict. We need the tour to
+    #
+    # Start and end at the same node
+    # Pass through every vertex at least once
+    # Have a total cost at most ln(6) / ln(ln(6)) = 3.0723 times the optimal
+    #
+    # For the second condition it is possible to have the tour pass through the
+    # same vertex more than once. Imagine that the tour on the complete version
+    # takes an edge not in the original graph. In the output this is substituted
+    # with the shortest path between those vertices, allowing vertices to appear
+    # more than once.
+    #
+    # Even though we are using a fixed seed, multiple tours have been known to
+    # be returned. The first two are from the original development of this test,
+    # and the third one from issue #5913 on GitHub. If other tours are returned,
+    # add it on the list of expected tours.
+    expected_tours = [
+        [1, 4, 5, 0, 2, 3, 2, 1],
+        [3, 2, 0, 1, 4, 5, 3],
+        [3, 2, 1, 0, 5, 4, 3],
+    ]
+
+    assert tour in expected_tours
+
+
+def test_asadpour_real_world():
+    """
+    This test uses airline prices between the six largest cities in the US.
+
+    * New York City -> JFK
+    * Los Angeles -> LAX
+    * Chicago -> ORD
+    * Houston -> IAH
+    * Phoenix -> PHX
+    * Philadelphia -> PHL
+
+    Flight prices from August 2021 using Delta or American airlines to get
+    nonstop flight. The brute force solution found the optimal tour to cost $872
+
+    This test also uses the `source` keyword argument to ensure that the tour
+    always starts at city 0.
+    """
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            # JFK  LAX  ORD  IAH  PHX  PHL
+            [0, 243, 199, 208, 169, 183],  # JFK
+            [277, 0, 217, 123, 127, 252],  # LAX
+            [297, 197, 0, 197, 123, 177],  # ORD
+            [303, 169, 197, 0, 117, 117],  # IAH
+            [257, 127, 160, 117, 0, 319],  # PHX
+            [183, 332, 217, 117, 319, 0],  # PHL
+        ]
+    )
+
+    node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"}
+
+    expected_tours = [
+        ["JFK", "LAX", "PHX", "ORD", "IAH", "PHL", "JFK"],
+        ["JFK", "ORD", "PHX", "LAX", "IAH", "PHL", "JFK"],
+    ]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    nx.relabel_nodes(G, node_map, copy=False)
+
+    # source="JFK" pins the rotation of the returned tour
+    tour = nx_app.traveling_salesman_problem(
+        G, weight="weight", method=nx_app.asadpour_atsp, seed=37, source="JFK"
+    )
+
+    assert tour in expected_tours
+
+
+def test_asadpour_real_world_path():
+    """
+    This test uses airline prices between the six largest cities in the US. This
+    time using a path, not a cycle.
+
+    * New York City -> JFK
+    * Los Angeles -> LAX
+    * Chicago -> ORD
+    * Houston -> IAH
+    * Phoenix -> PHX
+    * Philadelphia -> PHL
+
+    Flight prices from August 2021 using Delta or American airlines to get
+    nonstop flight. The brute force solution found the optimal tour to cost $872
+    """
+    np = pytest.importorskip("numpy")
+    pytest.importorskip("scipy")
+
+    G_array = np.array(
+        [
+            # JFK  LAX  ORD  IAH  PHX  PHL
+            [0, 243, 199, 208, 169, 183],  # JFK
+            [277, 0, 217, 123, 127, 252],  # LAX
+            [297, 197, 0, 197, 123, 177],  # ORD
+            [303, 169, 197, 0, 117, 117],  # IAH
+            [257, 127, 160, 117, 0, 319],  # PHX
+            [183, 332, 217, 117, 319, 0],  # PHL
+        ]
+    )
+
+    node_map = {0: "JFK", 1: "LAX", 2: "ORD", 3: "IAH", 4: "PHX", 5: "PHL"}
+
+    expected_paths = [
+        ["ORD", "PHX", "LAX", "IAH", "PHL", "JFK"],
+        ["JFK", "PHL", "IAH", "ORD", "PHX", "LAX"],
+    ]
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+    nx.relabel_nodes(G, node_map, copy=False)
+
+    # cycle=False requests an open path instead of a closed tour
+    path = nx_app.traveling_salesman_problem(
+        G, weight="weight", cycle=False, method=nx_app.asadpour_atsp, seed=56
+    )
+
+    assert path in expected_paths
+
+
+def test_asadpour_disconnected_graph():
+    """
+    Test that the proper exception is raised when asadpour_atsp is given a
+    disconnected graph.
+    """
+
+    G = nx.complete_graph(4, create_using=nx.DiGraph)
+    # have to set edge weights so that if the exception is not raised, the
+    # function will complete and we will fail the test
+    nx.set_edge_attributes(G, 1, "weight")
+    # Isolated node makes the graph disconnected
+    G.add_node(5)
+
+    pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G)
+
+
+def test_asadpour_incomplete_graph():
+    """
+    Test that the proper exception is raised when asadpour_atsp is given an
+    incomplete graph
+    """
+
+    G = nx.complete_graph(4, create_using=nx.DiGraph)
+    # have to set edge weights so that if the exception is not raised, the
+    # function will complete and we will fail the test
+    nx.set_edge_attributes(G, 1, "weight")
+    # A single missing edge makes the graph incomplete
+    G.remove_edge(0, 1)
+
+    pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G)
+
+
+def test_asadpour_empty_graph():
+    """
+    Test the asadpour_atsp function with an empty graph
+    """
+    G = nx.DiGraph()
+
+    pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G)
+
+
+@pytest.mark.slow
+def test_asadpour_integral_held_karp():
+    """
+    This test uses an integral held karp solution and the held karp function
+    will return a graph rather than a dict, bypassing most of the asadpour
+    algorithm.
+
+    At first glance, this test probably doesn't look like it ensures that we
+    skip the rest of the asadpour algorithm, but it does. We are not fixing a
+    seed for the random number generator, so if we sampled any spanning trees
+    the approximation would be different basically every time this test is
+    executed but it is not since held karp is deterministic and we do not
+    reach the portion of the code with the dependence on random numbers.
+    """
+    np = pytest.importorskip("numpy")
+
+    G_array = np.array(
+        [
+            [0, 26, 63, 59, 69, 31, 41],
+            [62, 0, 91, 53, 75, 87, 47],
+            [47, 82, 0, 90, 15, 9, 18],
+            [68, 19, 5, 0, 58, 34, 93],
+            [11, 58, 53, 55, 0, 61, 79],
+            [88, 75, 13, 76, 98, 0, 40],
+            [41, 61, 55, 88, 46, 45, 0],
+        ]
+    )
+
+    G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
+
+    # Run twice: identical output demonstrates the deterministic path is taken
+    for _ in range(2):
+        tour = nx_app.traveling_salesman_problem(G, method=nx_app.asadpour_atsp)
+
+        assert [1, 3, 2, 5, 2, 6, 4, 0, 1] == tour
+
+
+def test_directed_tsp_impossible():
+    """
+    Test the asadpour algorithm with a graph without a hamiltonian circuit
+    """
+    pytest.importorskip("numpy")
+
+    # In this graph, once we leave node 0 we cannot return
+    edges = [
+        (0, 1, 10),
+        (0, 2, 11),
+        (0, 3, 12),
+        (1, 2, 4),
+        (1, 3, 6),
+        (2, 1, 3),
+        (2, 3, 2),
+        (3, 1, 5),
+        (3, 2, 1),
+    ]
+
+    G = nx.DiGraph()
+    G.add_weighted_edges_from(edges)
+
+    pytest.raises(nx.NetworkXError, nx_app.traveling_salesman_problem, G)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py
new file mode 100644
index 0000000000000000000000000000000000000000..461b0f2ed2dd4d043902d054e10a5f39ffb069c9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_treewidth.py
@@ -0,0 +1,280 @@
+import itertools
+
+import networkx as nx
+from networkx.algorithms.approximation import (
+ treewidth_min_degree,
+ treewidth_min_fill_in,
+)
+from networkx.algorithms.approximation.treewidth import (
+ MinDegreeHeuristic,
+ min_fill_in_heuristic,
+)
+
+
+def is_tree_decomp(graph, decomp):
+    """Check if the given tree decomposition is valid."""
+    # Check that every node of the graph appears in at least one bag
+    for x in graph.nodes():
+        appear_once = False
+        for bag in decomp.nodes():
+            if x in bag:
+                appear_once = True
+                break
+        assert appear_once
+
+    # Check if each connected pair of nodes are at least once together in a bag
+    for x, y in graph.edges():
+        appear_together = False
+        for bag in decomp.nodes():
+            if x in bag and y in bag:
+                appear_together = True
+                break
+        assert appear_together
+
+    # Check if the nodes associated with vertex v form a connected subset of T
+    for v in graph.nodes():
+        subset = []
+        for bag in decomp.nodes():
+            if v in bag:
+                subset.append(bag)
+        sub_graph = decomp.subgraph(subset)
+        assert nx.is_connected(sub_graph)
+
+
+class TestTreewidthMinDegree:
+    """Unit tests for the min_degree function"""
+
+    @classmethod
+    def setup_class(cls):
+        """Setup for different kinds of trees"""
+        # Triangle: fully connected, used for the heuristic-abort test
+        cls.complete = nx.Graph()
+        cls.complete.add_edge(1, 2)
+        cls.complete.add_edge(2, 3)
+        cls.complete.add_edge(1, 3)
+
+        # Small graph with known treewidth 2
+        cls.small_tree = nx.Graph()
+        cls.small_tree.add_edge(1, 3)
+        cls.small_tree.add_edge(4, 3)
+        cls.small_tree.add_edge(2, 3)
+        cls.small_tree.add_edge(3, 5)
+        cls.small_tree.add_edge(5, 6)
+        cls.small_tree.add_edge(5, 7)
+        cls.small_tree.add_edge(6, 7)
+
+        # Graph whose node degrees force a deterministic elimination order
+        cls.deterministic_graph = nx.Graph()
+        cls.deterministic_graph.add_edge(0, 1)  # deg(0) = 1
+
+        cls.deterministic_graph.add_edge(1, 2)  # deg(1) = 2
+
+        cls.deterministic_graph.add_edge(2, 3)
+        cls.deterministic_graph.add_edge(2, 4)  # deg(2) = 3
+
+        cls.deterministic_graph.add_edge(3, 4)
+        cls.deterministic_graph.add_edge(3, 5)
+        cls.deterministic_graph.add_edge(3, 6)  # deg(3) = 4
+
+        cls.deterministic_graph.add_edge(4, 5)
+        cls.deterministic_graph.add_edge(4, 6)
+        cls.deterministic_graph.add_edge(4, 7)  # deg(4) = 5
+
+        cls.deterministic_graph.add_edge(5, 6)
+        cls.deterministic_graph.add_edge(5, 7)
+        cls.deterministic_graph.add_edge(5, 8)
+        cls.deterministic_graph.add_edge(5, 9)  # deg(5) = 6
+
+        cls.deterministic_graph.add_edge(6, 7)
+        cls.deterministic_graph.add_edge(6, 8)
+        cls.deterministic_graph.add_edge(6, 9)  # deg(6) = 6
+
+        cls.deterministic_graph.add_edge(7, 8)
+        cls.deterministic_graph.add_edge(7, 9)  # deg(7) = 5
+
+        cls.deterministic_graph.add_edge(8, 9)  # deg(8) = 4
+
+    def test_petersen_graph(self):
+        """Test Petersen graph tree decomposition result"""
+        G = nx.petersen_graph()
+        _, decomp = treewidth_min_degree(G)
+        is_tree_decomp(G, decomp)
+
+    def test_small_tree_treewidth(self):
+        """Test small tree
+
+        Test if the computed treewidth of the known self.small_tree is 2.
+        As we know which value we can expect from our heuristic, values other
+        than two are regressions
+        """
+        G = self.small_tree
+        # the order of removal should be [1,2,4]3[5,6,7]
+        # (with [] denoting any order of the containing nodes)
+        # resulting in treewidth 2 for the heuristic
+        treewidth, _ = treewidth_min_fill_in(G)
+        assert treewidth == 2
+
+    def test_heuristic_abort(self):
+        """Test heuristic abort condition for fully connected graph"""
+        # Build the plain dict-of-sets adjacency the heuristic operates on
+        graph = {}
+        for u in self.complete:
+            graph[u] = set()
+            for v in self.complete[u]:
+                if u != v:  # ignore self-loop
+                    graph[u].add(v)
+
+        deg_heuristic = MinDegreeHeuristic(graph)
+        # best_node must return None on a complete graph (abort signal)
+        node = deg_heuristic.best_node(graph)
+        if node is None:
+            pass
+        else:
+            assert False
+
+    def test_empty_graph(self):
+        """Test empty graph"""
+        G = nx.Graph()
+        _, _ = treewidth_min_degree(G)
+
+    def test_two_component_graph(self):
+        # Two isolated nodes: no edges, so treewidth is 0
+        G = nx.Graph()
+        G.add_node(1)
+        G.add_node(2)
+        treewidth, _ = treewidth_min_degree(G)
+        assert treewidth == 0
+
+    def test_not_sortable_nodes(self):
+        # Mixed node types (int and str) must not break the heuristic
+        G = nx.Graph([(0, "a")])
+        treewidth_min_degree(G)
+
+    def test_heuristic_first_steps(self):
+        """Test first steps of min_degree heuristic"""
+        graph = {
+            n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph
+        }
+        deg_heuristic = MinDegreeHeuristic(graph)
+        elim_node = deg_heuristic.best_node(graph)
+        print(f"Graph {graph}:")
+        steps = []
+
+        while elim_node is not None:
+            print(f"Removing {elim_node}:")
+            steps.append(elim_node)
+            nbrs = graph[elim_node]
+
+            # Connect all neighbors of the eliminated node to each other
+            for u, v in itertools.permutations(nbrs, 2):
+                if v not in graph[u]:
+                    graph[u].add(v)
+
+            # Then remove the eliminated node from the adjacency structure
+            for u in graph:
+                if elim_node in graph[u]:
+                    graph[u].remove(elim_node)
+
+            del graph[elim_node]
+            print(f"Graph {graph}:")
+            elim_node = deg_heuristic.best_node(graph)
+
+        # check only the first 5 elements for equality
+        assert steps[:5] == [0, 1, 2, 3, 4]
+
+
+class TestTreewidthMinFillIn:
+    """Unit tests for the treewidth_min_fill_in function."""
+
+    @classmethod
+    def setup_class(cls):
+        """Setup for different kinds of trees"""
+        # Triangle: fully connected, used for the heuristic-abort test
+        cls.complete = nx.Graph()
+        cls.complete.add_edge(1, 2)
+        cls.complete.add_edge(2, 3)
+        cls.complete.add_edge(1, 3)
+
+        # Small graph with known treewidth 2
+        cls.small_tree = nx.Graph()
+        cls.small_tree.add_edge(1, 2)
+        cls.small_tree.add_edge(2, 3)
+        cls.small_tree.add_edge(3, 4)
+        cls.small_tree.add_edge(1, 4)
+        cls.small_tree.add_edge(2, 4)
+        cls.small_tree.add_edge(4, 5)
+        cls.small_tree.add_edge(5, 6)
+        cls.small_tree.add_edge(5, 7)
+        cls.small_tree.add_edge(6, 7)
+
+        # Graph with a deterministic min-fill-in elimination order
+        cls.deterministic_graph = nx.Graph()
+        cls.deterministic_graph.add_edge(1, 2)
+        cls.deterministic_graph.add_edge(1, 3)
+        cls.deterministic_graph.add_edge(3, 4)
+        cls.deterministic_graph.add_edge(2, 4)
+        cls.deterministic_graph.add_edge(3, 5)
+        cls.deterministic_graph.add_edge(4, 5)
+        cls.deterministic_graph.add_edge(3, 6)
+        cls.deterministic_graph.add_edge(5, 6)
+
+    def test_petersen_graph(self):
+        """Test Petersen graph tree decomposition result"""
+        G = nx.petersen_graph()
+        _, decomp = treewidth_min_fill_in(G)
+        is_tree_decomp(G, decomp)
+
+    def test_small_tree_treewidth(self):
+        """Test if the computed treewidth of the known self.small_tree is 2"""
+        G = self.small_tree
+        # the order of removal should be [1,2,4]3[5,6,7]
+        # (with [] denoting any order of the containing nodes)
+        # resulting in treewidth 2 for the heuristic
+        treewidth, _ = treewidth_min_fill_in(G)
+        assert treewidth == 2
+
+    def test_heuristic_abort(self):
+        """Test if min_fill_in returns None for fully connected graph"""
+        # Build the plain dict-of-sets adjacency the heuristic operates on
+        graph = {}
+        for u in self.complete:
+            graph[u] = set()
+            for v in self.complete[u]:
+                if u != v:  # ignore self-loop
+                    graph[u].add(v)
+        next_node = min_fill_in_heuristic(graph)
+        if next_node is None:
+            pass
+        else:
+            assert False
+
+    def test_empty_graph(self):
+        """Test empty graph"""
+        G = nx.Graph()
+        _, _ = treewidth_min_fill_in(G)
+
+    def test_two_component_graph(self):
+        # Two isolated nodes: no edges, so treewidth is 0
+        G = nx.Graph()
+        G.add_node(1)
+        G.add_node(2)
+        treewidth, _ = treewidth_min_fill_in(G)
+        assert treewidth == 0
+
+    def test_not_sortable_nodes(self):
+        # Mixed node types (int and str) must not break the heuristic
+        G = nx.Graph([(0, "a")])
+        treewidth_min_fill_in(G)
+
+    def test_heuristic_first_steps(self):
+        """Test first steps of min_fill_in heuristic"""
+        graph = {
+            n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph
+        }
+        print(f"Graph {graph}:")
+        elim_node = min_fill_in_heuristic(graph)
+        steps = []
+
+        while elim_node is not None:
+            print(f"Removing {elim_node}:")
+            steps.append(elim_node)
+            nbrs = graph[elim_node]
+
+            # Connect all neighbors of the eliminated node to each other
+            for u, v in itertools.permutations(nbrs, 2):
+                if v not in graph[u]:
+                    graph[u].add(v)
+
+            # Then remove the eliminated node from the adjacency structure
+            for u in graph:
+                if elim_node in graph[u]:
+                    graph[u].remove(elim_node)
+
+            del graph[elim_node]
+            print(f"Graph {graph}:")
+            elim_node = min_fill_in_heuristic(graph)
+
+        # check only the first 2 elements for equality
+        assert steps[:2] == [6, 5]
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cc5a38df9a4139684005491e0183cd563487154
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/tests/test_vertex_cover.py
@@ -0,0 +1,68 @@
+import networkx as nx
+from networkx.algorithms.approximation import min_weighted_vertex_cover
+
+
def is_cover(G, node_cover):
    """Return True iff every edge of `G` has an endpoint in `node_cover`."""
    for u, v in G.edges():
        if u not in node_cover and v not in node_cover:
            return False
    return True
+
+
class TestMWVC:
    """Unit tests for the approximate minimum weighted vertex cover
    function,
    :func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`.

    """

    def test_unweighted_directed(self):
        # Star-like digraph: half of the leaves point away from the hub and
        # half point towards it. The hub alone covers every edge.
        G = nx.DiGraph()
        G.add_edges_from((0, leaf) for leaf in range(1, 26))
        G.add_edges_from((leaf, 0) for leaf in range(26, 51))
        cover = min_weighted_vertex_cover(G)
        assert len(cover) == 1
        assert is_cover(G, cover)

    def test_unweighted_undirected(self):
        # For a star graph the center by itself is a minimum vertex cover.
        sg = nx.star_graph(50)
        cover = min_weighted_vertex_cover(sg)
        assert len(cover) == 1
        assert is_cover(sg, cover)

    def test_weighted(self):
        # Wheel-like graph: an expensive hub (weight 10) joined to a 4-cycle
        # of unit-weight rim nodes. The four rim nodes form a cover of total
        # weight 4, cheaper than any cover that includes the hub.
        wg = nx.Graph()
        for node, node_weight in [(0, 10), (1, 1), (2, 1), (3, 1), (4, 1)]:
            wg.add_node(node, weight=node_weight)

        wg.add_edges_from([(0, 1), (0, 2), (0, 3), (0, 4)])
        wg.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 1)])

        cover = min_weighted_vertex_cover(wg, weight="weight")
        csum = sum(wg.nodes[node]["weight"] for node in cover)
        assert csum == 4
        assert is_cover(wg, cover)

    def test_unweighted_self_loop(self):
        # A self-loop can only be covered by its own endpoint, so the cover
        # must contain node 2 plus one endpoint of edge (0, 1).
        slg = nx.Graph()
        slg.add_nodes_from([0, 1, 2])
        slg.add_edge(0, 1)
        slg.add_edge(2, 2)

        cover = min_weighted_vertex_cover(slg)
        assert len(cover) == 2
        assert is_cover(slg, cover)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a31b72810c55c71946e074306ab1853d296aa47
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/traveling_salesman.py
@@ -0,0 +1,1498 @@
+"""
+=================================
+Travelling Salesman Problem (TSP)
+=================================
+
+Implementation of approximate algorithms
+for solving and approximating the TSP problem.
+
+Categories of algorithms which are implemented:
+
+- Christofides (provides a 3/2-approximation of TSP)
+- Greedy
+- Simulated Annealing (SA)
+- Threshold Accepting (TA)
+- Asadpour Asymmetric Traveling Salesman Algorithm
+
+The Travelling Salesman Problem tries to find, given the weight
+(distance) between all points where a salesman has to visit, the
+route so that:
+
+- The total distance (cost) which the salesman travels is minimized.
+- The salesman returns to the starting point.
+- Note that for a complete graph, the salesman visits each point once.
+
+The function `travelling_salesman_problem` allows for incomplete
+graphs by finding all-pairs shortest paths, effectively converting
+the problem to a complete graph problem. It calls one of the
+approximate methods on that problem and then converts the result
+back to the original graph using the previously found shortest paths.
+
+TSP is an NP-hard problem in combinatorial optimization,
+important in operations research and theoretical computer science.
+
+http://en.wikipedia.org/wiki/Travelling_salesman_problem
+"""
+import math
+
+import networkx as nx
+from networkx.algorithms.tree.mst import random_spanning_tree
+from networkx.utils import not_implemented_for, pairwise, py_random_state
+
+__all__ = [
+ "traveling_salesman_problem",
+ "christofides",
+ "asadpour_atsp",
+ "greedy_tsp",
+ "simulated_annealing_tsp",
+ "threshold_accepting_tsp",
+]
+
+
def swap_two_nodes(soln, seed):
    """Return a neighbor of cycle `soln` by exchanging two interior nodes.

    Parameters
    ----------
    soln : list of nodes
        Current cycle of nodes

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    list
        The solution after the move is applied. (A neighbor solution.)

    Notes
    -----
    `soln` is assumed to be a closed cycle: its first and last elements are
    the same node, and that fixed endpoint is never moved, so only interior
    positions ``1 .. len(soln) - 2`` are eligible.

    The input list is modified in place and also returned; make a copy
    first if the original ordering must be preserved.

    See Also
    --------
    move_one_node
    """
    i, j = seed.sample(range(1, len(soln) - 1), k=2)
    soln[i], soln[j] = soln[j], soln[i]
    return soln
+
+
def move_one_node(soln, seed):
    """Return a neighbor of cycle `soln` by relocating one interior node.

    The node to move and the position to move it to are chosen randomly.
    The first and last nodes are left untouched as `soln` must remain a
    cycle starting (and ending) at that node.

    Parameters
    ----------
    soln : list of nodes
        Current cycle of nodes

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    list
        The solution after the move is applied. (A neighbor solution.)

    Notes
    -----
    `soln` is assumed to be a closed cycle: its first and last elements are
    the same node, so only interior positions ``1 .. len(soln) - 2`` are
    eligible as source and destination.

    The input list is modified in place and also returned; make a copy
    first if the original ordering must be preserved.

    See Also
    --------
    swap_two_nodes
    """
    src, dst = seed.sample(range(1, len(soln) - 1), k=2)
    relocated = soln.pop(src)
    soln.insert(dst, relocated)
    return soln
+
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def christofides(G, weight="weight", tree=None):
    """Approximate a solution of the traveling salesman problem

    Compute a 3/2-approximation of the traveling salesman problem
    in a complete undirected graph using Christofides [1]_ algorithm.

    Parameters
    ----------
    G : Graph
        `G` should be a complete weighted undirected graph.
        The distance between all pairs of nodes should be included.

    weight : string, optional (default="weight")
        Edge data key corresponding to the edge weight.
        If any edge does not have this attribute the weight is set to 1.

    tree : NetworkX graph or None (default: None)
        A minimum spanning tree of G. Or, if None, the minimum spanning
        tree is computed using :func:`networkx.minimum_spanning_tree`

    Returns
    -------
    list
        List of nodes in `G` along a cycle with a 3/2-approximation of
        the minimal Hamiltonian cycle.

    References
    ----------
    .. [1] Christofides, Nicos. "Worst-case analysis of a new heuristic for
       the travelling salesman problem." No. RR-388. Carnegie-Mellon Univ
       Pittsburgh Pa Management Sciences Research Group, 1976.
    """
    # Remove selfloops if necessary
    loop_nodes = nx.nodes_with_selfloops(G)
    try:
        node = next(loop_nodes)
    except StopIteration:
        # No selfloops at all: use G as-is and avoid paying for a copy.
        pass
    else:
        # At least one selfloop exists, so copy G before mutating it.
        # `loop_nodes` is a generator; its remaining items are consumed here.
        G = G.copy()
        G.remove_edge(node, node)
        G.remove_edges_from((n, n) for n in loop_nodes)
    # Check that G is a complete graph
    N = len(G) - 1
    # This check ignores selfloops which is what we want here.
    if any(len(nbrdict) != N for n, nbrdict in G.adj.items()):
        raise nx.NetworkXError("G must be a complete graph.")

    if tree is None:
        tree = nx.minimum_spanning_tree(G, weight=weight)
    # L is the induced subgraph on the odd-degree vertices of the spanning
    # tree; a minimum-weight perfect matching on L makes every degree even.
    L = G.copy()
    L.remove_nodes_from([v for v, degree in tree.degree if not (degree % 2)])
    MG = nx.MultiGraph()
    MG.add_edges_from(tree.edges)
    edges = nx.min_weight_matching(L, weight=weight)
    MG.add_edges_from(edges)
    # Tree + matching is an Eulerian multigraph; shortcutting repeated nodes
    # in its Eulerian circuit yields the Hamiltonian cycle.
    return _shortcutting(nx.eulerian_circuit(MG))
+
+
+def _shortcutting(circuit):
+ """Remove duplicate nodes in the path"""
+ nodes = []
+ for u, v in circuit:
+ if v in nodes:
+ continue
+ if not nodes:
+ nodes.append(u)
+ nodes.append(v)
+ nodes.append(nodes[0])
+ return nodes
+
+
@nx._dispatchable(edge_attrs="weight")
def traveling_salesman_problem(
    G, weight="weight", nodes=None, cycle=True, method=None, **kwargs
):
    """Find the shortest path in `G` connecting specified nodes

    This function allows approximate solution to the traveling salesman
    problem on networks that are not complete graphs and/or where the
    salesman does not need to visit all nodes.

    This function proceeds in two steps. First, it creates a complete
    graph using the all-pairs shortest_paths between nodes in `nodes`.
    Edge weights in the new graph are the lengths of the paths
    between each pair of nodes in the original graph.
    Second, an algorithm (default: `christofides` for undirected and
    `asadpour_atsp` for directed) is used to approximate the minimal Hamiltonian
    cycle on this new graph. The available algorithms are:

    - christofides
    - greedy_tsp
    - simulated_annealing_tsp
    - threshold_accepting_tsp
    - asadpour_atsp

    Once the Hamiltonian Cycle is found, this function post-processes to
    accommodate the structure of the original graph. If `cycle` is ``False``,
    the biggest weight edge is removed to make a Hamiltonian path.
    Then each edge on the new complete graph used for that analysis is
    replaced by the shortest_path between those nodes on the original graph.
    If the input graph `G` includes edges with weights that do not adhere to
    the triangle inequality, such as when `G` is not a complete graph (i.e
    length of non-existent edges is infinity), then the returned path may
    contain some repeating nodes (other than the starting node).

    Parameters
    ----------
    G : NetworkX graph
        A possibly weighted graph

    nodes : collection of nodes (default=G.nodes)
        collection (list, set, etc.) of nodes to visit

    weight : string, optional (default="weight")
        Edge data key corresponding to the edge weight.
        If any edge does not have this attribute the weight is set to 1.

    cycle : bool (default: True)
        Indicates whether a cycle should be returned, or a path.
        Note: the cycle is the approximate minimal cycle.
        The path simply removes the biggest edge in that cycle.

    method : function (default: None)
        A function that returns a cycle on all nodes and approximates
        the solution to the traveling salesman problem on a complete
        graph. The returned cycle is then used to find a corresponding
        solution on `G`. `method` should be callable; take inputs
        `G`, and `weight`; and return a list of nodes along the cycle.

        Provided options include :func:`christofides`, :func:`greedy_tsp`,
        :func:`simulated_annealing_tsp` and :func:`threshold_accepting_tsp`.

        If `method is None`: use :func:`christofides` for undirected `G` and
        :func:`asadpour_atsp` for directed `G`.

    **kwargs : dict
        Other keyword arguments to be passed to the `method` function passed in.

    Returns
    -------
    list
        List of nodes in `G` along a path with an approximation of the minimal
        path through `nodes`.

    Raises
    ------
    NetworkXError
        If `G` is a directed graph it has to be strongly connected or the
        complete version cannot be generated.

    Examples
    --------
    >>> tsp = nx.approximation.traveling_salesman_problem
    >>> G = nx.cycle_graph(9)
    >>> G[4][5]["weight"] = 5  # all other weights are 1
    >>> tsp(G, nodes=[3, 6])
    [3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3]
    >>> path = tsp(G, cycle=False)
    >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4])
    True

    While no longer required, you can still build (curry) your own function
    to provide parameter values to the methods.

    >>> SA_tsp = nx.approximation.simulated_annealing_tsp
    >>> method = lambda G, weight: SA_tsp(G, "greedy", weight=weight, temp=500)
    >>> path = tsp(G, cycle=False, method=method)
    >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4])
    True

    Otherwise, pass other keyword arguments directly into the tsp function.

    >>> path = tsp(
    ...     G,
    ...     cycle=False,
    ...     method=nx.approximation.simulated_annealing_tsp,
    ...     init_cycle="greedy",
    ...     temp=500,
    ... )
    >>> path in ([4, 3, 2, 1, 0, 8, 7, 6, 5], [5, 6, 7, 8, 0, 1, 2, 3, 4])
    True
    """
    if method is None:
        if G.is_directed():
            method = asadpour_atsp
        else:
            method = christofides
    if nodes is None:
        nodes = list(G.nodes)

    # All-pairs shortest path lengths (dist) and paths (path) are used both
    # to build the complete surrogate graph and to expand its edges back
    # into paths of the original graph at the end.
    dist = {}
    path = {}
    for n, (d, p) in nx.all_pairs_dijkstra(G, weight=weight):
        dist[n] = d
        path[n] = p

    if G.is_directed():
        # If the graph is not strongly connected, raise an exception
        if not nx.is_strongly_connected(G):
            raise nx.NetworkXError("G is not strongly connected")
        GG = nx.DiGraph()
    else:
        GG = nx.Graph()
    for u in nodes:
        for v in nodes:
            if u == v:
                continue
            GG.add_edge(u, v, weight=dist[u][v])

    best_GG = method(GG, weight=weight, **kwargs)

    if not cycle:
        # find and remove the biggest edge
        (u, v) = max(pairwise(best_GG), key=lambda x: dist[x[0]][x[1]])
        pos = best_GG.index(u) + 1
        while best_GG[pos] != v:
            # BUGFIX: scan for the next occurrence of `u` at or after `pos`.
            # The previous code used `best_GG[pos:].index(u) + 1`, an index
            # relative to the slice rather than to `best_GG`, which pointed
            # at the wrong element whenever `u` occurred more than once
            # before the maximal edge.
            pos = best_GG.index(u, pos) + 1
        # Rotate the cycle so the removed edge spans the two list ends.
        best_GG = best_GG[pos:-1] + best_GG[:pos]

    # Replace each surrogate edge by the corresponding shortest path in G.
    best_path = []
    for u, v in pairwise(best_GG):
        best_path.extend(path[u][v][:-1])
    best_path.append(v)
    return best_path
+
+
@not_implemented_for("undirected")
@py_random_state(2)
@nx._dispatchable(edge_attrs="weight", mutates_input=True)
def asadpour_atsp(G, weight="weight", seed=None, source=None):
    """
    Returns an approximate solution to the traveling salesman problem.

    This approximate solution is one of the best known approximations for the
    asymmetric traveling salesman problem developed by Asadpour et al,
    [1]_. The algorithm first solves the Held-Karp relaxation to find a lower
    bound for the weight of the cycle. Next, it constructs an exponential
    distribution of undirected spanning trees where the probability of an
    edge being in the tree corresponds to the weight of that edge using a
    maximum entropy rounding scheme. Next we sample that distribution
    $2 \\lceil \\ln n \\rceil$ times and save the minimum sampled tree once the
    direction of the arcs is added back to the edges. Finally, we augment
    then short circuit that graph to find the approximate tour for the
    salesman.

    Parameters
    ----------
    G : nx.DiGraph
        The graph should be a complete weighted directed graph. The
        distance between all pairs of nodes should be included and the triangle
        inequality should hold. That is, the direct edge between any two nodes
        should be the path of least cost.

    weight : string, optional (default="weight")
        Edge data key corresponding to the edge weight.
        If any edge does not have this attribute the weight is set to 1.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    source : node label (default=`None`)
        If given, return the cycle starting and ending at the given node.

    Returns
    -------
    cycle : list of nodes
        Returns the cycle (list of nodes) that a salesman can follow to minimize
        the total weight of the trip.

    Raises
    ------
    NetworkXError
        If `G` is not complete or has less than two nodes, the algorithm raises
        an exception.

    NetworkXError
        If `source` is not `None` and is not a node in `G`, the algorithm raises
        an exception.

    NetworkXNotImplemented
        If `G` is an undirected graph.

    References
    ----------
    .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi,
       An o(log n/log log n)-approximation algorithm for the asymmetric
       traveling salesman problem, Operations research, 65 (2017),
       pp. 1043–1061

    Examples
    --------
    >>> import networkx as nx
    >>> import networkx.algorithms.approximation as approx
    >>> G = nx.complete_graph(3, create_using=nx.DiGraph)
    >>> nx.set_edge_attributes(
    ...     G, {(0, 1): 2, (1, 2): 2, (2, 0): 2, (0, 2): 1, (2, 1): 1, (1, 0): 1}, "weight"
    ... )
    >>> tour = approx.asadpour_atsp(G, source=0)
    >>> tour
    [0, 2, 1, 0]
    """
    from math import ceil, exp
    from math import log as ln

    # Check that G is a complete graph
    N = len(G) - 1
    if N < 2:
        raise nx.NetworkXError("G must have at least two nodes")
    # This check ignores selfloops which is what we want here.
    if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
        raise nx.NetworkXError("G is not a complete DiGraph")
    # Check that the source vertex, if given, is in the graph
    if source is not None and source not in G.nodes:
        raise nx.NetworkXError("Given source node not in G.")

    opt_hk, z_star = held_karp_ascent(G, weight)

    # Test to see if the ascent method found an integer solution or a fractional
    # solution. If it is integral then z_star is a nx.Graph, otherwise it is
    # a dict
    if not isinstance(z_star, dict):
        # Here we are using the shortcutting method to go from the list of edges
        # returned from eulerian_circuit to a list of nodes
        return _shortcutting(nx.eulerian_circuit(z_star, source=source))

    # Create the undirected support of z_star
    z_support = nx.MultiGraph()
    for u, v in z_star:
        if (u, v) not in z_support.edges:
            edge_weight = min(G[u][v][weight], G[v][u][weight])
            z_support.add_edge(u, v, **{weight: edge_weight})

    # Create the exponential distribution of spanning trees
    gamma = spanning_tree_distribution(z_support, z_star)

    # Write the lambda values to the edges of z_support
    z_support = nx.Graph(z_support)
    lambda_dict = {(u, v): exp(gamma[(u, v)]) for u, v in z_support.edges()}
    nx.set_edge_attributes(z_support, lambda_dict, "weight")
    del gamma, lambda_dict

    # Sample 2 * ceil( ln(n) ) spanning trees and record the minimum one
    minimum_sampled_tree = None
    minimum_sampled_tree_weight = math.inf
    for _ in range(2 * ceil(ln(G.number_of_nodes()))):
        sampled_tree = random_spanning_tree(z_support, "weight", seed=seed)
        sampled_tree_weight = sampled_tree.size(weight)
        if sampled_tree_weight < minimum_sampled_tree_weight:
            minimum_sampled_tree = sampled_tree.copy()
            minimum_sampled_tree_weight = sampled_tree_weight

    # Orient the edges in that tree to keep the cost of the tree the same.
    t_star = nx.MultiDiGraph()
    for u, v, d in minimum_sampled_tree.edges(data=weight):
        if d == G[u][v][weight]:
            t_star.add_edge(u, v, **{weight: d})
        else:
            t_star.add_edge(v, u, **{weight: d})

    # Find the node demands needed to neutralize the flow of t_star in G
    node_demands = {n: t_star.out_degree(n) - t_star.in_degree(n) for n in t_star}
    nx.set_node_attributes(G, node_demands, "demand")

    # Find the min_cost_flow
    flow_dict = nx.min_cost_flow(G, "demand")

    # Build the flow into t_star.
    # BUGFIX: the loop variable previously reused the name `source`,
    # clobbering the caller's `source` argument before it reached
    # `eulerian_circuit` below, so the returned cycle could start at an
    # arbitrary node instead of the requested one.
    for flow_source, flow_targets in flow_dict.items():
        for flow_target, flow_amount in flow_targets.items():
            if (flow_source, flow_target) not in t_star.edges and flow_amount > 0:
                # Positive flow on a missing arc: add that many parallel edges
                for _ in range(flow_amount):
                    t_star.add_edge(flow_source, flow_target)

    # Return the shortcut eulerian circuit, honoring the caller's source node
    circuit = nx.eulerian_circuit(t_star, source=source)
    return _shortcutting(circuit)
+
+
@nx._dispatchable(edge_attrs="weight", mutates_input=True, returns_graph=True)
def held_karp_ascent(G, weight="weight"):
    """
    Minimizes the Held-Karp relaxation of the TSP for `G`

    Solves the Held-Karp relaxation of the input complete digraph and scales
    the output solution for use in the Asadpour [1]_ ASTP algorithm.

    The Held-Karp relaxation defines the lower bound for solutions to the
    ATSP, although it does return a fractional solution. This is used in the
    Asadpour algorithm as an initial solution which is later rounded to a
    integral tree within the spanning tree polytopes. This function solves
    the relaxation with the branch and bound method in [2]_.

    Parameters
    ----------
    G : nx.DiGraph
        The graph should be a complete weighted directed graph.
        The distance between all pairs of nodes should be included.

    weight : string, optional (default="weight")
        Edge data key corresponding to the edge weight.
        If any edge does not have this attribute the weight is set to 1.

    Returns
    -------
    OPT : float
        The cost for the optimal solution to the Held-Karp relaxation
    z : dict or nx.Graph
        A symmetrized and scaled version of the optimal solution to the
        Held-Karp relaxation for use in the Asadpour algorithm.

        If an integral solution is found, then that is an optimal solution for
        the ATSP problem and that is returned instead.

    References
    ----------
    .. [1] A. Asadpour, M. X. Goemans, A. Madry, S. O. Gharan, and A. Saberi,
       An o(log n/log log n)-approximation algorithm for the asymmetric
       traveling salesman problem, Operations research, 65 (2017),
       pp. 1043–1061

    .. [2] M. Held, R. M. Karp, The traveling-salesman problem and minimum
       spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
       pp.1138-1162
    """
    # numpy/scipy are only needed for the LP feasibility check inside
    # direction_of_ascent(), hence the function-local imports.
    import numpy as np
    from scipy import optimize

    def k_pi():
        """
        Find the set of minimum 1-Arborescences for G at point pi.

        Returns
        -------
        Set
            The set of minimum 1-Arborescences
        """
        # Create a copy of G without vertex 1.
        G_1 = G.copy()
        minimum_1_arborescences = set()
        minimum_1_arborescence_weight = math.inf

        # node is node '1' in the Held and Karp paper
        n = next(G.__iter__())
        G_1.remove_node(n)

        # Iterate over the spanning arborescences of the graph until we know
        # that we have found the minimum 1-arborescences. My proposed strategy
        # is to find the most extensive root to connect to from 'node 1' and
        # the least expensive one. We then iterate over arborescences until
        # the cost of the basic arborescence is the cost of the minimum one
        # plus the difference between the most and least expensive roots,
        # that way the cost of connecting 'node 1' will by definition not by
        # minimum
        min_root = {"node": None, weight: math.inf}
        max_root = {"node": None, weight: -math.inf}
        for u, v, d in G.edges(n, data=True):
            if d[weight] < min_root[weight]:
                min_root = {"node": v, weight: d[weight]}
            if d[weight] > max_root[weight]:
                max_root = {"node": v, weight: d[weight]}

        min_in_edge = min(G.in_edges(n, data=True), key=lambda x: x[2][weight])
        min_root[weight] = min_root[weight] + min_in_edge[2][weight]
        max_root[weight] = max_root[weight] + min_in_edge[2][weight]

        min_arb_weight = math.inf
        for arb in nx.ArborescenceIterator(G_1):
            arb_weight = arb.size(weight)
            if min_arb_weight == math.inf:
                min_arb_weight = arb_weight
            elif arb_weight > min_arb_weight + max_root[weight] - min_root[weight]:
                break
            # We have to pick the root node of the arborescence for the out
            # edge of the first vertex as that is the only node without an
            # edge directed into it.
            for N, deg in arb.in_degree:
                if deg == 0:
                    # root found
                    arb.add_edge(n, N, **{weight: G[n][N][weight]})
                    arb_weight += G[n][N][weight]
                    break

            # We can pick the minimum weight in-edge for the vertex with
            # a cycle. If there are multiple edges with the same, minimum
            # weight, We need to add all of them.
            #
            # Delete the edge (N, v) so that we cannot pick it.
            edge_data = G[N][n]
            G.remove_edge(N, n)
            min_weight = min(G.in_edges(n, data=weight), key=lambda x: x[2])[2]
            min_edges = [
                (u, v, d) for u, v, d in G.in_edges(n, data=weight) if d == min_weight
            ]
            for u, v, d in min_edges:
                new_arb = arb.copy()
                new_arb.add_edge(u, v, **{weight: d})
                new_arb_weight = arb_weight + d
                # Check to see the weight of the arborescence, if it is a
                # new minimum, clear all of the old potential minimum
                # 1-arborescences and add this is the only one. If its
                # weight is above the known minimum, do not add it.
                if new_arb_weight < minimum_1_arborescence_weight:
                    minimum_1_arborescences.clear()
                    minimum_1_arborescence_weight = new_arb_weight
                # We have a 1-arborescence, add it to the set
                if new_arb_weight == minimum_1_arborescence_weight:
                    minimum_1_arborescences.add(new_arb)
            # Restore the temporarily removed edge (N, n).
            G.add_edge(N, n, **edge_data)

        return minimum_1_arborescences

    def direction_of_ascent():
        """
        Find the direction of ascent at point pi.

        See [1]_ for more information.

        Returns
        -------
        dict
            A mapping from the nodes of the graph which represents the direction
            of ascent.

        References
        ----------
        .. [1] M. Held, R. M. Karp, The traveling-salesman problem and minimum
           spanning trees, Operations Research, 1970-11-01, Vol. 18 (6),
           pp.1138-1162
        """
        # 1. Set d equal to the zero n-vector.
        d = {}
        for n in G:
            d[n] = 0
        del n
        # 2. Find a 1-Arborescence T^k such that k is in K(pi, d).
        minimum_1_arborescences = k_pi()
        while True:
            # Reduce K(pi) to K(pi, d)
            # Find the arborescence in K(pi) which increases the least in
            # direction d
            min_k_d_weight = math.inf
            min_k_d = None
            for arborescence in minimum_1_arborescences:
                weighted_cost = 0
                for n, deg in arborescence.degree:
                    weighted_cost += d[n] * (deg - 2)
                if weighted_cost < min_k_d_weight:
                    min_k_d_weight = weighted_cost
                    min_k_d = arborescence

            # 3. If sum of d_i * v_{i, k} is greater than zero, terminate
            if min_k_d_weight > 0:
                return d, min_k_d
            # 4. d_i = d_i + v_{i, k}
            for n, deg in min_k_d.degree:
                d[n] += deg - 2
            # Check that we do not need to terminate because the direction
            # of ascent does not exist. This is done with linear
            # programming.
            c = np.full(len(minimum_1_arborescences), -1, dtype=int)
            a_eq = np.empty((len(G) + 1, len(minimum_1_arborescences)), dtype=int)
            b_eq = np.zeros(len(G) + 1, dtype=int)
            b_eq[len(G)] = 1
            for arb_count, arborescence in enumerate(minimum_1_arborescences):
                n_count = len(G) - 1
                for n, deg in arborescence.degree:
                    a_eq[n_count][arb_count] = deg - 2
                    n_count -= 1
                a_eq[len(G)][arb_count] = 1
            program_result = optimize.linprog(
                c, A_eq=a_eq, b_eq=b_eq, method="highs-ipm"
            )
            # If the constants exist, then the direction of ascent doesn't
            if program_result.success:
                # There is no direction of ascent
                return None, minimum_1_arborescences

        # 5. GO TO 2

    def find_epsilon(k, d):
        """
        Given the direction of ascent at pi, find the maximum distance we can go
        in that direction.

        Parameters
        ----------
        k_xy : set
            The set of 1-arborescences which have the minimum rate of increase
            in the direction of ascent

        d : dict
            The direction of ascent

        Returns
        -------
        float
            The distance we can travel in direction `d`
        """
        min_epsilon = math.inf
        for e_u, e_v, e_w in G.edges(data=weight):
            if (e_u, e_v) in k.edges:
                continue
            # Now, I have found a condition which MUST be true for the edges to
            # be a valid substitute. The edge in the graph which is the
            # substitute is the one with the same terminated end. This can be
            # checked rather simply.
            #
            # Find the edge within k which is the substitute. Because k is a
            # 1-arborescence, we know that there is only one such edge
            # leading into every vertex.
            if len(k.in_edges(e_v, data=weight)) > 1:
                raise Exception
            sub_u, sub_v, sub_w = next(k.in_edges(e_v, data=weight).__iter__())
            k.add_edge(e_u, e_v, **{weight: e_w})
            k.remove_edge(sub_u, sub_v)
            if (
                max(d for n, d in k.in_degree()) <= 1
                and len(G) == k.number_of_edges()
                and nx.is_weakly_connected(k)
            ):
                # Ascent method calculation
                if d[sub_u] == d[e_u] or sub_w == e_w:
                    # Revert to the original graph
                    k.remove_edge(e_u, e_v)
                    k.add_edge(sub_u, sub_v, **{weight: sub_w})
                    continue
                epsilon = (sub_w - e_w) / (d[e_u] - d[sub_u])
                if 0 < epsilon < min_epsilon:
                    min_epsilon = epsilon
            # Revert to the original graph
            k.remove_edge(e_u, e_v)
            k.add_edge(sub_u, sub_v, **{weight: sub_w})

        return min_epsilon

    # I have to know that the elements in pi correspond to the correct elements
    # in the direction of ascent, even if the node labels are not integers.
    # Thus, I will use dictionaries to make that mapping.
    pi_dict = {}
    for n in G:
        pi_dict[n] = 0
    del n
    original_edge_weights = {}
    for u, v, d in G.edges(data=True):
        original_edge_weights[(u, v)] = d[weight]
    dir_ascent, k_d = direction_of_ascent()
    while dir_ascent is not None:
        max_distance = find_epsilon(k_d, dir_ascent)
        for n, v in dir_ascent.items():
            pi_dict[n] += max_distance * v
        for u, v, d in G.edges(data=True):
            d[weight] = original_edge_weights[(u, v)] + pi_dict[u]
        dir_ascent, k_d = direction_of_ascent()
        nx._clear_cache(G)
    # k_d is no longer an individual 1-arborescence but rather a set of
    # minimal 1-arborescences at the maximum point of the polytope and should
    # be reflected as such
    k_max = k_d

    # Search for a cycle within k_max. If a cycle exists, return it as the
    # solution
    for k in k_max:
        if len([n for n in k if k.degree(n) == 2]) == G.order():
            # Tour found
            # TODO: this branch does not restore original_edge_weights of G!
            # NOTE(review): G's edge weights remain at their pi-shifted values
            # here even though mutates_input=True — callers relying on G's
            # weights afterwards should be aware; confirm intended upstream.
            return k.size(weight), k

    # Write the original edge weights back to G and every member of k_max at
    # the maximum point. Also average the number of times that edge appears in
    # the set of minimal 1-arborescences.
    x_star = {}
    size_k_max = len(k_max)
    for u, v, d in G.edges(data=True):
        edge_count = 0
        d[weight] = original_edge_weights[(u, v)]
        for k in k_max:
            if (u, v) in k.edges():
                edge_count += 1
                k[u][v][weight] = original_edge_weights[(u, v)]
        x_star[(u, v)] = edge_count / size_k_max
    # Now symmetrize the edges in x_star and scale them according to (5) in
    # reference [1]
    z_star = {}
    scale_factor = (G.order() - 1) / G.order()
    for u, v in x_star:
        frequency = x_star[(u, v)] + x_star[(v, u)]
        if frequency > 0:
            z_star[(u, v)] = scale_factor * frequency
    del x_star
    # Return the optimal weight and the z dict
    return next(k_max.__iter__()).size(weight), z_star
+
+
@nx._dispatchable
def spanning_tree_distribution(G, z):
    """
    Find the asadpour exponential distribution of spanning trees.

    Solves the Maximum Entropy Convex Program in the Asadpour algorithm [1]_
    using the approach in section 7 to build an exponential distribution of
    undirected spanning trees.

    This algorithm ensures that the probability of any edge in a spanning
    tree is proportional to the sum of the probabilities of the trees
    containing that edge over the sum of the probabilities of all spanning
    trees of the graph.

    Parameters
    ----------
    G : nx.MultiGraph
        The undirected support graph for the Held Karp relaxation

    z : dict
        The output of `held_karp_ascent()`, a scaled version of the Held-Karp
        solution.

    Returns
    -------
    gamma : dict
        The probability distribution which approximately preserves the marginal
        probabilities of `z`.
    """
    from math import exp
    from math import log as ln

    def q(e):
        """
        The value of q(e), as described in the Asadpour paper, is "the
        probability that edge e will be included in a spanning tree T that is
        chosen with probability proportional to exp(gamma(T))" which
        basically means that it is the total probability of the edge appearing
        across the whole distribution.

        Parameters
        ----------
        e : tuple
            The `(u, v)` tuple describing the edge we are interested in

        Returns
        -------
        float
            The probability that a spanning tree chosen according to the
            current values of gamma will include edge `e`.
        """
        # Create the laplacian matrices
        for u, v, d in G.edges(data=True):
            d[lambda_key] = exp(gamma[(u, v)])
        G_Kirchhoff = nx.total_spanning_tree_weight(G, lambda_key)
        # Contracting e counts exactly the spanning trees containing e.
        G_e = nx.contracted_edge(G, e, self_loops=False)
        G_e_Kirchhoff = nx.total_spanning_tree_weight(G_e, lambda_key)

        # Multiply by the weight of the contracted edge since it is not included
        # in the total weight of the contracted graph.
        return exp(gamma[(e[0], e[1])]) * G_e_Kirchhoff / G_Kirchhoff

    # initialize gamma to the zero dict
    gamma = {}
    for u, v, _ in G.edges:
        gamma[(u, v)] = 0

    # set epsilon
    EPSILON = 0.2

    # pick an edge attribute name that is unlikely to be in the graph
    lambda_key = "spanning_tree_distribution's secret attribute name for lambda"

    while True:
        # We need to know that no values of q_e are greater than
        # (1 + epsilon) * z_e, however changing one gamma value can increase the
        # value of a different q_e, so we have to complete the for loop without
        # changing anything for the condition to be met
        in_range_count = 0
        # Search for an edge with q_e > (1 + epsilon) * z_e
        for u, v in gamma:
            e = (u, v)
            q_e = q(e)
            z_e = z[e]
            if q_e > (1 + EPSILON) * z_e:
                # Shrink gamma[e] so that q_e drops to (1 + eps/2) * z_e.
                delta = ln(
                    (q_e * (1 - (1 + EPSILON / 2) * z_e))
                    / ((1 - q_e) * (1 + EPSILON / 2) * z_e)
                )
                gamma[e] -= delta
                # Check that delta had the desired effect
                new_q_e = q(e)
                desired_q_e = (1 + EPSILON / 2) * z_e
                if round(new_q_e, 8) != round(desired_q_e, 8):
                    raise nx.NetworkXError(
                        f"Unable to modify probability for edge ({u}, {v})"
                    )
            else:
                in_range_count += 1
        # Check if the for loop terminated without changing any gamma
        if in_range_count == len(gamma):
            break

    # Remove the new edge attributes
    for _, _, d in G.edges(data=True):
        if lambda_key in d:
            del d[lambda_key]

    return gamma
+
+
@nx._dispatchable(edge_attrs="weight")
def greedy_tsp(G, weight="weight", source=None):
    """Return a low-cost cycle over all nodes of `G` starting at `source`.

    Approximates a solution to the traveling salesman problem with a
    simple greedy strategy: repeatedly travel from the current node to
    the cheapest not-yet-visited node, then close the cycle back at the
    starting node.

    Parameters
    ----------
    G : Graph
        A complete weighted graph: the distance between every pair of
        distinct nodes must be present as an edge.

    weight : string, optional (default="weight")
        Edge data key holding the edge weight.
        Edges missing this attribute are treated as weight 1.

    source : node, optional (default: first node in list(G))
        Node at which the returned cycle starts and ends.
        If None, defaults to ``next(iter(G))``.

    Returns
    -------
    cycle : list of nodes
        The visiting order of the salesman, with
        ``cycle[0] == cycle[-1] == source``.

    Raises
    ------
    NetworkXError
        If `G` is not a complete graph.

    Notes
    -----
    The greedy construction adds, at each step, the unvisited node whose
    connection to the previous node is cheapest.  It does not guarantee
    an optimal tour, but its result is a useful first feasible solution
    for iterative improvement methods such as simulated annealing or
    threshold accepting.

    Time complexity: $O(|V|^2)$.
    """
    # Completeness check: every node must neighbor all other nodes.
    # The `(n in nbrdict)` term deliberately discounts self-loops.
    expected_degree = len(G) - 1
    if any(
        len(nbrdict) - (n in nbrdict) != expected_degree
        for n, nbrdict in G.adj.items()
    ):
        raise nx.NetworkXError("G must be a complete graph.")

    if source is None:
        source = nx.utils.arbitrary_element(G)

    # A two-node graph has exactly one possible tour.
    if G.number_of_nodes() == 2:
        return [source, next(G.neighbors(source)), source]

    remaining = set(G) - {source}
    cycle = [source]
    current = source
    while remaining:
        nbrs = G[current]
        # Cheapest hop from the current node to any unvisited node.
        current = min(remaining, key=lambda n: nbrs[n].get(weight, 1))
        cycle.append(current)
        remaining.discard(current)
    cycle.append(source)
    return cycle
+
+
@py_random_state(9)
@nx._dispatchable(edge_attrs="weight")
def simulated_annealing_tsp(
    G,
    init_cycle,
    weight="weight",
    source=None,
    temp=100,
    move="1-1",
    max_iterations=10,
    N_inner=100,
    alpha=0.01,
    seed=None,
):
    """Returns an approximate solution to the traveling salesman problem.

    This function uses simulated annealing to approximate the minimal cost
    cycle through the nodes. Starting from a suboptimal solution, simulated
    annealing perturbs that solution, occasionally accepting changes that make
    the solution worse to escape from a locally optimal solution. The chance
    of accepting such changes decreases over the iterations to encourage
    an optimal result. In summary, the function returns a cycle starting
    at `source` for which the total cost is minimized.

    The chance of accepting a proposed change is related to a parameter called
    the temperature (annealing has a physical analogue of steel hardening
    as it cools). As the temperature is reduced, the chance of moves that
    increase cost goes down.

    Parameters
    ----------
    G : Graph
        `G` should be a complete weighted graph.
        The distance between all pairs of nodes should be included.

    init_cycle : list of all nodes or "greedy"
        The initial solution (a cycle through all nodes returning to the start).
        This argument has no default to make you think about it.
        If "greedy", use `greedy_tsp(G, weight)`.
        Other common starting cycles are `list(G) + [next(iter(G))]` or the final
        result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`.

    weight : string, optional (default="weight")
        Edge data key corresponding to the edge weight.
        If any edge does not have this attribute the weight is set to 1.

    source : node, optional (default: first node in list(G))
        Starting node.  If None, defaults to ``next(iter(G))``

    temp : int, optional (default=100)
        The algorithm's temperature parameter. It represents the initial
        value of temperature

    move : "1-1" or "1-0" or function, optional (default="1-1")
        Indicator of what move to use when finding new trial solutions.
        Strings indicate two special built-in moves:

        - "1-1": 1-1 exchange which transposes the position
          of two elements of the current solution.
          The function called is :func:`swap_two_nodes`.
          For example if we apply 1-1 exchange in the solution
          ``A = [3, 2, 1, 4, 3]``
          we can get the following by the transposition of 1 and 4 elements:
          ``A' = [3, 2, 4, 1, 3]``
        - "1-0": 1-0 exchange which moves a node in the solution
          to a new position.
          The function called is :func:`move_one_node`.
          For example if we apply 1-0 exchange in the solution
          ``A = [3, 2, 1, 4, 3]``
          we can transfer the fourth element to the second position:
          ``A' = [3, 4, 2, 1, 3]``

        You may provide your own functions to enact a move from
        one solution to a neighbor solution. The function must take
        the solution as input along with a `seed` input to control
        random number generation (see the `seed` input here).
        Your function should maintain the solution as a cycle with
        equal first and last node and all others appearing once.
        Your function should return the new solution.

    max_iterations : int, optional (default=10)
        Declared done when this number of consecutive iterations of
        the outer loop occurs without any change in the best cost solution.

    N_inner : int, optional (default=100)
        The number of iterations of the inner loop.

    alpha : float between (0, 1), optional (default=0.01)
        Percentage of temperature decrease in each iteration
        of outer loop

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    cycle : list of nodes
        Returns the cycle (list of nodes) that a salesman
        can follow to minimize total weight of the trip.

    Raises
    ------
    NetworkXError
        If `G` is not complete the algorithm raises an exception.

    Examples
    --------
    >>> from networkx.algorithms import approximation as approx
    >>> G = nx.DiGraph()
    >>> G.add_weighted_edges_from(
    ...     {
    ...         ("A", "B", 3),
    ...         ("A", "C", 17),
    ...         ("A", "D", 14),
    ...         ("B", "A", 3),
    ...         ("B", "C", 12),
    ...         ("B", "D", 16),
    ...         ("C", "A", 13),
    ...         ("C", "B", 12),
    ...         ("C", "D", 4),
    ...         ("D", "A", 14),
    ...         ("D", "B", 15),
    ...         ("D", "C", 2),
    ...     }
    ... )
    >>> cycle = approx.simulated_annealing_tsp(G, "greedy", source="D")
    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
    >>> cycle
    ['D', 'C', 'B', 'A', 'D']
    >>> cost
    31
    >>> incycle = ["D", "B", "A", "C", "D"]
    >>> cycle = approx.simulated_annealing_tsp(G, incycle, source="D")
    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
    >>> cycle
    ['D', 'C', 'B', 'A', 'D']
    >>> cost
    31

    Notes
    -----
    Simulated Annealing is a metaheuristic local search algorithm.
    The main characteristic of this algorithm is that it accepts
    even solutions which lead to the increase of the cost in order
    to escape from low quality local optimal solutions.

    This algorithm needs an initial solution. If not provided, it is
    constructed by a simple greedy algorithm. At every iteration, the
    algorithm selects thoughtfully a neighbor solution.
    Consider $c(x)$ cost of current solution and $c(x')$ cost of a
    neighbor solution.
    If $c(x') - c(x) <= 0$ then the neighbor solution becomes the current
    solution for the next iteration. Otherwise, the algorithm accepts
    the neighbor solution with probability $p = exp(-[c(x') - c(x)] / temp)$;
    if the move is not accepted, the current solution is retained.

    `temp` is a parameter of the algorithm and represents temperature.

    Time complexity:
    For $N_i$ iterations of the inner loop and $N_o$ iterations of the
    outer loop, this algorithm has running time $O(N_i * N_o * |V|)$.

    For more information and how the algorithm is inspired see:
    http://en.wikipedia.org/wiki/Simulated_annealing
    """
    # Resolve the built-in move names; any other value is assumed to
    # already be a callable with the (solution, seed) signature.
    if move == "1-1":
        move = swap_two_nodes
    elif move == "1-0":
        move = move_one_node
    if init_cycle == "greedy":
        # Construct an initial solution using a greedy algorithm.
        cycle = greedy_tsp(G, weight=weight, source=source)
        if G.number_of_nodes() == 2:
            return cycle

    else:
        # Validate the user-supplied starting cycle: it must begin at
        # `source`, return to its start, and visit every node of G once.
        cycle = list(init_cycle)
        if source is None:
            source = cycle[0]
        elif source != cycle[0]:
            raise nx.NetworkXError("source must be first node in init_cycle")
        if cycle[0] != cycle[-1]:
            raise nx.NetworkXError("init_cycle must be a cycle. (return to start)")

        if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G):
            raise nx.NetworkXError("init_cycle should be a cycle over all nodes in G.")

        # Check that G is a complete graph
        N = len(G) - 1
        # This check ignores selfloops which is what we want here.
        if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
            raise nx.NetworkXError("G must be a complete graph.")

        if G.number_of_nodes() == 2:
            neighbor = next(G.neighbors(source))
            return [source, neighbor, source]

    # Find the cost of initial solution
    cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle))

    # Outer loop: `count` tracks consecutive outer iterations without a
    # new best solution; it is reset to 0 whenever the best improves.
    count = 0
    best_cycle = cycle.copy()
    best_cost = cost
    while count <= max_iterations and temp > 0:
        count += 1
        for i in range(N_inner):
            adj_sol = move(cycle, seed)
            adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol))
            delta = adj_cost - cost
            if delta <= 0:
                # Set current solution the adjacent solution.
                cycle = adj_sol
                cost = adj_cost

                if cost < best_cost:
                    count = 0
                    best_cycle = cycle.copy()
                    best_cost = cost
            else:
                # Accept even a worse solution with probability p.
                p = math.exp(-delta / temp)
                if p >= seed.random():
                    cycle = adj_sol
                    cost = adj_cost
        # Geometric cooling schedule: temp stays positive for alpha < 1.
        temp -= temp * alpha

    return best_cycle
+
+
@py_random_state(9)
@nx._dispatchable(edge_attrs="weight")
def threshold_accepting_tsp(
    G,
    init_cycle,
    weight="weight",
    source=None,
    threshold=1,
    move="1-1",
    max_iterations=10,
    N_inner=100,
    alpha=0.1,
    seed=None,
):
    """Returns an approximate solution to the traveling salesman problem.

    This function uses threshold accepting methods to approximate the minimal cost
    cycle through the nodes. Starting from a suboptimal solution, threshold
    accepting methods perturb that solution, accepting any changes that make
    the solution no worse than increasing by a threshold amount. Improvements
    in cost are accepted, but so are changes leading to small increases in cost.
    This allows the solution to leave suboptimal local minima in solution space.
    The threshold is decreased slowly as iterations proceed helping to ensure
    an optimum. In summary, the function returns a cycle starting at `source`
    for which the total cost is minimized.

    Parameters
    ----------
    G : Graph
        `G` should be a complete weighted graph.
        The distance between all pairs of nodes should be included.

    init_cycle : list or "greedy"
        The initial solution (a cycle through all nodes returning to the start).
        This argument has no default to make you think about it.
        If "greedy", use `greedy_tsp(G, weight)`.
        Other common starting cycles are `list(G) + [next(iter(G))]` or the final
        result of `simulated_annealing_tsp` when doing `threshold_accepting_tsp`.

    weight : string, optional (default="weight")
        Edge data key corresponding to the edge weight.
        If any edge does not have this attribute the weight is set to 1.

    source : node, optional (default: first node in list(G))
        Starting node.  If None, defaults to ``next(iter(G))``

    threshold : int, optional (default=1)
        The algorithm's threshold parameter. It represents the initial
        threshold's value

    move : "1-1" or "1-0" or function, optional (default="1-1")
        Indicator of what move to use when finding new trial solutions.
        Strings indicate two special built-in moves:

        - "1-1": 1-1 exchange which transposes the position
          of two elements of the current solution.
          The function called is :func:`swap_two_nodes`.
          For example if we apply 1-1 exchange in the solution
          ``A = [3, 2, 1, 4, 3]``
          we can get the following by the transposition of 1 and 4 elements:
          ``A' = [3, 2, 4, 1, 3]``
        - "1-0": 1-0 exchange which moves a node in the solution
          to a new position.
          The function called is :func:`move_one_node`.
          For example if we apply 1-0 exchange in the solution
          ``A = [3, 2, 1, 4, 3]``
          we can transfer the fourth element to the second position:
          ``A' = [3, 4, 2, 1, 3]``

        You may provide your own functions to enact a move from
        one solution to a neighbor solution. The function must take
        the solution as input along with a `seed` input to control
        random number generation (see the `seed` input here).
        Your function should maintain the solution as a cycle with
        equal first and last node and all others appearing once.
        Your function should return the new solution.

    max_iterations : int, optional (default=10)
        Declared done when this number of consecutive iterations of
        the outer loop occurs without any change in the best cost solution.

    N_inner : int, optional (default=100)
        The number of iterations of the inner loop.

    alpha : float between (0, 1), optional (default=0.1)
        Percentage of threshold decrease when there is at
        least one acceptance of a neighbor solution.
        If no inner loop moves are accepted the threshold remains unchanged.

    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    cycle : list of nodes
        Returns the cycle (list of nodes) that a salesman
        can follow to minimize total weight of the trip.

    Raises
    ------
    NetworkXError
        If `G` is not complete the algorithm raises an exception.

    Examples
    --------
    >>> from networkx.algorithms import approximation as approx
    >>> G = nx.DiGraph()
    >>> G.add_weighted_edges_from(
    ...     {
    ...         ("A", "B", 3),
    ...         ("A", "C", 17),
    ...         ("A", "D", 14),
    ...         ("B", "A", 3),
    ...         ("B", "C", 12),
    ...         ("B", "D", 16),
    ...         ("C", "A", 13),
    ...         ("C", "B", 12),
    ...         ("C", "D", 4),
    ...         ("D", "A", 14),
    ...         ("D", "B", 15),
    ...         ("D", "C", 2),
    ...     }
    ... )
    >>> cycle = approx.threshold_accepting_tsp(G, "greedy", source="D")
    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
    >>> cycle
    ['D', 'C', 'B', 'A', 'D']
    >>> cost
    31
    >>> incycle = ["D", "B", "A", "C", "D"]
    >>> cycle = approx.threshold_accepting_tsp(G, incycle, source="D")
    >>> cost = sum(G[n][nbr]["weight"] for n, nbr in nx.utils.pairwise(cycle))
    >>> cycle
    ['D', 'C', 'B', 'A', 'D']
    >>> cost
    31

    Notes
    -----
    Threshold Accepting is a metaheuristic local search algorithm.
    The main characteristic of this algorithm is that it accepts
    even solutions which lead to the increase of the cost in order
    to escape from low quality local optimal solutions.

    This algorithm needs an initial solution. This solution can be
    constructed by a simple greedy algorithm. At every iteration, it
    selects thoughtfully a neighbor solution.
    Consider $c(x)$ cost of current solution and $c(x')$ cost of
    neighbor solution.
    If $c(x') - c(x) <= threshold$ then the neighbor solution becomes
    the current solution for the next iteration.

    In comparison to the Simulated Annealing algorithm, the Threshold
    Accepting algorithm does not accept very low quality solutions
    (due to the presence of the threshold value). In the case of
    Simulated Annealing, even a very low quality solution can
    be accepted with probability $p$.

    Time complexity:
    It has a running time $O(m * n * |V|)$ where $m$ and $n$ are the number
    of times the outer and inner loop run respectively.

    For more information and how algorithm is inspired see:
    https://doi.org/10.1016/0021-9991(90)90201-B

    See Also
    --------
    simulated_annealing_tsp

    """
    # Resolve the built-in move names; any other value is assumed to
    # already be a callable with the (solution, seed) signature.
    if move == "1-1":
        move = swap_two_nodes
    elif move == "1-0":
        move = move_one_node
    if init_cycle == "greedy":
        # Construct an initial solution using a greedy algorithm.
        cycle = greedy_tsp(G, weight=weight, source=source)
        if G.number_of_nodes() == 2:
            return cycle

    else:
        # Validate the user-supplied starting cycle: it must begin at
        # `source`, return to its start, and visit every node of G once.
        cycle = list(init_cycle)
        if source is None:
            source = cycle[0]
        elif source != cycle[0]:
            raise nx.NetworkXError("source must be first node in init_cycle")
        if cycle[0] != cycle[-1]:
            raise nx.NetworkXError("init_cycle must be a cycle. (return to start)")

        if len(cycle) - 1 != len(G) or len(set(G.nbunch_iter(cycle))) != len(G):
            raise nx.NetworkXError("init_cycle is not all and only nodes.")

        # Check that G is a complete graph
        N = len(G) - 1
        # This check ignores selfloops which is what we want here.
        if any(len(nbrdict) - (n in nbrdict) != N for n, nbrdict in G.adj.items()):
            raise nx.NetworkXError("G must be a complete graph.")

        if G.number_of_nodes() == 2:
            # next() avoids materializing the neighbor list just to take
            # its head; consistent with greedy_tsp/simulated_annealing_tsp.
            neighbor = next(G.neighbors(source))
            return [source, neighbor, source]

    # Find the cost of initial solution
    cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(cycle))

    # Outer loop: `count` tracks consecutive outer iterations without a
    # new best solution; it is reset to 0 whenever the best improves.
    count = 0
    best_cycle = cycle.copy()
    best_cost = cost
    while count <= max_iterations:
        count += 1
        accepted = False
        for i in range(N_inner):
            adj_sol = move(cycle, seed)
            adj_cost = sum(G[u][v].get(weight, 1) for u, v in pairwise(adj_sol))
            delta = adj_cost - cost
            if delta <= threshold:
                accepted = True

                # Set current solution the adjacent solution.
                cycle = adj_sol
                cost = adj_cost

                if cost < best_cost:
                    count = 0
                    best_cycle = cycle.copy()
                    best_cost = cost
        if accepted:
            # Lower the threshold only when at least one move was accepted,
            # so a completely stalled search keeps its current slack.
            threshold -= threshold * alpha

    return best_cycle
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/treewidth.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/treewidth.py
new file mode 100644
index 0000000000000000000000000000000000000000..31d73f6368237c16ab6a66efee45e768ddfaef52
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/treewidth.py
@@ -0,0 +1,252 @@
+"""Functions for computing treewidth decomposition.
+
+Treewidth of an undirected graph is a number associated with the graph.
+It can be defined as the size of the largest vertex set (bag) in a tree
+decomposition of the graph minus one.
+
`Wikipedia: Treewidth <https://en.wikipedia.org/wiki/Treewidth>`_
+
+The notions of treewidth and tree decomposition have gained their
+attractiveness partly because many graph and network problems that are
+intractable (e.g., NP-hard) on arbitrary graphs become efficiently
+solvable (e.g., with a linear time algorithm) when the treewidth of the
+input graphs is bounded by a constant [1]_ [2]_.
+
+There are two different functions for computing a tree decomposition:
+:func:`treewidth_min_degree` and :func:`treewidth_min_fill_in`.
+
+.. [1] Hans L. Bodlaender and Arie M. C. A. Koster. 2010. "Treewidth
+ computations I.Upper bounds". Inf. Comput. 208, 3 (March 2010),259-275.
+ http://dx.doi.org/10.1016/j.ic.2009.03.008
+
+.. [2] Hans L. Bodlaender. "Discovering Treewidth". Institute of Information
+ and Computing Sciences, Utrecht University.
+ Technical Report UU-CS-2005-018.
+ http://www.cs.uu.nl
+
+.. [3] K. Wang, Z. Lu, and J. Hicks *Treewidth*.
+ https://web.archive.org/web/20210507025929/http://web.eecs.utk.edu/~cphill25/cs594_spring2015_projects/treewidth.pdf
+
+"""
+
+import itertools
+import sys
+from heapq import heapify, heappop, heappush
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["treewidth_min_degree", "treewidth_min_fill_in"]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(returns_graph=True)
def treewidth_min_degree(G):
    """Returns a treewidth decomposition using the Minimum Degree heuristic.

    At each step the node of smallest current degree is chosen and
    eliminated, the working graph is updated, and the process repeats
    on the reduced graph.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    Treewidth decomposition : (int, Graph) tuple
        2-tuple with treewidth and the corresponding decomposed tree.
    """
    # The heuristic object keeps a degree heap between calls, so the
    # bound method itself serves as the selection callback.
    selector = MinDegreeHeuristic(G)
    return treewidth_decomp(G, selector.best_node)
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(returns_graph=True)
def treewidth_min_fill_in(G):
    """Returns a treewidth decomposition using the Minimum Fill-in heuristic.

    At each step the heuristic picks the node requiring the fewest added
    edges to turn its neighborhood into a clique.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    Treewidth decomposition : (int, Graph) tuple
        2-tuple with treewidth and the corresponding decomposed tree.
    """
    return treewidth_decomp(G, heuristic=min_fill_in_heuristic)
+
+
class MinDegreeHeuristic:
    """Stateful selector implementing the Minimum Degree heuristic.

    Nodes are eliminated in order of (current) smallest degree.  A heap
    of ``(degree, tiebreak, node)`` entries is maintained lazily: entries
    are never removed when degrees change; instead, stale entries are
    detected and skipped when popped.
    """

    def __init__(self, graph):
        # `graph` is a dict-of-sets adjacency structure.
        self._graph = graph

        # Nodes whose degree changed since the last call; their heap
        # entries must be refreshed before the next selection.
        self._update_nodes = []

        # Monotonic counter used as a tie-breaker so heap comparisons
        # never fall through to comparing (possibly unorderable) nodes.
        self.count = itertools.count()

        # Heap of (degree, unique id, node) built from initial degrees.
        self._degreeq = [
            (len(graph[node]), next(self.count), node) for node in graph
        ]
        heapify(self._degreeq)

    def best_node(self, graph):
        # Refresh entries for nodes touched by the previous elimination.
        for node in self._update_nodes:
            heappush(self._degreeq, (len(graph[node]), next(self.count), node))

        # Pop until a live, up-to-date entry surfaces.
        while self._degreeq:
            degree, _, node = heappop(self._degreeq)
            if node not in graph or len(graph[node]) != degree:
                # Stale entry: node already eliminated or degree changed.
                continue
            if degree == len(graph) - 1:
                # Node is adjacent to everything else, i.e. the remaining
                # graph is complete: abort condition.
                return None

            # The neighbors of the eliminated node will change degree.
            self._update_nodes = graph[node]
            return node

        # The heap is empty: abort.
        return None
+
+
def min_fill_in_heuristic(graph):
    """Return the next node to eliminate under the Minimum Fill-in heuristic.

    Picks the node whose elimination requires the fewest fill-in edges,
    i.e. the fewest edges that must be added to turn its neighborhood
    into a clique.  Returns None when the graph is empty or already
    complete.  Runs in :math:`O(V^3)` time with constant extra memory."""

    if not graph:
        return None

    # Scanning candidates in ascending degree order lets the
    # double-count cutoff below discard hopeless nodes early.
    by_degree = sorted(graph, key=lambda node: len(graph[node]))

    # A minimum-degree node adjacent to every other node means the graph
    # is complete: abort condition.
    if len(graph[by_degree[0]]) == len(graph) - 1:
        return None

    best_node = None
    best_fill_in = sys.maxsize

    for node in by_degree:
        nbrs = graph[node]
        fill_in_twice = 0
        for nbr in nbrs:
            # Neighbors of `node` that `nbr` is not connected to;
            # subtract 1 because `node` itself is never a fill-in target.
            fill_in_twice += len(nbrs - graph[nbr]) - 1
            if fill_in_twice >= 2 * best_fill_in:
                break  # cannot beat the current best

        fill_in = fill_in_twice / 2  # each missing edge was counted twice

        if fill_in < best_fill_in:
            if fill_in == 0:
                return node  # nothing can beat zero fill-in
            best_fill_in = fill_in
            best_node = node

    return best_node
+
+
@nx._dispatchable(returns_graph=True)
def treewidth_decomp(G, heuristic=min_fill_in_heuristic):
    """Returns a treewidth decomposition using the passed heuristic.

    Parameters
    ----------
    G : NetworkX graph
    heuristic : heuristic function
        Maps the current dict-of-sets working graph to the next node to
        eliminate, or None when elimination should stop.

    Returns
    -------
    Treewidth decomposition : (int, Graph) tuple
        2-tuple with treewidth and the corresponding decomposed tree.
    """

    # Work on a dict-of-sets copy of G; self-loops are dropped.
    graph = {node: set(G[node]) - {node} for node in G}

    # Elimination order, each node paired with its neighbor set at the
    # moment of elimination.
    node_stack = []

    elim_node = heuristic(graph)
    while elim_node is not None:
        nbrs = graph[elim_node]

        # Turn the neighborhood of the eliminated node into a clique.
        for u, v in itertools.combinations(nbrs, 2):
            graph[u].add(v)
            graph[v].add(u)

        node_stack.append((elim_node, nbrs))

        # Detach the eliminated node from the working graph.
        for nbr in nbrs:
            graph[nbr].discard(elim_node)
        del graph[elim_node]

        elim_node = heuristic(graph)

    # Abort condition reached: the remaining (mutually connected) nodes
    # form the root bag of the decomposition.
    decomp = nx.Graph()
    first_bag = frozenset(graph)
    decomp.add_node(first_bag)

    treewidth = len(first_bag) - 1

    # Rebuild bags in reverse elimination order.
    while node_stack:
        curr_node, nbrs = node_stack.pop()

        # Attach to the first existing bag containing all neighbors,
        # falling back to the root bag when none exists.
        old_bag = next((bag for bag in decomp.nodes if nbrs <= bag), first_bag)

        # New bag: the eliminated node together with its neighbors.
        nbrs.add(curr_node)
        new_bag = frozenset(nbrs)

        treewidth = max(treewidth, len(new_bag) - 1)

        # Adding the edge implicitly also adds the new bag as a node.
        decomp.add_edge(old_bag, new_bag)

    return treewidth, decomp
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py
new file mode 100644
index 0000000000000000000000000000000000000000..c71399ebcc9a91a9aa5eead63a7c7307746ae06b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/approximation/vertex_cover.py
@@ -0,0 +1,82 @@
+"""Functions for computing an approximate minimum weight vertex cover.
+
+A |vertex cover|_ is a subset of nodes such that each edge in the graph
+is incident to at least one node in the subset.
+
+.. _vertex cover: https://en.wikipedia.org/wiki/Vertex_cover
+.. |vertex cover| replace:: *vertex cover*
+
+"""
+import networkx as nx
+
+__all__ = ["min_weighted_vertex_cover"]
+
+
@nx._dispatchable(node_attrs="weight")
def min_weighted_vertex_cover(G, weight=None):
    r"""Returns an approximate minimum weighted vertex cover.

    The returned node set is guaranteed to be a vertex cover whose total
    weight is at most twice that of a minimum weight vertex cover:

    .. math::

       w(S) \leq 2 * w(S^*),

    where $S$ is the cover returned by this function, $S^*$ is a
    minimum-weight vertex cover of the graph, and $w$ sums the node
    weights over a given set.

    Parameters
    ----------
    G : NetworkX graph

    weight : string, optional (default = None)
        If None, every node has weight 1. If a string, use this node
        attribute as the node weight. A node without this attribute is
        assumed to have weight 1.

    Returns
    -------
    min_weighted_cover : set
        Set of nodes touching every edge, with weight sum no more than
        twice the weight sum of the minimum weight vertex cover.

    Notes
    -----
    The same definition applies to directed graphs: each edge must be
    incident to at least one node of the set, and whether that node is
    the head or the tail of the edge is ignored.

    This is the local-ratio algorithm for approximate vertex cover. It
    greedily reduces residual costs along the edges while building the
    cover, with worst-case runtime $O(m \log n)$ for $n$ nodes and $m$
    edges.

    References
    ----------
    .. [1] Bar-Yehuda, R., and Even, S. (1985). "A local-ratio theorem for
       approximating the weighted vertex cover problem."
       *Annals of Discrete Mathematics*, 25, 27–46


    """
    # Residual (not yet charged) cost of each node.
    remaining_cost = dict(G.nodes(data=weight, default=1))
    cover = set()
    # Local-ratio step: for every still-uncovered edge, the endpoint with
    # the smaller residual cost joins the cover and that cost is
    # subtracted from the other endpoint.
    for u, v in G.edges():
        if u in cover or v in cover:
            continue  # edge already covered
        cost_u, cost_v = remaining_cost[u], remaining_cost[v]
        if cost_u <= cost_v:
            cover.add(u)
            remaining_cost[v] = cost_v - cost_u
        else:
            cover.add(v)
            remaining_cost[u] = cost_u - cost_v
    return cover
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d9888609cbc43d4ba2121fcd0feda0985d1aebd
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__init__.py
@@ -0,0 +1,5 @@
+from networkx.algorithms.assortativity.connectivity import *
+from networkx.algorithms.assortativity.correlation import *
+from networkx.algorithms.assortativity.mixing import *
+from networkx.algorithms.assortativity.neighbor_degree import *
+from networkx.algorithms.assortativity.pairs import *
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7269185970e050b0c778c47020f2536e2986b99d
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd5db8c6d4742a26161c70a133b3741627b82d06
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3fde0da68a1990da29ced6996620d709c52c13d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/connectivity.py
@@ -0,0 +1,122 @@
+from collections import defaultdict
+
+import networkx as nx
+
+__all__ = ["average_degree_connectivity"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def average_degree_connectivity(
+ G, source="in+out", target="in+out", nodes=None, weight=None
+):
+ r"""Compute the average degree connectivity of graph.
+
+ The average degree connectivity is the average nearest neighbor degree of
+ nodes with degree k. For weighted graphs, an analogous measure can
+ be computed using the weighted average neighbors degree defined in
+ [1]_, for a node `i`, as
+
+ .. math::
+
+ k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
+
+ where `s_i` is the weighted degree of node `i`,
+ `w_{ij}` is the weight of the edge that links `i` and `j`,
+ and `N(i)` are the neighbors of node `i`.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : "in"|"out"|"in+out" (default:"in+out")
+ Directed graphs only. Use "in"- or "out"-degree for source node.
+
+ target : "in"|"out"|"in+out" (default:"in+out"
+ Directed graphs only. Use "in"- or "out"-degree for target node.
+
+ nodes : list or iterable (optional)
+ Compute neighbor connectivity for these nodes. The default is all
+ nodes.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ d : dict
+ A dictionary keyed by degree k with the value of average connectivity.
+
+ Raises
+ ------
+ NetworkXError
+ If either `source` or `target` are not one of 'in',
+ 'out', or 'in+out'.
+ If either `source` or `target` is passed for an undirected graph.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(4)
+ >>> G.edges[1, 2]["weight"] = 3
+ >>> nx.average_degree_connectivity(G)
+ {1: 2.0, 2: 1.5}
+ >>> nx.average_degree_connectivity(G, weight="weight")
+ {1: 2.0, 2: 1.75}
+
+ See Also
+ --------
+ average_neighbor_degree
+
+ References
+ ----------
+ .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
+ "The architecture of complex weighted networks".
+ PNAS 101 (11): 3747–3752 (2004).
+ """
+ # First, determine the type of neighbors and the type of degree to use.
+ if G.is_directed():
+ if source not in ("in", "out", "in+out"):
+ raise nx.NetworkXError('source must be one of "in", "out", or "in+out"')
+ if target not in ("in", "out", "in+out"):
+ raise nx.NetworkXError('target must be one of "in", "out", or "in+out"')
+ direction = {"out": G.out_degree, "in": G.in_degree, "in+out": G.degree}
+ neighbor_funcs = {
+ "out": G.successors,
+ "in": G.predecessors,
+ "in+out": G.neighbors,
+ }
+ source_degree = direction[source]
+ target_degree = direction[target]
+ neighbors = neighbor_funcs[source]
+ # `reverse` indicates whether to look at the in-edge when
+ # computing the weight of an edge.
+ reverse = source == "in"
+ else:
+ if source != "in+out" or target != "in+out":
+ raise nx.NetworkXError(
+ f"source and target arguments are only supported for directed graphs"
+ )
+ source_degree = G.degree
+ target_degree = G.degree
+ neighbors = G.neighbors
+ reverse = False
+ dsum = defaultdict(int)
+ dnorm = defaultdict(int)
+ # Check if `source_nodes` is actually a single node in the graph.
+ source_nodes = source_degree(nodes)
+ if nodes in G:
+ source_nodes = [(nodes, source_degree(nodes))]
+ for n, k in source_nodes:
+ nbrdeg = target_degree(neighbors(n))
+ if weight is None:
+ s = sum(d for n, d in nbrdeg)
+ else: # weight nbr degree by weight of (n,nbr) edge
+ if reverse:
+ s = sum(G[nbr][n].get(weight, 1) * d for nbr, d in nbrdeg)
+ else:
+ s = sum(G[n][nbr].get(weight, 1) * d for nbr, d in nbrdeg)
+ dnorm[k] += source_degree(n, weight=weight)
+ dsum[k] += s
+
+ # normalize
+ return {k: avg if dnorm[k] == 0 else avg / dnorm[k] for k, avg in dsum.items()}
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/correlation.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/correlation.py
new file mode 100644
index 0000000000000000000000000000000000000000..170d219a5d4ba92d2c1d9933768f547e4750e4ba
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/correlation.py
@@ -0,0 +1,302 @@
+"""Node assortativity coefficients and correlation measures.
+"""
+import networkx as nx
+from networkx.algorithms.assortativity.mixing import (
+ attribute_mixing_matrix,
+ degree_mixing_matrix,
+)
+from networkx.algorithms.assortativity.pairs import node_degree_xy
+
+__all__ = [
+ "degree_pearson_correlation_coefficient",
+ "degree_assortativity_coefficient",
+ "attribute_assortativity_coefficient",
+ "numeric_assortativity_coefficient",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_assortativity_coefficient(G, x="out", y="in", weight=None, nodes=None):
+ """Compute degree assortativity of graph.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the node degree.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ nodes: list or iterable (optional)
+ Compute degree assortativity only for nodes in container.
+ The default is all nodes.
+
+ Returns
+ -------
+ r : float
+ Assortativity of graph by degree.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(4)
+ >>> r = nx.degree_assortativity_coefficient(G)
+ >>> print(f"{r:3.1f}")
+ -0.5
+
+ See Also
+ --------
+ attribute_assortativity_coefficient
+ numeric_assortativity_coefficient
+ degree_mixing_dict
+ degree_mixing_matrix
+
+ Notes
+ -----
+ This computes Eq. (21) in Ref. [1]_ , where e is the joint
+ probability distribution (mixing matrix) of the degrees. If G is
+ directed than the matrix e is the joint probability of the
+ user-specified degree type for the source and target.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks,
+ Physical Review E, 67 026126, 2003
+ .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
+ """
+ if nodes is None:
+ nodes = G.nodes
+
+ degrees = None
+
+ if G.is_directed():
+ indeg = (
+ {d for _, d in G.in_degree(nodes, weight=weight)}
+ if "in" in (x, y)
+ else set()
+ )
+ outdeg = (
+ {d for _, d in G.out_degree(nodes, weight=weight)}
+ if "out" in (x, y)
+ else set()
+ )
+ degrees = set.union(indeg, outdeg)
+ else:
+ degrees = {d for _, d in G.degree(nodes, weight=weight)}
+
+ mapping = {d: i for i, d in enumerate(degrees)}
+ M = degree_mixing_matrix(G, x=x, y=y, nodes=nodes, weight=weight, mapping=mapping)
+
+ return _numeric_ac(M, mapping=mapping)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_pearson_correlation_coefficient(G, x="out", y="in", weight=None, nodes=None):
+ """Compute degree assortativity of graph.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the node degree.
+
+ This is the same as degree_assortativity_coefficient but uses the
+ potentially faster scipy.stats.pearsonr function.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ nodes: list or iterable (optional)
+ Compute pearson correlation of degrees only for specified nodes.
+ The default is all nodes.
+
+ Returns
+ -------
+ r : float
+ Assortativity of graph by degree.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(4)
+ >>> r = nx.degree_pearson_correlation_coefficient(G)
+ >>> print(f"{r:3.1f}")
+ -0.5
+
+ Notes
+ -----
+ This calls scipy.stats.pearsonr.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks
+ Physical Review E, 67 026126, 2003
+ .. [2] Foster, J.G., Foster, D.V., Grassberger, P. & Paczuski, M.
+ Edge direction and the structure of networks, PNAS 107, 10815-20 (2010).
+ """
+ import scipy as sp
+
+ xy = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+ x, y = zip(*xy)
+ return float(sp.stats.pearsonr(x, y)[0])
+
+
+@nx._dispatchable(node_attrs="attribute")
+def attribute_assortativity_coefficient(G, attribute, nodes=None):
+ """Compute assortativity for node attributes.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the given attribute.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ attribute : string
+ Node attribute key
+
+ nodes: list or iterable (optional)
+ Compute attribute assortativity for nodes in container.
+ The default is all nodes.
+
+ Returns
+ -------
+ r: float
+ Assortativity of graph for given attribute
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from([0, 1], color="red")
+ >>> G.add_nodes_from([2, 3], color="blue")
+ >>> G.add_edges_from([(0, 1), (2, 3)])
+ >>> print(nx.attribute_assortativity_coefficient(G, "color"))
+ 1.0
+
+ Notes
+ -----
+ This computes Eq. (2) in Ref. [1]_ , (trace(M)-sum(M^2))/(1-sum(M^2)),
+ where M is the joint probability distribution (mixing matrix)
+ of the specified attribute.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks,
+ Physical Review E, 67 026126, 2003
+ """
+ M = attribute_mixing_matrix(G, attribute, nodes)
+ return attribute_ac(M)
+
+
+@nx._dispatchable(node_attrs="attribute")
+def numeric_assortativity_coefficient(G, attribute, nodes=None):
+ """Compute assortativity for numerical node attributes.
+
+ Assortativity measures the similarity of connections
+ in the graph with respect to the given numeric attribute.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ attribute : string
+ Node attribute key.
+
+ nodes: list or iterable (optional)
+ Compute numeric assortativity only for attributes of nodes in
+ container. The default is all nodes.
+
+ Returns
+ -------
+ r: float
+ Assortativity of graph for given attribute
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from([0, 1], size=2)
+ >>> G.add_nodes_from([2, 3], size=3)
+ >>> G.add_edges_from([(0, 1), (2, 3)])
+ >>> print(nx.numeric_assortativity_coefficient(G, "size"))
+ 1.0
+
+ Notes
+ -----
+ This computes Eq. (21) in Ref. [1]_ , which is the Pearson correlation
+ coefficient of the specified (scalar valued) attribute across edges.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks
+ Physical Review E, 67 026126, 2003
+ """
+ if nodes is None:
+ nodes = G.nodes
+ vals = {G.nodes[n][attribute] for n in nodes}
+ mapping = {d: i for i, d in enumerate(vals)}
+ M = attribute_mixing_matrix(G, attribute, nodes, mapping)
+ return _numeric_ac(M, mapping)
+
+
+def attribute_ac(M):
+ """Compute assortativity for attribute matrix M.
+
+ Parameters
+ ----------
+ M : numpy.ndarray
+ 2D ndarray representing the attribute mixing matrix.
+
+ Notes
+ -----
+ This computes Eq. (2) in Ref. [1]_ , (trace(e)-sum(e^2))/(1-sum(e^2)),
+ where e is the joint probability distribution (mixing matrix)
+ of the specified attribute.
+
+ References
+ ----------
+ .. [1] M. E. J. Newman, Mixing patterns in networks,
+ Physical Review E, 67 026126, 2003
+ """
+ if M.sum() != 1.0:
+ M = M / M.sum()
+ s = (M @ M).sum()
+ t = M.trace()
+ r = (t - s) / (1 - s)
+ return float(r)
+
+
+def _numeric_ac(M, mapping):
+ # M is a 2D numpy array
+ # numeric assortativity coefficient, pearsonr
+ import numpy as np
+
+ if M.sum() != 1.0:
+ M = M / M.sum()
+ x = np.array(list(mapping.keys()))
+ y = x # x and y have the same support
+ idx = list(mapping.values())
+ a = M.sum(axis=0)
+ b = M.sum(axis=1)
+ vara = (a[idx] * x**2).sum() - ((a[idx] * x).sum()) ** 2
+ varb = (b[idx] * y**2).sum() - ((b[idx] * y).sum()) ** 2
+ xy = np.outer(x, y)
+ ab = np.outer(a[idx], b[idx])
+ return float((xy * (M - ab)).sum() / np.sqrt(vara * varb))
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/mixing.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/mixing.py
new file mode 100644
index 0000000000000000000000000000000000000000..852ad82a4f6b9710571c19159786e94260c50330
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/mixing.py
@@ -0,0 +1,254 @@
+"""
+Mixing matrices for node attributes and degree.
+"""
+import networkx as nx
+from networkx.algorithms.assortativity.pairs import node_attribute_xy, node_degree_xy
+from networkx.utils import dict_to_numpy_array
+
+__all__ = [
+ "attribute_mixing_matrix",
+ "attribute_mixing_dict",
+ "degree_mixing_matrix",
+ "degree_mixing_dict",
+ "mixing_dict",
+]
+
+
+@nx._dispatchable(node_attrs="attribute")
+def attribute_mixing_dict(G, attribute, nodes=None, normalized=False):
+ """Returns dictionary representation of mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ attribute : string
+ Node attribute key.
+
+ nodes: list or iterable (optional)
+ Unse nodes in container to build the dict. The default is all nodes.
+
+ normalized : bool (default=False)
+ Return counts if False or probabilities if True.
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_nodes_from([0, 1], color="red")
+ >>> G.add_nodes_from([2, 3], color="blue")
+ >>> G.add_edge(1, 3)
+ >>> d = nx.attribute_mixing_dict(G, "color")
+ >>> print(d["red"]["blue"])
+ 1
+ >>> print(d["blue"]["red"]) # d symmetric for undirected graphs
+ 1
+
+ Returns
+ -------
+ d : dictionary
+ Counts or joint probability of occurrence of attribute pairs.
+ """
+ xy_iter = node_attribute_xy(G, attribute, nodes)
+ return mixing_dict(xy_iter, normalized=normalized)
+
+
+@nx._dispatchable(node_attrs="attribute")
+def attribute_mixing_matrix(G, attribute, nodes=None, mapping=None, normalized=True):
+ """Returns mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ attribute : string
+ Node attribute key.
+
+ nodes: list or iterable (optional)
+ Use only nodes in container to build the matrix. The default is
+ all nodes.
+
+ mapping : dictionary, optional
+ Mapping from node attribute to integer index in matrix.
+ If not specified, an arbitrary ordering will be used.
+
+ normalized : bool (default=True)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ m: numpy array
+ Counts or joint probability of occurrence of attribute pairs.
+
+ Notes
+ -----
+ If each node has a unique attribute value, the unnormalized mixing matrix
+ will be equal to the adjacency matrix. To get a denser mixing matrix,
+ the rounding can be performed to form groups of nodes with equal values.
+ For example, the exact height of persons in cm (180.79155222, 163.9080892,
+ 163.30095355, 167.99016217, 168.21590163, ...) can be rounded to (180, 163,
+ 163, 168, 168, ...).
+
+ Definitions of attribute mixing matrix vary on whether the matrix
+ should include rows for attribute values that don't arise. Here we
+ do not include such empty-rows. But you can force them to appear
+ by inputting a `mapping` that includes those values.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(3)
+ >>> gender = {0: "male", 1: "female", 2: "female"}
+ >>> nx.set_node_attributes(G, gender, "gender")
+ >>> mapping = {"male": 0, "female": 1}
+ >>> mix_mat = nx.attribute_mixing_matrix(G, "gender", mapping=mapping)
+ >>> mix_mat
+ array([[0. , 0.25],
+ [0.25, 0.5 ]])
+ """
+ d = attribute_mixing_dict(G, attribute, nodes)
+ a = dict_to_numpy_array(d, mapping=mapping)
+ if normalized:
+ a = a / a.sum()
+ return a
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_mixing_dict(G, x="out", y="in", weight=None, nodes=None, normalized=False):
+ """Returns dictionary representation of mixing matrix for degree.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ normalized : bool (default=False)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ d: dictionary
+ Counts or joint probability of occurrence of degree pairs.
+ """
+ xy_iter = node_degree_xy(G, x=x, y=y, nodes=nodes, weight=weight)
+ return mixing_dict(xy_iter, normalized=normalized)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def degree_mixing_matrix(
+ G, x="out", y="in", weight=None, nodes=None, normalized=True, mapping=None
+):
+ """Returns mixing matrix for attribute.
+
+ Parameters
+ ----------
+ G : graph
+ NetworkX graph object.
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ nodes: list or iterable (optional)
+ Build the matrix using only nodes in container.
+ The default is all nodes.
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ normalized : bool (default=True)
+ Return counts if False or probabilities if True.
+
+ mapping : dictionary, optional
+ Mapping from node degree to integer index in matrix.
+ If not specified, an arbitrary ordering will be used.
+
+ Returns
+ -------
+ m: numpy array
+ Counts, or joint probability, of occurrence of node degree.
+
+ Notes
+ -----
+ Definitions of degree mixing matrix vary on whether the matrix
+ should include rows for degree values that don't arise. Here we
+ do not include such empty-rows. But you can force them to appear
+ by inputting a `mapping` that includes those values. See examples.
+
+ Examples
+ --------
+ >>> G = nx.star_graph(3)
+ >>> mix_mat = nx.degree_mixing_matrix(G)
+ >>> mix_mat
+ array([[0. , 0.5],
+ [0.5, 0. ]])
+
+ If you want every possible degree to appear as a row, even if no nodes
+ have that degree, use `mapping` as follows,
+
+ >>> max_degree = max(deg for n, deg in G.degree)
+ >>> mapping = {x: x for x in range(max_degree + 1)} # identity mapping
+ >>> mix_mat = nx.degree_mixing_matrix(G, mapping=mapping)
+ >>> mix_mat
+ array([[0. , 0. , 0. , 0. ],
+ [0. , 0. , 0. , 0.5],
+ [0. , 0. , 0. , 0. ],
+ [0. , 0.5, 0. , 0. ]])
+ """
+ d = degree_mixing_dict(G, x=x, y=y, nodes=nodes, weight=weight)
+ a = dict_to_numpy_array(d, mapping=mapping)
+ if normalized:
+ a = a / a.sum()
+ return a
+
+
+def mixing_dict(xy, normalized=False):
+ """Returns a dictionary representation of mixing matrix.
+
+ Parameters
+ ----------
+ xy : list or container of two-tuples
+ Pairs of (x,y) items.
+
+ attribute : string
+ Node attribute key
+
+ normalized : bool (default=False)
+ Return counts if False or probabilities if True.
+
+ Returns
+ -------
+ d: dictionary
+ Counts or Joint probability of occurrence of values in xy.
+ """
+ d = {}
+ psum = 0.0
+ for x, y in xy:
+ if x not in d:
+ d[x] = {}
+ if y not in d:
+ d[y] = {}
+ v = d[x].get(y, 0)
+ d[x][y] = v + 1
+ psum += 1
+
+ if normalized:
+ for _, jdict in d.items():
+ for j in jdict:
+ jdict[j] /= psum
+ return d
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py
new file mode 100644
index 0000000000000000000000000000000000000000..6488d041a8bdc93ef3591283781b81bcf7f47dab
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/neighbor_degree.py
@@ -0,0 +1,160 @@
+import networkx as nx
+
+__all__ = ["average_neighbor_degree"]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def average_neighbor_degree(G, source="out", target="out", nodes=None, weight=None):
+ r"""Returns the average degree of the neighborhood of each node.
+
+ In an undirected graph, the neighborhood `N(i)` of node `i` contains the
+ nodes that are connected to `i` by an edge.
+
+ For directed graphs, `N(i)` is defined according to the parameter `source`:
+
+ - if source is 'in', then `N(i)` consists of predecessors of node `i`.
+ - if source is 'out', then `N(i)` consists of successors of node `i`.
+ - if source is 'in+out', then `N(i)` is both predecessors and successors.
+
+ The average neighborhood degree of a node `i` is
+
+ .. math::
+
+ k_{nn,i} = \frac{1}{|N(i)|} \sum_{j \in N(i)} k_j
+
+ where `N(i)` are the neighbors of node `i` and `k_j` is
+ the degree of node `j` which belongs to `N(i)`. For weighted
+ graphs, an analogous measure can be defined [1]_,
+
+ .. math::
+
+ k_{nn,i}^{w} = \frac{1}{s_i} \sum_{j \in N(i)} w_{ij} k_j
+
+ where `s_i` is the weighted degree of node `i`, `w_{ij}`
+ is the weight of the edge that links `i` and `j` and
+ `N(i)` are the neighbors of node `i`.
+
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ source : string ("in"|"out"|"in+out"), optional (default="out")
+ Directed graphs only.
+ Use "in"- or "out"-neighbors of source node.
+
+ target : string ("in"|"out"|"in+out"), optional (default="out")
+ Directed graphs only.
+ Use "in"- or "out"-degree for target node.
+
+ nodes : list or iterable, optional (default=G.nodes)
+ Compute neighbor degree only for specified nodes.
+
+ weight : string or None, optional (default=None)
+ The edge attribute that holds the numerical value used as a weight.
+ If None, then each edge has weight 1.
+
+ Returns
+ -------
+ d: dict
+ A dictionary keyed by node to the average degree of its neighbors.
+
+ Raises
+ ------
+ NetworkXError
+ If either `source` or `target` are not one of 'in', 'out', or 'in+out'.
+ If either `source` or `target` is passed for an undirected graph.
+
+ Examples
+ --------
+ >>> G = nx.path_graph(4)
+ >>> G.edges[0, 1]["weight"] = 5
+ >>> G.edges[2, 3]["weight"] = 3
+
+ >>> nx.average_neighbor_degree(G)
+ {0: 2.0, 1: 1.5, 2: 1.5, 3: 2.0}
+ >>> nx.average_neighbor_degree(G, weight="weight")
+ {0: 2.0, 1: 1.1666666666666667, 2: 1.25, 3: 2.0}
+
+ >>> G = nx.DiGraph()
+ >>> nx.add_path(G, [0, 1, 2, 3])
+ >>> nx.average_neighbor_degree(G, source="in", target="in")
+ {0: 0.0, 1: 0.0, 2: 1.0, 3: 1.0}
+
+ >>> nx.average_neighbor_degree(G, source="out", target="out")
+ {0: 1.0, 1: 1.0, 2: 0.0, 3: 0.0}
+
+ See Also
+ --------
+ average_degree_connectivity
+
+ References
+ ----------
+ .. [1] A. Barrat, M. Barthélemy, R. Pastor-Satorras, and A. Vespignani,
+ "The architecture of complex weighted networks".
+ PNAS 101 (11): 3747–3752 (2004).
+ """
+ if G.is_directed():
+ if source == "in":
+ source_degree = G.in_degree
+ elif source == "out":
+ source_degree = G.out_degree
+ elif source == "in+out":
+ source_degree = G.degree
+ else:
+ raise nx.NetworkXError(
+ f"source argument {source} must be 'in', 'out' or 'in+out'"
+ )
+
+ if target == "in":
+ target_degree = G.in_degree
+ elif target == "out":
+ target_degree = G.out_degree
+ elif target == "in+out":
+ target_degree = G.degree
+ else:
+ raise nx.NetworkXError(
+ f"target argument {target} must be 'in', 'out' or 'in+out'"
+ )
+ else:
+ if source != "out" or target != "out":
+ raise nx.NetworkXError(
+ f"source and target arguments are only supported for directed graphs"
+ )
+ source_degree = target_degree = G.degree
+
+ # precompute target degrees -- should *not* be weighted degree
+ t_deg = dict(target_degree())
+
+ # Set up both predecessor and successor neighbor dicts leaving empty if not needed
+ G_P = G_S = {n: {} for n in G}
+ if G.is_directed():
+ # "in" or "in+out" cases: G_P contains predecessors
+ if "in" in source:
+ G_P = G.pred
+ # "out" or "in+out" cases: G_S contains successors
+ if "out" in source:
+ G_S = G.succ
+ else:
+ # undirected leave G_P empty but G_S is the adjacency
+ G_S = G.adj
+
+ # Main loop: Compute average degree of neighbors
+ avg = {}
+ for n, deg in source_degree(nodes, weight=weight):
+ # handle degree zero average
+ if deg == 0:
+ avg[n] = 0.0
+ continue
+
+ # we sum over both G_P and G_S, but one of the two is usually empty.
+ if weight is None:
+ avg[n] = (
+ sum(t_deg[nbr] for nbr in G_S[n]) + sum(t_deg[nbr] for nbr in G_P[n])
+ ) / deg
+ else:
+ avg[n] = (
+ sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_S[n].items())
+ + sum(dd.get(weight, 1) * t_deg[nbr] for nbr, dd in G_P[n].items())
+ ) / deg
+ return avg
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/pairs.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/pairs.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a1d6f8e1df99a0159e030156385df3c1322a73a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/pairs.py
@@ -0,0 +1,118 @@
+"""Generators of x-y pairs of node data."""
+import networkx as nx
+
+__all__ = ["node_attribute_xy", "node_degree_xy"]
+
+
+@nx._dispatchable(node_attrs="attribute")
+def node_attribute_xy(G, attribute, nodes=None):
+ """Returns iterator of node-attribute pairs for all edges in G.
+
+ Parameters
+ ----------
+ G: NetworkX graph
+
+ attribute: key
+ The node attribute key.
+
+ nodes: list or iterable (optional)
+ Use only edges that are incident to specified nodes.
+ The default is all nodes.
+
+ Returns
+ -------
+ (x, y): 2-tuple
+ Generates 2-tuple of (attribute, attribute) values.
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_node(1, color="red")
+ >>> G.add_node(2, color="blue")
+ >>> G.add_edge(1, 2)
+ >>> list(nx.node_attribute_xy(G, "color"))
+ [('red', 'blue')]
+
+ Notes
+ -----
+ For undirected graphs each edge is produced twice, once for each edge
+ representation (u, v) and (v, u), with the exception of self-loop edges
+ which only appear once.
+ """
+ if nodes is None:
+ nodes = set(G)
+ else:
+ nodes = set(nodes)
+ Gnodes = G.nodes
+ for u, nbrsdict in G.adjacency():
+ if u not in nodes:
+ continue
+ uattr = Gnodes[u].get(attribute, None)
+ if G.is_multigraph():
+ for v, keys in nbrsdict.items():
+ vattr = Gnodes[v].get(attribute, None)
+ for _ in keys:
+ yield (uattr, vattr)
+ else:
+ for v in nbrsdict:
+ vattr = Gnodes[v].get(attribute, None)
+ yield (uattr, vattr)
+
+
+@nx._dispatchable(edge_attrs="weight")
+def node_degree_xy(G, x="out", y="in", weight=None, nodes=None):
+ """Generate node degree-degree pairs for edges in G.
+
+ Parameters
+ ----------
+ G: NetworkX graph
+
+ x: string ('in','out')
+ The degree type for source node (directed graphs only).
+
+ y: string ('in','out')
+ The degree type for target node (directed graphs only).
+
+ weight: string or None, optional (default=None)
+ The edge attribute that holds the numerical value used
+ as a weight. If None, then each edge has weight 1.
+ The degree is the sum of the edge weights adjacent to the node.
+
+ nodes: list or iterable (optional)
+ Use only edges that are adjacency to specified nodes.
+ The default is all nodes.
+
+ Returns
+ -------
+ (x, y): 2-tuple
+ Generates 2-tuple of (degree, degree) values.
+
+
+ Examples
+ --------
+ >>> G = nx.DiGraph()
+ >>> G.add_edge(1, 2)
+ >>> list(nx.node_degree_xy(G, x="out", y="in"))
+ [(1, 1)]
+ >>> list(nx.node_degree_xy(G, x="in", y="out"))
+ [(0, 0)]
+
+ Notes
+ -----
+ For undirected graphs each edge is produced twice, once for each edge
+ representation (u, v) and (v, u), with the exception of self-loop edges
+ which only appear once.
+ """
+ nodes = set(G) if nodes is None else set(nodes)
+ if G.is_directed():
+ direction = {"out": G.out_degree, "in": G.in_degree}
+ xdeg = direction[x]
+ ydeg = direction[y]
+ else:
+ xdeg = ydeg = G.degree
+
+ for u, degu in xdeg(nodes, weight=weight):
+ # use G.edges to treat multigraphs correctly
+ neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)
+ for _, degv in ydeg(neighbors, weight=weight):
+ yield degu, degv
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc65524b004da745e5812c165cbcd3c96d841de1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa8f3a4acbb14e499a11cb1861e4d3a0a4939640
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fac40ef0087aca38c21a054ce63ec20e49eb87b0
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ef52f0d31e4910e61200e67fc1e9d2f91ca68b7
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..46d6300649d3b4658a7263cad04354988b4da312
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/base_test.py
@@ -0,0 +1,81 @@
+import networkx as nx
+
+
+class BaseTestAttributeMixing:
+ @classmethod
+ def setup_class(cls):
+ G = nx.Graph()
+ G.add_nodes_from([0, 1], fish="one")
+ G.add_nodes_from([2, 3], fish="two")
+ G.add_nodes_from([4], fish="red")
+ G.add_nodes_from([5], fish="blue")
+ G.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])
+ cls.G = G
+
+ D = nx.DiGraph()
+ D.add_nodes_from([0, 1], fish="one")
+ D.add_nodes_from([2, 3], fish="two")
+ D.add_nodes_from([4], fish="red")
+ D.add_nodes_from([5], fish="blue")
+ D.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])
+ cls.D = D
+
+ M = nx.MultiGraph()
+ M.add_nodes_from([0, 1], fish="one")
+ M.add_nodes_from([2, 3], fish="two")
+ M.add_nodes_from([4], fish="red")
+ M.add_nodes_from([5], fish="blue")
+ M.add_edges_from([(0, 1), (0, 1), (2, 3)])
+ cls.M = M
+
+ S = nx.Graph()
+ S.add_nodes_from([0, 1], fish="one")
+ S.add_nodes_from([2, 3], fish="two")
+ S.add_nodes_from([4], fish="red")
+ S.add_nodes_from([5], fish="blue")
+ S.add_edge(0, 0)
+ S.add_edge(2, 2)
+ cls.S = S
+
+ N = nx.Graph()
+ N.add_nodes_from([0, 1], margin=-2)
+ N.add_nodes_from([2, 3], margin=-2)
+ N.add_nodes_from([4], margin=-3)
+ N.add_nodes_from([5], margin=-4)
+ N.add_edges_from([(0, 1), (2, 3), (0, 4), (2, 5)])
+ cls.N = N
+
+ F = nx.Graph()
+ F.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5)
+ F.add_edge(0, 2, weight=1)
+ nx.set_node_attributes(F, dict(F.degree(weight="weight")), "margin")
+ cls.F = F
+
+ K = nx.Graph()
+ K.add_nodes_from([1, 2], margin=-1)
+ K.add_nodes_from([3], margin=1)
+ K.add_nodes_from([4], margin=2)
+ K.add_edges_from([(3, 4), (1, 2), (1, 3)])
+ cls.K = K
+
+
+class BaseTestDegreeMixing:
+ @classmethod
+ def setup_class(cls):
+ cls.P4 = nx.path_graph(4)
+ cls.D = nx.DiGraph()
+ cls.D.add_edges_from([(0, 2), (0, 3), (1, 3), (2, 3)])
+ cls.D2 = nx.DiGraph()
+ cls.D2.add_edges_from([(0, 3), (1, 0), (1, 2), (2, 4), (4, 1), (4, 3), (4, 2)])
+ cls.M = nx.MultiGraph()
+ nx.add_path(cls.M, range(4))
+ cls.M.add_edge(0, 1)
+ cls.S = nx.Graph()
+ cls.S.add_edges_from([(0, 0), (1, 1)])
+ cls.W = nx.Graph()
+ cls.W.add_edges_from([(0, 3), (1, 3), (2, 3)], weight=0.5)
+ cls.W.add_edge(0, 2, weight=1)
+ S1 = nx.star_graph(4)
+ S2 = nx.star_graph(4)
+ cls.DS = nx.disjoint_union(S1, S2)
+ cls.DS.add_edge(4, 5)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..21c6287bbe6b0bfc9aa41201b593f342b2d3976e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_connectivity.py
@@ -0,0 +1,143 @@
+from itertools import permutations
+
+import pytest
+
+import networkx as nx
+
+
+class TestNeighborConnectivity:
+ def test_degree_p4(self):
+ G = nx.path_graph(4)
+ answer = {1: 2.0, 2: 1.5}
+ nd = nx.average_degree_connectivity(G)
+ assert nd == answer
+
+ D = G.to_directed()
+ answer = {2: 2.0, 4: 1.5}
+ nd = nx.average_degree_connectivity(D)
+ assert nd == answer
+
+ answer = {1: 2.0, 2: 1.5}
+ D = G.to_directed()
+ nd = nx.average_degree_connectivity(D, source="in", target="in")
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_degree_connectivity(D, source="in", target="in")
+ assert nd == answer
+
+ def test_degree_p4_weighted(self):
+ G = nx.path_graph(4)
+ G[1][2]["weight"] = 4
+ answer = {1: 2.0, 2: 1.8}
+ nd = nx.average_degree_connectivity(G, weight="weight")
+ assert nd == answer
+ answer = {1: 2.0, 2: 1.5}
+ nd = nx.average_degree_connectivity(G)
+ assert nd == answer
+
+ D = G.to_directed()
+ answer = {2: 2.0, 4: 1.8}
+ nd = nx.average_degree_connectivity(D, weight="weight")
+ assert nd == answer
+
+ answer = {1: 2.0, 2: 1.8}
+ D = G.to_directed()
+ nd = nx.average_degree_connectivity(
+ D, weight="weight", source="in", target="in"
+ )
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_degree_connectivity(
+ D, source="in", target="out", weight="weight"
+ )
+ assert nd == answer
+
+ def test_weight_keyword(self):
+ G = nx.path_graph(4)
+ G[1][2]["other"] = 4
+ answer = {1: 2.0, 2: 1.8}
+ nd = nx.average_degree_connectivity(G, weight="other")
+ assert nd == answer
+ answer = {1: 2.0, 2: 1.5}
+ nd = nx.average_degree_connectivity(G, weight=None)
+ assert nd == answer
+
+ D = G.to_directed()
+ answer = {2: 2.0, 4: 1.8}
+ nd = nx.average_degree_connectivity(D, weight="other")
+ assert nd == answer
+
+ answer = {1: 2.0, 2: 1.8}
+ D = G.to_directed()
+ nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in")
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_degree_connectivity(D, weight="other", source="in", target="in")
+ assert nd == answer
+
+ def test_degree_barrat(self):
+ G = nx.star_graph(5)
+ G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)])
+ G[0][5]["weight"] = 5
+ nd = nx.average_degree_connectivity(G)[5]
+ assert nd == 1.8
+ nd = nx.average_degree_connectivity(G, weight="weight")[5]
+ assert nd == pytest.approx(3.222222, abs=1e-5)
+
+ def test_zero_deg(self):
+ G = nx.DiGraph()
+ G.add_edge(1, 2)
+ G.add_edge(1, 3)
+ G.add_edge(1, 4)
+ c = nx.average_degree_connectivity(G)
+ assert c == {1: 0, 3: 1}
+ c = nx.average_degree_connectivity(G, source="in", target="in")
+ assert c == {0: 0, 1: 0}
+ c = nx.average_degree_connectivity(G, source="in", target="out")
+ assert c == {0: 0, 1: 3}
+ c = nx.average_degree_connectivity(G, source="in", target="in+out")
+ assert c == {0: 0, 1: 3}
+ c = nx.average_degree_connectivity(G, source="out", target="out")
+ assert c == {0: 0, 3: 0}
+ c = nx.average_degree_connectivity(G, source="out", target="in")
+ assert c == {0: 0, 3: 1}
+ c = nx.average_degree_connectivity(G, source="out", target="in+out")
+ assert c == {0: 0, 3: 1}
+
+ def test_in_out_weight(self):
+ G = nx.DiGraph()
+ G.add_edge(1, 2, weight=1)
+ G.add_edge(1, 3, weight=1)
+ G.add_edge(3, 1, weight=1)
+ for s, t in permutations(["in", "out", "in+out"], 2):
+ c = nx.average_degree_connectivity(G, source=s, target=t)
+ cw = nx.average_degree_connectivity(G, source=s, target=t, weight="weight")
+ assert c == cw
+
+ def test_invalid_source(self):
+ with pytest.raises(nx.NetworkXError):
+ G = nx.DiGraph()
+ nx.average_degree_connectivity(G, source="bogus")
+
+ def test_invalid_target(self):
+ with pytest.raises(nx.NetworkXError):
+ G = nx.DiGraph()
+ nx.average_degree_connectivity(G, target="bogus")
+
+ def test_invalid_undirected_graph(self):
+ G = nx.Graph()
+ with pytest.raises(nx.NetworkXError):
+ nx.average_degree_connectivity(G, target="bogus")
+ with pytest.raises(nx.NetworkXError):
+ nx.average_degree_connectivity(G, source="bogus")
+
+ def test_single_node(self):
+ # TODO Is this really the intended behavior for providing a
+ # single node as the argument `nodes`? Shouldn't the function
+ # just return the connectivity value itself?
+ G = nx.trivial_graph()
+ conn = nx.average_degree_connectivity(G, nodes=0)
+ assert conn == {0: 0}
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py
new file mode 100644
index 0000000000000000000000000000000000000000..5203f9449fd022525b97a19cbe78498e33fb09a3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_correlation.py
@@ -0,0 +1,123 @@
+import pytest
+
+np = pytest.importorskip("numpy")
+pytest.importorskip("scipy")
+
+
+import networkx as nx
+from networkx.algorithms.assortativity.correlation import attribute_ac
+
+from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
+
+
+class TestDegreeMixingCorrelation(BaseTestDegreeMixing):
+ def test_degree_assortativity_undirected(self):
+ r = nx.degree_assortativity_coefficient(self.P4)
+ np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4)
+
+ def test_degree_assortativity_node_kwargs(self):
+ G = nx.Graph()
+ edges = [(0, 1), (0, 3), (1, 2), (1, 3), (1, 4), (5, 9), (9, 0)]
+ G.add_edges_from(edges)
+ r = nx.degree_assortativity_coefficient(G, nodes=[1, 2, 4])
+ np.testing.assert_almost_equal(r, -1.0, decimal=4)
+
+ def test_degree_assortativity_directed(self):
+ r = nx.degree_assortativity_coefficient(self.D)
+ np.testing.assert_almost_equal(r, -0.57735, decimal=4)
+
+ def test_degree_assortativity_directed2(self):
+ """Test degree assortativity for a directed graph where the set of
+ in/out degree does not equal the total degree."""
+ r = nx.degree_assortativity_coefficient(self.D2)
+ np.testing.assert_almost_equal(r, 0.14852, decimal=4)
+
+ def test_degree_assortativity_multigraph(self):
+ r = nx.degree_assortativity_coefficient(self.M)
+ np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
+
+ def test_degree_pearson_assortativity_undirected(self):
+ r = nx.degree_pearson_correlation_coefficient(self.P4)
+ np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4)
+
+ def test_degree_pearson_assortativity_directed(self):
+ r = nx.degree_pearson_correlation_coefficient(self.D)
+ np.testing.assert_almost_equal(r, -0.57735, decimal=4)
+
+ def test_degree_pearson_assortativity_directed2(self):
+ """Test degree assortativity with Pearson for a directed graph where
+ the set of in/out degree does not equal the total degree."""
+ r = nx.degree_pearson_correlation_coefficient(self.D2)
+ np.testing.assert_almost_equal(r, 0.14852, decimal=4)
+
+ def test_degree_pearson_assortativity_multigraph(self):
+ r = nx.degree_pearson_correlation_coefficient(self.M)
+ np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
+
+ def test_degree_assortativity_weighted(self):
+ r = nx.degree_assortativity_coefficient(self.W, weight="weight")
+ np.testing.assert_almost_equal(r, -0.1429, decimal=4)
+
+ def test_degree_assortativity_double_star(self):
+ r = nx.degree_assortativity_coefficient(self.DS)
+ np.testing.assert_almost_equal(r, -0.9339, decimal=4)
+
+
+class TestAttributeMixingCorrelation(BaseTestAttributeMixing):
+ def test_attribute_assortativity_undirected(self):
+ r = nx.attribute_assortativity_coefficient(self.G, "fish")
+ assert r == 6.0 / 22.0
+
+ def test_attribute_assortativity_directed(self):
+ r = nx.attribute_assortativity_coefficient(self.D, "fish")
+ assert r == 1.0 / 3.0
+
+ def test_attribute_assortativity_multigraph(self):
+ r = nx.attribute_assortativity_coefficient(self.M, "fish")
+ assert r == 1.0
+
+ def test_attribute_assortativity_coefficient(self):
+ # from "Mixing patterns in networks"
+ # fmt: off
+ a = np.array([[0.258, 0.016, 0.035, 0.013],
+ [0.012, 0.157, 0.058, 0.019],
+ [0.013, 0.023, 0.306, 0.035],
+ [0.005, 0.007, 0.024, 0.016]])
+ # fmt: on
+ r = attribute_ac(a)
+ np.testing.assert_almost_equal(r, 0.623, decimal=3)
+
+ def test_attribute_assortativity_coefficient2(self):
+ # fmt: off
+ a = np.array([[0.18, 0.02, 0.01, 0.03],
+ [0.02, 0.20, 0.03, 0.02],
+ [0.01, 0.03, 0.16, 0.01],
+ [0.03, 0.02, 0.01, 0.22]])
+ # fmt: on
+ r = attribute_ac(a)
+ np.testing.assert_almost_equal(r, 0.68, decimal=2)
+
+ def test_attribute_assortativity(self):
+ a = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]])
+ r = attribute_ac(a)
+ np.testing.assert_almost_equal(r, 0.029, decimal=3)
+
+ def test_attribute_assortativity_negative(self):
+ r = nx.numeric_assortativity_coefficient(self.N, "margin")
+ np.testing.assert_almost_equal(r, -0.2903, decimal=4)
+
+ def test_assortativity_node_kwargs(self):
+ G = nx.Graph()
+ G.add_nodes_from([0, 1], size=2)
+ G.add_nodes_from([2, 3], size=3)
+ G.add_edges_from([(0, 1), (2, 3)])
+ r = nx.numeric_assortativity_coefficient(G, "size", nodes=[0, 3])
+ np.testing.assert_almost_equal(r, 1.0, decimal=4)
+
+ def test_attribute_assortativity_float(self):
+ r = nx.numeric_assortativity_coefficient(self.F, "margin")
+ np.testing.assert_almost_equal(r, -0.1429, decimal=4)
+
+ def test_attribute_assortativity_mixed(self):
+ r = nx.numeric_assortativity_coefficient(self.K, "margin")
+ np.testing.assert_almost_equal(r, 0.4340, decimal=4)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9af09867235b9092837b517ca542e8a85eb602ac
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_mixing.py
@@ -0,0 +1,176 @@
+import pytest
+
+np = pytest.importorskip("numpy")
+
+
+import networkx as nx
+
+from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
+
+
+class TestDegreeMixingDict(BaseTestDegreeMixing):
+ def test_degree_mixing_dict_undirected(self):
+ d = nx.degree_mixing_dict(self.P4)
+ d_result = {1: {2: 2}, 2: {1: 2, 2: 2}}
+ assert d == d_result
+
+ def test_degree_mixing_dict_undirected_normalized(self):
+ d = nx.degree_mixing_dict(self.P4, normalized=True)
+ d_result = {1: {2: 1.0 / 3}, 2: {1: 1.0 / 3, 2: 1.0 / 3}}
+ assert d == d_result
+
+ def test_degree_mixing_dict_directed(self):
+ d = nx.degree_mixing_dict(self.D)
+ print(d)
+ d_result = {1: {3: 2}, 2: {1: 1, 3: 1}, 3: {}}
+ assert d == d_result
+
+ def test_degree_mixing_dict_multigraph(self):
+ d = nx.degree_mixing_dict(self.M)
+ d_result = {1: {2: 1}, 2: {1: 1, 3: 3}, 3: {2: 3}}
+ assert d == d_result
+
+ def test_degree_mixing_dict_weighted(self):
+ d = nx.degree_mixing_dict(self.W, weight="weight")
+ d_result = {0.5: {1.5: 1}, 1.5: {1.5: 6, 0.5: 1}}
+ assert d == d_result
+
+
+class TestDegreeMixingMatrix(BaseTestDegreeMixing):
+ def test_degree_mixing_matrix_undirected(self):
+ # fmt: off
+ a_result = np.array([[0, 2],
+ [2, 2]]
+ )
+ # fmt: on
+ a = nx.degree_mixing_matrix(self.P4, normalized=False)
+ np.testing.assert_equal(a, a_result)
+ a = nx.degree_mixing_matrix(self.P4)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_degree_mixing_matrix_directed(self):
+ # fmt: off
+ a_result = np.array([[0, 0, 2],
+ [1, 0, 1],
+ [0, 0, 0]]
+ )
+ # fmt: on
+ a = nx.degree_mixing_matrix(self.D, normalized=False)
+ np.testing.assert_equal(a, a_result)
+ a = nx.degree_mixing_matrix(self.D)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_degree_mixing_matrix_multigraph(self):
+ # fmt: off
+ a_result = np.array([[0, 1, 0],
+ [1, 0, 3],
+ [0, 3, 0]]
+ )
+ # fmt: on
+ a = nx.degree_mixing_matrix(self.M, normalized=False)
+ np.testing.assert_equal(a, a_result)
+ a = nx.degree_mixing_matrix(self.M)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_degree_mixing_matrix_selfloop(self):
+ # fmt: off
+ a_result = np.array([[2]])
+ # fmt: on
+ a = nx.degree_mixing_matrix(self.S, normalized=False)
+ np.testing.assert_equal(a, a_result)
+ a = nx.degree_mixing_matrix(self.S)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_degree_mixing_matrix_weighted(self):
+ a_result = np.array([[0.0, 1.0], [1.0, 6.0]])
+ a = nx.degree_mixing_matrix(self.W, weight="weight", normalized=False)
+ np.testing.assert_equal(a, a_result)
+ a = nx.degree_mixing_matrix(self.W, weight="weight")
+ np.testing.assert_equal(a, a_result / float(a_result.sum()))
+
+ def test_degree_mixing_matrix_mapping(self):
+ a_result = np.array([[6.0, 1.0], [1.0, 0.0]])
+ mapping = {0.5: 1, 1.5: 0}
+ a = nx.degree_mixing_matrix(
+ self.W, weight="weight", normalized=False, mapping=mapping
+ )
+ np.testing.assert_equal(a, a_result)
+
+
+class TestAttributeMixingDict(BaseTestAttributeMixing):
+ def test_attribute_mixing_dict_undirected(self):
+ d = nx.attribute_mixing_dict(self.G, "fish")
+ d_result = {
+ "one": {"one": 2, "red": 1},
+ "two": {"two": 2, "blue": 1},
+ "red": {"one": 1},
+ "blue": {"two": 1},
+ }
+ assert d == d_result
+
+ def test_attribute_mixing_dict_directed(self):
+ d = nx.attribute_mixing_dict(self.D, "fish")
+ d_result = {
+ "one": {"one": 1, "red": 1},
+ "two": {"two": 1, "blue": 1},
+ "red": {},
+ "blue": {},
+ }
+ assert d == d_result
+
+ def test_attribute_mixing_dict_multigraph(self):
+ d = nx.attribute_mixing_dict(self.M, "fish")
+ d_result = {"one": {"one": 4}, "two": {"two": 2}}
+ assert d == d_result
+
+
+class TestAttributeMixingMatrix(BaseTestAttributeMixing):
+ def test_attribute_mixing_matrix_undirected(self):
+ mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
+ a_result = np.array([[2, 0, 1, 0], [0, 2, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]])
+ a = nx.attribute_mixing_matrix(
+ self.G, "fish", mapping=mapping, normalized=False
+ )
+ np.testing.assert_equal(a, a_result)
+ a = nx.attribute_mixing_matrix(self.G, "fish", mapping=mapping)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_attribute_mixing_matrix_directed(self):
+ mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
+ a_result = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0]])
+ a = nx.attribute_mixing_matrix(
+ self.D, "fish", mapping=mapping, normalized=False
+ )
+ np.testing.assert_equal(a, a_result)
+ a = nx.attribute_mixing_matrix(self.D, "fish", mapping=mapping)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_attribute_mixing_matrix_multigraph(self):
+ mapping = {"one": 0, "two": 1, "red": 2, "blue": 3}
+ a_result = np.array([[4, 0, 0, 0], [0, 2, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
+ a = nx.attribute_mixing_matrix(
+ self.M, "fish", mapping=mapping, normalized=False
+ )
+ np.testing.assert_equal(a, a_result)
+ a = nx.attribute_mixing_matrix(self.M, "fish", mapping=mapping)
+ np.testing.assert_equal(a, a_result / a_result.sum())
+
+ def test_attribute_mixing_matrix_negative(self):
+ mapping = {-2: 0, -3: 1, -4: 2}
+ a_result = np.array([[4.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
+ a = nx.attribute_mixing_matrix(
+ self.N, "margin", mapping=mapping, normalized=False
+ )
+ np.testing.assert_equal(a, a_result)
+ a = nx.attribute_mixing_matrix(self.N, "margin", mapping=mapping)
+ np.testing.assert_equal(a, a_result / float(a_result.sum()))
+
+ def test_attribute_mixing_matrix_float(self):
+ mapping = {0.5: 1, 1.5: 0}
+ a_result = np.array([[6.0, 1.0], [1.0, 0.0]])
+ a = nx.attribute_mixing_matrix(
+ self.F, "margin", mapping=mapping, normalized=False
+ )
+ np.testing.assert_equal(a, a_result)
+ a = nx.attribute_mixing_matrix(self.F, "margin", mapping=mapping)
+ np.testing.assert_equal(a, a_result / a_result.sum())
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf1252d532079d4de6de4659943ce008eb9018b3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
@@ -0,0 +1,108 @@
+import pytest
+
+import networkx as nx
+
+
+class TestAverageNeighbor:
+ def test_degree_p4(self):
+ G = nx.path_graph(4)
+ answer = {0: 2, 1: 1.5, 2: 1.5, 3: 2}
+ nd = nx.average_neighbor_degree(G)
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert nd == answer
+
+ D = nx.DiGraph(G.edges(data=True))
+ nd = nx.average_neighbor_degree(D)
+ assert nd == {0: 1, 1: 1, 2: 0, 3: 0}
+ nd = nx.average_neighbor_degree(D, "in", "out")
+ assert nd == {0: 0, 1: 1, 2: 1, 3: 1}
+ nd = nx.average_neighbor_degree(D, "out", "in")
+ assert nd == {0: 1, 1: 1, 2: 1, 3: 0}
+ nd = nx.average_neighbor_degree(D, "in", "in")
+ assert nd == {0: 0, 1: 0, 2: 1, 3: 1}
+
+ def test_degree_p4_weighted(self):
+ G = nx.path_graph(4)
+ G[1][2]["weight"] = 4
+ answer = {0: 2, 1: 1.8, 2: 1.8, 3: 2}
+ nd = nx.average_neighbor_degree(G, weight="weight")
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D, weight="weight")
+ assert nd == answer
+
+ D = nx.DiGraph(G.edges(data=True))
+ print(D.edges(data=True))
+ nd = nx.average_neighbor_degree(D, weight="weight")
+ assert nd == {0: 1, 1: 1, 2: 0, 3: 0}
+ nd = nx.average_neighbor_degree(D, "out", "out", weight="weight")
+ assert nd == {0: 1, 1: 1, 2: 0, 3: 0}
+ nd = nx.average_neighbor_degree(D, "in", "in", weight="weight")
+ assert nd == {0: 0, 1: 0, 2: 1, 3: 1}
+ nd = nx.average_neighbor_degree(D, "in", "out", weight="weight")
+ assert nd == {0: 0, 1: 1, 2: 1, 3: 1}
+ nd = nx.average_neighbor_degree(D, "out", "in", weight="weight")
+ assert nd == {0: 1, 1: 1, 2: 1, 3: 0}
+ nd = nx.average_neighbor_degree(D, source="in+out", weight="weight")
+ assert nd == {0: 1.0, 1: 1.0, 2: 0.8, 3: 1.0}
+ nd = nx.average_neighbor_degree(D, target="in+out", weight="weight")
+ assert nd == {0: 2.0, 1: 2.0, 2: 1.0, 3: 0.0}
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D, weight="weight")
+ assert nd == answer
+ nd = nx.average_neighbor_degree(D, source="out", target="out", weight="weight")
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D, source="in", target="in", weight="weight")
+ assert nd == answer
+
+ def test_degree_k4(self):
+ G = nx.complete_graph(4)
+ answer = {0: 3, 1: 3, 2: 3, 3: 3}
+ nd = nx.average_neighbor_degree(G)
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D)
+ assert nd == answer
+
+ D = G.to_directed()
+ nd = nx.average_neighbor_degree(D, source="in", target="in")
+ assert nd == answer
+
+ def test_degree_k4_nodes(self):
+ G = nx.complete_graph(4)
+ answer = {1: 3.0, 2: 3.0}
+ nd = nx.average_neighbor_degree(G, nodes=[1, 2])
+ assert nd == answer
+
+ def test_degree_barrat(self):
+ G = nx.star_graph(5)
+ G.add_edges_from([(5, 6), (5, 7), (5, 8), (5, 9)])
+ G[0][5]["weight"] = 5
+ nd = nx.average_neighbor_degree(G)[5]
+ assert nd == 1.8
+ nd = nx.average_neighbor_degree(G, weight="weight")[5]
+ assert nd == pytest.approx(3.222222, abs=1e-5)
+
+ def test_error_invalid_source_target(self):
+ G = nx.path_graph(4)
+ with pytest.raises(nx.NetworkXError):
+ nx.average_neighbor_degree(G, "error")
+ with pytest.raises(nx.NetworkXError):
+ nx.average_neighbor_degree(G, "in", "error")
+ G = G.to_directed()
+ with pytest.raises(nx.NetworkXError):
+ nx.average_neighbor_degree(G, "error")
+ with pytest.raises(nx.NetworkXError):
+ nx.average_neighbor_degree(G, "in", "error")
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py
new file mode 100644
index 0000000000000000000000000000000000000000..3984292be84dd7b306066809fb3c50a7cf0424f4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/assortativity/tests/test_pairs.py
@@ -0,0 +1,87 @@
+import networkx as nx
+
+from .base_test import BaseTestAttributeMixing, BaseTestDegreeMixing
+
+
+class TestAttributeMixingXY(BaseTestAttributeMixing):
+ def test_node_attribute_xy_undirected(self):
+ attrxy = sorted(nx.node_attribute_xy(self.G, "fish"))
+ attrxy_result = sorted(
+ [
+ ("one", "one"),
+ ("one", "one"),
+ ("two", "two"),
+ ("two", "two"),
+ ("one", "red"),
+ ("red", "one"),
+ ("blue", "two"),
+ ("two", "blue"),
+ ]
+ )
+ assert attrxy == attrxy_result
+
+ def test_node_attribute_xy_undirected_nodes(self):
+ attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"]))
+ attrxy_result = sorted([])
+ assert attrxy == attrxy_result
+
+ def test_node_attribute_xy_directed(self):
+ attrxy = sorted(nx.node_attribute_xy(self.D, "fish"))
+ attrxy_result = sorted(
+ [("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")]
+ )
+ assert attrxy == attrxy_result
+
+ def test_node_attribute_xy_multigraph(self):
+ attrxy = sorted(nx.node_attribute_xy(self.M, "fish"))
+ attrxy_result = [
+ ("one", "one"),
+ ("one", "one"),
+ ("one", "one"),
+ ("one", "one"),
+ ("two", "two"),
+ ("two", "two"),
+ ]
+ assert attrxy == attrxy_result
+
+ def test_node_attribute_xy_selfloop(self):
+ attrxy = sorted(nx.node_attribute_xy(self.S, "fish"))
+ attrxy_result = [("one", "one"), ("two", "two")]
+ assert attrxy == attrxy_result
+
+
+class TestDegreeMixingXY(BaseTestDegreeMixing):
+ def test_node_degree_xy_undirected(self):
+ xy = sorted(nx.node_degree_xy(self.P4))
+ xy_result = sorted([(1, 2), (2, 1), (2, 2), (2, 2), (1, 2), (2, 1)])
+ assert xy == xy_result
+
+ def test_node_degree_xy_undirected_nodes(self):
+ xy = sorted(nx.node_degree_xy(self.P4, nodes=[0, 1, -1]))
+ xy_result = sorted([(1, 2), (2, 1)])
+ assert xy == xy_result
+
+ def test_node_degree_xy_directed(self):
+ xy = sorted(nx.node_degree_xy(self.D))
+ xy_result = sorted([(2, 1), (2, 3), (1, 3), (1, 3)])
+ assert xy == xy_result
+
+ def test_node_degree_xy_multigraph(self):
+ xy = sorted(nx.node_degree_xy(self.M))
+ xy_result = sorted(
+ [(2, 3), (2, 3), (3, 2), (3, 2), (2, 3), (3, 2), (1, 2), (2, 1)]
+ )
+ assert xy == xy_result
+
+ def test_node_degree_xy_selfloop(self):
+ xy = sorted(nx.node_degree_xy(self.S))
+ xy_result = sorted([(2, 2), (2, 2)])
+ assert xy == xy_result
+
+ def test_node_degree_xy_weighted(self):
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=7)
+ G.add_edge(2, 3, weight=10)
+ xy = sorted(nx.node_degree_xy(G, weight="weight"))
+ xy_result = sorted([(7, 17), (17, 10), (17, 7), (10, 17)])
+ assert xy == xy_result
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/asteroidal.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/asteroidal.py
new file mode 100644
index 0000000000000000000000000000000000000000..41e91390dff759d2658738bca18e474c4820085e
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/asteroidal.py
@@ -0,0 +1,170 @@
+"""
+Algorithms for asteroidal triples and asteroidal numbers in graphs.
+
+An asteroidal triple in a graph G is a set of three non-adjacent vertices
+u, v and w such that there exist a path between any two of them that avoids
+closed neighborhood of the third. More formally, v_j, v_k belongs to the same
+connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood
+of v_i. A graph which does not contain any asteroidal triples is called
+an AT-free graph. The class of AT-free graphs is a graph class for which
+many NP-complete problems are solvable in polynomial time. Amongst them,
+independent set and coloring.
+"""
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["is_at_free", "find_asteroidal_triple"]
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def find_asteroidal_triple(G):
+ r"""Find an asteroidal triple in the given graph.
+
+ An asteroidal triple is a triple of non-adjacent vertices such that
+ there exists a path between any two of them which avoids the closed
+ neighborhood of the third. It checks all independent triples of vertices
+ and whether they are an asteroidal triple or not. This is done with the
+ help of a data structure called a component structure.
+ A component structure encodes information about which vertices belongs to
+ the same connected component when the closed neighborhood of a given vertex
+ is removed from the graph. The algorithm used to check is the trivial
+ one, outlined in [1]_, which has a runtime of
+ :math:`O(|V||\overline{E} + |V||E|)`, where the second term is the
+ creation of the component structure.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ The graph to check whether is AT-free or not
+
+ Returns
+ -------
+ list or None
+ An asteroidal triple is returned as a list of nodes. If no asteroidal
+ triple exists, i.e. the graph is AT-free, then None is returned.
+ The returned value depends on the certificate parameter. The default
+ option is a bool which is True if the graph is AT-free, i.e. the
+ given graph contains no asteroidal triples, and False otherwise, i.e.
+ if the graph contains at least one asteroidal triple.
+
+ Notes
+ -----
+ The component structure and the algorithm is described in [1]_. The current
+ implementation implements the trivial algorithm for simple graphs.
+
+ References
+ ----------
+ .. [1] Ekkehard Köhler,
+ "Recognizing Graphs without asteroidal triples",
+ Journal of Discrete Algorithms 2, pages 439-452, 2004.
+ https://www.sciencedirect.com/science/article/pii/S157086670400019X
+ """
+ V = set(G.nodes)
+
+ if len(V) < 6:
+ # An asteroidal triple cannot exist in a graph with 5 or less vertices.
+ return None
+
+ component_structure = create_component_structure(G)
+ E_complement = set(nx.complement(G).edges)
+
+ for e in E_complement:
+ u = e[0]
+ v = e[1]
+ u_neighborhood = set(G[u]).union([u])
+ v_neighborhood = set(G[v]).union([v])
+ union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
+ for w in V - union_of_neighborhoods:
+ # Check for each pair of vertices whether they belong to the
+ # same connected component when the closed neighborhood of the
+ # third is removed.
+ if (
+ component_structure[u][v] == component_structure[u][w]
+ and component_structure[v][u] == component_structure[v][w]
+ and component_structure[w][u] == component_structure[w][v]
+ ):
+ return [u, v, w]
+ return None
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def is_at_free(G):
+ """Check if a graph is AT-free.
+
+ The method uses the `find_asteroidal_triple` method to recognize
+ an AT-free graph. If no asteroidal triple is found the graph is
+ AT-free and True is returned. If at least one asteroidal triple is
+ found the graph is not AT-free and False is returned.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ The graph to check whether is AT-free or not.
+
+ Returns
+ -------
+ bool
+ True if G is AT-free and False otherwise.
+
+ Examples
+ --------
+ >>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
+ >>> nx.is_at_free(G)
+ True
+
+ >>> G = nx.cycle_graph(6)
+ >>> nx.is_at_free(G)
+ False
+ """
+ return find_asteroidal_triple(G) is None
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def create_component_structure(G):
+ r"""Create component structure for G.
+
+ A *component structure* is an `nxn` array, denoted `c`, where `n` is
+ the number of vertices, where each row and column corresponds to a vertex.
+
+ .. math::
+ c_{uv} = \begin{cases} 0, if v \in N[u] \\
+ k, if v \in component k of G \setminus N[u] \end{cases}
+
+ Where `k` is an arbitrary label for each component. The structure is used
+ to simplify the detection of asteroidal triples.
+
+ Parameters
+ ----------
+ G : NetworkX Graph
+ Undirected, simple graph.
+
+ Returns
+ -------
+ component_structure : dictionary
+ A dictionary of dictionaries, keyed by pairs of vertices.
+
+ """
+ V = set(G.nodes)
+ component_structure = {}
+ for v in V:
+ label = 0
+ closed_neighborhood = set(G[v]).union({v})
+ row_dict = {}
+ for u in closed_neighborhood:
+ row_dict[u] = 0
+
+ G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood)
+ for cc in nx.connected_components(G_reduced):
+ label += 1
+ for u in cc:
+ row_dict[u] = label
+
+ component_structure[v] = row_dict
+
+ return component_structure
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd60020b122d38b8d13569d8f636ca45d771fb31
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__init__.py
@@ -0,0 +1,87 @@
+r""" This module provides functions and operations for bipartite
+graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in
+`E` that only connect nodes from opposite sets. It is common in the literature
+to use a spatial analogy, referring to the two node sets as top and bottom nodes.
+
+The bipartite algorithms are not imported into the networkx namespace
+at the top level so the easiest way to use them is with:
+
+>>> from networkx.algorithms import bipartite
+
+NetworkX does not have a custom bipartite graph class but the Graph()
+or DiGraph() classes can be used to represent bipartite graphs. However,
+you have to keep track of which set each node belongs to, and make
+sure that there is no edge between nodes of the same set. The convention used
+in NetworkX is to use a node attribute named `bipartite` with values 0 or 1 to
+identify the sets each node belongs to. This convention is not enforced in
+the source code of bipartite functions, it's only a recommendation.
+
+For example:
+
+>>> B = nx.Graph()
+>>> # Add nodes with the node attribute "bipartite"
+>>> B.add_nodes_from([1, 2, 3, 4], bipartite=0)
+>>> B.add_nodes_from(["a", "b", "c"], bipartite=1)
+>>> # Add edges only between nodes of opposite node sets
+>>> B.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
+
+Many algorithms of the bipartite module of NetworkX require, as an argument, a
+container with all the nodes that belong to one set, in addition to the bipartite
+graph `B`. The functions in the bipartite package do not check that the node set
+is actually correct nor that the input graph is actually bipartite.
+If `B` is connected, you can find the two node sets using a two-coloring
+algorithm:
+
+>>> nx.is_connected(B)
+True
+>>> bottom_nodes, top_nodes = bipartite.sets(B)
+
+However, if the input graph is not connected, there is more than one possible
+coloration. This is the reason why we require the user to pass a container
+with all nodes of one bipartite node set as an argument to most bipartite
+functions. In the face of ambiguity, we refuse the temptation to guess and
+raise an :exc:`AmbiguousSolution <networkx.AmbiguousSolution>`
+Exception if the input graph for
+:func:`bipartite.sets <networkx.algorithms.bipartite.basic.sets>`
+is disconnected.
+
+Using the `bipartite` node attribute, you can easily get the two node sets:
+
+>>> top_nodes = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0}
+>>> bottom_nodes = set(B) - top_nodes
+
+So you can easily use the bipartite algorithms that require, as an argument, a
+container with all nodes that belong to one node set:
+
+>>> print(round(bipartite.density(B, bottom_nodes), 2))
+0.5
+>>> G = bipartite.projected_graph(B, top_nodes)
+
+All bipartite graph generators in NetworkX build bipartite graphs with the
+`bipartite` node attribute. Thus, you can use the same approach:
+
+>>> RB = bipartite.random_graph(5, 7, 0.2)
+>>> RB_top = {n for n, d in RB.nodes(data=True) if d["bipartite"] == 0}
+>>> RB_bottom = set(RB) - RB_top
+>>> list(RB_top)
+[0, 1, 2, 3, 4]
+>>> list(RB_bottom)
+[5, 6, 7, 8, 9, 10, 11]
+
+For other bipartite graph generators see
+:mod:`Generators <networkx.algorithms.bipartite.generators>`.
+
+"""
+
+from networkx.algorithms.bipartite.basic import *
+from networkx.algorithms.bipartite.centrality import *
+from networkx.algorithms.bipartite.cluster import *
+from networkx.algorithms.bipartite.covering import *
+from networkx.algorithms.bipartite.edgelist import *
+from networkx.algorithms.bipartite.matching import *
+from networkx.algorithms.bipartite.matrix import *
+from networkx.algorithms.bipartite.projection import *
+from networkx.algorithms.bipartite.redundancy import *
+from networkx.algorithms.bipartite.spectral import *
+from networkx.algorithms.bipartite.generators import *
+from networkx.algorithms.bipartite.extendability import *
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20a713feef9df8d4b8f601b893d2c836fdbffdd1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e508f673fdfe9f5d7703ac7636ca26b937196f1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/basic.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29558aab04ae53f20e5a2200f4d9399c4020052f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/centrality.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40cf50e49d6e988e98c42ed70bcd60b39814cf61
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/cluster.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b091256c45f239aeb05b337410ee6ee85caf795c
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/covering.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4387a9e572e0a54b90a02bef9fde46bf4ba9a27
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/edgelist.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74fda7664cc50afa92c14062e60032d586f64618
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/extendability.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77aca40f37549e7d28c1d64e0d658a4a994c6524
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/generators.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34d4617043dd46282989838d76477cbbbe720ec2
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matching.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..779d34d3036ad4f29383c1d9d91b7ed50cfc0ce1
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/matrix.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23d80cb1ced4e7d0e996238de66626aaf06b69c4
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/projection.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..981114749b08a85b7fd0e2f502262099633e400b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/redundancy.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..900c74facbe3bd11bc31a58fa8501107101c712b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/__pycache__/spectral.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/basic.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0a63a10fd1a58cb2096de0314349e178d4f1ad8
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/basic.py
@@ -0,0 +1,321 @@
+"""
+==========================
+Bipartite Graph Algorithms
+==========================
+"""
+import networkx as nx
+from networkx.algorithms.components import connected_components
+from networkx.exception import AmbiguousSolution
+
+__all__ = [
+ "is_bipartite",
+ "is_bipartite_node_set",
+ "color",
+ "sets",
+ "density",
+ "degrees",
+]
+
+
@nx._dispatchable
def color(G):
    """Returns a two-coloring of the graph.

    Raises an exception if the graph is not bipartite.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    color : dictionary
        A dictionary keyed by node with a 1 or 0 as data for each node color.

    Raises
    ------
    NetworkXError
        If the graph is not two-colorable.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> c = bipartite.color(G)
    >>> print(c)
    {0: 1, 1: 0, 2: 1, 3: 0}

    You can use this to set a node attribute indicating the bipartite set:

    >>> nx.set_node_attributes(G, c, "bipartite")
    >>> print(G.nodes[0]["bipartite"])
    1
    >>> print(G.nodes[1]["bipartite"])
    0
    """
    if G.is_directed():
        import itertools

        # A directed graph is two-colorable iff its undirected version is,
        # so consider both in- and out-neighbors when propagating colors.
        def neighbors(v):
            return itertools.chain.from_iterable([G.predecessors(v), G.successors(v)])

    else:
        neighbors = G.neighbors

    coloring = {}
    for start in G:  # loop over nodes so disconnected graphs are handled
        if start in coloring or len(G[start]) == 0:  # skip colored nodes and isolates
            continue
        coloring[start] = 1
        stack = [start]
        while stack:
            node = stack.pop()
            opposite = 1 - coloring[node]  # color for node's neighbors
            for nbr in neighbors(node):
                if nbr not in coloring:
                    coloring[nbr] = opposite
                    stack.append(nbr)
                elif coloring[nbr] == coloring[node]:
                    # Two adjacent nodes share a color: odd cycle found.
                    raise nx.NetworkXError("Graph is not bipartite.")
    # Isolated nodes may go in either set; assign them color 0.
    coloring.update(dict.fromkeys(nx.isolates(G), 0))
    return coloring
+
+
@nx._dispatchable
def is_bipartite(G):
    """Returns True if graph G is bipartite, False if not.

    Parameters
    ----------
    G : NetworkX graph

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> print(bipartite.is_bipartite(G))
    True

    See Also
    --------
    color, is_bipartite_node_set
    """
    # A graph is bipartite exactly when a two-coloring exists.
    try:
        color(G)
    except nx.NetworkXError:
        return False
    return True
+
+
@nx._dispatchable
def is_bipartite_node_set(G, nodes):
    """Returns True if nodes and G/nodes are a bipartition of G.

    Parameters
    ----------
    G : NetworkX graph

    nodes: list or container
        Check if nodes are a one of a bipartite set.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> X = set([1, 3])
    >>> bipartite.is_bipartite_node_set(G, X)
    True

    Notes
    -----
    An exception is raised if the input nodes are not distinct, because in this
    case some bipartite algorithms will yield incorrect results.
    For connected graphs the bipartite sets are unique. This function handles
    disconnected graphs.
    """
    S = set(nodes)

    # Duplicates in `nodes` would silently corrupt downstream algorithms.
    if len(S) < len(nodes):
        # this should maybe just return False?
        raise AmbiguousSolution(
            "The input node set contains duplicates.\n"
            "This may lead to incorrect results when using it in bipartite algorithms.\n"
            "Consider using set(nodes) as the input"
        )

    # Check each connected component separately: within a component the
    # bipartition is unique, so S must match one side exactly.
    for component in connected_components(G):
        X, Y = sets(G.subgraph(component).copy())
        x_side = X.issubset(S) and Y.isdisjoint(S)
        y_side = Y.issubset(S) and X.isdisjoint(S)
        if not (x_side or y_side):
            return False
    return True
+
+
@nx._dispatchable
def sets(G, top_nodes=None):
    """Returns bipartite node sets of graph G.

    Raises an exception if the graph is not bipartite or if the input
    graph is disconnected and thus more than one valid solution exists.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    Parameters
    ----------
    G : NetworkX graph

    top_nodes : container, optional
        Container with all nodes in one bipartite node set. If not supplied
        it will be computed. But if more than one solution exists an exception
        will be raised.

    Returns
    -------
    X : set
        Nodes from one side of the bipartite graph.
    Y : set
        Nodes from the other side.

    Raises
    ------
    AmbiguousSolution
        Raised if the input bipartite graph is disconnected and no container
        with all nodes in one bipartite set is provided. When determining
        the nodes in each bipartite set more than one valid solution is
        possible if the input graph is disconnected.
    NetworkXError
        Raised if the input graph is not bipartite.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> X, Y = bipartite.sets(G)
    >>> list(X)
    [0, 2]
    >>> list(Y)
    [1, 3]

    See Also
    --------
    color

    """
    if top_nodes is not None:
        # The caller resolved any ambiguity for us: trust the given set.
        X = set(top_nodes)
        Y = set(G) - X
        return (X, Y)

    # Without top_nodes the bipartition is only well-defined when G is
    # connected (each component could be colored either way).
    connected = nx.is_weakly_connected if G.is_directed() else nx.is_connected
    if not connected(G):
        msg = "Disconnected graph: Ambiguous solution for bipartite sets."
        raise nx.AmbiguousSolution(msg)

    node_color = color(G)
    X = {n for n, is_top in node_color.items() if is_top}
    Y = {n for n, is_top in node_color.items() if not is_top}
    return (X, Y)
+
+
@nx._dispatchable(graphs="B")
def density(B, nodes):
    """Returns density of bipartite graph B.

    Parameters
    ----------
    B : NetworkX graph

    nodes: list or container
        Nodes in one node set of the bipartite graph.

    Returns
    -------
    d : float
        The bipartite density

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.complete_bipartite_graph(3, 2)
    >>> X = set([0, 1, 2])
    >>> bipartite.density(G, X)
    1.0
    >>> Y = set([3, 4])
    >>> bipartite.density(G, Y)
    1.0

    Notes
    -----
    The container of nodes passed as argument must contain all nodes
    in one of the two bipartite node sets to avoid ambiguity in the
    case of disconnected graphs.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    color
    """
    edge_count = nx.number_of_edges(B)
    if edge_count == 0:  # also covers the 0- and 1-node graphs
        return 0.0
    bottom_size = len(nodes)
    top_size = len(B) - bottom_size
    possible_pairs = bottom_size * top_size
    if B.is_directed():
        # Directed bipartite graphs allow an edge in each direction.
        return edge_count / (2 * possible_pairs)
    return edge_count / possible_pairs
+
+
@nx._dispatchable(graphs="B", edge_attrs="weight")
def degrees(B, nodes, weight=None):
    """Returns the degrees of the two node sets in the bipartite graph B.

    Parameters
    ----------
    B : NetworkX graph

    nodes: list or container
        Nodes in one node set of the bipartite graph.

    weight : string or None, optional (default=None)
        The edge attribute that holds the numerical value used as a weight.
        If None, then each edge has weight 1.
        The degree is the sum of the edge weights adjacent to the node.

    Returns
    -------
    (degX,degY) : tuple of dictionaries
        The degrees of the two bipartite sets as dictionaries keyed by node.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.complete_bipartite_graph(3, 2)
    >>> Y = set([3, 4])
    >>> degX, degY = bipartite.degrees(G, Y)
    >>> dict(degX)
    {0: 2, 1: 2, 2: 2}

    Notes
    -----
    The container of nodes passed as argument must contain all nodes
    in one of the two bipartite node sets to avoid ambiguity in the
    case of disconnected graphs.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    color, density
    """
    # The complementary node set is everything in B not in `nodes`.
    bottom_set = set(nodes)
    top_set = set(B) - bottom_set
    return (B.degree(top_set, weight), B.degree(bottom_set, weight))
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/centrality.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/centrality.py
new file mode 100644
index 0000000000000000000000000000000000000000..42d7270ee7d0bb18b56a55dc4c17dc19f5dc77a7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/centrality.py
@@ -0,0 +1,290 @@
+import networkx as nx
+
+__all__ = ["degree_centrality", "betweenness_centrality", "closeness_centrality"]
+
+
@nx._dispatchable(name="bipartite_degree_centrality")
def degree_centrality(G, nodes):
    r"""Compute the degree centrality for nodes in a bipartite network.

    The degree centrality for a node `v` is the fraction of nodes
    connected to it.

    Parameters
    ----------
    G : graph
        A bipartite network

    nodes : list or container
        Container with all nodes in one bipartite node set.

    Returns
    -------
    centrality : dictionary
        Dictionary keyed by node with bipartite degree centrality as the value.

    Examples
    --------
    >>> G = nx.wheel_graph(5)
    >>> top_nodes = {0, 1, 2}
    >>> nx.bipartite.degree_centrality(G, nodes=top_nodes)
    {0: 2.0, 1: 1.5, 2: 1.5, 3: 1.0, 4: 1.0}

    See Also
    --------
    betweenness_centrality
    closeness_centrality
    :func:`~networkx.algorithms.bipartite.basic.sets`
    :func:`~networkx.algorithms.bipartite.basic.is_bipartite`

    Notes
    -----
    The nodes input parameter must contain all nodes in one bipartite node set,
    but the dictionary returned contains all nodes from both bipartite node
    sets. See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    In the bipartite case, the maximum possible degree of a node in a
    bipartite node set is the number of nodes in the opposite node set [1]_,
    so each node's degree is divided by the size of the opposite set.

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
        Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.
        https://dx.doi.org/10.4135/9781446294413.n28
    """
    top = set(nodes)
    bottom = set(G) - top
    centrality = {}
    # Top nodes are normalized by the size of the bottom set, and vice versa.
    inv_bottom = 1.0 / len(bottom)
    for node, deg in G.degree(top):
        centrality[node] = deg * inv_bottom
    inv_top = 1.0 / len(top)
    for node, deg in G.degree(bottom):
        centrality[node] = deg * inv_top
    return centrality
+
+
@nx._dispatchable(name="bipartite_betweenness_centrality")
def betweenness_centrality(G, nodes):
    r"""Compute betweenness centrality for nodes in a bipartite network.

    Betweenness centrality of a node `v` is the sum of the
    fraction of all-pairs shortest paths that pass through `v`.

    Values of betweenness are normalized by the maximum possible
    value which for bipartite graphs is limited by the relative size
    of the two node sets [1]_.

    Let `n` be the number of nodes in the node set `U` and
    `m` be the number of nodes in the node set `V`, then
    nodes in `U` are normalized by dividing by

    .. math::

        \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)] ,

    where

    .. math::

        s = (n - 1) \div m , t = (n - 1) \mod m ,

    and nodes in `V` are normalized by the symmetric expression with the
    roles of `n` and `m` exchanged.

    Parameters
    ----------
    G : graph
        A bipartite graph

    nodes : list or container
        Container with all nodes in one bipartite node set.

    Returns
    -------
    betweenness : dictionary
        Dictionary keyed by node with bipartite betweenness centrality
        as the value.

    Examples
    --------
    >>> G = nx.cycle_graph(4)
    >>> top_nodes = {1, 2}
    >>> nx.bipartite.betweenness_centrality(G, nodes=top_nodes)
    {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}

    See Also
    --------
    degree_centrality
    closeness_centrality
    :func:`~networkx.algorithms.bipartite.basic.sets`
    :func:`~networkx.algorithms.bipartite.basic.is_bipartite`

    Notes
    -----
    The nodes input parameter must contain all nodes in one bipartite node set,
    but the dictionary returned contains all nodes from both node sets.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.


    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
        Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.
        https://dx.doi.org/10.4135/9781446294413.n28
    """
    top = set(nodes)
    bottom = set(G) - top
    n = len(top)
    m = len(bottom)

    def _max_betweenness(size_own, size_other):
        # Maximum possible betweenness for a node whose own set has
        # `size_own` nodes and whose opposite set has `size_other` nodes
        # (Borgatti & Halgin normalization, see [1]_).
        q, rem = divmod(size_own - 1, size_other)
        return (
            ((size_other**2) * ((q + 1) ** 2))
            + (size_other * (q + 1) * (2 * rem - q - 1))
            - (rem * ((2 * q) - rem + 3))
        ) / 2.0

    top_norm = _max_betweenness(n, m)
    bottom_norm = _max_betweenness(m, n)
    betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)
    for node in top:
        betweenness[node] /= top_norm
    for node in bottom:
        betweenness[node] /= bottom_norm
    return betweenness
+
+
@nx._dispatchable(name="bipartite_closeness_centrality")
def closeness_centrality(G, nodes, normalized=True):
    r"""Compute the closeness centrality for nodes in a bipartite network.

    The closeness of a node is the distance to all other nodes in the
    graph or in the case that the graph is not connected to all other nodes
    in the connected component containing that node.

    Parameters
    ----------
    G : graph
        A bipartite network

    nodes : list or container
        Container with all nodes in one bipartite node set.

    normalized : bool, optional
        If True (default) normalize by connected component size.

    Returns
    -------
    closeness : dictionary
        Dictionary keyed by node with bipartite closeness centrality
        as the value.

    Examples
    --------
    >>> G = nx.wheel_graph(5)
    >>> top_nodes = {0, 1, 2}
    >>> nx.bipartite.closeness_centrality(G, nodes=top_nodes)
    {0: 1.5, 1: 1.2, 2: 1.2, 3: 1.0, 4: 1.0}

    See Also
    --------
    betweenness_centrality
    degree_centrality
    :func:`~networkx.algorithms.bipartite.basic.sets`
    :func:`~networkx.algorithms.bipartite.basic.is_bipartite`

    Notes
    -----
    The nodes input parameter must contain all nodes in one bipartite node set,
    but the dictionary returned contains all nodes from both node sets.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    Closeness centrality is normalized by the minimum distance possible.
    In the bipartite case the minimum distance for a node in one bipartite
    node set is 1 from all nodes in the other node set and 2 from all
    other nodes in its own set [1]_. Thus the closeness centrality
    for node `v` in the two bipartite sets `U` with
    `n` nodes and `V` with `m` nodes is

    .. math::

        c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U,

        c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V,

    where `d` is the sum of the distances from `v` to all
    other nodes.

    Higher values of closeness indicate higher centrality.

    As in the unipartite case, setting normalized=True causes the
    values to normalized further to n-1 / size(G)-1 where n is the
    number of nodes in the connected part of graph containing the
    node. If the graph is not completely connected, this algorithm
    computes the closeness centrality for each connected part
    separately.

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
        Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.
        https://dx.doi.org/10.4135/9781446294413.n28
    """
    closeness = {}
    path_length = nx.single_source_shortest_path_length
    top = set(nodes)
    bottom = set(G) - top
    n = len(top)
    m = len(bottom)
    graph_size = len(G)
    # Both sides use the same computation; only the ideal (minimum possible)
    # distance sum differs, so handle them in one parametrized loop.
    for side, best_possible in ((top, m + 2 * (n - 1)), (bottom, n + 2 * (m - 1))):
        for node in side:
            distances = dict(path_length(G, node))
            total = sum(distances.values())
            if total > 0.0 and graph_size > 1:
                closeness[node] = best_possible / total
                if normalized:
                    # Scale by the fraction of the graph this node can reach.
                    closeness[node] *= (len(distances) - 1) / (graph_size - 1)
            else:
                # Unreachable from everything (isolated node): define as 0.
                closeness[node] = 0.0
    return closeness
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/cluster.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9611527759b45056169ecdde9e192be571d5c18
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/cluster.py
@@ -0,0 +1,280 @@
+"""Functions for computing clustering of pairs
+
+"""
+
+import itertools
+
+import networkx as nx
+
+__all__ = [
+ "clustering",
+ "average_clustering",
+ "latapy_clustering",
+ "robins_alexander_clustering",
+]
+
+
def cc_dot(nu, nv):
    """Pairwise "dot" clustering: shared neighbors over combined neighbors."""
    shared = nu.intersection(nv)
    combined = nu.union(nv)
    return len(shared) / len(combined)
+
+
def cc_max(nu, nv):
    """Pairwise clustering normalized by the larger neighborhood size."""
    larger = max(len(nu), len(nv))
    return len(nu.intersection(nv)) / larger
+
+
def cc_min(nu, nv):
    """Pairwise clustering normalized by the smaller neighborhood size."""
    smaller = min(len(nu), len(nv))
    return len(nu.intersection(nv)) / smaller
+
+
# Dispatch table mapping the `mode` argument of `latapy_clustering` to the
# pairwise clustering function it selects.
modes = {"dot": cc_dot, "min": cc_min, "max": cc_max}
+
+
@nx._dispatchable
def latapy_clustering(G, nodes=None, mode="dot"):
    r"""Compute a bipartite clustering coefficient for nodes.

    The bipartite clustering coefficient is a measure of local density
    of connections defined as [1]_:

    .. math::

        c_u = \frac{\sum_{v \in N(N(u))} c_{uv} }{|N(N(u))|}

    where `N(N(u))` are the second order neighbors of `u` in `G` excluding `u`,
    and `c_{uv}` is the pairwise clustering coefficient between nodes
    `u` and `v`.

    The mode selects the function for `c_{uv}`:

    `dot`:  :math:`c_{uv}=\frac{|N(u)\cap N(v)|}{|N(u) \cup N(v)|}`

    `min`:  :math:`c_{uv}=\frac{|N(u)\cap N(v)|}{min(|N(u)|,|N(v)|)}`

    `max`:  :math:`c_{uv}=\frac{|N(u)\cap N(v)|}{max(|N(u)|,|N(v)|)}`

    Parameters
    ----------
    G : graph
        A bipartite graph

    nodes : list or iterable (optional)
        Compute bipartite clustering for these nodes. The default
        is all nodes in G.

    mode : string
        The pairwise bipartite clustering method to be used in the computation.
        It must be "dot", "max", or "min".

    Returns
    -------
    clustering : dictionary
        A dictionary keyed by node with the clustering coefficient value.


    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)  # path graphs are bipartite
    >>> c = bipartite.clustering(G)
    >>> c[0]
    0.5
    >>> c = bipartite.clustering(G, mode="min")
    >>> c[0]
    1.0

    See Also
    --------
    robins_alexander_clustering
    average_clustering
    networkx.algorithms.cluster.square_clustering

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
        Basic notions for the analysis of large two-mode networks.
        Social Networks 30(1), 31--48.
    """
    if not nx.algorithms.bipartite.is_bipartite(G):
        raise nx.NetworkXError("Graph is not bipartite")

    try:
        pairwise_cc = modes[mode]
    except KeyError as err:
        raise nx.NetworkXError(
            "Mode for bipartite clustering must be: dot, min or max"
        ) from err

    node_iter = G if nodes is None else nodes
    result = {}
    for v in node_iter:
        # Second-order neighbors of v, excluding v itself.
        second_nbrs = {w for u in G[v] for w in G[u]} - {v}
        v_nbrs = set(G[v])
        total = sum(pairwise_cc(set(G[u]), v_nbrs) for u in second_nbrs)
        # A node with no second-order neighbors gets coefficient 0.
        result[v] = total / len(second_nbrs) if second_nbrs else 0.0
    return result
+
+
# `clustering` is the public alias for the Latapy bipartite clustering
# coefficient; both names are kept so existing callers continue to work.
clustering = latapy_clustering
+
+
@nx._dispatchable(name="bipartite_average_clustering")
def average_clustering(G, nodes=None, mode="dot"):
    r"""Compute the average bipartite clustering coefficient.

    A clustering coefficient for the whole graph is the average,

    .. math::

        C = \frac{1}{n}\sum_{v \in G} c_v,

    where `n` is the number of nodes in `G`.

    Similar measures for the two bipartite sets can be defined [1]_

    .. math::

        C_X = \frac{1}{|X|}\sum_{v \in X} c_v,

    where `X` is a bipartite set of `G`.

    Parameters
    ----------
    G : graph
        a bipartite graph

    nodes : list or iterable, optional
        A container of nodes to use in computing the average.
        The nodes should be either the entire graph (the default) or one of the
        bipartite sets.

    mode : string
        The pairwise bipartite clustering method.
        It must be "dot", "max", or "min"

    Returns
    -------
    clustering : float
        The average bipartite clustering for the given set of nodes or the
        entire graph if no nodes are specified.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.star_graph(3)  # star graphs are bipartite
    >>> bipartite.average_clustering(G)
    0.75
    >>> X, Y = bipartite.sets(G)
    >>> bipartite.average_clustering(G, X)
    0.0
    >>> bipartite.average_clustering(G, Y)
    1.0

    See Also
    --------
    clustering

    Notes
    -----
    The container of nodes passed to this function must contain all of the nodes
    in one of the bipartite sets ("top" or "bottom") in order to compute
    the correct average bipartite clustering coefficients.
    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.


    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
        Basic notions for the analysis of large two-mode networks.
        Social Networks 30(1), 31--48.
    """
    if nodes is None:
        nodes = G
    # Average the per-node coefficients over exactly the requested nodes.
    per_node = latapy_clustering(G, nodes=nodes, mode=mode)
    return sum(per_node[v] for v in nodes) / len(nodes)
+
+
@nx._dispatchable
def robins_alexander_clustering(G):
    r"""Compute the bipartite clustering of G.

    Robins and Alexander [1]_ defined bipartite clustering coefficient as
    four times the number of four cycles `C_4` divided by the number of
    three paths `L_3` in a bipartite graph:

    .. math::

        CC_4 = \frac{4 * C_4}{L_3}

    Parameters
    ----------
    G : graph
        a bipartite graph

    Returns
    -------
    clustering : float
        The Robins and Alexander bipartite clustering for the input graph.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.davis_southern_women_graph()
    >>> print(round(bipartite.robins_alexander_clustering(G), 3))
    0.468

    See Also
    --------
    latapy_clustering
    networkx.algorithms.cluster.square_clustering

    References
    ----------
    .. [1] Robins, G. and M. Alexander (2004). Small worlds among interlocking
           directors: Network structure and distance in bipartite graphs.
           Computational & Mathematical Organization Theory 10(1), 69–94.

    """
    # A four-cycle needs 4 nodes and a three-path needs 3 edges.
    if G.order() < 4 or G.size() < 3:
        return 0
    n_three_paths = _threepaths(G)
    if n_three_paths == 0:  # avoid division by zero
        return 0
    n_four_cycles = _four_cycles(G)
    return (4.0 * n_four_cycles) / n_three_paths
+
+
+def _four_cycles(G):
+ cycles = 0
+ for v in G:
+ for u, w in itertools.combinations(G[v], 2):
+ cycles += len((set(G[u]) & set(G[w])) - {v})
+ return cycles / 4
+
+
+def _threepaths(G):
+ paths = 0
+ for v in G:
+ for u in G[v]:
+ for w in set(G[u]) - {v}:
+ paths += len(set(G[w]) - {v, u})
+ # Divide by two because we count each three path twice
+ # one for each possible starting point
+ return paths / 2
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/covering.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/covering.py
new file mode 100644
index 0000000000000000000000000000000000000000..720c63ac40c8723b19ceee99091c4c51d3ab2d25
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/covering.py
@@ -0,0 +1,57 @@
+""" Functions related to graph covers."""
+
+import networkx as nx
+from networkx.algorithms.bipartite.matching import hopcroft_karp_matching
+from networkx.algorithms.covering import min_edge_cover as _min_edge_cover
+from networkx.utils import not_implemented_for
+
+__all__ = ["min_edge_cover"]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable(name="bipartite_min_edge_cover")
def min_edge_cover(G, matching_algorithm=None):
    """Return a minimum edge cover of the bipartite graph `G`.

    An edge cover is a set of edges touching every node of the graph; a
    minimum edge cover is one of smallest cardinality. It is computed by
    finding a maximum matching and greedily extending it until every node
    is covered.

    Parameters
    ----------
    G : NetworkX graph
        An undirected bipartite graph.

    matching_algorithm : function, optional
        A function taking ``G`` and returning a maximum-cardinality
        matching as a dict mapping each node to its mate. Defaults to
        :func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`;
        :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`
        is another option.

    Returns
    -------
    set
        The edges of a minimum edge cover, as node pairs. Both
        orientations `(u, v)` and `(v, u)` of each covering edge are
        included.

    Notes
    -----
    The worst-case running time is bounded by that of the supplied
    ``matching_algorithm``.
    """
    # The empty graph has nothing to cover; short-circuit before matching.
    if G.order() == 0:
        return set()
    algorithm = hopcroft_karp_matching if matching_algorithm is None else matching_algorithm
    # Delegate to the generic implementation with a bipartite matcher.
    return _min_edge_cover(G, matching_algorithm=algorithm)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py
new file mode 100644
index 0000000000000000000000000000000000000000..70631ea0e09983b0eb8bc9db0e2d94352701d89c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/edgelist.py
@@ -0,0 +1,359 @@
+"""
+********************
+Bipartite Edge Lists
+********************
+Read and write NetworkX graphs as bipartite edge lists.
+
+Format
+------
+You can read or write three formats of edge lists with these functions.
+
+Node pairs with no data::
+
+ 1 2
+
+Python dictionary as data::
+
+ 1 2 {'weight':7, 'color':'green'}
+
+Arbitrary data::
+
+ 1 2 7 green
+
+For each edge (u, v) the node u is assigned to part 0 and the node v to part 1.
+"""
+__all__ = ["generate_edgelist", "write_edgelist", "parse_edgelist", "read_edgelist"]
+
+import networkx as nx
+from networkx.utils import not_implemented_for, open_file
+
+
@open_file(1, mode="wb")
def write_edgelist(G, path, comments="#", delimiter=" ", data=True, encoding="utf-8"):
    """Write a bipartite graph as a list of edges.

    Parameters
    ----------
    G : Graph
        A NetworkX bipartite graph.
    path : file or string
        File or filename to write. A provided file must be opened in
        'wb' mode. Filenames ending in .gz or .bz2 will be compressed.
    comments : string, optional
        The character used to indicate the start of a comment.
    delimiter : string, optional
        The string used to separate values. The default is whitespace.
    data : bool or list, optional
        If False write no edge data. If True write a string
        representation of the edge data dictionary. If a list (or other
        iterable), write only the keys specified in the list.
    encoding : string, optional
        Encoding used when writing the file.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> G.add_nodes_from([0, 2], bipartite=0)
    >>> G.add_nodes_from([1, 3], bipartite=1)
    >>> nx.write_edgelist(G, "test.edgelist")

    See Also
    --------
    generate_edgelist
    """
    # generate_edgelist yields text lines; append a newline and encode
    # each before handing it to the (binary) file object.
    for edge_line in generate_edgelist(G, delimiter, data):
        path.write((edge_line + "\n").encode(encoding))
+
+
@not_implemented_for("directed")
def generate_edgelist(G, delimiter=" ", data=True):
    """Generate lines of the bipartite graph G in edge list format.

    Parameters
    ----------
    G : NetworkX graph
        Every node must carry a ``bipartite`` attribute set to 0 or 1
        identifying its part; edges are emitted from part-0 nodes.

    delimiter : string, optional
        Separator for node labels.

    data : bool or list of keys
        If False generate no edge data. If True use a dictionary
        representation of edge data. If a list of keys, emit the data
        values corresponding to those keys.

    Yields
    ------
    str
        One line per edge in edge list format.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> G.add_nodes_from([0, 2], bipartite=0)
    >>> G.add_nodes_from([1, 3], bipartite=1)
    >>> for line in bipartite.generate_edgelist(G, data=False):
    ...     print(line)
    0 1
    2 1
    2 3
    """
    try:
        part0 = [node for node, attrs in G.nodes.items() if attrs["bipartite"] == 0]
    except BaseException as err:
        raise AttributeError("Missing node attribute `bipartite`") from err
    # Keep identity checks: other truthy/falsy values for `data` mean
    # "list of keys" and must fall through to the else branch.
    if data is True or data is False:
        for node in part0:
            for edge in G.edges(node, data=data):
                yield delimiter.join(map(str, edge))
    else:
        for node in part0:
            for u, v, attrs in G.edges(node, data=True):
                row = [u, v]
                try:
                    row.extend(attrs[key] for key in data)
                except KeyError:
                    pass  # missing data for this edge, should warn?
                yield delimiter.join(map(str, row))
+
+
@nx._dispatchable(name="bipartite_parse_edgelist", graphs=None, returns_graph=True)
def parse_edgelist(
    lines, comments="#", delimiter=None, create_using=None, nodetype=None, data=True
):
    """Parse lines of an edge list representation of a bipartite graph.

    Parameters
    ----------
    lines : list or iterator of strings
        Input data in edgelist format.
    comments : string, optional
        Marker for comment lines.
    delimiter : string, optional
        Separator for node labels.
    create_using : NetworkX graph container, optional
        Use given NetworkX graph for holding nodes or edges.
    nodetype : Python type, optional
        Convert nodes to this type.
    data : bool or list of (label, type) tuples
        If False, ignore edge data; if True, parse edge data as a Python
        dictionary literal; if a list of (key, type) tuples, convert the
        positional values to the given types under the given keys.

    Returns
    -------
    G : NetworkX Graph
        The bipartite graph corresponding to `lines`. The first node of
        each edge is placed in part 0, the second in part 1.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> lines = ["1 2", "2 3", "3 4"]
    >>> G = bipartite.parse_edgelist(lines, nodetype=int)
    >>> sorted(G.edges())
    [(1, 2), (2, 3), (3, 4)]
    """
    from ast import literal_eval

    G = nx.empty_graph(0, create_using)
    for raw in lines:
        # Strip everything from the comment marker onward.
        comment_at = raw.find(comments)
        if comment_at >= 0:
            raw = raw[:comment_at]
        if not raw:
            continue
        tokens = raw.strip().split(delimiter)
        # A valid edge needs at least two node tokens.
        if len(tokens) < 2:
            continue
        u, v, *extra = tokens
        if nodetype is not None:
            try:
                u, v = nodetype(u), nodetype(v)
            except BaseException as err:
                raise TypeError(
                    f"Failed to convert nodes {u},{v} to type {nodetype}."
                ) from err

        if not extra or data is False:
            # No data present, or caller asked to ignore it.
            edge_attrs = {}
        elif data is True:
            # Remaining tokens form a Python dict literal.
            try:
                edge_attrs = dict(literal_eval(" ".join(extra)))
            except BaseException as err:
                raise TypeError(
                    f"Failed to convert edge data ({extra}) to dictionary."
                ) from err
        else:
            # Positional values matched against the declared (key, type) pairs.
            if len(extra) != len(data):
                raise IndexError(
                    f"Edge data {extra} and data_keys {data} are not the same length"
                )
            edge_attrs = {}
            for (key, cast), value in zip(data, extra):
                try:
                    value = cast(value)
                except BaseException as err:
                    raise TypeError(
                        f"Failed to convert {key} data "
                        f"{value} to type {cast}."
                    ) from err
                edge_attrs[key] = value
        G.add_node(u, bipartite=0)
        G.add_node(v, bipartite=1)
        G.add_edge(u, v, **edge_attrs)
    return G
+
+
@open_file(0, mode="rb")
@nx._dispatchable(name="bipartite_read_edgelist", graphs=None, returns_graph=True)
def read_edgelist(
    path,
    comments="#",
    delimiter=None,
    create_using=None,
    nodetype=None,
    data=True,
    edgetype=None,
    encoding="utf-8",
):
    """Read a bipartite graph from a list of edges.

    Parameters
    ----------
    path : file or string
        File or filename to read. A provided file must be opened in
        'rb' mode. Filenames ending in .gz or .bz2 will be uncompressed.
    comments : string, optional
        The character used to indicate the start of a comment.
    delimiter : string, optional
        The string used to separate values. The default is whitespace.
    create_using : Graph container, optional
        Use specified container to build graph. The default is
        networkx.Graph, an undirected graph.
    nodetype : int, float, str, Python type, optional
        Convert node data from strings to specified type.
    data : bool or list of (label, type) tuples
        Tuples specifying dictionary key names and types for edge data.
    edgetype : Python type, optional OBSOLETE
        Accepted for backward compatibility; not used here.
    encoding : string, optional
        Encoding used when reading the file.

    Returns
    -------
    G : graph
        A networkx Graph or other type specified with `create_using`.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> G.add_nodes_from([0, 2], bipartite=0)
    >>> G.add_nodes_from([1, 3], bipartite=1)
    >>> bipartite.write_edgelist(G, "test.edgelist")
    >>> G = bipartite.read_edgelist("test.edgelist")

    See parse_edgelist() for more examples of formatting.

    See Also
    --------
    parse_edgelist

    Notes
    -----
    Since nodes must be hashable, the function nodetype must return
    hashable types (e.g. int, float, str, frozenset - or tuples of
    those, etc.)
    """
    # The file is opened in binary mode; decode lazily line by line.
    decoded_lines = (raw.decode(encoding) for raw in path)
    return parse_edgelist(
        decoded_lines,
        comments=comments,
        delimiter=delimiter,
        create_using=create_using,
        nodetype=nodetype,
        data=data,
    )
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/extendability.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/extendability.py
new file mode 100644
index 0000000000000000000000000000000000000000..0764997ad00895c0df79c2b47429a595d73e1256
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/extendability.py
@@ -0,0 +1,106 @@
+""" Provides a function for computing the extendability of a graph which is
+undirected, simple, connected and bipartite and contains at least one perfect matching."""
+
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["maximal_extendability"]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def maximal_extendability(G):
    """Compute the extendability of a graph.

    The extendability of `G` is the maximum $k$ for which `G` is
    $k$-extendable: `G` has a perfect matching and every set of $k$
    independent edges extends to a perfect matching in `G`.

    Parameters
    ----------
    G : NetworkX Graph
        A simple, connected, undirected bipartite graph with a perfect
        matching and no self-loops.

    Returns
    -------
    extendability : int

    Raises
    ------
    NetworkXError
        If the graph `G` is disconnected.
        If the graph `G` is not bipartite.
        If the graph `G` does not contain a perfect matching.
        If the residual graph of `G` is not strongly connected.

    Notes
    -----
    Given a perfect matching M and bipartition (U, V), the residual graph
    $G_M$ directs matched edges from V to U and all other edges from U to
    V. By a lemma of Lakhal and Litzler [1]_, `G` is $k$-extendable iff
    $G_M$ is strongly connected and every (u, v) pair with u in U and
    v in V admits $k$ vertex-disjoint directed paths; the minimum such
    count over all pairs is therefore the extendability.

    Time complexity O($n^3$ $m^2$) where $n$ is the number of vertices
    and $m$ is the number of edges.

    References
    ----------
    .. [1] "A polynomial algorithm for the extendability problem in bipartite graphs",
           J. Lakhal, L. Litzler, Information Processing Letters, 1998.
    .. [2] "On n-extendible graphs", M. D. Plummer, Discrete Mathematics, 31:201–210, 1980
           https://doi.org/10.1016/0012-365X(80)90037-0
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph G is not connected")
    if not nx.bipartite.is_bipartite(G):
        raise nx.NetworkXError("Graph G is not bipartite")

    U, V = nx.bipartite.sets(G)
    matching = nx.bipartite.hopcroft_karp_matching(G)
    if not nx.is_perfect_matching(G, matching):
        raise nx.NetworkXError("Graph G does not contain a perfect matching")

    # Matched edges, oriented from V to U.
    matched_pairs = [(node, matching[node]) for node in V & matching.keys()]

    # Orient every edge: V->U when matched, U->V otherwise.
    oriented = [
        (x, y)
        if (x in V and (x, y) in matched_pairs) or (x in U and (y, x) not in matched_pairs)
        else (y, x)
        for x, y in G.edges
    ]

    residual = nx.DiGraph()
    residual.add_nodes_from(G)
    residual.add_edges_from(oriented)

    if not nx.is_strongly_connected(residual):
        raise nx.NetworkXError("The residual graph of G is not strongly connected")

    # The extendability is the minimum, over all U x V pairs, of the
    # maximum number of vertex-disjoint directed paths in the residual.
    k = float("inf")
    for u in U:
        for v in V:
            disjoint_count = sum(1 for _ in nx.node_disjoint_paths(residual, u, v))
            k = min(k, disjoint_count)
    return k
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/generators.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/generators.py
new file mode 100644
index 0000000000000000000000000000000000000000..de6f07972394747d85f301584bb8b37d4192502d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/generators.py
@@ -0,0 +1,603 @@
+"""
+Generators and functions for bipartite graphs.
+"""
+import math
+import numbers
+from functools import reduce
+
+import networkx as nx
+from networkx.utils import nodes_or_number, py_random_state
+
+__all__ = [
+ "configuration_model",
+ "havel_hakimi_graph",
+ "reverse_havel_hakimi_graph",
+ "alternating_havel_hakimi_graph",
+ "preferential_attachment_graph",
+ "random_graph",
+ "gnmk_random_graph",
+ "complete_bipartite_graph",
+]
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
@nodes_or_number([0, 1])
def complete_bipartite_graph(n1, n2, create_using=None):
    """Return the complete bipartite graph `K_{n_1,n_2}`.

    The graph has two partitions: nodes 0 to (n1 - 1) in the first and
    nodes n1 to (n1 + n2 - 1) in the second; every node of the first is
    adjacent to every node of the second.

    Parameters
    ----------
    n1, n2 : integer or iterable container of nodes
        If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`.
        If a container, the elements are the nodes.
    create_using : NetworkX graph instance, (default: nx.Graph)
        Return graph of this type.

    Notes
    -----
    Each node gets a 'bipartite' attribute of 0 or 1 marking its part.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.complete_bipartite_graph
    """
    G = nx.empty_graph(0, create_using)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    # nodes_or_number delivers (count-or-container, node-iterable) pairs.
    n1, top_nodes = n1
    n2, bottom_nodes = n2
    if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):
        # Shift the second integer range so the two parts do not collide.
        bottom_nodes = [n1 + node for node in bottom_nodes]
    G.add_nodes_from(top_nodes, bipartite=0)
    G.add_nodes_from(bottom_nodes, bipartite=1)
    # Overlapping containers would silently merge nodes; reject that.
    if len(G) != len(top_nodes) + len(bottom_nodes):
        raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes")
    G.add_edges_from((t, b) for t in top_nodes for b in bottom_nodes)
    G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})"
    return G
+
+
@py_random_state(3)
@nx._dispatchable(name="bipartite_configuration_model", graphs=None, returns_graph=True)
def configuration_model(aseq, bseq, create_using=None, seed=None):
    """Return a random bipartite graph from two given degree sequences.

    Parameters
    ----------
    aseq : list
        Degree sequence for node set A.
    bseq : list
        Degree sequence for node set B.
    create_using : NetworkX graph instance, optional
        Return graph of this type.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Notes
    -----
    Set A occupies nodes 0..len(aseq)-1 and set B the following
    len(bseq) nodes. Free stubs on each side are paired uniformly at
    random. The two sequences must have equal sums. With the default
    MultiGraph parallel edges may appear; pass create_using=Graph() to
    forbid them (the realized degrees may then deviate).

    Each node gets a 'bipartite' attribute of 0 or 1 marking its part.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.configuration_model
    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    len_a, len_b = len(aseq), len(bseq)
    deg_sum_a, deg_sum_b = sum(aseq), sum(bseq)

    if deg_sum_a != deg_sum_b:
        raise nx.NetworkXError(
            f"invalid degree sequences, sum(aseq)!=sum(bseq),{deg_sum_a},{deg_sum_b}"
        )

    G = _add_nodes_with_bipartite_label(G, len_a, len_b)

    if not aseq or max(aseq) == 0:
        return G  # no stubs, hence no edges

    # Repeat each vertex label once per unit of its degree ("stubs").
    astubs = [v for v in range(len_a) for _ in range(aseq[v])]
    bstubs = [v for v in range(len_a, len_a + len_b) for _ in range(bseq[v - len_a])]

    # Shuffle both stub lists, then pair them positionally.
    seed.shuffle(astubs)
    seed.shuffle(bstubs)
    G.add_edges_from([astubs[i], bstubs[i]] for i in range(deg_sum_a))

    G.name = "bipartite_configuration_model"
    return G
+
+
@nx._dispatchable(name="bipartite_havel_hakimi_graph", graphs=None, returns_graph=True)
def havel_hakimi_graph(aseq, bseq, create_using=None):
    """Return a bipartite graph from two degree sequences using a
    Havel-Hakimi style construction.

    Set A occupies nodes 0..len(aseq)-1 and set B the following
    len(bseq) nodes. The highest-degree nodes of A are repeatedly
    connected to the highest-degree nodes of B until all stubs are used.

    Parameters
    ----------
    aseq : list
        Degree sequence for node set A.
    bseq : list
        Degree sequence for node set B.
    create_using : NetworkX graph instance, optional
        Return graph of this type.

    Notes
    -----
    The two sequences must have equal sums. With the default MultiGraph
    parallel edges may appear; pass create_using=Graph() to forbid them
    (the realized degrees may then deviate).

    Each node gets a 'bipartite' attribute of 0 or 1 marking its part.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.havel_hakimi_graph
    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    num_a, num_b = len(aseq), len(bseq)
    total_a, total_b = sum(aseq), sum(bseq)

    if total_a != total_b:
        raise nx.NetworkXError(
            f"invalid degree sequences, sum(aseq)!=sum(bseq),{total_a},{total_b}"
        )

    G = _add_nodes_with_bipartite_label(G, num_a, num_b)

    if not aseq or max(aseq) == 0:
        return G  # no stubs, hence no edges

    # Mutable [remaining_degree, node] records; sorting orders by degree.
    astubs = [[aseq[v], v] for v in range(num_a)]
    bstubs = [[bseq[v - num_a], v] for v in range(num_a, num_a + num_b)]
    astubs.sort()
    while astubs:
        degree, source = astubs.pop()  # largest remaining degree in A
        if degree == 0:
            break  # every remaining record is exhausted
        # Connect the source to the highest-degree B nodes.
        bstubs.sort()
        for record in bstubs[-degree:]:
            G.add_edge(source, record[1])
            record[0] -= 1  # shared list object: bstubs sees the decrement
            if record[0] == 0:
                bstubs.remove(record)

    G.name = "bipartite_havel_hakimi_graph"
    return G
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def reverse_havel_hakimi_graph(aseq, bseq, create_using=None):
    """Return a bipartite graph from two degree sequences using a
    Havel-Hakimi style construction.

    Set A occupies nodes 0..len(aseq)-1 and set B the following
    len(bseq) nodes. The highest-degree nodes of A are repeatedly
    connected to the LOWEST-degree nodes of B until all stubs are used.

    Parameters
    ----------
    aseq : list
        Degree sequence for node set A.
    bseq : list
        Degree sequence for node set B.
    create_using : NetworkX graph instance, optional
        Return graph of this type.

    Notes
    -----
    The two sequences must have equal sums. With the default MultiGraph
    parallel edges may appear; pass create_using=Graph() to forbid them
    (the realized degrees may then deviate).

    Each node gets a 'bipartite' attribute of 0 or 1 marking its part.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.reverse_havel_hakimi_graph
    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    len_a, len_b = len(aseq), len(bseq)
    deg_sum_a, deg_sum_b = sum(aseq), sum(bseq)

    if deg_sum_a != deg_sum_b:
        raise nx.NetworkXError(
            f"invalid degree sequences, sum(aseq)!=sum(bseq),{deg_sum_a},{deg_sum_b}"
        )

    G = _add_nodes_with_bipartite_label(G, len_a, len_b)

    if not aseq or max(aseq) == 0:
        return G  # no stubs, hence no edges

    # Mutable [remaining_degree, node] records; sorting orders by degree.
    astubs = [[aseq[v], v] for v in range(len_a)]
    bstubs = [[bseq[v - len_a], v] for v in range(len_a, len_a + len_b)]
    astubs.sort()
    bstubs.sort()
    while astubs:
        degree, source = astubs.pop()  # largest remaining degree in A
        if degree == 0:
            break  # every remaining record is exhausted
        # Connect the source to the smallest-degree B nodes.
        for record in bstubs[0:degree]:
            G.add_edge(source, record[1])
            record[0] -= 1  # shared list object: bstubs sees the decrement
            if record[0] == 0:
                bstubs.remove(record)

    G.name = "bipartite_reverse_havel_hakimi_graph"
    return G
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def alternating_havel_hakimi_graph(aseq, bseq, create_using=None):
    """Returns a bipartite graph from two given degree sequences using
    an alternating Havel-Hakimi style construction.

    The graph is composed of two partitions. Set A has nodes 0 to
    (len(aseq) - 1) and set B has nodes len(aseq) to (len(bseq) - 1).
    Nodes from the set A are connected to nodes in the set B by
    connecting the highest degree nodes in set A to alternatively the
    highest and the lowest degree nodes in set B until all stubs are
    connected.

    Parameters
    ----------
    aseq : list
        Degree sequence for node set A.
    bseq : list
        Degree sequence for node set B.
    create_using : NetworkX graph instance, optional
        Return graph of this type.

    Notes
    -----
    The sum of the two sequences must be equal: sum(aseq)=sum(bseq)
    If no graph type is specified use MultiGraph with parallel edges.
    If you want a graph with no parallel edges use create_using=Graph()
    but then the resulting degree sequences might not be exact.

    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
    to indicate which bipartite set the node belongs to.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.alternating_havel_hakimi_graph
    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    # length of the each sequence
    naseq = len(aseq)
    nbseq = len(bseq)
    suma = sum(aseq)
    sumb = sum(bseq)

    if not suma == sumb:
        raise nx.NetworkXError(
            f"invalid degree sequences, sum(aseq)!=sum(bseq),{suma},{sumb}"
        )

    G = _add_nodes_with_bipartite_label(G, naseq, nbseq)

    if len(aseq) == 0 or max(aseq) == 0:
        return G  # done if no edges
    # build list of degree-repeated vertex numbers
    # (mutable [remaining_degree, node] records, sorted by degree)
    astubs = [[aseq[v], v] for v in range(naseq)]
    bstubs = [[bseq[v - naseq], v] for v in range(naseq, naseq + nbseq)]
    while astubs:
        astubs.sort()
        (degree, u) = astubs.pop()  # take of largest degree node in the a set
        if degree == 0:
            break  # done, all are zero
        bstubs.sort()
        # Split the targets: half (rounded down) from the low-degree end,
        # the remainder from the high-degree end of the sorted B stubs.
        small = bstubs[0 : degree // 2]  # add these low degree targets
        large = bstubs[(-degree + degree // 2) :]  # now high degree targets
        # Interleave high/low targets; zip stops at the shorter list.
        stubs = [x for z in zip(large, small) for x in z]  # combine, sorry
        if len(stubs) < len(small) + len(large):  # check for zip truncation
            # Odd degree: `large` has one extra record that zip dropped.
            stubs.append(large.pop())
        for target in stubs:
            v = target[1]
            G.add_edge(u, v)
            target[0] -= 1  # note this updates bstubs too.
            if target[0] == 0:
                bstubs.remove(target)

    G.name = "bipartite_alternating_havel_hakimi_graph"
    return G
+
+
@py_random_state(3)
@nx._dispatchable(graphs=None, returns_graph=True)
def preferential_attachment_graph(aseq, p, create_using=None, seed=None):
    """Create a bipartite graph with a preferential attachment model
    from a given single degree sequence.

    Set A occupies nodes 0..len(aseq)-1; set B starts at node len(aseq)
    and its size is random.

    Parameters
    ----------
    aseq : list
        Degree sequence for node set A.
    p : float
        Probability that a new bottom node is added.
    create_using : NetworkX graph instance, optional
        Return graph of this type.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    References
    ----------
    .. [1] Guillaume, J.L. and Latapy, M.,
       Bipartite graphs as models of complex networks.
       Physica A: Statistical Mechanics and its Applications,
       2006, 371(2), pp.795-813.
    .. [2] Jean-Loup Guillaume and Matthieu Latapy,
       Bipartite structure of all complex networks,
       Inf. Process. Lett. 90, 2004, pg. 215-221
       https://doi.org/10.1016/j.ipl.2004.03.007

    Notes
    -----
    Each node gets a 'bipartite' attribute of 0 or 1 marking its part.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.preferential_attachment_graph
    """
    G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
    if G.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    if p > 1:
        raise nx.NetworkXError(f"probability {p} > 1")

    num_top = len(aseq)
    G = _add_nodes_with_bipartite_label(G, num_top, 0)
    # One pending-stub list per top node, each holding the node's label
    # repeated once per unit of degree.
    pending = [[v] * aseq[v] for v in range(num_top)]
    while pending:
        while pending[0]:
            source = pending[0][0]
            pending[0].remove(source)
            if seed.random() < p or len(G) == num_top:
                # With probability p (or when no bottom node exists yet)
                # attach to a brand-new bottom node.
                target = len(G)
                G.add_node(target, bipartite=1)
                G.add_edge(source, target)
            else:
                # Otherwise pick an existing bottom node with probability
                # proportional to its current degree.
                degree_stubs = [
                    b for b in range(num_top, len(G)) for _ in range(G.degree(b))
                ]
                target = seed.choice(degree_stubs)
                G.add_node(target, bipartite=1)
                G.add_edge(source, target)
        pending.remove(pending[0])
    G.name = "bipartite_preferential_attachment_model"
    return G
+
+
@py_random_state(3)
@nx._dispatchable(graphs=None, returns_graph=True)
def random_graph(n, m, p, seed=None, directed=False):
    """Return a bipartite random graph.

    This is a bipartite version of the binomial (Erdős-Rényi) graph.
    Set A has nodes 0..(n - 1) and set B has nodes n..(n + m - 1);
    each of the n*m (undirected) or 2*n*m (directed) possible edges is
    present independently with probability p.

    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set.
    m : int
        The number of nodes in the second bipartite set.
    p : float
        Probability for edge creation.
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    directed : bool, optional (default=False)
        If True return a directed graph.

    Notes
    -----
    Runs in $O(n+m)$ expected time via geometric skipping [1]_.

    Each node gets a 'bipartite' attribute of 0 or 1 marking its part.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.random_graph

    See Also
    --------
    gnp_random_graph, configuration_model

    References
    ----------
    .. [1] Vladimir Batagelj and Ulrik Brandes,
       "Efficient generation of large random networks",
       Phys. Rev. E, 71, 036113, 2005.
    """
    G = nx.Graph()
    G = _add_nodes_with_bipartite_label(G, n, m)
    if directed:
        G = nx.DiGraph(G)
    G.name = f"fast_gnp_random_graph({n},{m},{p})"

    if p <= 0:
        return G
    if p >= 1:
        return nx.complete_bipartite_graph(n, m)

    lp = math.log(1.0 - p)

    def _geometric_pass(connect):
        # Walk the n x m candidate grid, skipping geometrically many
        # non-edges at a time (Batagelj-Brandes technique).
        row = 0
        col = -1
        while row < n:
            skip = math.log(1.0 - seed.random())
            col = col + 1 + int(skip / lp)
            while col >= m and row < n:
                col -= m
                row += 1
            if row < n:
                connect(row, col)

    # Edges from the A side (nodes 0..n-1) to the B side (nodes n..n+m-1).
    _geometric_pass(lambda a, b: G.add_edge(a, n + b))

    if directed:
        # A second independent pass adds the B -> A direction.
        _geometric_pass(lambda a, b: G.add_edge(n + b, a))

    return G
+
+
@py_random_state(3)
@nx._dispatchable(graphs=None, returns_graph=True)
def gnmk_random_graph(n, m, k, seed=None, directed=False):
    """Returns a random bipartite graph G_{n,m,k}.

    Produces a bipartite graph chosen randomly out of the set of all graphs
    with n top nodes, m bottom nodes, and k edges.
    The graph is composed of two sets of nodes.
    Set A has nodes 0 to (n - 1) and set B has nodes n to (n + m - 1).

    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set.
    m : int
        The number of nodes in the second bipartite set.
    k : int
        The number of edges
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.
    directed : bool, optional (default=False)
        If True return a directed graph

    Examples
    --------
    from networkx.algorithms import bipartite
    G = bipartite.gnmk_random_graph(10, 20, 50)

    See Also
    --------
    gnm_random_graph

    Notes
    -----
    If k >= m * n then a complete bipartite graph is returned.

    This graph is a bipartite version of the `G_{nm}` random graph model.

    The nodes are assigned the attribute 'bipartite' with the value 0 or 1
    to indicate which bipartite set the node belongs to.

    This function is not imported in the main namespace.
    To use it use nx.bipartite.gnmk_random_graph
    """
    G = nx.Graph()
    G = _add_nodes_with_bipartite_label(G, n, m)
    if directed:
        G = nx.DiGraph(G)
    G.name = f"bipartite_gnm_random_graph({n},{m},{k})"
    max_edges = n * m  # max_edges for simple bipartite networks
    if k >= max_edges:  # Maybe we should raise an exception here
        # NOTE(review): for directed=True this call receives a DiGraph as
        # create_using; confirm complete_bipartite_graph accepts it.
        return nx.complete_bipartite_graph(n, m, create_using=G)

    # Bug fix: a previous version returned an *edgeless* graph whenever
    # n == 1 or m == 1, contradicting the documented contract of producing
    # exactly k edges. Rejection sampling below works for singleton sides
    # too, and terminates because k < max_edges.
    top = [node for node, d in G.nodes(data=True) if d["bipartite"] == 0]
    bottom = list(set(G) - set(top))
    edge_count = 0
    while edge_count < k:
        # Generate a random candidate edge (u, v); retry on duplicates.
        u = seed.choice(top)
        v = seed.choice(bottom)
        if v not in G[u]:
            G.add_edge(u, v)
            edge_count += 1
    return G
+
+
def _add_nodes_with_bipartite_label(G, lena, lenb):
    """Add ``lena + lenb`` integer nodes to ``G`` and label the first
    ``lena`` of them with ``bipartite=0`` and the rest with ``bipartite=1``.

    Returns the (mutated) graph ``G``.
    """
    total = lena + lenb
    G.add_nodes_from(range(total))
    labels = {node: (0 if node < lena else 1) for node in range(total)}
    nx.set_node_attributes(G, labels, "bipartite")
    return G
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/matching.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..48149ab9e31875be93fa1d8acf434903e23326bc
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/matching.py
@@ -0,0 +1,589 @@
# This module uses material from the Wikipedia article Hopcroft--Karp algorithm
# <https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm>, accessed on
# January 3, 2015, which is released under the Creative Commons
# Attribution-Share-Alike License 3.0
# <http://creativecommons.org/licenses/by-sa/3.0/>. That article includes
# pseudocode, which has been translated into the corresponding Python code.
#
# Portions of this module use code from David Eppstein's Python Algorithms and
# Data Structures (PADS) library, which is dedicated to the public domain (for
# proof, see <https://www.ics.uci.edu/~eppstein/PADS/ABOUT-PADS.txt>).
+"""Provides functions for computing maximum cardinality matchings and minimum
+weight full matchings in a bipartite graph.
+
+If you don't care about the particular implementation of the maximum matching
+algorithm, simply use the :func:`maximum_matching`. If you do care, you can
+import one of the named maximum matching algorithms directly.
+
+For example, to find a maximum matching in the complete bipartite graph with
+two vertices on the left and three vertices on the right:
+
+>>> G = nx.complete_bipartite_graph(2, 3)
+>>> left, right = nx.bipartite.sets(G)
+>>> list(left)
+[0, 1]
+>>> list(right)
+[2, 3, 4]
+>>> nx.bipartite.maximum_matching(G)
+{0: 2, 1: 3, 2: 0, 3: 1}
+
+The dictionary returned by :func:`maximum_matching` includes a mapping for
+vertices in both the left and right vertex sets.
+
+Similarly, :func:`minimum_weight_full_matching` produces, for a complete
+weighted bipartite graph, a matching whose cardinality is the cardinality of
+the smaller of the two partitions, and for which the sum of the weights of the
+edges included in the matching is minimal.
+
+"""
+import collections
+import itertools
+
+import networkx as nx
+from networkx.algorithms.bipartite import sets as bipartite_sets
+from networkx.algorithms.bipartite.matrix import biadjacency_matrix
+
# Public API of this module; the remaining module-level names are
# private helpers.
__all__ = [
    "maximum_matching",
    "hopcroft_karp_matching",
    "eppstein_matching",
    "to_vertex_cover",
    "minimum_weight_full_matching",
]

# Sentinel distance used by the Hopcroft--Karp BFS. It is compared with
# `is` / `is not` inside hopcroft_karp_matching, so it must remain this
# single shared object.
INFINITY = float("inf")
+
+
@nx._dispatchable
def hopcroft_karp_matching(G, top_nodes=None):
    """Returns the maximum cardinality matching of the bipartite graph `G`.

    A matching is a set of edges that do not share any nodes. A maximum
    cardinality matching is a matching with the most edges possible. It
    is not always unique. Finding a matching in a bipartite graph can be
    treated as a networkx flow problem.

    The functions ``hopcroft_karp_matching`` and ``maximum_matching``
    are aliases of the same function.

    Parameters
    ----------
    G : NetworkX graph

      Undirected bipartite graph

    top_nodes : container of nodes

      Container with all nodes in one bipartite node set. If not supplied
      it will be computed. But if more than one solution exists an exception
      will be raised.

    Returns
    -------
    matches : dictionary

      The matching is returned as a dictionary, `matches`, such that
      ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched
      nodes do not occur as a key in `matches`.

    Raises
    ------
    AmbiguousSolution
      Raised if the input bipartite graph is disconnected and no container
      with all nodes in one bipartite set is provided. When determining
      the nodes in each bipartite set more than one valid solution is
      possible if the input graph is disconnected.

    Notes
    -----
    This function is implemented with the `Hopcroft--Karp matching algorithm
    <https://en.wikipedia.org/wiki/Hopcroft%E2%80%93Karp_algorithm>`_ for
    bipartite graphs.

    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    maximum_matching
    hopcroft_karp_matching
    eppstein_matching

    References
    ----------
    .. [1] John E. Hopcroft and Richard M. Karp. "An n^{5 / 2} Algorithm for
       Maximum Matchings in Bipartite Graphs" In: **SIAM Journal of Computing**
       2.4 (1973), pp. 225--231. <https://doi.org/10.1137/0202019>.

    """

    # First we define some auxiliary search functions.
    #
    # If you are a human reading these auxiliary search functions, the "global"
    # variables `leftmatches`, `rightmatches`, `distances`, etc. are defined
    # below the functions, so that they are initialized close to the initial
    # invocation of the search functions.
    def breadth_first_search():
        # One BFS phase: compute layered distances from every currently
        # unmatched left vertex, alternating (implicitly) between edges
        # not in the matching and edges in the matching. `distances[None]`
        # records the length of the shortest augmenting path found, since
        # reaching an unmatched right vertex `u` means `rightmatches[u]`
        # is None.
        for v in left:
            if leftmatches[v] is None:
                # Unmatched left vertices seed the BFS at distance 0.
                distances[v] = 0
                queue.append(v)
            else:
                distances[v] = INFINITY
        distances[None] = INFINITY
        while queue:
            v = queue.popleft()
            if distances[v] < distances[None]:
                for u in G[v]:
                    # Identity comparison with the shared INFINITY sentinel
                    # is deliberate: it distinguishes "not yet visited this
                    # phase" from any finite distance.
                    if distances[rightmatches[u]] is INFINITY:
                        distances[rightmatches[u]] = distances[v] + 1
                        queue.append(rightmatches[u])
        # An augmenting path exists iff the BFS reached an unmatched right
        # vertex (i.e. assigned a finite distance to the None key).
        return distances[None] is not INFINITY

    def depth_first_search(v):
        # Follow the BFS layering to find one shortest augmenting path
        # starting at left vertex `v`, flipping matched/unmatched edges
        # along it. Returns True iff an augmenting path was found.
        # NOTE(review): this is recursive, so an extremely long augmenting
        # path could hit the interpreter recursion limit — confirm input
        # sizes before relying on it for very large graphs.
        if v is not None:
            for u in G[v]:
                # Only descend along edges that advance exactly one BFS layer.
                if distances[rightmatches[u]] == distances[v] + 1:
                    if depth_first_search(rightmatches[u]):
                        rightmatches[u] = v
                        leftmatches[v] = u
                        return True
            # Dead end: exclude `v` from further searches in this phase.
            distances[v] = INFINITY
            return False
        # v is None: we walked past an unmatched right vertex — success.
        return True

    # Initialize the "global" variables that maintain state during the search.
    left, right = bipartite_sets(G, top_nodes)
    leftmatches = {v: None for v in left}
    rightmatches = {v: None for v in right}
    distances = {}
    queue = collections.deque()

    # Implementation note: this counter is incremented as pairs are matched but
    # it is currently not used elsewhere in the computation.
    num_matched_pairs = 0
    # Alternate BFS phases and DFS augmentations until no augmenting path
    # remains, at which point the matching is maximum.
    while breadth_first_search():
        for v in left:
            if leftmatches[v] is None:
                if depth_first_search(v):
                    num_matched_pairs += 1

    # Strip the entries matched to `None` (i.e. vertices left unmatched).
    leftmatches = {k: v for k, v in leftmatches.items() if v is not None}
    rightmatches = {k: v for k, v in rightmatches.items() if v is not None}

    # At this point, the left matches and the right matches are inverses of one
    # another. In other words,
    #
    #     leftmatches == {v, k for k, v in rightmatches.items()}
    #
    # Finally, we combine both the left matches and right matches.
    return dict(itertools.chain(leftmatches.items(), rightmatches.items()))
+
+
@nx._dispatchable
def eppstein_matching(G, top_nodes=None):
    """Returns the maximum cardinality matching of the bipartite graph `G`.

    Parameters
    ----------
    G : NetworkX graph

      Undirected bipartite graph

    top_nodes : container

      Container with all nodes in one bipartite node set. If not supplied
      it will be computed. But if more than one solution exists an exception
      will be raised.

    Returns
    -------
    matches : dictionary

      The matching is returned as a dictionary, `matching`, such that
      ``matching[v] == w`` if node `v` is matched to node `w`. Unmatched
      nodes do not occur as a key in `matching`.

    Raises
    ------
    AmbiguousSolution
      Raised if the input bipartite graph is disconnected and no container
      with all nodes in one bipartite set is provided. When determining
      the nodes in each bipartite set more than one valid solution is
      possible if the input graph is disconnected.

    Notes
    -----
    This function is implemented with David Eppstein's version of the algorithm
    Hopcroft--Karp algorithm (see :func:`hopcroft_karp_matching`), which
    originally appeared in the `Python Algorithms and Data Structures library
    (PADS) <https://www.ics.uci.edu/~eppstein/PADS/>`_.

    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------

    hopcroft_karp_matching

    """
    # Due to its original implementation, a directed graph is needed
    # so that the two sets of bipartite nodes can be distinguished
    left, right = bipartite_sets(G, top_nodes)
    G = nx.DiGraph(G.edges(left))
    # initialize greedy matching (redundant, but faster than full search)
    # matching maps each matched right vertex to its left partner.
    matching = {}
    for u in G:
        for v in G[u]:
            if v not in matching:
                matching[v] = u
                break
    while True:
        # structure residual graph into layers
        # pred[u] gives the neighbor in the previous layer for u in U
        # preds[v] gives a list of neighbors in the previous layer for v in V
        # unmatched gives a list of unmatched vertices in final layer of V,
        # and is also used as a flag value for pred[u] when u is in the first
        # layer
        preds = {}
        unmatched = []
        # Every u initially points at the SAME `unmatched` list object; it
        # doubles as an identity sentinel, tested below via `pu is unmatched`.
        pred = {u: unmatched for u in G}
        for v in matching:
            # Matched left vertices do not belong to the first layer.
            del pred[matching[v]]
        layer = list(pred)

        # repeatedly extend layering structure by another pair of layers
        while layer and not unmatched:
            newLayer = {}
            for u in layer:
                for v in G[u]:
                    if v not in preds:
                        newLayer.setdefault(v, []).append(u)
            layer = []
            for v in newLayer:
                preds[v] = newLayer[v]
                if v in matching:
                    # Extend the layering through v's matched partner.
                    layer.append(matching[v])
                    pred[matching[v]] = v
                else:
                    # v is free: the layering has found augmenting paths.
                    unmatched.append(v)

        # did we finish layering without finding any alternating paths?
        if not unmatched:
            # TODO - The lines between --- were unused and were thus commented
            # out. This whole commented chunk should be reviewed to determine
            # whether it should be built upon or completely removed.
            # ---
            # unlayered = {}
            # for u in G:
            #     # TODO Why is extra inner loop necessary?
            #     for v in G[u]:
            #         if v not in preds:
            #             unlayered[v] = None
            # ---
            # TODO Originally, this function returned a three-tuple:
            #
            #     return (matching, list(pred), list(unlayered))
            #
            # For some reason, the documentation for this function
            # indicated that the second and third elements of the returned
            # three-tuple would be the vertices in the left and right vertex
            # sets, respectively, that are also in the maximum independent set.
            # However, what I think the author meant was that the second
            # element is the list of vertices that were unmatched and the third
            # element was the list of vertices that were matched. Since that
            # seems to be the case, they don't really need to be returned,
            # since that information can be inferred from the matching
            # dictionary.

            # All the matched nodes must be a key in the dictionary, so add
            # the inverse (left -> right) entries before returning.
            for key in matching.copy():
                matching[matching[key]] = key
            return matching

        # recursively search backward through layers to find alternating paths
        # recursion returns true if found path, false otherwise
        def recurse(v):
            if v in preds:
                L = preds.pop(v)
                for u in L:
                    if u in pred:
                        pu = pred.pop(u)
                        # `pu is unmatched` means u sits in the first layer
                        # (identity check against the shared sentinel list).
                        if pu is unmatched or recurse(pu):
                            matching[v] = u
                            return True
            return False

        # Augment along a maximal set of vertex-disjoint shortest paths.
        for v in unmatched:
            recurse(v)
+
+
def _is_connected_by_alternating_path(G, v, matched_edges, unmatched_edges, targets):
    """Returns True if and only if the vertex `v` is connected to one of
    the target vertices by an alternating path in `G`.

    An *alternating path* is a path in which every other edge is in the
    specified maximum matching (and the remaining edges in the path are not in
    the matching). An alternating path may have matched edges in the even
    positions or in the odd positions, as long as the edges alternate between
    'matched' and 'unmatched'.

    `G` is an undirected bipartite NetworkX graph.

    `v` is a vertex in `G`.

    `matched_edges` is a set of edges present in a maximum matching in `G`.

    `unmatched_edges` is a set of edges not present in a maximum
    matching in `G`.

    `targets` is a set of vertices.

    """

    def _alternating_dfs(u, along_matched=True):
        """Returns True if and only if `u` is connected to one of the
        targets by an alternating path.

        `u` is a vertex in the graph `G`.

        If `along_matched` is True, this step of the depth-first search
        will continue only through edges in the given matching. Otherwise, it
        will continue only through edges *not* in the given matching.

        """
        visited = set()
        # The parity of `depth` selects which edge set is valid at each step:
        # `depth % 2` truthy (odd) selects matched edges, even selects
        # unmatched edges.
        # NOTE(review): with along_matched=True the initial depth is 0, so
        # the *first* edge followed is an unmatched one — this looks inverted
        # relative to the parameter's name/docstring. The caller below tries
        # both parities and unions the results, so the final answer is
        # unaffected; confirm before using either parity on its own.
        initial_depth = 0 if along_matched else 1
        stack = [(u, iter(G[u]), initial_depth)]
        # Iterative DFS; each stack frame holds (vertex, neighbor iterator,
        # depth of that vertex along the current path).
        while stack:
            parent, children, depth = stack[-1]
            valid_edges = matched_edges if depth % 2 else unmatched_edges
            try:
                child = next(children)
                if child not in visited:
                    # Only traverse the edge if it has the parity required
                    # at this depth (either orientation of the pair).
                    if (parent, child) in valid_edges or (child, parent) in valid_edges:
                        if child in targets:
                            return True
                        visited.add(child)
                        stack.append((child, iter(G[child]), depth + 1))
            except StopIteration:
                # Neighbors of `parent` exhausted: backtrack.
                stack.pop()
        return False

    # Check for alternating paths starting with edges in the matching, then
    # check for alternating paths starting with edges not in the
    # matching.
    return _alternating_dfs(v, along_matched=True) or _alternating_dfs(
        v, along_matched=False
    )
+
+
def _connected_by_alternating_paths(G, matching, targets):
    """Returns the set of vertices that are connected to one of the target
    vertices by an alternating path in `G` or are themselves a target.

    An *alternating path* alternates between edges in the given maximum
    matching and edges not in it; either parity may come first.

    `G` is an undirected bipartite NetworkX graph.

    `matching` is a dictionary representing a maximum matching in `G`, as
    returned by, for example, :func:`maximum_matching`.

    `targets` is a set of vertices.

    """
    # Canonicalize each matched pair as a frozenset so the two orientations
    # of an undirected edge compare equal, without requiring the nodes to
    # be orderable.
    matched_pairs = {frozenset(pair) for pair in matching.items()}
    matched_edges = {tuple(pair) for pair in matched_pairs}
    unmatched_edges = {
        (u, v) for u, v in G.edges() if frozenset((u, v)) not in matched_pairs
    }

    # A vertex qualifies if it is a target itself or reaches a target along
    # some alternating path.
    connected = set()
    for node in G:
        if node in targets or _is_connected_by_alternating_path(
            G, node, matched_edges, unmatched_edges, targets
        ):
            connected.add(node)
    return connected
+
+
@nx._dispatchable
def to_vertex_cover(G, matching, top_nodes=None):
    """Returns the minimum vertex cover corresponding to the given maximum
    matching of the bipartite graph `G`.

    Parameters
    ----------
    G : NetworkX graph

      Undirected bipartite graph

    matching : dictionary

      A dictionary whose keys are vertices in `G` and whose values are the
      distinct neighbors comprising the maximum matching for `G`, as returned
      by, for example, :func:`maximum_matching`. The dictionary *must*
      represent the maximum matching.

    top_nodes : container

      Container with all nodes in one bipartite node set. If not supplied
      it will be computed. But if more than one solution exists an exception
      will be raised.

    Returns
    -------
    vertex_cover : :class:`set`

      The minimum vertex cover in `G`.

    Raises
    ------
    AmbiguousSolution
      Raised if the input bipartite graph is disconnected and no container
      with all nodes in one bipartite set is provided. When determining
      the nodes in each bipartite set more than one valid solution is
      possible if the input graph is disconnected.

    Notes
    -----
    This function applies `Konig's theorem
    <https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_%28graph_theory%29>`_,
    which establishes the equivalence between a maximum matching and a
    minimum vertex cover in bipartite graphs.

    Since a minimum vertex cover is the complement of a maximum independent set
    for any graph, one can compute the maximum independent set of a bipartite
    graph this way:

    >>> G = nx.complete_bipartite_graph(2, 3)
    >>> matching = nx.bipartite.maximum_matching(G)
    >>> vertex_cover = nx.bipartite.to_vertex_cover(G, matching)
    >>> independent_set = set(G) - vertex_cover
    >>> print(list(independent_set))
    [2, 3, 4]

    See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
    for further details on how bipartite graphs are handled in NetworkX.

    """
    # Konig's-theorem construction, following
    # https://en.wikipedia.org/wiki/K%C5%91nig%27s_theorem_%28graph_theory%29
    left_set, right_set = bipartite_sets(G, top_nodes)
    # U: vertices of the left set left unmatched by `matching`.
    exposed_left = (set(G) - set(matching)) & left_set
    # Z: U together with every vertex reachable from U via an alternating path.
    reachable = _connected_by_alternating_paths(G, matching, exposed_left)
    # Every edge has either its right endpoint in Z or its left endpoint
    # outside Z, so (L - Z) | (R & Z) covers all edges.
    return (left_set - reachable) | (right_set & reachable)
+
+
#: Returns the maximum cardinality matching in the given bipartite graph.
#:
#: This function is simply an alias for :func:`hopcroft_karp_matching`;
#: it shares that function's signature, return value, and exceptions.
maximum_matching = hopcroft_karp_matching
+
+
@nx._dispatchable(edge_attrs="weight")
def minimum_weight_full_matching(G, top_nodes=None, weight="weight"):
    r"""Returns a minimum weight full matching of the bipartite graph `G`.

    Given a weighted bipartite graph :math:`G = ((U, V), E)` with weights
    :math:`w : E \to \mathbb{R}`, produce a matching :math:`M \subseteq E`
    of cardinality

    .. math::
       \lvert M \rvert = \min(\lvert U \rvert, \lvert V \rvert),

    minimizing :math:`\sum_{e \in M} w(e)`, or raise an error if no such
    matching exists. When the two sets have equal size this is the classic
    perfect matching; since the sizes may differ, we follow Karp [1]_ and
    call the matching *full*.

    Parameters
    ----------
    G : NetworkX graph

      Undirected bipartite graph

    top_nodes : container

      Container with all nodes in one bipartite node set. If not supplied
      it will be computed.

    weight : string, optional (default='weight')

      The edge data key used to provide each value in the matrix.
      If None, then each edge has weight 1.

    Returns
    -------
    matches : dictionary

      The matching is returned as a dictionary, `matches`, such that
      ``matches[v] == w`` if node `v` is matched to node `w`. Unmatched
      nodes do not occur as a key in `matches`.

    Raises
    ------
    ValueError
      Raised if no full matching exists.

    ImportError
      Raised if SciPy is not available.

    Notes
    -----
    This problem is also known as the rectangular linear assignment
    problem; the assignment itself is delegated to SciPy's
    ``linear_sum_assignment``.

    References
    ----------
    .. [1] Richard Manning Karp:
       An algorithm to Solve the m x n Assignment Problem in Expected Time
       O(mn log n).
       Networks, 10(2):143–152, 1980.

    """
    import numpy as np
    import scipy as sp

    left, right = nx.bipartite.sets(G, top_nodes)
    row_nodes = list(left)
    col_nodes = list(right)
    # Build a dense cost matrix with +inf wherever an edge is missing, so
    # the assignment solver can never select a non-edge (a plain toarray()
    # would have used 0 there instead).
    sparse_costs = biadjacency_matrix(
        G, row_order=row_nodes, column_order=col_nodes, weight=weight, format="coo"
    )
    costs = np.full(sparse_costs.shape, np.inf)
    costs[sparse_costs.row, sparse_costs.col] = sparse_costs.data
    assignment = sp.optimize.linear_sum_assignment(costs)
    matches = {row_nodes[r]: col_nodes[c] for r, c in zip(*assignment)}
    # The solver yields only the left-to-right direction; mirror it so both
    # endpoints appear as keys.
    matches.update({v: u for u, v in matches.items()})
    return matches
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/matrix.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/matrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..462ef8a1311c0e4fbc250a2778af108ed60a0df7
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/matrix.py
@@ -0,0 +1,167 @@
+"""
+====================
+Biadjacency matrices
+====================
+"""
+import itertools
+
+import networkx as nx
+from networkx.convert_matrix import _generate_weighted_edges
+
+__all__ = ["biadjacency_matrix", "from_biadjacency_matrix"]
+
+
@nx._dispatchable(edge_attrs="weight")
def biadjacency_matrix(
    G, row_order, column_order=None, dtype=None, weight="weight", format="csr"
):
    r"""Returns the biadjacency matrix of the bipartite graph G.

    Let `G = (U, V, E)` be a bipartite graph with node sets
    `U = u_{1},...,u_{r}` and `V = v_{1},...,v_{s}`. The biadjacency
    matrix [1]_ is the `r` x `s` matrix `B` in which `b_{i,j} = 1`
    if, and only if, `(u_i, v_j) \in E`. If the parameter `weight` is
    not `None` and matches the name of an edge attribute, its value is
    used instead of 1.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    row_order : list of nodes
       The rows of the matrix are ordered according to the list of nodes.

    column_order : list, optional
       The columns of the matrix are ordered according to the list of nodes.
       If column_order is None, then the ordering of columns is arbitrary.

    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. If None, then the
        NumPy default is used.

    weight : string or None, optional (default='weight')
       The edge data key used to provide each value in the matrix.
       If None, then each edge has weight 1.

    format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
        The type of the matrix to be returned (default 'csr'). For
        some algorithms different implementations of sparse matrices
        can perform better. See [2]_ for details.

    Returns
    -------
    M : SciPy sparse array
        Biadjacency matrix representation of the bipartite graph G.

    Raises
    ------
    NetworkXError
        If `row_order` is empty, if `row_order` or `column_order` contain
        duplicates, or if `format` is not a known sparse format.

    Notes
    -----
    No attempt is made to check that the input graph is bipartite.

    For directed bipartite graphs only successors are considered as neighbors.
    To obtain an adjacency matrix with ones (or weight values) for both
    predecessors and successors you have to generate two biadjacency matrices
    where the rows of one of them are the columns of the other, and then add
    one to the transpose of the other.

    See Also
    --------
    adjacency_matrix
    from_biadjacency_matrix

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
    .. [2] Scipy Dev. References, "Sparse Matrices",
       https://docs.scipy.org/doc/scipy/reference/sparse.html
    """
    import scipy as sp

    nlen = len(row_order)
    if nlen == 0:
        raise nx.NetworkXError("row_order is empty list")
    if len(row_order) != len(set(row_order)):
        msg = "Ambiguous ordering: `row_order` contained duplicates."
        raise nx.NetworkXError(msg)
    if column_order is None:
        column_order = list(set(G) - set(row_order))
    mlen = len(column_order)
    if len(column_order) != len(set(column_order)):
        msg = "Ambiguous ordering: `column_order` contained duplicates."
        raise nx.NetworkXError(msg)

    row_index = dict(zip(row_order, itertools.count()))
    col_index = dict(zip(column_order, itertools.count()))

    # Materialize the (row, col, weight) triples before unpacking. The
    # previous version unpacked zip(*generator) directly whenever G had any
    # edges, which raised ValueError ("not enough values to unpack") when G
    # had edges but none of them connected row_order to column_order.
    triples = [
        (row_index[u], col_index[v], d.get(weight, 1))
        for u, v, d in G.edges(row_order, data=True)
        if u in row_index and v in col_index
    ]
    if triples:
        row, col, data = zip(*triples)
    else:
        row, col, data = [], [], []
    A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, mlen), dtype=dtype)
    try:
        return A.asformat(format)
    except ValueError as err:
        raise nx.NetworkXError(f"Unknown sparse array format: {format}") from err
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_biadjacency_matrix(A, create_using=None, edge_attribute="weight"):
    r"""Creates a new bipartite graph from a biadjacency matrix given as a
    SciPy sparse array.

    Parameters
    ----------
    A: scipy sparse array
      A biadjacency matrix representation of a graph

    create_using: NetworkX graph
      Use specified graph for result. The default is Graph()

    edge_attribute: string
      Name of edge attribute to store matrix numeric value. The data will
      have the same type as the matrix entry (int, float, (real,imag)).

    Notes
    -----
    The nodes are labeled with the attribute `bipartite` set to an integer
    0 or 1 representing membership in part 0 or part 1 of the bipartite graph.

    If `create_using` is an instance of :class:`networkx.MultiGraph` or
    :class:`networkx.MultiDiGraph` and the entries of `A` are of
    type :class:`int`, then this function returns a multigraph (of the same
    type as `create_using`) with parallel edges. In this case, `edge_attribute`
    will be ignored.

    See Also
    --------
    biadjacency_matrix
    from_numpy_array

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Adjacency_matrix#Adjacency_matrix_of_a_bipartite_graph
    """
    G = nx.empty_graph(0, create_using)
    n_rows, n_cols = A.shape
    # Add every node up front so isolated rows/columns are not lost; rows
    # become part 0 and columns become part 1, shifted past the row nodes.
    G.add_nodes_from(range(n_rows), bipartite=0)
    G.add_nodes_from(range(n_rows, n_rows + n_cols), bipartite=1)
    # Lazily translate each nonzero entry (u, v, w) of the matrix into an
    # edge from row node u to column node n_rows + v.
    weighted_edges = ((u, n_rows + v, w) for u, v, w in _generate_weighted_edges(A))
    if G.is_multigraph() and A.dtype.kind in ("i", "u"):
        # Integer matrix + multigraph: interpret each entry w as w parallel
        # edges of weight 1 rather than a single weighted edge.
        weighted_edges = (
            (u, v, 1) for u, v, w in weighted_edges for _ in range(w)
        )
    G.add_weighted_edges_from(weighted_edges, weight=edge_attribute)
    return G
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/projection.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/projection.py
new file mode 100644
index 0000000000000000000000000000000000000000..1eb71fa528f1759e6c9cb5883646badfaa493b94
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/projection.py
@@ -0,0 +1,521 @@
+"""One-mode (unipartite) projections of bipartite graphs."""
+import networkx as nx
+from networkx.exception import NetworkXAlgorithmError
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "projected_graph",
+ "weighted_projected_graph",
+ "collaboration_weighted_projected_graph",
+ "overlap_weighted_projected_graph",
+ "generic_weighted_projected_graph",
+]
+
+
@nx._dispatchable(
    graphs="B", preserve_node_attrs=True, preserve_graph_attrs=True, returns_graph=True
)
def projected_graph(B, nodes, multigraph=False):
    r"""Returns the projection of B onto one of its node sets.

    Two nodes of `nodes` are connected in the projection whenever they
    share at least one neighbor in the bipartite graph B. Projected
    nodes keep their attributes.

    Parameters
    ----------
    B : NetworkX graph
        The input graph should be bipartite.

    nodes : list or iterable
        Nodes to project onto (the "bottom" nodes).

    multigraph: bool (default=False)
        If True return a multigraph where the multiple edges represent multiple
        shared neighbors. The edge key in the multigraph is assigned to the
        label of the neighbor.

    Returns
    -------
    Graph : NetworkX graph or multigraph
        A graph that is the projection onto the given nodes.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> B = nx.path_graph(4)
    >>> G = bipartite.projected_graph(B, [1, 3])
    >>> list(G)
    [1, 3]
    >>> list(G.edges())
    [(1, 3)]

    With ``multigraph=True``, nodes connected through both 1 and 2 get
    two parallel edges in the projection:

    >>> B = nx.Graph()
    >>> B.add_edges_from([("a", 1), ("b", 1), ("a", 2), ("b", 2)])
    >>> G = bipartite.projected_graph(B, ["a", "b"], multigraph=True)
    >>> print([sorted((u, v)) for u, v in G.edges()])
    [['a', 'b'], ['a', 'b']]

    Notes
    -----
    No attempt is made to verify that the input graph B is bipartite.
    Multigraph inputs are not supported. Directed graphs are allowed as
    input; the output is then directed, with an edge whenever a directed
    two-hop path exists between the nodes. The graph and node properties
    are (shallow) copied to the projected graph.

    See :mod:`bipartite documentation `
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    is_bipartite,
    is_bipartite_node_set,
    sets,
    weighted_projected_graph,
    collaboration_weighted_projected_graph,
    overlap_weighted_projected_graph,
    generic_weighted_projected_graph
    """
    if B.is_multigraph():
        raise nx.NetworkXError("not defined for multigraphs")
    directed = B.is_directed()
    # Pick the output class matching the input's directedness and the
    # requested edge multiplicity.
    if directed:
        G = nx.MultiDiGraph() if multigraph else nx.DiGraph()
    else:
        G = nx.MultiGraph() if multigraph else nx.Graph()
    G.graph.update(B.graph)
    G.add_nodes_from((v, B.nodes[v]) for v in nodes)
    for u in nodes:
        # Nodes reachable in exactly two hops share a neighbor with u.
        two_hop = {w for nbr in B[u] for w in B[nbr] if w != u}
        if not multigraph:
            G.add_edges_from((u, w) for w in two_hop)
            continue
        for w in two_hop:
            # One keyed edge per shared neighbor; the key is that
            # neighbor's label.
            if directed:
                shared = set(B[u]) & set(B.pred[w])
            else:
                shared = set(B[u]) & set(B[w])
            for key in shared:
                if not G.has_edge(u, w, key):
                    G.add_edge(u, w, key=key)
    return G
+
+
@not_implemented_for("multigraph")
@nx._dispatchable(graphs="B", returns_graph=True)
def weighted_projected_graph(B, nodes, ratio=False):
    r"""Returns a weighted projection of B onto one of its node sets.

    Two projected nodes are joined whenever they share a neighbor in the
    bipartite graph B. The edge weight is the number of shared
    neighbors, or — when ``ratio is True`` — that count divided by the
    size of the other node set [1]_. Node attributes are preserved.

    Parameters
    ----------
    B : NetworkX graph
        The input graph should be bipartite.

    nodes : list or iterable
        Distinct nodes to project onto (the "bottom" nodes).

    ratio: Bool (default=False)
        If True, edge weight is the ratio between actual shared neighbors
        and maximum possible shared neighbors (i.e., the size of the other
        node set). If False, edge weight is the number of shared neighbors.

    Returns
    -------
    Graph : NetworkX graph
        A graph that is the projection onto the given nodes.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> B = nx.path_graph(4)
    >>> G = bipartite.weighted_projected_graph(B, [1, 3])
    >>> list(G)
    [1, 3]
    >>> list(G.edges(data=True))
    [(1, 3, {'weight': 1})]
    >>> G = bipartite.weighted_projected_graph(B, [1, 3], ratio=True)
    >>> list(G.edges(data=True))
    [(1, 3, {'weight': 0.5})]

    Notes
    -----
    No attempt is made to verify that the input graph B is bipartite, or
    that the input nodes are distinct. An exception is raised when the
    input node set is at least as large as B; duplicated nodes that pass
    this check yield incorrect weights. The graph and node properties
    are (shallow) copied to the projected graph.

    See :mod:`bipartite documentation `
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    is_bipartite,
    is_bipartite_node_set,
    sets,
    collaboration_weighted_projected_graph,
    overlap_weighted_projected_graph,
    generic_weighted_projected_graph
    projected_graph

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
       Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
       of Social Network Analysis. Sage Publications.
    """
    if B.is_directed():
        pred = B.pred
        G = nx.DiGraph()
    else:
        pred = B.adj
        G = nx.Graph()
    G.graph.update(B.graph)
    G.add_nodes_from((v, B.nodes[v]) for v in nodes)
    # Size of the "top" node set; also the maximum possible number of
    # shared neighbors, used as the denominator when ratio=True.
    n_top = len(B) - len(nodes)

    if n_top < 1:
        raise NetworkXAlgorithmError(
            f"the size of the nodes to project onto ({len(nodes)}) is >= the graph size ({len(B)}).\n"
            "They are either not a valid bipartite partition or contain duplicates"
        )

    for u in nodes:
        u_nbrs = set(B[u])
        candidates = {w for nbr in u_nbrs for w in B[nbr]} - {u}
        for v in candidates:
            # Count of common neighbors; pred handles the directed case.
            shared = len(u_nbrs & set(pred[v]))
            G.add_edge(u, v, weight=shared / n_top if ratio else shared)
    return G
+
+
@not_implemented_for("multigraph")
@nx._dispatchable(graphs="B", returns_graph=True)
def collaboration_weighted_projected_graph(B, nodes):
    r"""Newman's weighted projection of B onto one of its node sets.

    Edge weights are assigned using Newman's collaboration model [1]_:

    .. math::

        w_{u, v} = \sum_k \frac{\delta_{u}^{k} \delta_{v}^{k}}{d_k - 1}

    where the sum runs over nodes `k` of the top set, `d_k` is the
    degree of `k` in the bipartite network, and `\delta_{u}^{k}` is 1
    exactly when `u` and `k` are adjacent in B. Two projected nodes are
    joined whenever they share a neighbor in B; node attributes are
    preserved.

    Parameters
    ----------
    B : NetworkX graph
        The input graph should be bipartite.

    nodes : list or iterable
        Nodes to project onto (the "bottom" nodes).

    Returns
    -------
    Graph : NetworkX graph
        A graph that is the projection onto the given nodes.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> B = nx.path_graph(5)
    >>> B.add_edge(1, 5)
    >>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5])
    >>> list(G)
    [0, 2, 4, 5]
    >>> for edge in sorted(G.edges(data=True)):
    ...     print(edge)
    (0, 2, {'weight': 0.5})
    (0, 5, {'weight': 0.5})
    (2, 4, {'weight': 1.0})
    (2, 5, {'weight': 0.5})

    Notes
    -----
    No attempt is made to verify that the input graph B is bipartite.
    The graph and node properties are (shallow) copied to the projected graph.

    See :mod:`bipartite documentation `
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    is_bipartite,
    is_bipartite_node_set,
    sets,
    weighted_projected_graph,
    overlap_weighted_projected_graph,
    generic_weighted_projected_graph,
    projected_graph

    References
    ----------
    .. [1] Scientific collaboration networks: II.
        Shortest paths, weighted networks, and centrality,
        M. E. J. Newman, Phys. Rev. E 64, 016132 (2001).
    """
    if B.is_directed():
        pred = B.pred
        G = nx.DiGraph()
    else:
        pred = B.adj
        G = nx.Graph()
    G.graph.update(B.graph)
    G.add_nodes_from((v, B.nodes[v]) for v in nodes)
    for u in nodes:
        u_nbrs = set(B[u])
        for v in {w for nbr in u_nbrs for w in B[nbr] if w != u}:
            # Each shared neighbor k contributes 1/(deg(k) - 1);
            # degree-1 neighbors are skipped to avoid dividing by zero.
            shared = u_nbrs & set(pred[v])
            wt = sum(1.0 / (len(B[k]) - 1) for k in shared if len(B[k]) > 1)
            G.add_edge(u, v, weight=wt)
    return G
+
+
@not_implemented_for("multigraph")
@nx._dispatchable(graphs="B", returns_graph=True)
def overlap_weighted_projected_graph(B, nodes, jaccard=True):
    r"""Overlap weighted projection of B onto one of its node sets.

    Edge weights are the Jaccard index between the neighborhoods of the
    two endpoints in the original bipartite network [1]_:

    .. math::

        w_{v, u} = \frac{|N(u) \cap N(v)|}{|N(u) \cup N(v)|}

    or, if `jaccard` is False, the number of common neighbors divided by
    the smaller of the two endpoint degrees [1]_:

    .. math::

        w_{v, u} = \frac{|N(u) \cap N(v)|}{min(|N(u)|, |N(v)|)}

    Two projected nodes are joined whenever they share a neighbor in B;
    node attributes are preserved.

    Parameters
    ----------
    B : NetworkX graph
        The input graph should be bipartite.

    nodes : list or iterable
        Nodes to project onto (the "bottom" nodes).

    jaccard: Bool (default=True)
        If True use the Jaccard index as the edge weight; otherwise use
        the fraction of common neighbors over the minimum degree.

    Returns
    -------
    Graph : NetworkX graph
        A graph that is the projection onto the given nodes.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> B = nx.path_graph(5)
    >>> nodes = [0, 2, 4]
    >>> G = bipartite.overlap_weighted_projected_graph(B, nodes)
    >>> list(G)
    [0, 2, 4]
    >>> list(G.edges(data=True))
    [(0, 2, {'weight': 0.5}), (2, 4, {'weight': 0.5})]
    >>> G = bipartite.overlap_weighted_projected_graph(B, nodes, jaccard=False)
    >>> list(G.edges(data=True))
    [(0, 2, {'weight': 1.0}), (2, 4, {'weight': 1.0})]

    Notes
    -----
    No attempt is made to verify that the input graph B is bipartite.
    The graph and node properties are (shallow) copied to the projected graph.

    See :mod:`bipartite documentation `
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    is_bipartite,
    is_bipartite_node_set,
    sets,
    weighted_projected_graph,
    collaboration_weighted_projected_graph,
    generic_weighted_projected_graph,
    projected_graph

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. Analyzing Affiliation
        Networks. In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.

    """
    if B.is_directed():
        pred = B.pred
        G = nx.DiGraph()
    else:
        pred = B.adj
        G = nx.Graph()
    G.graph.update(B.graph)
    G.add_nodes_from((v, B.nodes[v]) for v in nodes)
    for u in nodes:
        u_nbrs = set(B[u])
        for v in {w for nbr in u_nbrs for w in B[nbr]} - {u}:
            # pred handles the directed case (in-neighbors of v).
            v_nbrs = set(pred[v])
            n_shared = len(u_nbrs & v_nbrs)
            if jaccard:
                wt = n_shared / len(u_nbrs | v_nbrs)
            else:
                wt = n_shared / min(len(u_nbrs), len(v_nbrs))
            G.add_edge(u, v, weight=wt)
    return G
+
+
@not_implemented_for("multigraph")
@nx._dispatchable(graphs="B", preserve_all_attrs=True, returns_graph=True)
def generic_weighted_projected_graph(B, nodes, weight_function=None):
    r"""Weighted projection of B with a user-specified weight function.

    Two projected nodes are joined whenever they share a neighbor in the
    bipartite graph B; the weight of each edge is computed by a
    user-supplied function of the graph and the two endpoints. Node
    attributes are preserved.

    Parameters
    ----------
    B : NetworkX graph
        The input graph should be bipartite.

    nodes : list or iterable
        Nodes to project onto (the "bottom" nodes).

    weight_function : function
        This function must accept as parameters the same input graph
        that this function, and two nodes; and return an integer or a float.
        The default function computes the number of shared neighbors.

    Returns
    -------
    Graph : NetworkX graph
        A graph that is the projection onto the given nodes.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> # Define some custom weight functions
    >>> def jaccard(G, u, v):
    ...     unbrs = set(G[u])
    ...     vnbrs = set(G[v])
    ...     return float(len(unbrs & vnbrs)) / len(unbrs | vnbrs)
    >>> def my_weight(G, u, v, weight="weight"):
    ...     w = 0
    ...     for nbr in set(G[u]) & set(G[v]):
    ...         w += G[u][nbr].get(weight, 1) + G[v][nbr].get(weight, 1)
    ...     return w
    >>> B = nx.complete_bipartite_graph(2, 2)
    >>> for i, (u, v) in enumerate(B.edges()):
    ...     B.edges[u, v]["weight"] = i + 1
    >>> # By default, the weight is the number of shared neighbors
    >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1])
    >>> print(list(G.edges(data=True)))
    [(0, 1, {'weight': 2})]
    >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1], weight_function=jaccard)
    >>> print(list(G.edges(data=True)))
    [(0, 1, {'weight': 1.0})]
    >>> G = bipartite.generic_weighted_projected_graph(B, [0, 1], weight_function=my_weight)
    >>> print(list(G.edges(data=True)))
    [(0, 1, {'weight': 10})]

    Notes
    -----
    No attempt is made to verify that the input graph B is bipartite.
    The graph and node properties are (shallow) copied to the projected graph.

    See :mod:`bipartite documentation `
    for further details on how bipartite graphs are handled in NetworkX.

    See Also
    --------
    is_bipartite,
    is_bipartite_node_set,
    sets,
    weighted_projected_graph,
    collaboration_weighted_projected_graph,
    overlap_weighted_projected_graph,
    projected_graph

    """
    if B.is_directed():
        pred = B.pred
        G = nx.DiGraph()
    else:
        pred = B.adj
        G = nx.Graph()
    if weight_function is None:

        def weight_function(G, u, v):
            # set(pred[v]) rather than G[v] so the directed case counts
            # shared neighbors along directed two-hop paths.
            return len(set(G[u]) & set(pred[v]))

    G.graph.update(B.graph)
    G.add_nodes_from((v, B.nodes[v]) for v in nodes)
    for u in nodes:
        for v in {w for nbr in B[u] for w in B[nbr]} - {u}:
            G.add_edge(u, v, weight=weight_function(B, u, v))
    return G
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a44d212896329a0e5d4f8436385d2d25d20e0a3
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/redundancy.py
@@ -0,0 +1,111 @@
+"""Node redundancy for bipartite graphs."""
+from itertools import combinations
+
+import networkx as nx
+from networkx import NetworkXError
+
+__all__ = ["node_redundancy"]
+
+
@nx._dispatchable
def node_redundancy(G, nodes=None):
    r"""Computes the node redundancy coefficients for the nodes in the bipartite
    graph `G`.

    The redundancy coefficient of a node `v` is the fraction of pairs of
    neighbors of `v` that are both linked to other nodes. In a one-mode
    projection these nodes would be linked together even if `v` were
    not there.

    More formally, for any vertex `v`, the *redundancy coefficient of `v`* is
    defined by

    .. math::

        rc(v) = \frac{|\{\{u, w\} \subseteq N(v),
        \: \exists v' \neq v,\: (v',u) \in E\:
        \mathrm{and}\: (v',w) \in E\}|}{ \frac{|N(v)|(|N(v)|-1)}{2}},

    where `N(v)` is the set of neighbors of `v` in `G`.

    Parameters
    ----------
    G : graph
        A bipartite graph

    nodes : list or iterable (optional)
        Compute redundancy for these nodes. The default is all nodes in G.

    Returns
    -------
    redundancy : dictionary
        A dictionary keyed by node with the node redundancy value.

    Examples
    --------
    Compute the redundancy coefficient of each node in a graph::

        >>> from networkx.algorithms import bipartite
        >>> G = nx.cycle_graph(4)
        >>> rc = bipartite.node_redundancy(G)
        >>> rc[0]
        1.0

    Compute the average redundancy for the graph::

        >>> from networkx.algorithms import bipartite
        >>> G = nx.cycle_graph(4)
        >>> rc = bipartite.node_redundancy(G)
        >>> sum(rc.values()) / len(G)
        1.0

    Raises
    ------
    NetworkXError
        If any of the nodes in the graph (or in `nodes`, if specified) has
        (out-)degree less than two (which would result in division by zero,
        according to the definition of the redundancy coefficient).

    References
    ----------
    .. [1] Latapy, Matthieu, Clémence Magnien, and Nathalie Del Vecchio (2008).
        Basic notions for the analysis of large two-mode networks.
        Social Networks 30(1), 31--48.

    """
    if nodes is None:
        nodes = G
    # Validate every requested node up front so the per-node computation
    # below can never divide by zero.
    for v in nodes:
        if len(G[v]) < 2:
            raise NetworkXError(
                "Cannot compute redundancy coefficient for a node"
                " that has fewer than two neighbors."
            )
    # TODO This can be trivially parallelized.
    return {v: _node_redundancy(G, v) for v in nodes}
+
+
+def _node_redundancy(G, v):
+ """Returns the redundancy of the node `v` in the bipartite graph `G`.
+
+ If `G` is a graph with `n` nodes, the redundancy of a node is the ratio
+ of the "overlap" of `v` to the maximum possible overlap of `v`
+ according to its degree. The overlap of `v` is the number of pairs of
+ neighbors that have mutual neighbors themselves, other than `v`.
+
+ `v` must have at least two neighbors in `G`.
+
+ """
+ n = len(G[v])
+ overlap = sum(
+ 1 for (u, w) in combinations(G[v], 2) if (set(G[u]) & set(G[w])) - {v}
+ )
+ return (2 * overlap) / (n * (n - 1))
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/spectral.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/spectral.py
new file mode 100644
index 0000000000000000000000000000000000000000..61a56dd2c0e37ae9ca30c36276087186539a73a9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/spectral.py
@@ -0,0 +1,68 @@
+"""
+Spectral bipartivity measure.
+"""
+import networkx as nx
+
+__all__ = ["spectral_bipartivity"]
+
+
@nx._dispatchable(edge_attrs="weight")
def spectral_bipartivity(G, nodes=None, weight="weight"):
    """Returns the spectral bipartivity.

    Parameters
    ----------
    G : NetworkX graph

    nodes : list or container optional(default is all nodes)
        Nodes to return value of spectral bipartivity contribution.

    weight : string or None optional (default = 'weight')
        Edge data key to use for edge weights. If None, weights set to 1.

    Returns
    -------
    sb : float or dict
        A single number if the keyword nodes is not specified, or
        a dictionary keyed by node with the spectral bipartivity contribution
        of that node as the value.

    Examples
    --------
    >>> from networkx.algorithms import bipartite
    >>> G = nx.path_graph(4)
    >>> bipartite.spectral_bipartivity(G)
    1.0

    Notes
    -----
    This implementation uses Numpy (dense) matrices which are not efficient
    for storing large sparse graphs.

    See Also
    --------
    color

    References
    ----------
    .. [1] E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of
       bipartivity in complex networks", PhysRev E 72, 046105 (2005)
    """
    import scipy as sp

    nodelist = list(G)  # fixes the row/column ordering in the matrix
    A = nx.to_numpy_array(G, nodelist, weight=weight)
    expA = sp.linalg.expm(A)
    # cosh(A) via the matrix exponentials of A and -A.
    coshA = 0.5 * (expA + sp.linalg.expm(-A))
    if nodes is None:
        # Single measure for the entire graph: ratio of the traces.
        return float(coshA.diagonal().sum() / expA.diagonal().sum())
    # Per-node contributions for the requested nodes only.
    index = {v: i for i, v in enumerate(nodelist)}
    sb = {}
    for v in nodes:
        i = index[v]
        sb[v] = coshA.item(i, i) / expA.item(i, i)
    return sb
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__init__.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..926235d7db4edabdb891fb30d6a5c772d569700a
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..794cc20802d56c233a89464d28a0e33a96c60a13
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7cd5d3705608870141bc54113d9d2173603846cb
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a891732f653a6ba8983f48cb0b1d6b80fa39fd1b
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bb53a524abe3ed7fd5f4240980b2d2b89253d15
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..431920876c9d0993e2cc7e301ba8c0d03a02c890
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84959987bdbe77eb9c4ce0ad11edac7162c8a6ae
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d2ee9e12880ef5876ecd3e9433afa20dd4f0a1e
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef15c73e635c940ba4ff504def27204bdeb618ae
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d577faf4b6b60e8a1d0673f8cde5379510242eea
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9de94f6fa767932750ff7703bd095b183546ca5f
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aedc8495d8ef26a0b64331bd9b61d0868e8a4b83
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-310.pyc b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1761213749e34ad6ee11afdff2073c49e9223f4
Binary files /dev/null and b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-310.pyc differ
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py
new file mode 100644
index 0000000000000000000000000000000000000000..655506b4f74110b57cb37db277e2be50bb0be8f4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_basic.py
@@ -0,0 +1,125 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms import bipartite
+
+
class TestBipartiteBasic:
    """Unit tests for the basic bipartite helpers (is_bipartite, color,
    sets, density, degrees) and for biadjacency_matrix construction."""

    def test_is_bipartite(self):
        # Even paths and single directed edges are bipartite; odd cycles
        # (complete_graph(3)) are not.
        assert bipartite.is_bipartite(nx.path_graph(4))
        assert bipartite.is_bipartite(nx.DiGraph([(1, 0)]))
        assert not bipartite.is_bipartite(nx.complete_graph(3))

    def test_bipartite_color(self):
        G = nx.path_graph(4)
        c = bipartite.color(G)
        # A path alternates colors along its nodes.
        assert c == {0: 1, 1: 0, 2: 1, 3: 0}

    def test_not_bipartite_color(self):
        # Coloring a non-bipartite graph must raise.
        with pytest.raises(nx.NetworkXError):
            c = bipartite.color(nx.complete_graph(4))

    def test_bipartite_directed(self):
        G = bipartite.random_graph(10, 10, 0.1, directed=True)
        assert bipartite.is_bipartite(G)

    def test_bipartite_sets(self):
        G = nx.path_graph(4)
        X, Y = bipartite.sets(G)
        assert X == {0, 2}
        assert Y == {1, 3}

    def test_bipartite_sets_directed(self):
        G = nx.path_graph(4)
        D = G.to_directed()
        X, Y = bipartite.sets(D)
        assert X == {0, 2}
        assert Y == {1, 3}

    def test_bipartite_sets_given_top_nodes(self):
        G = nx.path_graph(4)
        top_nodes = [0, 2]
        X, Y = bipartite.sets(G, top_nodes)
        assert X == {0, 2}
        assert Y == {1, 3}

    def test_bipartite_sets_disconnected(self):
        # Without top_nodes, a disconnected graph has no unique
        # bipartition, so sets() must raise AmbiguousSolution.
        with pytest.raises(nx.AmbiguousSolution):
            G = nx.path_graph(4)
            G.add_edges_from([(5, 6), (6, 7)])
            X, Y = bipartite.sets(G)

    def test_is_bipartite_node_set(self):
        G = nx.path_graph(4)

        # Duplicate entries in the candidate set are ambiguous.
        with pytest.raises(nx.AmbiguousSolution):
            bipartite.is_bipartite_node_set(G, [1, 1, 2, 3])

        assert bipartite.is_bipartite_node_set(G, [0, 2])
        assert bipartite.is_bipartite_node_set(G, [1, 3])
        assert not bipartite.is_bipartite_node_set(G, [1, 2])
        # Extra disconnected component: either endpoint may join either side.
        G.add_edge(10, 20)
        assert bipartite.is_bipartite_node_set(G, [0, 2, 10])
        assert bipartite.is_bipartite_node_set(G, [0, 2, 20])
        assert bipartite.is_bipartite_node_set(G, [1, 3, 10])
        assert bipartite.is_bipartite_node_set(G, [1, 3, 20])

    def test_bipartite_density(self):
        G = nx.path_graph(5)
        X, Y = bipartite.sets(G)
        # Bipartite density = |E| / (|X| * |Y|); halved for the directed
        # view, and 0.0 for an empty graph.
        density = len(list(G.edges())) / (len(X) * len(Y))
        assert bipartite.density(G, X) == density
        D = nx.DiGraph(G.edges())
        assert bipartite.density(D, X) == density / 2.0
        assert bipartite.density(nx.Graph(), {}) == 0.0

    def test_bipartite_degrees(self):
        G = nx.path_graph(5)
        X = {1, 3}
        Y = {0, 2, 4}
        # degrees() returns (top-set degrees, bottom-set degrees).
        u, d = bipartite.degrees(G, Y)
        assert dict(u) == {1: 2, 3: 2}
        assert dict(d) == {0: 1, 2: 2, 4: 1}

    def test_bipartite_weighted_degrees(self):
        G = nx.path_graph(5)
        # Edge (0, 1) carries two distinct weight attributes.
        G.add_edge(0, 1, weight=0.1, other=0.2)
        X = {1, 3}
        Y = {0, 2, 4}
        u, d = bipartite.degrees(G, Y, weight="weight")
        assert dict(u) == {1: 1.1, 3: 2}
        assert dict(d) == {0: 0.1, 2: 2, 4: 1}
        u, d = bipartite.degrees(G, Y, weight="other")
        assert dict(u) == {1: 1.2, 3: 2}
        assert dict(d) == {0: 0.2, 2: 2, 4: 1}

    def test_biadjacency_matrix_weight(self):
        pytest.importorskip("scipy")
        G = nx.path_graph(5)
        G.add_edge(0, 1, weight=2, other=4)
        X = [1, 3]
        Y = [0, 2, 4]
        # The matrix entry reflects whichever attribute was requested.
        M = bipartite.biadjacency_matrix(G, X, weight="weight")
        assert M[0, 0] == 2
        M = bipartite.biadjacency_matrix(G, X, weight="other")
        assert M[0, 0] == 4

    def test_biadjacency_matrix(self):
        pytest.importorskip("scipy")
        tops = [2, 5, 10]
        bots = [5, 10, 15]
        for i in range(len(tops)):
            G = bipartite.random_graph(tops[i], bots[i], 0.2)
            top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0]
            # Matrix shape is (|top|, |bottom|).
            M = bipartite.biadjacency_matrix(G, top)
            assert M.shape[0] == tops[i]
            assert M.shape[1] == bots[i]

    def test_biadjacency_matrix_order(self):
        pytest.importorskip("scipy")
        G = nx.path_graph(5)
        G.add_edge(0, 1, weight=2)
        # Explicit row/column orders: row 1 is node 1, column 2 is node 0.
        X = [3, 1]
        Y = [4, 2, 0]
        M = bipartite.biadjacency_matrix(G, X, Y, weight="weight")
        assert M[1, 2] == 2
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py
new file mode 100644
index 0000000000000000000000000000000000000000..19fb5d117be94c688616a394ea3322e93bfa3e00
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_centrality.py
@@ -0,0 +1,192 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms import bipartite
+
+
+class TestBipartiteCentrality:
+ @classmethod
+ def setup_class(cls):
+ cls.P4 = nx.path_graph(4)
+ cls.K3 = nx.complete_bipartite_graph(3, 3)
+ cls.C4 = nx.cycle_graph(4)
+ cls.davis = nx.davis_southern_women_graph()
+ cls.top_nodes = [
+ n for n, d in cls.davis.nodes(data=True) if d["bipartite"] == 0
+ ]
+
+ def test_degree_centrality(self):
+ d = bipartite.degree_centrality(self.P4, [1, 3])
+ answer = {0: 0.5, 1: 1.0, 2: 1.0, 3: 0.5}
+ assert d == answer
+ d = bipartite.degree_centrality(self.K3, [0, 1, 2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0}
+ assert d == answer
+ d = bipartite.degree_centrality(self.C4, [0, 2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
+ assert d == answer
+
+ def test_betweenness_centrality(self):
+ c = bipartite.betweenness_centrality(self.P4, [1, 3])
+ answer = {0: 0.0, 1: 1.0, 2: 1.0, 3: 0.0}
+ assert c == answer
+ c = bipartite.betweenness_centrality(self.K3, [0, 1, 2])
+ answer = {0: 0.125, 1: 0.125, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125}
+ assert c == answer
+ c = bipartite.betweenness_centrality(self.C4, [0, 2])
+ answer = {0: 0.25, 1: 0.25, 2: 0.25, 3: 0.25}
+ assert c == answer
+
+ def test_closeness_centrality(self):
+ c = bipartite.closeness_centrality(self.P4, [1, 3])
+ answer = {0: 2.0 / 3, 1: 1.0, 2: 1.0, 3: 2.0 / 3}
+ assert c == answer
+ c = bipartite.closeness_centrality(self.K3, [0, 1, 2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0, 4: 1.0, 5: 1.0}
+ assert c == answer
+ c = bipartite.closeness_centrality(self.C4, [0, 2])
+ answer = {0: 1.0, 1: 1.0, 2: 1.0, 3: 1.0}
+ assert c == answer
+ G = nx.Graph()
+ G.add_node(0)
+ G.add_node(1)
+ c = bipartite.closeness_centrality(G, [0])
+ assert c == {0: 0.0, 1: 0.0}
+ c = bipartite.closeness_centrality(G, [1])
+ assert c == {0: 0.0, 1: 0.0}
+
+ def test_bipartite_closeness_centrality_unconnected(self):
+ G = nx.complete_bipartite_graph(3, 3)
+ G.add_edge(6, 7)
+ c = bipartite.closeness_centrality(G, [0, 2, 4, 6], normalized=False)
+ answer = {
+ 0: 10.0 / 7,
+ 2: 10.0 / 7,
+ 4: 10.0 / 7,
+ 6: 10.0,
+ 1: 10.0 / 7,
+ 3: 10.0 / 7,
+ 5: 10.0 / 7,
+ 7: 10.0,
+ }
+ assert c == answer
+
+ def test_davis_degree_centrality(self):
+ G = self.davis
+ deg = bipartite.degree_centrality(G, self.top_nodes)
+ answer = {
+ "E8": 0.78,
+ "E9": 0.67,
+ "E7": 0.56,
+ "Nora Fayette": 0.57,
+ "Evelyn Jefferson": 0.57,
+ "Theresa Anderson": 0.57,
+ "E6": 0.44,
+ "Sylvia Avondale": 0.50,
+ "Laura Mandeville": 0.50,
+ "Brenda Rogers": 0.50,
+ "Katherina Rogers": 0.43,
+ "E5": 0.44,
+ "Helen Lloyd": 0.36,
+ "E3": 0.33,
+ "Ruth DeSand": 0.29,
+ "Verne Sanderson": 0.29,
+ "E12": 0.33,
+ "Myra Liddel": 0.29,
+ "E11": 0.22,
+ "Eleanor Nye": 0.29,
+ "Frances Anderson": 0.29,
+ "Pearl Oglethorpe": 0.21,
+ "E4": 0.22,
+ "Charlotte McDowd": 0.29,
+ "E10": 0.28,
+ "Olivia Carleton": 0.14,
+ "Flora Price": 0.14,
+ "E2": 0.17,
+ "E1": 0.17,
+ "Dorothy Murchison": 0.14,
+ "E13": 0.17,
+ "E14": 0.17,
+ }
+ for node, value in answer.items():
+ assert value == pytest.approx(deg[node], abs=1e-2)
+
+ def test_davis_betweenness_centrality(self):
+ G = self.davis
+ bet = bipartite.betweenness_centrality(G, self.top_nodes)
+ answer = {
+ "E8": 0.24,
+ "E9": 0.23,
+ "E7": 0.13,
+ "Nora Fayette": 0.11,
+ "Evelyn Jefferson": 0.10,
+ "Theresa Anderson": 0.09,
+ "E6": 0.07,
+ "Sylvia Avondale": 0.07,
+ "Laura Mandeville": 0.05,
+ "Brenda Rogers": 0.05,
+ "Katherina Rogers": 0.05,
+ "E5": 0.04,
+ "Helen Lloyd": 0.04,
+ "E3": 0.02,
+ "Ruth DeSand": 0.02,
+ "Verne Sanderson": 0.02,
+ "E12": 0.02,
+ "Myra Liddel": 0.02,
+ "E11": 0.02,
+ "Eleanor Nye": 0.01,
+ "Frances Anderson": 0.01,
+ "Pearl Oglethorpe": 0.01,
+ "E4": 0.01,
+ "Charlotte McDowd": 0.01,
+ "E10": 0.01,
+ "Olivia Carleton": 0.01,
+ "Flora Price": 0.01,
+ "E2": 0.00,
+ "E1": 0.00,
+ "Dorothy Murchison": 0.00,
+ "E13": 0.00,
+ "E14": 0.00,
+ }
+ for node, value in answer.items():
+ assert value == pytest.approx(bet[node], abs=1e-2)
+
+ def test_davis_closeness_centrality(self):
+ G = self.davis
+ clos = bipartite.closeness_centrality(G, self.top_nodes)
+ answer = {
+ "E8": 0.85,
+ "E9": 0.79,
+ "E7": 0.73,
+ "Nora Fayette": 0.80,
+ "Evelyn Jefferson": 0.80,
+ "Theresa Anderson": 0.80,
+ "E6": 0.69,
+ "Sylvia Avondale": 0.77,
+ "Laura Mandeville": 0.73,
+ "Brenda Rogers": 0.73,
+ "Katherina Rogers": 0.73,
+ "E5": 0.59,
+ "Helen Lloyd": 0.73,
+ "E3": 0.56,
+ "Ruth DeSand": 0.71,
+ "Verne Sanderson": 0.71,
+ "E12": 0.56,
+ "Myra Liddel": 0.69,
+ "E11": 0.54,
+ "Eleanor Nye": 0.67,
+ "Frances Anderson": 0.67,
+ "Pearl Oglethorpe": 0.67,
+ "E4": 0.54,
+ "Charlotte McDowd": 0.60,
+ "E10": 0.55,
+ "Olivia Carleton": 0.59,
+ "Flora Price": 0.59,
+ "E2": 0.52,
+ "E1": 0.52,
+ "Dorothy Murchison": 0.65,
+ "E13": 0.52,
+ "E14": 0.52,
+ }
+ for node, value in answer.items():
+ assert value == pytest.approx(clos[node], abs=1e-2)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..72e2dbadd64e9e768d1541b2ce742c2b62278929
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_cluster.py
@@ -0,0 +1,84 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms import bipartite
+from networkx.algorithms.bipartite.cluster import cc_dot, cc_max, cc_min
+
+
+def test_pairwise_bipartite_cc_functions():
+ # Test functions for different kinds of bipartite clustering coefficients
+ # between pairs of nodes using 3 example graphs from figure 5 p. 40
+ # Latapy et al (2008)
+ G1 = nx.Graph([(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7)])
+ G2 = nx.Graph([(0, 2), (0, 3), (0, 4), (1, 3), (1, 4), (1, 5)])
+ G3 = nx.Graph(
+ [(0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)]
+ )
+ result = {
+ 0: [1 / 3.0, 2 / 3.0, 2 / 5.0],
+ 1: [1 / 2.0, 2 / 3.0, 2 / 3.0],
+ 2: [2 / 8.0, 2 / 5.0, 2 / 5.0],
+ }
+ for i, G in enumerate([G1, G2, G3]):
+ assert bipartite.is_bipartite(G)
+ assert cc_dot(set(G[0]), set(G[1])) == result[i][0]
+ assert cc_min(set(G[0]), set(G[1])) == result[i][1]
+ assert cc_max(set(G[0]), set(G[1])) == result[i][2]
+
+
+def test_star_graph():
+ G = nx.star_graph(3)
+ # all modes are the same
+ answer = {0: 0, 1: 1, 2: 1, 3: 1}
+ assert bipartite.clustering(G, mode="dot") == answer
+ assert bipartite.clustering(G, mode="min") == answer
+ assert bipartite.clustering(G, mode="max") == answer
+
+
+def test_not_bipartite():
+ with pytest.raises(nx.NetworkXError):
+ bipartite.clustering(nx.complete_graph(4))
+
+
+def test_bad_mode():
+ with pytest.raises(nx.NetworkXError):
+ bipartite.clustering(nx.path_graph(4), mode="foo")
+
+
+def test_path_graph():
+ G = nx.path_graph(4)
+ answer = {0: 0.5, 1: 0.5, 2: 0.5, 3: 0.5}
+ assert bipartite.clustering(G, mode="dot") == answer
+ assert bipartite.clustering(G, mode="max") == answer
+ answer = {0: 1, 1: 1, 2: 1, 3: 1}
+ assert bipartite.clustering(G, mode="min") == answer
+
+
+def test_average_path_graph():
+ G = nx.path_graph(4)
+ assert bipartite.average_clustering(G, mode="dot") == 0.5
+ assert bipartite.average_clustering(G, mode="max") == 0.5
+ assert bipartite.average_clustering(G, mode="min") == 1
+
+
+def test_ra_clustering_davis():
+ G = nx.davis_southern_women_graph()
+ cc4 = round(bipartite.robins_alexander_clustering(G), 3)
+ assert cc4 == 0.468
+
+
+def test_ra_clustering_square():
+ G = nx.path_graph(4)
+ G.add_edge(0, 3)
+ assert bipartite.robins_alexander_clustering(G) == 1.0
+
+
+def test_ra_clustering_zero():
+ G = nx.Graph()
+ assert bipartite.robins_alexander_clustering(G) == 0
+ G.add_nodes_from(range(4))
+ assert bipartite.robins_alexander_clustering(G) == 0
+ G.add_edges_from([(0, 1), (2, 3), (3, 4)])
+ assert bipartite.robins_alexander_clustering(G) == 0
+ G.add_edge(1, 2)
+ assert bipartite.robins_alexander_clustering(G) == 0
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py
new file mode 100644
index 0000000000000000000000000000000000000000..9507e13492acbe505aa3394a24dbc41c095a037c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_covering.py
@@ -0,0 +1,33 @@
+import networkx as nx
+from networkx.algorithms import bipartite
+
+
+class TestMinEdgeCover:
+ """Tests for :func:`networkx.algorithms.bipartite.min_edge_cover`"""
+
+ def test_empty_graph(self):
+ G = nx.Graph()
+ assert bipartite.min_edge_cover(G) == set()
+
+ def test_graph_single_edge(self):
+ G = nx.Graph()
+ G.add_edge(0, 1)
+ assert bipartite.min_edge_cover(G) == {(0, 1), (1, 0)}
+
+ def test_bipartite_default(self):
+ G = nx.Graph()
+ G.add_nodes_from([1, 2, 3, 4], bipartite=0)
+ G.add_nodes_from(["a", "b", "c"], bipartite=1)
+ G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
+ min_cover = bipartite.min_edge_cover(G)
+ assert nx.is_edge_cover(G, min_cover)
+ assert len(min_cover) == 8
+
+ def test_bipartite_explicit(self):
+ G = nx.Graph()
+ G.add_nodes_from([1, 2, 3, 4], bipartite=0)
+ G.add_nodes_from(["a", "b", "c"], bipartite=1)
+ G.add_edges_from([(1, "a"), (1, "b"), (2, "b"), (2, "c"), (3, "c"), (4, "a")])
+ min_cover = bipartite.min_edge_cover(G, bipartite.eppstein_matching)
+ assert nx.is_edge_cover(G, min_cover)
+ assert len(min_cover) == 8
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py
new file mode 100644
index 0000000000000000000000000000000000000000..74035b35e9c6fc06b5416de07f94bd9fc6255cce
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_edgelist.py
@@ -0,0 +1,217 @@
+"""
+ Unit tests for bipartite edgelists.
+"""
+import io
+
+import pytest
+
+import networkx as nx
+from networkx.algorithms import bipartite
+from networkx.utils import edges_equal, graphs_equal, nodes_equal
+
+
+class TestEdgelist:
+ @classmethod
+ def setup_class(cls):
+ cls.G = nx.Graph(name="test")
+ e = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "e"), ("e", "f"), ("a", "f")]
+ cls.G.add_edges_from(e)
+ cls.G.add_nodes_from(["a", "c", "e"], bipartite=0)
+ cls.G.add_nodes_from(["b", "d", "f"], bipartite=1)
+ cls.G.add_node("g", bipartite=0)
+ cls.DG = nx.DiGraph(cls.G)
+ cls.MG = nx.MultiGraph()
+ cls.MG.add_edges_from([(1, 2), (1, 2), (1, 2)])
+ cls.MG.add_node(1, bipartite=0)
+ cls.MG.add_node(2, bipartite=1)
+
+ def test_read_edgelist_1(self):
+ s = b"""\
+# comment line
+1 2
+# comment line
+2 3
+"""
+ bytesIO = io.BytesIO(s)
+ G = bipartite.read_edgelist(bytesIO, nodetype=int)
+ assert edges_equal(G.edges(), [(1, 2), (2, 3)])
+
+ def test_read_edgelist_3(self):
+ s = b"""\
+# comment line
+1 2 {'weight':2.0}
+# comment line
+2 3 {'weight':3.0}
+"""
+ bytesIO = io.BytesIO(s)
+ G = bipartite.read_edgelist(bytesIO, nodetype=int, data=False)
+ assert edges_equal(G.edges(), [(1, 2), (2, 3)])
+
+ bytesIO = io.BytesIO(s)
+ G = bipartite.read_edgelist(bytesIO, nodetype=int, data=True)
+ assert edges_equal(
+ G.edges(data=True), [(1, 2, {"weight": 2.0}), (2, 3, {"weight": 3.0})]
+ )
+
+ def test_write_edgelist_1(self):
+ fh = io.BytesIO()
+ G = nx.Graph()
+ G.add_edges_from([(1, 2), (2, 3)])
+ G.add_node(1, bipartite=0)
+ G.add_node(2, bipartite=1)
+ G.add_node(3, bipartite=0)
+ bipartite.write_edgelist(G, fh, data=False)
+ fh.seek(0)
+ assert fh.read() == b"1 2\n3 2\n"
+
+ def test_write_edgelist_2(self):
+ fh = io.BytesIO()
+ G = nx.Graph()
+ G.add_edges_from([(1, 2), (2, 3)])
+ G.add_node(1, bipartite=0)
+ G.add_node(2, bipartite=1)
+ G.add_node(3, bipartite=0)
+ bipartite.write_edgelist(G, fh, data=True)
+ fh.seek(0)
+ assert fh.read() == b"1 2 {}\n3 2 {}\n"
+
+ def test_write_edgelist_3(self):
+ fh = io.BytesIO()
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=2.0)
+ G.add_edge(2, 3, weight=3.0)
+ G.add_node(1, bipartite=0)
+ G.add_node(2, bipartite=1)
+ G.add_node(3, bipartite=0)
+ bipartite.write_edgelist(G, fh, data=True)
+ fh.seek(0)
+ assert fh.read() == b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n"
+
+ def test_write_edgelist_4(self):
+ fh = io.BytesIO()
+ G = nx.Graph()
+ G.add_edge(1, 2, weight=2.0)
+ G.add_edge(2, 3, weight=3.0)
+ G.add_node(1, bipartite=0)
+ G.add_node(2, bipartite=1)
+ G.add_node(3, bipartite=0)
+ bipartite.write_edgelist(G, fh, data=[("weight")])
+ fh.seek(0)
+ assert fh.read() == b"1 2 2.0\n3 2 3.0\n"
+
+ def test_unicode(self, tmp_path):
+ G = nx.Graph()
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ G.add_edge(name1, "Radiohead", **{name2: 3})
+ G.add_node(name1, bipartite=0)
+ G.add_node("Radiohead", bipartite=1)
+
+ fname = tmp_path / "edgelist.txt"
+ bipartite.write_edgelist(G, fname)
+ H = bipartite.read_edgelist(fname)
+ assert graphs_equal(G, H)
+
+ def test_latin1_issue(self, tmp_path):
+ G = nx.Graph()
+ name1 = chr(2344) + chr(123) + chr(6543)
+ name2 = chr(5543) + chr(1543) + chr(324)
+ G.add_edge(name1, "Radiohead", **{name2: 3})
+ G.add_node(name1, bipartite=0)
+ G.add_node("Radiohead", bipartite=1)
+
+ fname = tmp_path / "edgelist.txt"
+ with pytest.raises(UnicodeEncodeError):
+ bipartite.write_edgelist(G, fname, encoding="latin-1")
+
+ def test_latin1(self, tmp_path):
+ G = nx.Graph()
+ name1 = "Bj" + chr(246) + "rk"
+ name2 = chr(220) + "ber"
+ G.add_edge(name1, "Radiohead", **{name2: 3})
+ G.add_node(name1, bipartite=0)
+ G.add_node("Radiohead", bipartite=1)
+
+ fname = tmp_path / "edgelist.txt"
+ bipartite.write_edgelist(G, fname, encoding="latin-1")
+ H = bipartite.read_edgelist(fname, encoding="latin-1")
+ assert graphs_equal(G, H)
+
+ def test_edgelist_graph(self, tmp_path):
+ G = self.G
+ fname = tmp_path / "edgelist.txt"
+ bipartite.write_edgelist(G, fname)
+ H = bipartite.read_edgelist(fname)
+ H2 = bipartite.read_edgelist(fname)
+ assert H is not H2 # they should be different graphs
+ G.remove_node("g") # isolated nodes are not written in edgelist
+ assert nodes_equal(list(H), list(G))
+ assert edges_equal(list(H.edges()), list(G.edges()))
+
+ def test_edgelist_integers(self, tmp_path):
+ G = nx.convert_node_labels_to_integers(self.G)
+ fname = tmp_path / "edgelist.txt"
+ bipartite.write_edgelist(G, fname)
+ H = bipartite.read_edgelist(fname, nodetype=int)
+ # isolated nodes are not written in edgelist
+ G.remove_nodes_from(list(nx.isolates(G)))
+ assert nodes_equal(list(H), list(G))
+ assert edges_equal(list(H.edges()), list(G.edges()))
+
+ def test_edgelist_multigraph(self, tmp_path):
+ G = self.MG
+ fname = tmp_path / "edgelist.txt"
+ bipartite.write_edgelist(G, fname)
+ H = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph())
+ H2 = bipartite.read_edgelist(fname, nodetype=int, create_using=nx.MultiGraph())
+ assert H is not H2 # they should be different graphs
+ assert nodes_equal(list(H), list(G))
+ assert edges_equal(list(H.edges()), list(G.edges()))
+
+ def test_empty_digraph(self):
+ with pytest.raises(nx.NetworkXNotImplemented):
+ bytesIO = io.BytesIO()
+ bipartite.write_edgelist(nx.DiGraph(), bytesIO)
+
+ def test_raise_attribute(self):
+ with pytest.raises(AttributeError):
+ G = nx.path_graph(4)
+ bytesIO = io.BytesIO()
+ bipartite.write_edgelist(G, bytesIO)
+
+ def test_parse_edgelist(self):
+ """Tests for conditions specific to
+ parse_edge_list method"""
+
+ # ignore strings of length less than 2
+ lines = ["1 2", "2 3", "3 1", "4", " "]
+ G = bipartite.parse_edgelist(lines, nodetype=int)
+ assert list(G.nodes) == [1, 2, 3]
+
+ # Exception raised when node is not convertible
+ # to specified data type
+ with pytest.raises(TypeError, match=".*Failed to convert nodes"):
+ lines = ["a b", "b c", "c a"]
+ G = bipartite.parse_edgelist(lines, nodetype=int)
+
+ # Exception raised when format of data is not
+ # convertible to dictionary object
+ with pytest.raises(TypeError, match=".*Failed to convert edge data"):
+ lines = ["1 2 3", "2 3 4", "3 1 2"]
+ G = bipartite.parse_edgelist(lines, nodetype=int)
+
+ # Exception raised when edge data and data
+ # keys are not of same length
+ with pytest.raises(IndexError):
+ lines = ["1 2 3 4", "2 3 4"]
+ G = bipartite.parse_edgelist(
+ lines, nodetype=int, data=[("weight", int), ("key", int)]
+ )
+
+ # Exception raised when edge data is not
+ # convertible to specified data type
+ with pytest.raises(TypeError, match=".*Failed to convert key data"):
+ lines = ["1 2 3 a", "2 3 4 b"]
+ G = bipartite.parse_edgelist(
+ lines, nodetype=int, data=[("weight", int), ("key", int)]
+ )
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py
new file mode 100644
index 0000000000000000000000000000000000000000..17b7124341bd6b0e82b5f01b8e5c6f8d1235efb9
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_extendability.py
@@ -0,0 +1,334 @@
+import pytest
+
+import networkx as nx
+
+
+def test_selfloops_raises():
+ G = nx.ladder_graph(3)
+ G.add_edge(0, 0)
+ with pytest.raises(nx.NetworkXError, match=".*not bipartite"):
+ nx.bipartite.maximal_extendability(G)
+
+
+def test_disconnected_raises():
+ G = nx.ladder_graph(3)
+ G.add_node("a")
+ with pytest.raises(nx.NetworkXError, match=".*not connected"):
+ nx.bipartite.maximal_extendability(G)
+
+
+def test_not_bipartite_raises():
+ G = nx.complete_graph(5)
+ with pytest.raises(nx.NetworkXError, match=".*not bipartite"):
+ nx.bipartite.maximal_extendability(G)
+
+
+def test_no_perfect_matching_raises():
+ G = nx.Graph([(0, 1), (0, 2)])
+ with pytest.raises(nx.NetworkXError, match=".*not contain a perfect matching"):
+ nx.bipartite.maximal_extendability(G)
+
+
+def test_residual_graph_not_strongly_connected_raises():
+ G = nx.Graph([(1, 2), (2, 3), (3, 4)])
+ with pytest.raises(
+ nx.NetworkXError, match="The residual graph of G is not strongly connected"
+ ):
+ nx.bipartite.maximal_extendability(G)
+
+
+def test_ladder_graph_is_1():
+ G = nx.ladder_graph(3)
+ assert nx.bipartite.maximal_extendability(G) == 1
+
+
+def test_cubical_graph_is_2():
+ G = nx.cubical_graph()
+ assert nx.bipartite.maximal_extendability(G) == 2
+
+
+def test_k_is_3():
+ G = nx.Graph(
+ [
+ (1, 6),
+ (1, 7),
+ (1, 8),
+ (1, 9),
+ (2, 6),
+ (2, 7),
+ (2, 8),
+ (2, 10),
+ (3, 6),
+ (3, 8),
+ (3, 9),
+ (3, 10),
+ (4, 7),
+ (4, 8),
+ (4, 9),
+ (4, 10),
+ (5, 6),
+ (5, 7),
+ (5, 9),
+ (5, 10),
+ ]
+ )
+ assert nx.bipartite.maximal_extendability(G) == 3
+
+
+def test_k_is_4():
+ G = nx.Graph(
+ [
+ (8, 1),
+ (8, 2),
+ (8, 3),
+ (8, 4),
+ (8, 5),
+ (9, 1),
+ (9, 2),
+ (9, 3),
+ (9, 4),
+ (9, 7),
+ (10, 1),
+ (10, 2),
+ (10, 3),
+ (10, 4),
+ (10, 6),
+ (11, 1),
+ (11, 2),
+ (11, 5),
+ (11, 6),
+ (11, 7),
+ (12, 1),
+ (12, 3),
+ (12, 5),
+ (12, 6),
+ (12, 7),
+ (13, 2),
+ (13, 4),
+ (13, 5),
+ (13, 6),
+ (13, 7),
+ (14, 3),
+ (14, 4),
+ (14, 5),
+ (14, 6),
+ (14, 7),
+ ]
+ )
+ assert nx.bipartite.maximal_extendability(G) == 4
+
+
+def test_k_is_5():
+ G = nx.Graph(
+ [
+ (8, 1),
+ (8, 2),
+ (8, 3),
+ (8, 4),
+ (8, 5),
+ (8, 6),
+ (9, 1),
+ (9, 2),
+ (9, 3),
+ (9, 4),
+ (9, 5),
+ (9, 7),
+ (10, 1),
+ (10, 2),
+ (10, 3),
+ (10, 4),
+ (10, 6),
+ (10, 7),
+ (11, 1),
+ (11, 2),
+ (11, 3),
+ (11, 5),
+ (11, 6),
+ (11, 7),
+ (12, 1),
+ (12, 2),
+ (12, 4),
+ (12, 5),
+ (12, 6),
+ (12, 7),
+ (13, 1),
+ (13, 3),
+ (13, 4),
+ (13, 5),
+ (13, 6),
+ (13, 7),
+ (14, 2),
+ (14, 3),
+ (14, 4),
+ (14, 5),
+ (14, 6),
+ (14, 7),
+ ]
+ )
+ assert nx.bipartite.maximal_extendability(G) == 5
+
+
+def test_k_is_6():
+ G = nx.Graph(
+ [
+ (9, 1),
+ (9, 2),
+ (9, 3),
+ (9, 4),
+ (9, 5),
+ (9, 6),
+ (9, 7),
+ (10, 1),
+ (10, 2),
+ (10, 3),
+ (10, 4),
+ (10, 5),
+ (10, 6),
+ (10, 8),
+ (11, 1),
+ (11, 2),
+ (11, 3),
+ (11, 4),
+ (11, 5),
+ (11, 7),
+ (11, 8),
+ (12, 1),
+ (12, 2),
+ (12, 3),
+ (12, 4),
+ (12, 6),
+ (12, 7),
+ (12, 8),
+ (13, 1),
+ (13, 2),
+ (13, 3),
+ (13, 5),
+ (13, 6),
+ (13, 7),
+ (13, 8),
+ (14, 1),
+ (14, 2),
+ (14, 4),
+ (14, 5),
+ (14, 6),
+ (14, 7),
+ (14, 8),
+ (15, 1),
+ (15, 3),
+ (15, 4),
+ (15, 5),
+ (15, 6),
+ (15, 7),
+ (15, 8),
+ (16, 2),
+ (16, 3),
+ (16, 4),
+ (16, 5),
+ (16, 6),
+ (16, 7),
+ (16, 8),
+ ]
+ )
+ assert nx.bipartite.maximal_extendability(G) == 6
+
+
+def test_k_is_7():
+ G = nx.Graph(
+ [
+ (1, 11),
+ (1, 12),
+ (1, 13),
+ (1, 14),
+ (1, 15),
+ (1, 16),
+ (1, 17),
+ (1, 18),
+ (2, 11),
+ (2, 12),
+ (2, 13),
+ (2, 14),
+ (2, 15),
+ (2, 16),
+ (2, 17),
+ (2, 19),
+ (3, 11),
+ (3, 12),
+ (3, 13),
+ (3, 14),
+ (3, 15),
+ (3, 16),
+ (3, 17),
+ (3, 20),
+ (4, 11),
+ (4, 12),
+ (4, 13),
+ (4, 14),
+ (4, 15),
+ (4, 16),
+ (4, 17),
+ (4, 18),
+ (4, 19),
+ (4, 20),
+ (5, 11),
+ (5, 12),
+ (5, 13),
+ (5, 14),
+ (5, 15),
+ (5, 16),
+ (5, 17),
+ (5, 18),
+ (5, 19),
+ (5, 20),
+ (6, 11),
+ (6, 12),
+ (6, 13),
+ (6, 14),
+ (6, 15),
+ (6, 16),
+ (6, 17),
+ (6, 18),
+ (6, 19),
+ (6, 20),
+ (7, 11),
+ (7, 12),
+ (7, 13),
+ (7, 14),
+ (7, 15),
+ (7, 16),
+ (7, 17),
+ (7, 18),
+ (7, 19),
+ (7, 20),
+ (8, 11),
+ (8, 12),
+ (8, 13),
+ (8, 14),
+ (8, 15),
+ (8, 16),
+ (8, 17),
+ (8, 18),
+ (8, 19),
+ (8, 20),
+ (9, 11),
+ (9, 12),
+ (9, 13),
+ (9, 14),
+ (9, 15),
+ (9, 16),
+ (9, 17),
+ (9, 18),
+ (9, 19),
+ (9, 20),
+ (10, 11),
+ (10, 12),
+ (10, 13),
+ (10, 14),
+ (10, 15),
+ (10, 16),
+ (10, 17),
+ (10, 18),
+ (10, 19),
+ (10, 20),
+ ]
+ )
+ assert nx.bipartite.maximal_extendability(G) == 7
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f3b84cece23ba6f3de2a1e454d01548af2e1390
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_generators.py
@@ -0,0 +1,400 @@
+import numbers
+
+import pytest
+
+import networkx as nx
+
+from ..generators import (
+ alternating_havel_hakimi_graph,
+ complete_bipartite_graph,
+ configuration_model,
+ gnmk_random_graph,
+ havel_hakimi_graph,
+ preferential_attachment_graph,
+ random_graph,
+ reverse_havel_hakimi_graph,
+)
+
+"""
+Generators - Bipartite
+----------------------
+"""
+
+
+class TestGeneratorsBipartite:
+ def test_complete_bipartite_graph(self):
+ G = complete_bipartite_graph(0, 0)
+ assert nx.is_isomorphic(G, nx.null_graph())
+
+ for i in [1, 5]:
+ G = complete_bipartite_graph(i, 0)
+ assert nx.is_isomorphic(G, nx.empty_graph(i))
+ G = complete_bipartite_graph(0, i)
+ assert nx.is_isomorphic(G, nx.empty_graph(i))
+
+ G = complete_bipartite_graph(2, 2)
+ assert nx.is_isomorphic(G, nx.cycle_graph(4))
+
+ G = complete_bipartite_graph(1, 5)
+ assert nx.is_isomorphic(G, nx.star_graph(5))
+
+ G = complete_bipartite_graph(5, 1)
+ assert nx.is_isomorphic(G, nx.star_graph(5))
+
+ # complete_bipartite_graph(m1,m2) is a connected graph with
+ # m1+m2 nodes and m1*m2 edges
+ for m1, m2 in [(5, 11), (7, 3)]:
+ G = complete_bipartite_graph(m1, m2)
+ assert nx.number_of_nodes(G) == m1 + m2
+ assert nx.number_of_edges(G) == m1 * m2
+
+ with pytest.raises(nx.NetworkXError):
+ complete_bipartite_graph(7, 3, create_using=nx.DiGraph)
+ with pytest.raises(nx.NetworkXError):
+ complete_bipartite_graph(7, 3, create_using=nx.MultiDiGraph)
+
+ mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph)
+ assert mG.is_multigraph()
+ assert sorted(mG.edges()) == sorted(G.edges())
+
+ mG = complete_bipartite_graph(7, 3, create_using=nx.MultiGraph)
+ assert mG.is_multigraph()
+ assert sorted(mG.edges()) == sorted(G.edges())
+
+ mG = complete_bipartite_graph(7, 3) # default to Graph
+ assert sorted(mG.edges()) == sorted(G.edges())
+ assert not mG.is_multigraph()
+ assert not mG.is_directed()
+
+ # specify nodes rather than number of nodes
+ for n1, n2 in [([1, 2], "ab"), (3, 2), (3, "ab"), ("ab", 3)]:
+ G = complete_bipartite_graph(n1, n2)
+ if isinstance(n1, numbers.Integral):
+ if isinstance(n2, numbers.Integral):
+ n2 = range(n1, n1 + n2)
+ n1 = range(n1)
+ elif isinstance(n2, numbers.Integral):
+ n2 = range(n2)
+ edges = {(u, v) for u in n1 for v in n2}
+ assert edges == set(G.edges)
+ assert G.size() == len(edges)
+
+ # raise when node sets are not distinct
+ for n1, n2 in [([1, 2], 3), (3, [1, 2]), ("abc", "bcd")]:
+ pytest.raises(nx.NetworkXError, complete_bipartite_graph, n1, n2)
+
+ def test_configuration_model(self):
+ aseq = []
+ bseq = []
+ G = configuration_model(aseq, bseq)
+ assert len(G) == 0
+
+ aseq = [0, 0]
+ bseq = [0, 0]
+ G = configuration_model(aseq, bseq)
+ assert len(G) == 4
+ assert G.number_of_edges() == 0
+
+ aseq = [3, 3, 3, 3]
+ bseq = [2, 2, 2, 2, 2]
+ pytest.raises(nx.NetworkXError, configuration_model, aseq, bseq)
+
+ aseq = [3, 3, 3, 3]
+ bseq = [2, 2, 2, 2, 2, 2]
+ G = configuration_model(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 2, 2, 2]
+ bseq = [3, 3, 3, 3]
+ G = configuration_model(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 1, 1, 1]
+ bseq = [3, 3, 3]
+ G = configuration_model(aseq, bseq)
+ assert G.is_multigraph()
+ assert not G.is_directed()
+ assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
+
+ GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
+ assert GU.number_of_nodes() == 6
+
+ GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
+ assert GD.number_of_nodes() == 3
+
+ G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
+ assert not G.is_multigraph()
+ assert not G.is_directed()
+
+ pytest.raises(
+ nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph()
+ )
+ pytest.raises(
+ nx.NetworkXError, configuration_model, aseq, bseq, create_using=nx.DiGraph
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ configuration_model,
+ aseq,
+ bseq,
+ create_using=nx.MultiDiGraph,
+ )
+
+ def test_havel_hakimi_graph(self):
+ aseq = []
+ bseq = []
+ G = havel_hakimi_graph(aseq, bseq)
+ assert len(G) == 0
+
+ aseq = [0, 0]
+ bseq = [0, 0]
+ G = havel_hakimi_graph(aseq, bseq)
+ assert len(G) == 4
+ assert G.number_of_edges() == 0
+
+ aseq = [3, 3, 3, 3]
+ bseq = [2, 2, 2, 2, 2]
+ pytest.raises(nx.NetworkXError, havel_hakimi_graph, aseq, bseq)
+
+ bseq = [2, 2, 2, 2, 2, 2]
+ G = havel_hakimi_graph(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 2, 2, 2]
+ bseq = [3, 3, 3, 3]
+ G = havel_hakimi_graph(aseq, bseq)
+ assert G.is_multigraph()
+ assert not G.is_directed()
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
+ assert GU.number_of_nodes() == 6
+
+ GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
+ assert GD.number_of_nodes() == 4
+
+ G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
+ assert not G.is_multigraph()
+ assert not G.is_directed()
+
+ pytest.raises(
+ nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph
+ )
+ pytest.raises(
+ nx.NetworkXError, havel_hakimi_graph, aseq, bseq, create_using=nx.DiGraph
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.MultiDiGraph,
+ )
+
+ def test_reverse_havel_hakimi_graph(self):
+ aseq = []
+ bseq = []
+ G = reverse_havel_hakimi_graph(aseq, bseq)
+ assert len(G) == 0
+
+ aseq = [0, 0]
+ bseq = [0, 0]
+ G = reverse_havel_hakimi_graph(aseq, bseq)
+ assert len(G) == 4
+ assert G.number_of_edges() == 0
+
+ aseq = [3, 3, 3, 3]
+ bseq = [2, 2, 2, 2, 2]
+ pytest.raises(nx.NetworkXError, reverse_havel_hakimi_graph, aseq, bseq)
+
+ bseq = [2, 2, 2, 2, 2, 2]
+ G = reverse_havel_hakimi_graph(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 2, 2, 2]
+ bseq = [3, 3, 3, 3]
+ G = reverse_havel_hakimi_graph(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 1, 1, 1]
+ bseq = [3, 3, 3]
+ G = reverse_havel_hakimi_graph(aseq, bseq)
+ assert G.is_multigraph()
+ assert not G.is_directed()
+ assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
+
+ GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
+ assert GU.number_of_nodes() == 6
+
+ GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
+ assert GD.number_of_nodes() == 3
+
+ G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
+ assert not G.is_multigraph()
+ assert not G.is_directed()
+
+ pytest.raises(
+ nx.NetworkXError,
+ reverse_havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.DiGraph,
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ reverse_havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.DiGraph,
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ reverse_havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.MultiDiGraph,
+ )
+
+ def test_alternating_havel_hakimi_graph(self):
+ aseq = []
+ bseq = []
+ G = alternating_havel_hakimi_graph(aseq, bseq)
+ assert len(G) == 0
+
+ aseq = [0, 0]
+ bseq = [0, 0]
+ G = alternating_havel_hakimi_graph(aseq, bseq)
+ assert len(G) == 4
+ assert G.number_of_edges() == 0
+
+ aseq = [3, 3, 3, 3]
+ bseq = [2, 2, 2, 2, 2]
+ pytest.raises(nx.NetworkXError, alternating_havel_hakimi_graph, aseq, bseq)
+
+ bseq = [2, 2, 2, 2, 2, 2]
+ G = alternating_havel_hakimi_graph(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 2, 2, 2]
+ bseq = [3, 3, 3, 3]
+ G = alternating_havel_hakimi_graph(aseq, bseq)
+ assert sorted(d for n, d in G.degree()) == [2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
+
+ aseq = [2, 2, 2, 1, 1, 1]
+ bseq = [3, 3, 3]
+ G = alternating_havel_hakimi_graph(aseq, bseq)
+ assert G.is_multigraph()
+ assert not G.is_directed()
+ assert sorted(d for n, d in G.degree()) == [1, 1, 1, 2, 2, 2, 3, 3, 3]
+
+ GU = nx.projected_graph(nx.Graph(G), range(len(aseq)))
+ assert GU.number_of_nodes() == 6
+
+ GD = nx.projected_graph(nx.Graph(G), range(len(aseq), len(aseq) + len(bseq)))
+ assert GD.number_of_nodes() == 3
+
+ G = reverse_havel_hakimi_graph(aseq, bseq, create_using=nx.Graph)
+ assert not G.is_multigraph()
+ assert not G.is_directed()
+
+ pytest.raises(
+ nx.NetworkXError,
+ alternating_havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.DiGraph,
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ alternating_havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.DiGraph,
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ alternating_havel_hakimi_graph,
+ aseq,
+ bseq,
+ create_using=nx.MultiDiGraph,
+ )
+
+ def test_preferential_attachment(self):
+ aseq = [3, 2, 1, 1]
+ G = preferential_attachment_graph(aseq, 0.5)
+ assert G.is_multigraph()
+ assert not G.is_directed()
+
+ G = preferential_attachment_graph(aseq, 0.5, create_using=nx.Graph)
+ assert not G.is_multigraph()
+ assert not G.is_directed()
+
+ pytest.raises(
+ nx.NetworkXError,
+ preferential_attachment_graph,
+ aseq,
+ 0.5,
+ create_using=nx.DiGraph(),
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ preferential_attachment_graph,
+ aseq,
+ 0.5,
+ create_using=nx.DiGraph(),
+ )
+ pytest.raises(
+ nx.NetworkXError,
+ preferential_attachment_graph,
+ aseq,
+ 0.5,
+ create_using=nx.DiGraph(),
+ )
+
+ def test_random_graph(self):
+ n = 10
+ m = 20
+ G = random_graph(n, m, 0.9)
+ assert len(G) == 30
+ assert nx.is_bipartite(G)
+ X, Y = nx.algorithms.bipartite.sets(G)
+ assert set(range(n)) == X
+ assert set(range(n, n + m)) == Y
+
+ def test_random_digraph(self):
+ n = 10
+ m = 20
+ G = random_graph(n, m, 0.9, directed=True)
+ assert len(G) == 30
+ assert nx.is_bipartite(G)
+ X, Y = nx.algorithms.bipartite.sets(G)
+ assert set(range(n)) == X
+ assert set(range(n, n + m)) == Y
+
+ def test_gnmk_random_graph(self):
+ n = 10
+ m = 20
+ edges = 100
+ # set seed because sometimes it is not connected
+ # which raises an error in bipartite.sets(G) below.
+ G = gnmk_random_graph(n, m, edges, seed=1234)
+ assert len(G) == n + m
+ assert nx.is_bipartite(G)
+ X, Y = nx.algorithms.bipartite.sets(G)
+ # print(X)
+ assert set(range(n)) == X
+ assert set(range(n, n + m)) == Y
+ assert edges == len(list(G.edges()))
+
+ def test_gnmk_random_graph_complete(self):
+ n = 10
+ m = 20
+ edges = 200
+ G = gnmk_random_graph(n, m, edges)
+ assert len(G) == n + m
+ assert nx.is_bipartite(G)
+ X, Y = nx.algorithms.bipartite.sets(G)
+ # print(X)
+ assert set(range(n)) == X
+ assert set(range(n, n + m)) == Y
+ assert edges == len(list(G.edges()))
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ed7cdcb43429f92c95a8d78da48f5d2771db77b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matching.py
@@ -0,0 +1,326 @@
+"""Unit tests for the :mod:`networkx.algorithms.bipartite.matching` module."""
+import itertools
+
+import pytest
+
+import networkx as nx
+from networkx.algorithms.bipartite.matching import (
+ eppstein_matching,
+ hopcroft_karp_matching,
+ maximum_matching,
+ minimum_weight_full_matching,
+ to_vertex_cover,
+)
+
+
+class TestMatching:
+    """Tests for bipartite matching algorithms."""
+
+    def setup_method(self):
+        """Creates a bipartite graph for use in testing matching algorithms.
+
+        The bipartite graph has a maximum cardinality matching that leaves
+        vertex 1 and vertex 10 unmatched. The first six numbers are the left
+        vertices and the next six numbers are the right vertices.
+
+        """
+        self.simple_graph = nx.complete_bipartite_graph(2, 3)
+        # Expected matching for K_{2,3}, stored symmetrically: each pair
+        # appears both as (u: v) and (v: u).
+        self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1}
+
+        edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)]
+        self.top_nodes = set(range(6))
+        self.graph = nx.Graph()
+        self.graph.add_nodes_from(range(12))
+        self.graph.add_edges_from(edges)
+
+        # Example bipartite graph from issue 2127
+        G = nx.Graph()
+        G.add_nodes_from(
+            [
+                (1, "C"),
+                (1, "B"),
+                (0, "G"),
+                (1, "F"),
+                (1, "E"),
+                (0, "C"),
+                (1, "D"),
+                (1, "I"),
+                (0, "A"),
+                (0, "D"),
+                (0, "F"),
+                (0, "E"),
+                (0, "H"),
+                (1, "G"),
+                (1, "A"),
+                (0, "I"),
+                (0, "B"),
+                (1, "H"),
+            ]
+        )
+        G.add_edge((1, "C"), (0, "A"))
+        G.add_edge((1, "B"), (0, "A"))
+        G.add_edge((0, "G"), (1, "I"))
+        G.add_edge((0, "G"), (1, "H"))
+        G.add_edge((1, "F"), (0, "A"))
+        G.add_edge((1, "F"), (0, "C"))
+        G.add_edge((1, "F"), (0, "E"))
+        G.add_edge((1, "E"), (0, "A"))
+        G.add_edge((1, "E"), (0, "C"))
+        G.add_edge((0, "C"), (1, "D"))
+        G.add_edge((0, "C"), (1, "I"))
+        G.add_edge((0, "C"), (1, "G"))
+        G.add_edge((0, "C"), (1, "H"))
+        G.add_edge((1, "D"), (0, "A"))
+        G.add_edge((1, "I"), (0, "A"))
+        G.add_edge((1, "I"), (0, "E"))
+        G.add_edge((0, "A"), (1, "G"))
+        G.add_edge((0, "A"), (1, "H"))
+        G.add_edge((0, "E"), (1, "G"))
+        G.add_edge((0, "E"), (1, "H"))
+        self.disconnected_graph = G
+
+    def check_match(self, matching):
+        """Asserts that the matching is what we expect from the bipartite graph
+        constructed in the :meth:`setup` fixture.
+
+        """
+        # For the sake of brevity, rename `matching` to `M`.
+        M = matching
+        matched_vertices = frozenset(itertools.chain(*M.items()))
+        # Assert that the maximum number of vertices (10) is matched.
+        assert matched_vertices == frozenset(range(12)) - {1, 10}
+        # Assert that no vertex appears in two edges, or in other words, that
+        # the matching (u, v) and (v, u) both appear in the matching
+        # dictionary.
+        assert all(u == M[M[u]] for u in range(12) if u in M)
+
+    def check_vertex_cover(self, vertices):
+        """Asserts that the given set of vertices is the vertex cover we
+        expected from the bipartite graph constructed in the :meth:`setup`
+        fixture.
+
+        """
+        # By Konig's theorem, the number of edges in a maximum matching equals
+        # the number of vertices in a minimum vertex cover.
+        assert len(vertices) == 5
+        # Assert that the set is truly a vertex cover.
+        for u, v in self.graph.edges():
+            assert u in vertices or v in vertices
+        # TODO Assert that the vertices are the correct ones.
+
+    def test_eppstein_matching(self):
+        """Tests that David Eppstein's implementation of the Hopcroft--Karp
+        algorithm produces a maximum cardinality matching.
+
+        """
+        self.check_match(eppstein_matching(self.graph, self.top_nodes))
+
+    def test_hopcroft_karp_matching(self):
+        """Tests that the Hopcroft--Karp algorithm produces a maximum
+        cardinality matching in a bipartite graph.
+
+        """
+        self.check_match(hopcroft_karp_matching(self.graph, self.top_nodes))
+
+    def test_to_vertex_cover(self):
+        """Test for converting a maximum matching to a minimum vertex cover."""
+        matching = maximum_matching(self.graph, self.top_nodes)
+        vertex_cover = to_vertex_cover(self.graph, matching, self.top_nodes)
+        self.check_vertex_cover(vertex_cover)
+
+    def test_eppstein_matching_simple(self):
+        match = eppstein_matching(self.simple_graph)
+        assert match == self.simple_solution
+
+    def test_hopcroft_karp_matching_simple(self):
+        match = hopcroft_karp_matching(self.simple_graph)
+        assert match == self.simple_solution
+
+    def test_eppstein_matching_disconnected(self):
+        # A disconnected graph has an ambiguous bipartition, so the matching
+        # routines must refuse it rather than pick one arbitrarily.
+        with pytest.raises(nx.AmbiguousSolution):
+            match = eppstein_matching(self.disconnected_graph)
+
+    def test_hopcroft_karp_matching_disconnected(self):
+        with pytest.raises(nx.AmbiguousSolution):
+            match = hopcroft_karp_matching(self.disconnected_graph)
+
+    def test_issue_2127(self):
+        """Test from issue 2127"""
+        # Build the example DAG
+        G = nx.DiGraph()
+        G.add_edge("A", "C")
+        G.add_edge("A", "B")
+        G.add_edge("C", "E")
+        G.add_edge("C", "D")
+        G.add_edge("E", "G")
+        G.add_edge("E", "F")
+        G.add_edge("G", "I")
+        G.add_edge("G", "H")
+
+        tc = nx.transitive_closure(G)
+        btc = nx.Graph()
+
+        # Create a bipartite graph based on the transitive closure of G
+        for v in tc.nodes():
+            btc.add_node((0, v))
+            btc.add_node((1, v))
+
+        for u, v in tc.edges():
+            btc.add_edge((0, u), (1, v))
+
+        top_nodes = {n for n in btc if n[0] == 0}
+        matching = hopcroft_karp_matching(btc, top_nodes)
+        vertex_cover = to_vertex_cover(btc, matching, top_nodes)
+        independent_set = set(G) - {v for _, v in vertex_cover}
+        assert {"B", "D", "F", "I", "H"} == independent_set
+
+    def test_vertex_cover_issue_2384(self):
+        G = nx.Graph([(0, 3), (1, 3), (1, 4), (2, 3)])
+        matching = maximum_matching(G)
+        vertex_cover = to_vertex_cover(G, matching)
+        for u, v in G.edges():
+            assert u in vertex_cover or v in vertex_cover
+
+    def test_vertex_cover_issue_3306(self):
+        G = nx.Graph()
+        edges = [(0, 2), (1, 0), (1, 1), (1, 2), (2, 2)]
+        G.add_edges_from([((i, "L"), (j, "R")) for i, j in edges])
+
+        matching = maximum_matching(G)
+        vertex_cover = to_vertex_cover(G, matching)
+        for u, v in G.edges():
+            assert u in vertex_cover or v in vertex_cover
+
+    def test_unorderable_nodes(self):
+        # Plain object() instances cannot be compared with <, so this
+        # exercises the algorithms with unorderable (but hashable) nodes.
+        a = object()
+        b = object()
+        c = object()
+        d = object()
+        e = object()
+        G = nx.Graph([(a, d), (b, d), (b, e), (c, d)])
+        matching = maximum_matching(G)
+        vertex_cover = to_vertex_cover(G, matching)
+        for u, v in G.edges():
+            assert u in vertex_cover or v in vertex_cover
+
+
+def test_eppstein_matching():
+    """Test in accordance to issue #1927"""
+    G = nx.Graph()
+    G.add_nodes_from(["a", 2, 3, 4], bipartite=0)
+    G.add_nodes_from([1, "b", "c"], bipartite=1)
+    G.add_edges_from([("a", 1), ("a", "b"), (2, "b"), (2, "c"), (3, "c"), (4, 1)])
+    matching = eppstein_matching(G)
+    assert len(matching) == len(maximum_matching(G))
+    # The matching dict must be symmetric: every matched partner (value)
+    # also appears as a key.
+    assert all(x in set(matching.keys()) for x in set(matching.values()))
+
+
+class TestMinimumWeightFullMatching:
+    """Tests for :func:`minimum_weight_full_matching`."""
+
+    @classmethod
+    def setup_class(cls):
+        # minimum_weight_full_matching requires scipy; skip the whole class
+        # when it is unavailable.
+        pytest.importorskip("scipy")
+
+    def test_minimum_weight_full_matching_incomplete_graph(self):
+        B = nx.Graph()
+        B.add_nodes_from([1, 2], bipartite=0)
+        B.add_nodes_from([3, 4], bipartite=1)
+        B.add_edge(1, 4, weight=100)
+        B.add_edge(2, 3, weight=100)
+        B.add_edge(2, 4, weight=50)
+        matching = minimum_weight_full_matching(B)
+        assert matching == {1: 4, 2: 3, 4: 1, 3: 2}
+
+    def test_minimum_weight_full_matching_with_no_full_matching(self):
+        # Nodes 1 and 2 compete for the single neighbor 4, so no full
+        # matching exists and the call must raise.
+        B = nx.Graph()
+        B.add_nodes_from([1, 2, 3], bipartite=0)
+        B.add_nodes_from([4, 5, 6], bipartite=1)
+        B.add_edge(1, 4, weight=100)
+        B.add_edge(2, 4, weight=100)
+        B.add_edge(3, 4, weight=50)
+        B.add_edge(3, 5, weight=50)
+        B.add_edge(3, 6, weight=50)
+        with pytest.raises(ValueError):
+            minimum_weight_full_matching(B)
+
+    def test_minimum_weight_full_matching_square(self):
+        G = nx.complete_bipartite_graph(3, 3)
+        G.add_edge(0, 3, weight=400)
+        G.add_edge(0, 4, weight=150)
+        G.add_edge(0, 5, weight=400)
+        G.add_edge(1, 3, weight=400)
+        G.add_edge(1, 4, weight=450)
+        G.add_edge(1, 5, weight=600)
+        G.add_edge(2, 3, weight=300)
+        G.add_edge(2, 4, weight=225)
+        G.add_edge(2, 5, weight=300)
+        matching = minimum_weight_full_matching(G)
+        assert matching == {0: 4, 1: 3, 2: 5, 4: 0, 3: 1, 5: 2}
+
+    def test_minimum_weight_full_matching_smaller_left(self):
+        G = nx.complete_bipartite_graph(3, 4)
+        G.add_edge(0, 3, weight=400)
+        G.add_edge(0, 4, weight=150)
+        G.add_edge(0, 5, weight=400)
+        G.add_edge(0, 6, weight=1)
+        G.add_edge(1, 3, weight=400)
+        G.add_edge(1, 4, weight=450)
+        G.add_edge(1, 5, weight=600)
+        G.add_edge(1, 6, weight=2)
+        G.add_edge(2, 3, weight=300)
+        G.add_edge(2, 4, weight=225)
+        G.add_edge(2, 5, weight=290)
+        G.add_edge(2, 6, weight=3)
+        matching = minimum_weight_full_matching(G)
+        assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1}
+
+    def test_minimum_weight_full_matching_smaller_top_nodes_right(self):
+        # Same graph as above, but with the larger side passed explicitly
+        # as top_nodes; the result must be identical.
+        G = nx.complete_bipartite_graph(3, 4)
+        G.add_edge(0, 3, weight=400)
+        G.add_edge(0, 4, weight=150)
+        G.add_edge(0, 5, weight=400)
+        G.add_edge(0, 6, weight=1)
+        G.add_edge(1, 3, weight=400)
+        G.add_edge(1, 4, weight=450)
+        G.add_edge(1, 5, weight=600)
+        G.add_edge(1, 6, weight=2)
+        G.add_edge(2, 3, weight=300)
+        G.add_edge(2, 4, weight=225)
+        G.add_edge(2, 5, weight=290)
+        G.add_edge(2, 6, weight=3)
+        matching = minimum_weight_full_matching(G, top_nodes=[3, 4, 5, 6])
+        assert matching == {0: 4, 1: 6, 2: 5, 4: 0, 5: 2, 6: 1}
+
+    def test_minimum_weight_full_matching_smaller_right(self):
+        G = nx.complete_bipartite_graph(4, 3)
+        G.add_edge(0, 4, weight=400)
+        G.add_edge(0, 5, weight=400)
+        G.add_edge(0, 6, weight=300)
+        G.add_edge(1, 4, weight=150)
+        G.add_edge(1, 5, weight=450)
+        G.add_edge(1, 6, weight=225)
+        G.add_edge(2, 4, weight=400)
+        G.add_edge(2, 5, weight=600)
+        G.add_edge(2, 6, weight=290)
+        G.add_edge(3, 4, weight=1)
+        G.add_edge(3, 5, weight=2)
+        G.add_edge(3, 6, weight=3)
+        matching = minimum_weight_full_matching(G)
+        assert matching == {1: 4, 2: 6, 3: 5, 4: 1, 5: 3, 6: 2}
+
+    def test_minimum_weight_full_matching_negative_weights(self):
+        G = nx.complete_bipartite_graph(2, 2)
+        G.add_edge(0, 2, weight=-2)
+        G.add_edge(0, 3, weight=0.2)
+        G.add_edge(1, 2, weight=-2)
+        G.add_edge(1, 3, weight=0.3)
+        matching = minimum_weight_full_matching(G)
+        assert matching == {0: 3, 1: 2, 2: 1, 3: 0}
+
+    def test_minimum_weight_full_matching_different_weight_key(self):
+        # The weight= keyword selects which edge attribute to minimize.
+        G = nx.complete_bipartite_graph(2, 2)
+        G.add_edge(0, 2, mass=2)
+        G.add_edge(0, 3, mass=0.2)
+        G.add_edge(1, 2, mass=1)
+        G.add_edge(1, 3, mass=2)
+        matching = minimum_weight_full_matching(G, weight="mass")
+        assert matching == {0: 3, 1: 2, 2: 1, 3: 0}
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..53d83115118e9bbdf6238b89d171bf6b7b829477
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_matrix.py
@@ -0,0 +1,84 @@
+import pytest
+
+np = pytest.importorskip("numpy")
+sp = pytest.importorskip("scipy")
+sparse = pytest.importorskip("scipy.sparse")
+
+
+import networkx as nx
+from networkx.algorithms import bipartite
+from networkx.utils import edges_equal
+
+
+class TestBiadjacencyMatrix:
+    """Tests for biadjacency_matrix / from_biadjacency_matrix conversions."""
+
+    def test_biadjacency_matrix_weight(self):
+        # The weight= keyword selects which edge attribute fills the matrix.
+        G = nx.path_graph(5)
+        G.add_edge(0, 1, weight=2, other=4)
+        X = [1, 3]
+        Y = [0, 2, 4]
+        M = bipartite.biadjacency_matrix(G, X, weight="weight")
+        assert M[0, 0] == 2
+        M = bipartite.biadjacency_matrix(G, X, weight="other")
+        assert M[0, 0] == 4
+
+    def test_biadjacency_matrix(self):
+        # Matrix shape is (len(top nodes), len(bottom nodes)).
+        tops = [2, 5, 10]
+        bots = [5, 10, 15]
+        for i in range(len(tops)):
+            G = bipartite.random_graph(tops[i], bots[i], 0.2)
+            top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0]
+            M = bipartite.biadjacency_matrix(G, top)
+            assert M.shape[0] == tops[i]
+            assert M.shape[1] == bots[i]
+
+    def test_biadjacency_matrix_order(self):
+        # Row/column order follows the order of the given node lists.
+        G = nx.path_graph(5)
+        G.add_edge(0, 1, weight=2)
+        X = [3, 1]
+        Y = [4, 2, 0]
+        M = bipartite.biadjacency_matrix(G, X, Y, weight="weight")
+        assert M[1, 2] == 2
+
+    def test_biadjacency_matrix_empty_graph(self):
+        G = nx.empty_graph(2)
+        M = nx.bipartite.biadjacency_matrix(G, [0])
+        assert np.array_equal(M.toarray(), np.array([[0]]))
+
+    def test_null_graph(self):
+        with pytest.raises(nx.NetworkXError):
+            bipartite.biadjacency_matrix(nx.Graph(), [])
+
+    def test_empty_graph(self):
+        with pytest.raises(nx.NetworkXError):
+            bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [])
+
+    def test_duplicate_row(self):
+        with pytest.raises(nx.NetworkXError):
+            bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [1, 1])
+
+    def test_duplicate_col(self):
+        with pytest.raises(nx.NetworkXError):
+            bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], [1, 1])
+
+    def test_format_keyword(self):
+        with pytest.raises(nx.NetworkXError):
+            bipartite.biadjacency_matrix(nx.Graph([(1, 0)]), [0], format="foo")
+
+    def test_from_biadjacency_roundtrip(self):
+        B1 = nx.path_graph(5)
+        M = bipartite.biadjacency_matrix(B1, [0, 2, 4])
+        B2 = bipartite.from_biadjacency_matrix(M)
+        assert nx.is_isomorphic(B1, B2)
+
+    def test_from_biadjacency_weight(self):
+        M = sparse.csc_matrix([[1, 2], [0, 3]])
+        B = bipartite.from_biadjacency_matrix(M)
+        assert edges_equal(B.edges(), [(0, 2), (0, 3), (1, 3)])
+        B = bipartite.from_biadjacency_matrix(M, edge_attribute="weight")
+        e = [(0, 2, {"weight": 1}), (0, 3, {"weight": 2}), (1, 3, {"weight": 3})]
+        assert edges_equal(B.edges(data=True), e)
+
+    def test_from_biadjacency_multigraph(self):
+        # In a multigraph, matrix entry k yields k parallel edges.
+        M = sparse.csc_matrix([[1, 2], [0, 3]])
+        B = bipartite.from_biadjacency_matrix(M, create_using=nx.MultiGraph())
+        assert edges_equal(B.edges(), [(0, 2), (0, 3), (0, 3), (1, 3), (1, 3), (1, 3)])
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py
new file mode 100644
index 0000000000000000000000000000000000000000..076bb42b668657cad51f6423e5aacf23a2a1cd28
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_project.py
@@ -0,0 +1,407 @@
+import pytest
+
+import networkx as nx
+from networkx.algorithms import bipartite
+from networkx.utils import edges_equal, nodes_equal
+
+
+class TestBipartiteProject:
+    """Tests for the unweighted and weighted bipartite projection functions."""
+
+    def test_path_projected_graph(self):
+        G = nx.path_graph(4)
+        P = bipartite.projected_graph(G, [1, 3])
+        assert nodes_equal(list(P), [1, 3])
+        assert edges_equal(list(P.edges()), [(1, 3)])
+        P = bipartite.projected_graph(G, [0, 2])
+        assert nodes_equal(list(P), [0, 2])
+        assert edges_equal(list(P.edges()), [(0, 2)])
+        G = nx.MultiGraph([(0, 1)])
+        with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"):
+            bipartite.projected_graph(G, [0])
+
+    def test_path_projected_properties_graph(self):
+        # Node attributes must survive the projection.
+        G = nx.path_graph(4)
+        G.add_node(1, name="one")
+        G.add_node(2, name="two")
+        P = bipartite.projected_graph(G, [1, 3])
+        assert nodes_equal(list(P), [1, 3])
+        assert edges_equal(list(P.edges()), [(1, 3)])
+        assert P.nodes[1]["name"] == G.nodes[1]["name"]
+        P = bipartite.projected_graph(G, [0, 2])
+        assert nodes_equal(list(P), [0, 2])
+        assert edges_equal(list(P.edges()), [(0, 2)])
+        assert P.nodes[2]["name"] == G.nodes[2]["name"]
+
+    def test_path_collaboration_projected_graph(self):
+        G = nx.path_graph(4)
+        P = bipartite.collaboration_weighted_projected_graph(G, [1, 3])
+        assert nodes_equal(list(P), [1, 3])
+        assert edges_equal(list(P.edges()), [(1, 3)])
+        P[1][3]["weight"] = 1
+        P = bipartite.collaboration_weighted_projected_graph(G, [0, 2])
+        assert nodes_equal(list(P), [0, 2])
+        assert edges_equal(list(P.edges()), [(0, 2)])
+        P[0][2]["weight"] = 1
+
+    def test_directed_path_collaboration_projected_graph(self):
+        G = nx.DiGraph()
+        nx.add_path(G, range(4))
+        P = bipartite.collaboration_weighted_projected_graph(G, [1, 3])
+        assert nodes_equal(list(P), [1, 3])
+        assert edges_equal(list(P.edges()), [(1, 3)])
+        P[1][3]["weight"] = 1
+        P = bipartite.collaboration_weighted_projected_graph(G, [0, 2])
+        assert nodes_equal(list(P), [0, 2])
+        assert edges_equal(list(P.edges()), [(0, 2)])
+        P[0][2]["weight"] = 1
+
+    def test_path_weighted_projected_graph(self):
+        G = nx.path_graph(4)
+
+        # Duplicate nodes in the projection set must be rejected.
+        with pytest.raises(nx.NetworkXAlgorithmError):
+            bipartite.weighted_projected_graph(G, [1, 2, 3, 3])
+
+        P = bipartite.weighted_projected_graph(G, [1, 3])
+        assert nodes_equal(list(P), [1, 3])
+        assert edges_equal(list(P.edges()), [(1, 3)])
+        P[1][3]["weight"] = 1
+        P = bipartite.weighted_projected_graph(G, [0, 2])
+        assert nodes_equal(list(P), [0, 2])
+        assert edges_equal(list(P.edges()), [(0, 2)])
+        P[0][2]["weight"] = 1
+
+    def test_digraph_weighted_projection(self):
+        G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
+        P = bipartite.overlap_weighted_projected_graph(G, [1, 3])
+        assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0}
+        assert len(P) == 2
+
+    def test_path_weighted_projected_directed_graph(self):
+        G = nx.DiGraph()
+        nx.add_path(G, range(4))
+        P = bipartite.weighted_projected_graph(G, [1, 3])
+        assert nodes_equal(list(P), [1, 3])
+        assert edges_equal(list(P.edges()), [(1, 3)])
+        P[1][3]["weight"] = 1
+        P = bipartite.weighted_projected_graph(G, [0, 2])
+        assert nodes_equal(list(P), [0, 2])
+        assert edges_equal(list(P.edges()), [(0, 2)])
+        P[0][2]["weight"] = 1
+
+    def test_star_projected_graph(self):
+        G = nx.star_graph(3)
+        P = bipartite.projected_graph(G, [1, 2, 3])
+        assert nodes_equal(list(P), [1, 2, 3])
+        assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)])
+        P = bipartite.weighted_projected_graph(G, [1, 2, 3])
+        assert nodes_equal(list(P), [1, 2, 3])
+        assert edges_equal(list(P.edges()), [(1, 2), (1, 3), (2, 3)])
+
+        P = bipartite.projected_graph(G, [0])
+        assert nodes_equal(list(P), [0])
+        assert edges_equal(list(P.edges()), [])
+
+    def test_project_multigraph(self):
+        G = nx.Graph()
+        G.add_edge("a", 1)
+        G.add_edge("b", 1)
+        G.add_edge("a", 2)
+        G.add_edge("b", 2)
+        P = bipartite.projected_graph(G, "ab")
+        assert edges_equal(list(P.edges()), [("a", "b")])
+        P = bipartite.weighted_projected_graph(G, "ab")
+        assert edges_equal(list(P.edges()), [("a", "b")])
+        # multigraph=True keeps one parallel edge per shared neighbor.
+        P = bipartite.projected_graph(G, "ab", multigraph=True)
+        assert edges_equal(list(P.edges()), [("a", "b"), ("a", "b")])
+
+    def test_project_collaboration(self):
+        G = nx.Graph()
+        G.add_edge("a", 1)
+        G.add_edge("b", 1)
+        G.add_edge("b", 2)
+        G.add_edge("c", 2)
+        G.add_edge("c", 3)
+        G.add_edge("c", 4)
+        G.add_edge("b", 4)
+        P = bipartite.collaboration_weighted_projected_graph(G, "abc")
+        assert P["a"]["b"]["weight"] == 1
+        assert P["b"]["c"]["weight"] == 2
+
+    def test_directed_projection(self):
+        G = nx.DiGraph()
+        G.add_edge("A", 1)
+        G.add_edge(1, "B")
+        G.add_edge("A", 2)
+        G.add_edge("B", 2)
+        P = bipartite.projected_graph(G, "AB")
+        assert edges_equal(list(P.edges()), [("A", "B")])
+        P = bipartite.weighted_projected_graph(G, "AB")
+        assert edges_equal(list(P.edges()), [("A", "B")])
+        assert P["A"]["B"]["weight"] == 1
+
+        P = bipartite.projected_graph(G, "AB", multigraph=True)
+        assert edges_equal(list(P.edges()), [("A", "B")])
+
+        G = nx.DiGraph()
+        G.add_edge("A", 1)
+        G.add_edge(1, "B")
+        G.add_edge("A", 2)
+        G.add_edge(2, "B")
+        P = bipartite.projected_graph(G, "AB")
+        assert edges_equal(list(P.edges()), [("A", "B")])
+        P = bipartite.weighted_projected_graph(G, "AB")
+        assert edges_equal(list(P.edges()), [("A", "B")])
+        assert P["A"]["B"]["weight"] == 2
+
+        P = bipartite.projected_graph(G, "AB", multigraph=True)
+        assert edges_equal(list(P.edges()), [("A", "B"), ("A", "B")])
+
+
+class TestBipartiteWeightedProjection:
+    """Tests for the weighted projection variants (shared, Newman,
+    ratio, overlap/Jaccard, and generic user-supplied weight functions).
+    """
+
+    @classmethod
+    def setup_class(cls):
+        # Tore Opsahl's example
+        # http://toreopsahl.com/2009/05/01/projecting-two-mode-networks-onto-weighted-one-mode-networks/
+        cls.G = nx.Graph()
+        cls.G.add_edge("A", 1)
+        cls.G.add_edge("A", 2)
+        cls.G.add_edge("B", 1)
+        cls.G.add_edge("B", 2)
+        cls.G.add_edge("B", 3)
+        cls.G.add_edge("B", 4)
+        cls.G.add_edge("B", 5)
+        cls.G.add_edge("C", 1)
+        cls.G.add_edge("D", 3)
+        cls.G.add_edge("E", 4)
+        cls.G.add_edge("E", 5)
+        cls.G.add_edge("E", 6)
+        cls.G.add_edge("F", 6)
+        # Graph based on figure 6 from Newman (2001)
+        cls.N = nx.Graph()
+        cls.N.add_edge("A", 1)
+        cls.N.add_edge("A", 2)
+        cls.N.add_edge("A", 3)
+        cls.N.add_edge("B", 1)
+        cls.N.add_edge("B", 2)
+        cls.N.add_edge("B", 3)
+        cls.N.add_edge("C", 1)
+        cls.N.add_edge("D", 1)
+        cls.N.add_edge("E", 3)
+
+    def test_project_weighted_shared(self):
+        # Default weighting: number of shared neighbors.
+        edges = [
+            ("A", "B", 2),
+            ("A", "C", 1),
+            ("B", "C", 1),
+            ("B", "D", 1),
+            ("B", "E", 2),
+            ("E", "F", 1),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.weighted_projected_graph(self.G, "ABCDEF")
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+        edges = [
+            ("A", "B", 3),
+            ("A", "E", 1),
+            ("A", "C", 1),
+            ("A", "D", 1),
+            ("B", "E", 1),
+            ("B", "C", 1),
+            ("B", "D", 1),
+            ("C", "D", 1),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.weighted_projected_graph(self.N, "ABCDE")
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+    def test_project_weighted_newman(self):
+        edges = [
+            ("A", "B", 1.5),
+            ("A", "C", 0.5),
+            ("B", "C", 0.5),
+            ("B", "D", 1),
+            ("B", "E", 2),
+            ("E", "F", 1),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.collaboration_weighted_projected_graph(self.G, "ABCDEF")
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+        edges = [
+            ("A", "B", 11 / 6.0),
+            ("A", "E", 1 / 2.0),
+            ("A", "C", 1 / 3.0),
+            ("A", "D", 1 / 3.0),
+            ("B", "E", 1 / 2.0),
+            ("B", "C", 1 / 3.0),
+            ("B", "D", 1 / 3.0),
+            ("C", "D", 1 / 3.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.collaboration_weighted_projected_graph(self.N, "ABCDE")
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+    def test_project_weighted_ratio(self):
+        # ratio=True divides the shared count by the size of the other set.
+        edges = [
+            ("A", "B", 2 / 6.0),
+            ("A", "C", 1 / 6.0),
+            ("B", "C", 1 / 6.0),
+            ("B", "D", 1 / 6.0),
+            ("B", "E", 2 / 6.0),
+            ("E", "F", 1 / 6.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.weighted_projected_graph(self.G, "ABCDEF", ratio=True)
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+        edges = [
+            ("A", "B", 3 / 3.0),
+            ("A", "E", 1 / 3.0),
+            ("A", "C", 1 / 3.0),
+            ("A", "D", 1 / 3.0),
+            ("B", "E", 1 / 3.0),
+            ("B", "C", 1 / 3.0),
+            ("B", "D", 1 / 3.0),
+            ("C", "D", 1 / 3.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.weighted_projected_graph(self.N, "ABCDE", ratio=True)
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+    def test_project_weighted_overlap(self):
+        edges = [
+            ("A", "B", 2 / 2.0),
+            ("A", "C", 1 / 1.0),
+            ("B", "C", 1 / 1.0),
+            ("B", "D", 1 / 1.0),
+            ("B", "E", 2 / 3.0),
+            ("E", "F", 1 / 1.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF", jaccard=False)
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+        edges = [
+            ("A", "B", 3 / 3.0),
+            ("A", "E", 1 / 1.0),
+            ("A", "C", 1 / 1.0),
+            ("A", "D", 1 / 1.0),
+            ("B", "E", 1 / 1.0),
+            ("B", "C", 1 / 1.0),
+            ("B", "D", 1 / 1.0),
+            ("C", "D", 1 / 1.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE", jaccard=False)
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+    def test_project_weighted_jaccard(self):
+        edges = [
+            ("A", "B", 2 / 5.0),
+            ("A", "C", 1 / 2.0),
+            ("B", "C", 1 / 5.0),
+            ("B", "D", 1 / 5.0),
+            ("B", "E", 2 / 6.0),
+            ("E", "F", 1 / 3.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.overlap_weighted_projected_graph(self.G, "ABCDEF")
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in list(P.edges()):
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+        edges = [
+            ("A", "B", 3 / 3.0),
+            ("A", "E", 1 / 3.0),
+            ("A", "C", 1 / 3.0),
+            ("A", "D", 1 / 3.0),
+            ("B", "E", 1 / 3.0),
+            ("B", "C", 1 / 3.0),
+            ("B", "D", 1 / 3.0),
+            ("C", "D", 1 / 1.0),
+        ]
+        Panswer = nx.Graph()
+        Panswer.add_weighted_edges_from(edges)
+        P = bipartite.overlap_weighted_projected_graph(self.N, "ABCDE")
+        assert edges_equal(list(P.edges()), Panswer.edges())
+        for u, v in P.edges():
+            assert P[u][v]["weight"] == Panswer[u][v]["weight"]
+
+    def test_generic_weighted_projected_graph_simple(self):
+        def shared(G, u, v):
+            # Weight = number of common neighbors of u and v.
+            return len(set(G[u]) & set(G[v]))
+
+        B = nx.path_graph(5)
+        G = bipartite.generic_weighted_projected_graph(
+            B, [0, 2, 4], weight_function=shared
+        )
+        assert nodes_equal(list(G), [0, 2, 4])
+        assert edges_equal(
+            list(G.edges(data=True)),
+            [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})],
+        )
+
+        G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
+        assert nodes_equal(list(G), [0, 2, 4])
+        assert edges_equal(
+            list(G.edges(data=True)),
+            [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})],
+        )
+        B = nx.DiGraph()
+        nx.add_path(B, range(5))
+        G = bipartite.generic_weighted_projected_graph(B, [0, 2, 4])
+        assert nodes_equal(list(G), [0, 2, 4])
+        assert edges_equal(
+            list(G.edges(data=True)), [(0, 2, {"weight": 1}), (2, 4, {"weight": 1})]
+        )
+
+    def test_generic_weighted_projected_graph_custom(self):
+        def jaccard(G, u, v):
+            unbrs = set(G[u])
+            vnbrs = set(G[v])
+            return len(unbrs & vnbrs) / len(unbrs | vnbrs)
+
+        def my_weight(G, u, v, weight="weight"):
+            # Sum the weights of both edges to each shared neighbor.
+            w = 0
+            for nbr in set(G[u]) & set(G[v]):
+                w += G.edges[u, nbr].get(weight, 1) + G.edges[v, nbr].get(weight, 1)
+            return w
+
+        B = nx.bipartite.complete_bipartite_graph(2, 2)
+        for i, (u, v) in enumerate(B.edges()):
+            B.edges[u, v]["weight"] = i + 1
+        G = bipartite.generic_weighted_projected_graph(
+            B, [0, 1], weight_function=jaccard
+        )
+        assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 1.0})])
+        G = bipartite.generic_weighted_projected_graph(
+            B, [0, 1], weight_function=my_weight
+        )
+        assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 10})])
+        G = bipartite.generic_weighted_projected_graph(B, [0, 1])
+        assert edges_equal(list(G.edges(data=True)), [(0, 1, {"weight": 2})])
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ab7813d5facd2953e1d661d3b64a2223b38e48b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_redundancy.py
@@ -0,0 +1,37 @@
+"""Unit tests for the :mod:`networkx.algorithms.bipartite.redundancy` module.
+
+"""
+
+import pytest
+
+from networkx import NetworkXError, cycle_graph
+from networkx.algorithms.bipartite import complete_bipartite_graph, node_redundancy
+
+
+def test_no_redundant_nodes():
+    """In K_{2,2} every node has redundancy 1 (all neighbors shared)."""
+    G = complete_bipartite_graph(2, 2)
+
+    # when nodes is None
+    rc = node_redundancy(G)
+    assert all(redundancy == 1 for redundancy in rc.values())
+
+    # when set of nodes is specified
+    rc = node_redundancy(G, (2, 3))
+    assert rc == {2: 1.0, 3: 1.0}
+
+
+def test_redundant_nodes():
+    """Adding the chord (0, 3) to a 6-cycle lowers the redundancy of its
+    endpoints to 2/3 while all other nodes stay at 1."""
+    G = cycle_graph(6)
+    edge = {0, 3}
+    G.add_edge(*edge)
+    redundancy = node_redundancy(G)
+    for v in edge:
+        assert redundancy[v] == 2 / 3
+    for v in set(G) - edge:
+        assert redundancy[v] == 1
+
+
+def test_not_enough_neighbors():
+    """Redundancy is undefined for nodes with fewer than two neighbors,
+    so node_redundancy must raise NetworkXError on K_{1,2}."""
+    with pytest.raises(NetworkXError):
+        G = complete_bipartite_graph(1, 2)
+        node_redundancy(G)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py
new file mode 100644
index 0000000000000000000000000000000000000000..b940649793d40aa73606914f3d48348761c329df
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py
@@ -0,0 +1,80 @@
+import pytest
+
+pytest.importorskip("scipy")
+
+import networkx as nx
+from networkx.algorithms.bipartite import spectral_bipartivity as sb
+
+# Examples from Figure 1
+# E. Estrada and J. A. Rodríguez-Velázquez, "Spectral measures of
+# bipartivity in complex networks", PhysRev E 72, 046105 (2005)
+
+
+class TestSpectralBipartivity:
+    """Checks spectral_bipartivity against the published values for the
+    example graphs in Figure 1 of Estrada & Rodriguez-Velazquez (2005)."""
+
+    def test_star_like(self):
+        # star-like
+
+        G = nx.star_graph(2)
+        G.add_edge(1, 2)
+        assert sb(G) == pytest.approx(0.843, abs=1e-3)
+
+        G = nx.star_graph(3)
+        G.add_edge(1, 2)
+        assert sb(G) == pytest.approx(0.871, abs=1e-3)
+
+        G = nx.star_graph(4)
+        G.add_edge(1, 2)
+        assert sb(G) == pytest.approx(0.890, abs=1e-3)
+
+    def test_k23_like(self):
+        # K2,3-like
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(0, 1)
+        assert sb(G) == pytest.approx(0.769, abs=1e-3)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(2, 4)
+        assert sb(G) == pytest.approx(0.829, abs=1e-3)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(2, 4)
+        G.add_edge(3, 4)
+        assert sb(G) == pytest.approx(0.731, abs=1e-3)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(0, 1)
+        G.add_edge(2, 4)
+        assert sb(G) == pytest.approx(0.692, abs=1e-3)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(2, 4)
+        G.add_edge(3, 4)
+        G.add_edge(0, 1)
+        assert sb(G) == pytest.approx(0.645, abs=1e-3)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(2, 4)
+        G.add_edge(3, 4)
+        G.add_edge(2, 3)
+        assert sb(G) == pytest.approx(0.645, abs=1e-3)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(2, 4)
+        G.add_edge(3, 4)
+        G.add_edge(2, 3)
+        G.add_edge(0, 1)
+        assert sb(G) == pytest.approx(0.597, abs=1e-3)
+
+    def test_single_nodes(self):
+        # single nodes: nodes= restricts the result to per-node values.
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(2, 4)
+        sbn = sb(G, nodes=[1, 2])
+        assert sbn[1] == pytest.approx(0.85, abs=1e-2)
+        assert sbn[2] == pytest.approx(0.77, abs=1e-2)
+
+        G = nx.complete_bipartite_graph(2, 3)
+        G.add_edge(0, 1)
+        sbn = sb(G, nodes=[1, 2])
+        assert sbn[1] == pytest.approx(0.73, abs=1e-2)
+        assert sbn[2] == pytest.approx(0.82, abs=1e-2)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/boundary.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/boundary.py
new file mode 100644
index 0000000000000000000000000000000000000000..fef9ba223699b61ac4f26cb7b1152cea8238f899
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/boundary.py
@@ -0,0 +1,167 @@
+"""Routines to find the boundary of a set of nodes.
+
+An edge boundary is a set of edges, each of which has exactly one
+endpoint in a given set of nodes (or, in the case of directed graphs,
+the set of edges whose source node is in the set).
+
+A node boundary of a set *S* of nodes is the set of (out-)neighbors of
+nodes in *S* that are outside *S*.
+
+"""
+from itertools import chain
+
+import networkx as nx
+
+__all__ = ["edge_boundary", "node_boundary"]
+
+
+@nx._dispatchable(edge_attrs={"data": "default"}, preserve_edge_attrs="data")
+def edge_boundary(G, nbunch1, nbunch2=None, data=False, keys=False, default=None):
+ """Returns the edge boundary of `nbunch1`.
+
+ The *edge boundary* of a set *S* with respect to a set *T* is the
+ set of edges (*u*, *v*) such that *u* is in *S* and *v* is in *T*.
+ If *T* is not specified, it is assumed to be the set of all nodes
+ not in *S*.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nbunch1 : iterable
+ Iterable of nodes in the graph representing the set of nodes
+ whose edge boundary will be returned. (This is the set *S* from
+ the definition above.)
+
+ nbunch2 : iterable
+ Iterable of nodes representing the target (or "exterior") set of
+ nodes. (This is the set *T* from the definition above.) If not
+ specified, this is assumed to be the set of all nodes in `G`
+ not in `nbunch1`.
+
+ keys : bool
+ This parameter has the same meaning as in
+ :meth:`MultiGraph.edges`.
+
+ data : bool or object
+ This parameter has the same meaning as in
+ :meth:`MultiGraph.edges`.
+
+ default : object
+ This parameter has the same meaning as in
+ :meth:`MultiGraph.edges`.
+
+ Returns
+ -------
+ iterator
+ An iterator over the edges in the boundary of `nbunch1` with
+ respect to `nbunch2`. If `keys`, `data`, or `default`
+ are specified and `G` is a multigraph, then edges are returned
+ with keys and/or data, as in :meth:`MultiGraph.edges`.
+
+ Examples
+ --------
+ >>> G = nx.wheel_graph(6)
+
+ When nbunch2=None:
+
+ >>> list(nx.edge_boundary(G, (1, 3)))
+ [(1, 0), (1, 2), (1, 5), (3, 0), (3, 2), (3, 4)]
+
+ When nbunch2 is given:
+
+ >>> list(nx.edge_boundary(G, (1, 3), (2, 0)))
+ [(1, 0), (1, 2), (3, 0), (3, 2)]
+
+ Notes
+ -----
+ Any element of `nbunch` that is not in the graph `G` will be
+ ignored.
+
+ `nbunch1` and `nbunch2` are usually meant to be disjoint, but in
+ the interest of speed and generality, that is not required here.
+
+ """
+ nset1 = {n for n in nbunch1 if n in G}
+ # Here we create an iterator over edges incident to nodes in the set
+ # `nset1`. The `Graph.edges()` method does not provide a guarantee
+ # on the orientation of the edges, so our algorithm below must
+ # handle the case in which exactly one orientation, either (u, v) or
+ # (v, u), appears in this iterable.
+ if G.is_multigraph():
+ edges = G.edges(nset1, data=data, keys=keys, default=default)
+ else:
+ edges = G.edges(nset1, data=data, default=default)
+ # If `nbunch2` is not provided, then it is assumed to be the set
+ # complement of `nbunch1`. For the sake of efficiency, this is
+ # implemented by using the `not in` operator, instead of by creating
+ # an additional set and using the `in` operator.
+ if nbunch2 is None:
+ return (e for e in edges if (e[0] in nset1) ^ (e[1] in nset1))
+ nset2 = set(nbunch2)
+ return (
+ e
+ for e in edges
+ if (e[0] in nset1 and e[1] in nset2) or (e[1] in nset1 and e[0] in nset2)
+ )
+
+
+@nx._dispatchable
+def node_boundary(G, nbunch1, nbunch2=None):
+ """Returns the node boundary of `nbunch1`.
+
+ The *node boundary* of a set *S* with respect to a set *T* is the
+ set of nodes *v* in *T* such that for some *u* in *S*, there is an
+ edge joining *u* to *v*. If *T* is not specified, it is assumed to
+ be the set of all nodes not in *S*.
+
+ Parameters
+ ----------
+ G : NetworkX graph
+
+ nbunch1 : iterable
+ Iterable of nodes in the graph representing the set of nodes
+ whose node boundary will be returned. (This is the set *S* from
+ the definition above.)
+
+ nbunch2 : iterable
+ Iterable of nodes representing the target (or "exterior") set of
+ nodes. (This is the set *T* from the definition above.) If not
+ specified, this is assumed to be the set of all nodes in `G`
+ not in `nbunch1`.
+
+ Returns
+ -------
+ set
+ The node boundary of `nbunch1` with respect to `nbunch2`.
+
+ Examples
+ --------
+ >>> G = nx.wheel_graph(6)
+
+ When nbunch2=None:
+
+ >>> list(nx.node_boundary(G, (3, 4)))
+ [0, 2, 5]
+
+ When nbunch2 is given:
+
+ >>> list(nx.node_boundary(G, (3, 4), (0, 1, 5)))
+ [0, 5]
+
+ Notes
+ -----
+ Any element of `nbunch` that is not in the graph `G` will be
+ ignored.
+
+ `nbunch1` and `nbunch2` are usually meant to be disjoint, but in
+ the interest of speed and generality, that is not required here.
+
+ """
+ nset1 = {n for n in nbunch1 if n in G}
+ bdy = set(chain.from_iterable(G[v] for v in nset1)) - nset1
+ # If `nbunch2` is not specified, it is assumed to be the set
+ # complement of `nbunch1`.
+ if nbunch2 is not None:
+ bdy &= set(nbunch2)
+ return bdy
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bridges.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bridges.py
new file mode 100644
index 0000000000000000000000000000000000000000..e076a256cb8c9b5431aea2e1bce8549b117e841b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/bridges.py
@@ -0,0 +1,205 @@
+"""Bridge-finding algorithms."""
+from itertools import chain
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["bridges", "has_bridges", "local_bridges"]
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def bridges(G, root=None):
+ """Generate all bridges in a graph.
+
+ A *bridge* in a graph is an edge whose removal causes the number of
+ connected components of the graph to increase. Equivalently, a bridge is an
+ edge that does not belong to any cycle. Bridges are also known as cut-edges,
+ isthmuses, or cut arcs.
+
+ Parameters
+ ----------
+ G : undirected graph
+
+ root : node (optional)
+ A node in the graph `G`. If specified, only the bridges in the
+ connected component containing this node will be returned.
+
+ Yields
+ ------
+ e : edge
+ An edge in the graph whose removal disconnects the graph (or
+ causes the number of connected components to increase).
+
+ Raises
+ ------
+ NodeNotFound
+ If `root` is not in the graph `G`.
+
+ NetworkXNotImplemented
+ If `G` is a directed graph.
+
+ Examples
+ --------
+ The barbell graph with parameter zero has a single bridge:
+
+ >>> G = nx.barbell_graph(10, 0)
+ >>> list(nx.bridges(G))
+ [(9, 10)]
+
+ Notes
+ -----
+ This is an implementation of the algorithm described in [1]_. An edge is a
+ bridge if and only if it is not contained in any chain. Chains are found
+ using the :func:`networkx.chain_decomposition` function.
+
+ The algorithm described in [1]_ requires a simple graph. If the provided
+ graph is a multigraph, we convert it to a simple graph and verify that any
+ bridges discovered by the chain decomposition algorithm are not multi-edges.
+
+ Ignoring polylogarithmic factors, the worst-case time complexity is the
+ same as the :func:`networkx.chain_decomposition` function,
+ $O(m + n)$, where $n$ is the number of nodes in the graph and $m$ is
+ the number of edges.
+
+ References
+ ----------
+ .. [1] https://en.wikipedia.org/wiki/Bridge_%28graph_theory%29#Bridge-Finding_with_Chain_Decompositions
+ """
+ multigraph = G.is_multigraph()
+ H = nx.Graph(G) if multigraph else G
+ chains = nx.chain_decomposition(H, root=root)
+ chain_edges = set(chain.from_iterable(chains))
+ H_copy = H.copy()
+ if root is not None:
+ H = H.subgraph(nx.node_connected_component(H, root)).copy()
+ for u, v in H.edges():
+ if (u, v) not in chain_edges and (v, u) not in chain_edges:
+ if multigraph and len(G[u][v]) > 1:
+ continue
+ yield u, v
+
+
+@not_implemented_for("directed")
+@nx._dispatchable
+def has_bridges(G, root=None):
+ """Decide whether a graph has any bridges.
+
+ A *bridge* in a graph is an edge whose removal causes the number of
+ connected components of the graph to increase.
+
+ Parameters
+ ----------
+ G : undirected graph
+
+ root : node (optional)
+ A node in the graph `G`. If specified, only the bridges in the
+ connected component containing this node will be considered.
+
+ Returns
+ -------
+ bool
+ Whether the graph (or the connected component containing `root`)
+ has any bridges.
+
+ Raises
+ ------
+ NodeNotFound
+ If `root` is not in the graph `G`.
+
+ NetworkXNotImplemented
+ If `G` is a directed graph.
+
+ Examples
+ --------
+ The barbell graph with parameter zero has a single bridge::
+
+ >>> G = nx.barbell_graph(10, 0)
+ >>> nx.has_bridges(G)
+ True
+
+ On the other hand, the cycle graph has no bridges::
+
+ >>> G = nx.cycle_graph(5)
+ >>> nx.has_bridges(G)
+ False
+
+ Notes
+ -----
+ This implementation uses the :func:`networkx.bridges` function, so
+ it shares its worst-case time complexity, $O(m + n)$, ignoring
+ polylogarithmic factors, where $n$ is the number of nodes in the
+ graph and $m$ is the number of edges.
+
+ """
+ try:
+ next(bridges(G, root=root))
+ except StopIteration:
+ return False
+ else:
+ return True
+
+
+@not_implemented_for("multigraph")
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def local_bridges(G, with_span=True, weight=None):
+ """Iterate over local bridges of `G` optionally computing the span
+
+ A *local bridge* is an edge whose endpoints have no common neighbors.
+ That is, the edge is not part of a triangle in the graph.
+
+ The *span* of a *local bridge* is the shortest path length between
+ the endpoints if the local bridge is removed.
+
+ Parameters
+ ----------
+ G : undirected graph
+
+ with_span : bool
+ If True, yield a 3-tuple `(u, v, span)`
+
+ weight : function, string or None (default: None)
+ If function, used to compute edge weights for the span.
+ If string, the edge data attribute used in calculating span.
+ If None, all edges have weight 1.
+
+ Yields
+ ------
+ e : edge
+ The local bridges as an edge 2-tuple of nodes `(u, v)` or
+ as a 3-tuple `(u, v, span)` when `with_span is True`.
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If `G` is a directed graph or multigraph.
+
+ Examples
+ --------
+ A cycle graph has every edge a local bridge with span N-1.
+
+ >>> G = nx.cycle_graph(9)
+ >>> (0, 8, 8) in set(nx.local_bridges(G))
+ True
+ """
+ if with_span is not True:
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ yield u, v
+ else:
+ wt = nx.weighted._weight_function(G, weight)
+ for u, v in G.edges:
+ if not (set(G[u]) & set(G[v])):
+ enodes = {u, v}
+
+ def hide_edge(n, nbr, d):
+ if n not in enodes or nbr not in enodes:
+ return wt(n, nbr, d)
+ return None
+
+ try:
+ span = nx.shortest_path_length(G, u, v, weight=hide_edge)
+ yield u, v, span
+ except nx.NetworkXNoPath:
+ yield u, v, float("inf")
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/broadcasting.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/broadcasting.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b362a0e1346c29f7207dc0afce392118daaeb2b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/broadcasting.py
@@ -0,0 +1,155 @@
+"""Routines to calculate the broadcast time of certain graphs.
+
+Broadcasting is an information dissemination problem in which a node in a graph,
+called the originator, must distribute a message to all other nodes by placing
+a series of calls along the edges of the graph. Once informed, other nodes aid
+the originator in distributing the message.
+
+The broadcasting must be completed as quickly as possible subject to the
+following constraints:
+- Each call requires one unit of time.
+- A node can only participate in one call per unit of time.
+- Each call only involves two adjacent nodes: a sender and a receiver.
+"""
+
+import networkx as nx
+from networkx import NetworkXError
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "tree_broadcast_center",
+ "tree_broadcast_time",
+]
+
+
+def _get_max_broadcast_value(G, U, v, values):
+ adj = sorted(set(G.neighbors(v)) & U, key=values.get, reverse=True)
+ return max(values[u] + i for i, u in enumerate(adj, start=1))
+
+
+def _get_broadcast_centers(G, v, values, target):
+ adj = sorted(G.neighbors(v), key=values.get, reverse=True)
+ j = next(i for i, u in enumerate(adj, start=1) if values[u] + i == target)
+ return set([v] + adj[:j])
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def tree_broadcast_center(G):
+ """Return the Broadcast Center of the tree `G`.
+
+ The broadcast center of a graph G denotes the set of nodes having
+ minimum broadcast time [1]_. This is a linear algorithm for determining
+ the broadcast center of a tree with ``N`` nodes, as a by-product it also
+ determines the broadcast time from the broadcast center.
+
+ Parameters
+ ----------
+ G : undirected graph
+ The graph should be an undirected tree
+
+ Returns
+ -------
+ BC : (int, set) tuple
+ minimum broadcast number of the tree, set of broadcast centers
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If the graph is directed or is a multigraph.
+
+ References
+ ----------
+ .. [1] Slater, P.J., Cockayne, E.J., Hedetniemi, S.T,
+ Information dissemination in trees. SIAM J.Comput. 10(4), 692–701 (1981)
+ """
+ # Assert that the graph G is a tree
+ if not nx.is_tree(G):
+ NetworkXError("Input graph is not a tree")
+ # step 0
+ if G.number_of_nodes() == 2:
+ return 1, set(G.nodes())
+ if G.number_of_nodes() == 1:
+ return 0, set(G.nodes())
+
+ # step 1
+ U = {node for node, deg in G.degree if deg == 1}
+ values = {n: 0 for n in U}
+ T = G.copy()
+ T.remove_nodes_from(U)
+
+ # step 2
+ W = {node for node, deg in T.degree if deg == 1}
+ values.update((w, G.degree[w] - 1) for w in W)
+
+ # step 3
+ while T.number_of_nodes() >= 2:
+ # step 4
+ w = min(W, key=lambda n: values[n])
+ v = next(T.neighbors(w))
+
+ # step 5
+ U.add(w)
+ W.remove(w)
+ T.remove_node(w)
+
+ # step 6
+ if T.degree(v) == 1:
+ # update t(v)
+ values.update({v: _get_max_broadcast_value(G, U, v, values)})
+ W.add(v)
+
+ # step 7
+ v = nx.utils.arbitrary_element(T)
+ b_T = _get_max_broadcast_value(G, U, v, values)
+ return b_T, _get_broadcast_centers(G, v, values, b_T)
+
+
+@not_implemented_for("directed")
+@not_implemented_for("multigraph")
+@nx._dispatchable
+def tree_broadcast_time(G, node=None):
+ """Return the Broadcast Time of the tree `G`.
+
+ The minimum broadcast time of a node is defined as the minimum amount
+ of time required to complete broadcasting starting from the
+ originator. The broadcast time of a graph is the maximum over
+ all nodes of the minimum broadcast time from that node [1]_.
+ This function returns the minimum broadcast time of `node`.
+ If `node` is None the broadcast time for the graph is returned.
+
+ Parameters
+ ----------
+ G : undirected graph
+ The graph should be an undirected tree
+ node: int, optional
+ index of starting node. If `None`, the algorithm returns the broadcast
+ time of the tree.
+
+ Returns
+ -------
+ BT : int
+ Broadcast Time of a node in a tree
+
+ Raises
+ ------
+ NetworkXNotImplemented
+ If the graph is directed or is a multigraph.
+
+ References
+ ----------
+ .. [1] Harutyunyan, H. A. and Li, Z.
+ "A Simple Construction of Broadcast Graphs."
+ In Computing and Combinatorics. COCOON 2019
+ (Ed. D. Z. Du and C. Tian.) Springer, pp. 240-253, 2019.
+ """
+ b_T, b_C = tree_broadcast_center(G)
+ if node is not None:
+ return b_T + min(nx.shortest_path_length(G, node, u) for u in b_C)
+ dist_from_center = dict.fromkeys(G, len(G))
+ for u in b_C:
+ for v, dist in nx.shortest_path_length(G, u).items():
+ if dist < dist_from_center[v]:
+ dist_from_center[v] = dist
+ return b_T + max(dist_from_center.values())
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/betweenness.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/betweenness.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f44fb19ba09a34644e1166dfbcb4fddf2ce9066
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/betweenness.py
@@ -0,0 +1,435 @@
+"""Betweenness centrality measures."""
+from collections import deque
+from heapq import heappop, heappush
+from itertools import count
+
+import networkx as nx
+from networkx.algorithms.shortest_paths.weighted import _weight_function
+from networkx.utils import py_random_state
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["betweenness_centrality", "edge_betweenness_centrality"]
+
+
+@py_random_state(5)
+@nx._dispatchable(edge_attrs="weight")
+def betweenness_centrality(
+ G, k=None, normalized=True, weight=None, endpoints=False, seed=None
+):
+ r"""Compute the shortest-path betweenness centrality for nodes.
+
+ Betweenness centrality of a node $v$ is the sum of the
+ fraction of all-pairs shortest paths that pass through $v$
+
+ .. math::
+
+ c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}
+
+ where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+ shortest $(s, t)$-paths, and $\sigma(s, t|v)$ is the number of
+ those paths passing through some node $v$ other than $s, t$.
+ If $s = t$, $\sigma(s, t) = 1$, and if $v \in {s, t}$,
+ $\sigma(s, t|v) = 0$ [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph.
+
+ k : int, optional (default=None)
+ If k is not None use k node samples to estimate betweenness.
+ The value of k <= n where n is the number of nodes in the graph.
+ Higher values give better approximation.
+
+ normalized : bool, optional
+ If True the betweenness values are normalized by `2/((n-1)(n-2))`
+ for graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
+ is the number of nodes in G.
+
+ weight : None or string, optional (default=None)
+ If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight.
+ Weights are used to calculate weighted shortest paths, so they are
+ interpreted as distances.
+
+ endpoints : bool, optional
+ If True include the endpoints in the shortest path counts.
+
+ seed : integer, random_state, or None (default)
+ Indicator of random number generation state.
+ See :ref:`Randomness`.
+ Note that this is only used if k is not None.
+
+ Returns
+ -------
+ nodes : dictionary
+ Dictionary of nodes with betweenness centrality as the value.
+
+ See Also
+ --------
+ edge_betweenness_centrality
+ load_centrality
+
+ Notes
+ -----
+ The algorithm is from Ulrik Brandes [1]_.
+ See [4]_ for the original first published version and [2]_ for details on
+ algorithms for variations and related metrics.
+
+ For approximate betweenness calculations set k=#samples to use
+ k nodes ("pivots") to estimate the betweenness values. For an estimate
+ of the number of pivots needed see [3]_.
+
+ For weighted graphs the edge weights must be greater than zero.
+ Zero edge weights can produce an infinite number of equal length
+ paths between pairs of nodes.
+
+ The total number of paths between source and target is counted
+ differently for directed and undirected graphs. Directed paths
+ are easy to count. Undirected paths are tricky: should a path
+ from "u" to "v" count as 1 undirected path or as 2 directed paths?
+
+ For betweenness_centrality we report the number of undirected
+ paths when G is undirected.
+
+ For betweenness_centrality_subset the reporting is different.
+ If the source and target subsets are the same, then we want
+ to count undirected paths. But if the source and target subsets
+ differ -- for example, if sources is {0} and targets is {1},
+ then we are only counting the paths in one direction. They are
+ undirected paths but we are counting them in a directed way.
+ To count them as undirected paths, each should count as half a path.
+
+ This algorithm is not guaranteed to be correct if edge weights
+ are floating point numbers. As a workaround you can use integer
+ numbers by multiplying the relevant edge attributes by a convenient
+ constant factor (eg 100) and converting to integers.
+
+ References
+ ----------
+ .. [1] Ulrik Brandes:
+ A Faster Algorithm for Betweenness Centrality.
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
+ https://doi.org/10.1080/0022250X.2001.9990249
+ .. [2] Ulrik Brandes:
+ On Variants of Shortest-Path Betweenness
+ Centrality and their Generic Computation.
+ Social Networks 30(2):136-145, 2008.
+ https://doi.org/10.1016/j.socnet.2007.11.001
+ .. [3] Ulrik Brandes and Christian Pich:
+ Centrality Estimation in Large Networks.
+ International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007.
+ https://dx.doi.org/10.1142/S0218127407018403
+ .. [4] Linton C. Freeman:
+ A set of measures of centrality based on betweenness.
+ Sociometry 40: 35–41, 1977
+ https://doi.org/10.2307/3033543
+ """
+ betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
+ if k is None:
+ nodes = G
+ else:
+ nodes = seed.sample(list(G.nodes()), k)
+ for s in nodes:
+ # single source shortest paths
+ if weight is None: # use BFS
+ S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
+ else: # use Dijkstra's algorithm
+ S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
+ # accumulation
+ if endpoints:
+ betweenness, _ = _accumulate_endpoints(betweenness, S, P, sigma, s)
+ else:
+ betweenness, _ = _accumulate_basic(betweenness, S, P, sigma, s)
+ # rescaling
+ betweenness = _rescale(
+ betweenness,
+ len(G),
+ normalized=normalized,
+ directed=G.is_directed(),
+ k=k,
+ endpoints=endpoints,
+ )
+ return betweenness
+
+
+@py_random_state(4)
+@nx._dispatchable(edge_attrs="weight")
+def edge_betweenness_centrality(G, k=None, normalized=True, weight=None, seed=None):
+ r"""Compute betweenness centrality for edges.
+
+ Betweenness centrality of an edge $e$ is the sum of the
+ fraction of all-pairs shortest paths that pass through $e$
+
+ .. math::
+
+ c_B(e) =\sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}
+
+ where $V$ is the set of nodes, $\sigma(s, t)$ is the number of
+ shortest $(s, t)$-paths, and $\sigma(s, t|e)$ is the number of
+ those paths passing through edge $e$ [2]_.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph.
+
+ k : int, optional (default=None)
+ If k is not None use k node samples to estimate betweenness.
+ The value of k <= n where n is the number of nodes in the graph.
+ Higher values give better approximation.
+
+ normalized : bool, optional
+ If True the betweenness values are normalized by $2/(n(n-1))$
+ for graphs, and $1/(n(n-1))$ for directed graphs where $n$
+ is the number of nodes in G.
+
+ weight : None or string, optional (default=None)
+ If None, all edge weights are considered equal.
+ Otherwise holds the name of the edge attribute used as weight.
+ Weights are used to calculate weighted shortest paths, so they are
+ interpreted as distances.
+
+ seed : integer, random_state, or None (default)
+ Indicator of random number generation state.
+ See :ref:`Randomness`.
+ Note that this is only used if k is not None.
+
+ Returns
+ -------
+ edges : dictionary
+ Dictionary of edges with betweenness centrality as the value.
+
+ See Also
+ --------
+ betweenness_centrality
+ edge_load
+
+ Notes
+ -----
+ The algorithm is from Ulrik Brandes [1]_.
+
+ For weighted graphs the edge weights must be greater than zero.
+ Zero edge weights can produce an infinite number of equal length
+ paths between pairs of nodes.
+
+ References
+ ----------
+ .. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes,
+ Journal of Mathematical Sociology 25(2):163-177, 2001.
+ https://doi.org/10.1080/0022250X.2001.9990249
+ .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+ Centrality and their Generic Computation.
+ Social Networks 30(2):136-145, 2008.
+ https://doi.org/10.1016/j.socnet.2007.11.001
+ """
+ betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
+ # b[e]=0 for e in G.edges()
+ betweenness.update(dict.fromkeys(G.edges(), 0.0))
+ if k is None:
+ nodes = G
+ else:
+ nodes = seed.sample(list(G.nodes()), k)
+ for s in nodes:
+ # single source shortest paths
+ if weight is None: # use BFS
+ S, P, sigma, _ = _single_source_shortest_path_basic(G, s)
+ else: # use Dijkstra's algorithm
+ S, P, sigma, _ = _single_source_dijkstra_path_basic(G, s, weight)
+ # accumulation
+ betweenness = _accumulate_edges(betweenness, S, P, sigma, s)
+ # rescaling
+ for n in G: # remove nodes to only return edges
+ del betweenness[n]
+ betweenness = _rescale_e(
+ betweenness, len(G), normalized=normalized, directed=G.is_directed()
+ )
+ if G.is_multigraph():
+ betweenness = _add_edge_keys(G, betweenness, weight=weight)
+ return betweenness
+
+
+# helpers for betweenness centrality
+
+
+def _single_source_shortest_path_basic(G, s):
+ S = []
+ P = {}
+ for v in G:
+ P[v] = []
+ sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
+ D = {}
+ sigma[s] = 1.0
+ D[s] = 0
+ Q = deque([s])
+ while Q: # use BFS to find shortest paths
+ v = Q.popleft()
+ S.append(v)
+ Dv = D[v]
+ sigmav = sigma[v]
+ for w in G[v]:
+ if w not in D:
+ Q.append(w)
+ D[w] = Dv + 1
+ if D[w] == Dv + 1: # this is a shortest path, count paths
+ sigma[w] += sigmav
+ P[w].append(v) # predecessors
+ return S, P, sigma, D
+
+
+def _single_source_dijkstra_path_basic(G, s, weight):
+ weight = _weight_function(G, weight)
+ # modified from Eppstein
+ S = []
+ P = {}
+ for v in G:
+ P[v] = []
+ sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
+ D = {}
+ sigma[s] = 1.0
+ push = heappush
+ pop = heappop
+ seen = {s: 0}
+ c = count()
+ Q = [] # use Q as heap with (distance,node id) tuples
+ push(Q, (0, next(c), s, s))
+ while Q:
+ (dist, _, pred, v) = pop(Q)
+ if v in D:
+ continue # already searched this node.
+ sigma[v] += sigma[pred] # count paths
+ S.append(v)
+ D[v] = dist
+ for w, edgedata in G[v].items():
+ vw_dist = dist + weight(v, w, edgedata)
+ if w not in D and (w not in seen or vw_dist < seen[w]):
+ seen[w] = vw_dist
+ push(Q, (vw_dist, next(c), v, w))
+ sigma[w] = 0.0
+ P[w] = [v]
+ elif vw_dist == seen[w]: # handle equal paths
+ sigma[w] += sigma[v]
+ P[w].append(v)
+ return S, P, sigma, D
+
+
+def _accumulate_basic(betweenness, S, P, sigma, s):
+ delta = dict.fromkeys(S, 0)
+ while S:
+ w = S.pop()
+ coeff = (1 + delta[w]) / sigma[w]
+ for v in P[w]:
+ delta[v] += sigma[v] * coeff
+ if w != s:
+ betweenness[w] += delta[w]
+ return betweenness, delta
+
+
+def _accumulate_endpoints(betweenness, S, P, sigma, s):
+ betweenness[s] += len(S) - 1
+ delta = dict.fromkeys(S, 0)
+ while S:
+ w = S.pop()
+ coeff = (1 + delta[w]) / sigma[w]
+ for v in P[w]:
+ delta[v] += sigma[v] * coeff
+ if w != s:
+ betweenness[w] += delta[w] + 1
+ return betweenness, delta
+
+
+def _accumulate_edges(betweenness, S, P, sigma, s):
+ delta = dict.fromkeys(S, 0)
+ while S:
+ w = S.pop()
+ coeff = (1 + delta[w]) / sigma[w]
+ for v in P[w]:
+ c = sigma[v] * coeff
+ if (v, w) not in betweenness:
+ betweenness[(w, v)] += c
+ else:
+ betweenness[(v, w)] += c
+ delta[v] += c
+ if w != s:
+ betweenness[w] += delta[w]
+ return betweenness
+
+
+def _rescale(betweenness, n, normalized, directed=False, k=None, endpoints=False):
+ if normalized:
+ if endpoints:
+ if n < 2:
+ scale = None # no normalization
+ else:
+ # Scale factor should include endpoint nodes
+ scale = 1 / (n * (n - 1))
+ elif n <= 2:
+ scale = None # no normalization b=0 for all nodes
+ else:
+ scale = 1 / ((n - 1) * (n - 2))
+ else: # rescale by 2 for undirected graphs
+ if not directed:
+ scale = 0.5
+ else:
+ scale = None
+ if scale is not None:
+ if k is not None:
+ scale = scale * n / k
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness
+
+
+def _rescale_e(betweenness, n, normalized, directed=False, k=None):
+ if normalized:
+ if n <= 1:
+ scale = None # no normalization b=0 for all nodes
+ else:
+ scale = 1 / (n * (n - 1))
+ else: # rescale by 2 for undirected graphs
+ if not directed:
+ scale = 0.5
+ else:
+ scale = None
+ if scale is not None:
+ if k is not None:
+ scale = scale * n / k
+ for v in betweenness:
+ betweenness[v] *= scale
+ return betweenness
+
+
+@not_implemented_for("graph")
+def _add_edge_keys(G, betweenness, weight=None):
+ r"""Adds the corrected betweenness centrality (BC) values for multigraphs.
+
+ Parameters
+ ----------
+ G : NetworkX graph.
+
+ betweenness : dictionary
+ Dictionary mapping adjacent node tuples to betweenness centrality values.
+
+ weight : string or function
+ See `_weight_function` for details. Defaults to `None`.
+
+ Returns
+ -------
+ edges : dictionary
+ The parameter `betweenness` including edges with keys and their
+ betweenness centrality values.
+
+ The BC value is divided among edges of equal weight.
+ """
+ _weight = _weight_function(G, weight)
+
+ edge_bc = dict.fromkeys(G.edges, 0.0)
+ for u, v in betweenness:
+ d = G[u][v]
+ wt = _weight(u, v, d)
+ keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt]
+ bc = betweenness[(u, v)] / len(keys)
+ for k in keys:
+ edge_bc[(u, v, k)] = bc
+
+ return edge_bc
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f9967e964c8cad1393bd9fe3e91a3409c69cf63
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/betweenness_subset.py
@@ -0,0 +1,274 @@
+"""Betweenness centrality measures for subsets of nodes."""
+import networkx as nx
+from networkx.algorithms.centrality.betweenness import (
+ _add_edge_keys,
+)
+from networkx.algorithms.centrality.betweenness import (
+ _single_source_dijkstra_path_basic as dijkstra,
+)
+from networkx.algorithms.centrality.betweenness import (
+ _single_source_shortest_path_basic as shortest_path,
+)
+
+__all__ = [
+ "betweenness_centrality_subset",
+ "edge_betweenness_centrality_subset",
+]
+
+
+@nx._dispatchable(edge_attrs="weight")
+def betweenness_centrality_subset(G, sources, targets, normalized=False, weight=None):
+    r"""Compute betweenness centrality for a subset of nodes.
+
+    .. math::
+
+       c_B(v) =\sum_{s\in S, t \in T} \frac{\sigma(s, t|v)}{\sigma(s, t)}
+
+    where $S$ is the set of sources, $T$ is the set of targets,
+    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
+    and $\sigma(s, t|v)$ is the number of those paths
+    passing through some node $v$ other than $s, t$.
+    If $s = t$, $\sigma(s, t) = 1$,
+    and if $v \in {s, t}$, $\sigma(s, t|v) = 0$ [2]_.
+
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph.
+
+    sources: list of nodes
+      Nodes to use as sources for shortest paths in betweenness
+
+    targets: list of nodes
+      Nodes to use as targets for shortest paths in betweenness
+
+    normalized : bool, optional
+      If True the betweenness values are normalized by $2/((n-1)(n-2))$
+      for graphs, and $1/((n-1)(n-2))$ for directed graphs where $n$
+      is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      Weights are used to calculate weighted shortest paths, so they are
+      interpreted as distances.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    edge_betweenness_centrality
+    load_centrality
+
+    Notes
+    -----
+    The basic algorithm is from [1]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The normalization might seem a little strange but it is
+    designed to make betweenness_centrality(G) be the same as
+    betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+
+    The total number of paths between source and target is counted
+    differently for directed and undirected graphs. Directed paths
+    are easy to count. Undirected paths are tricky: should a path
+    from "u" to "v" count as 1 undirected path or as 2 directed paths?
+
+    For betweenness_centrality we report the number of undirected
+    paths when G is undirected.
+
+    For betweenness_centrality_subset the reporting is different.
+    If the source and target subsets are the same, then we want
+    to count undirected paths. But if the source and target subsets
+    differ -- for example, if sources is {0} and targets is {1},
+    then we are only counting the paths in one direction. They are
+    undirected paths but we are counting them in a directed way.
+    To count them as undirected paths, each should count as half a path.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       https://doi.org/10.1016/j.socnet.2007.11.001
+    """
+    b = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    for s in sources:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        b = _accumulate_subset(b, S, P, sigma, s, targets)  # count only paths ending in `targets`
+    b = _rescale(b, len(G), normalized=normalized, directed=G.is_directed())
+    return b
+
+
+@nx._dispatchable(edge_attrs="weight")
+def edge_betweenness_centrality_subset(
+    G, sources, targets, normalized=False, weight=None
+):
+    r"""Compute betweenness centrality for edges for a subset of nodes.
+
+    .. math::
+
+       c_B(v) =\sum_{s\in S,t \in T} \frac{\sigma(s, t|e)}{\sigma(s, t)}
+
+    where $S$ is the set of sources, $T$ is the set of targets,
+    $\sigma(s, t)$ is the number of shortest $(s, t)$-paths,
+    and $\sigma(s, t|e)$ is the number of those paths
+    passing through edge $e$ [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A networkx graph.
+
+    sources: list of nodes
+      Nodes to use as sources for shortest paths in betweenness
+
+    targets: list of nodes
+      Nodes to use as targets for shortest paths in betweenness
+
+    normalized : bool, optional
+      If True the betweenness values are normalized by `2/(n(n-1))`
+      for graphs, and `1/(n(n-1))` for directed graphs where `n`
+      is the number of nodes in G.
+
+    weight : None or string, optional (default=None)
+      If None, all edge weights are considered equal.
+      Otherwise holds the name of the edge attribute used as weight.
+      Weights are used to calculate weighted shortest paths, so they are
+      interpreted as distances.
+
+    Returns
+    -------
+    edges : dictionary
+       Dictionary of edges with Betweenness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_load
+
+    Notes
+    -----
+    The basic algorithm is from [1]_.
+
+    For weighted graphs the edge weights must be greater than zero.
+    Zero edge weights can produce an infinite number of equal length
+    paths between pairs of nodes.
+
+    The normalization might seem a little strange but it is the same
+    as in edge_betweenness_centrality() and is designed to make
+    edge_betweenness_centrality(G) be the same as
+    edge_betweenness_centrality_subset(G,sources=G.nodes(),targets=G.nodes()).
+
+    References
+    ----------
+    .. [1] Ulrik Brandes, A Faster Algorithm for Betweenness Centrality.
+       Journal of Mathematical Sociology 25(2):163-177, 2001.
+       https://doi.org/10.1080/0022250X.2001.9990249
+    .. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
+       Centrality and their Generic Computation.
+       Social Networks 30(2):136-145, 2008.
+       https://doi.org/10.1016/j.socnet.2007.11.001
+    """
+    b = dict.fromkeys(G, 0.0)  # b[v]=0 for v in G
+    b.update(dict.fromkeys(G.edges(), 0.0))  # b[e] for e in G.edges()
+    for s in sources:
+        # single source shortest paths
+        if weight is None:  # use BFS
+            S, P, sigma, _ = shortest_path(G, s)
+        else:  # use Dijkstra's algorithm
+            S, P, sigma, _ = dijkstra(G, s, weight)
+        b = _accumulate_edges_subset(b, S, P, sigma, s, targets)  # count only paths ending in `targets`
+    for n in G:  # remove nodes to only return edges
+        del b[n]
+    b = _rescale_e(b, len(G), normalized=normalized, directed=G.is_directed())
+    if G.is_multigraph():
+        b = _add_edge_keys(G, b, weight=weight)  # distribute BC over keyed parallel edges
+    return b
+
+
+def _accumulate_subset(betweenness, S, P, sigma, s, targets):  # betweenness_centrality_subset helper: Brandes back-propagation restricted to `targets`
+    delta = dict.fromkeys(S, 0.0)  # dependency of s on each node
+    target_set = set(targets) - {s}  # paths ending at s itself contribute nothing
+    while S:
+        w = S.pop()  # S is sorted by non-decreasing distance from s; pop = reverse order
+        if w in target_set:
+            coeff = (delta[w] + 1.0) / sigma[w]  # w is a target: count paths ending at w too
+        else:
+            coeff = delta[w] / sigma[w]
+        for v in P[w]:
+            delta[v] += sigma[v] * coeff  # share of w's dependency credited to predecessor v
+        if w != s:
+            betweenness[w] += delta[w]
+    return betweenness
+
+
+def _accumulate_edges_subset(betweenness, S, P, sigma, s, targets):
+    """edge_betweenness_centrality_subset helper."""
+    delta = dict.fromkeys(S, 0)  # dependency accumulator (int 0; float values accumulate below)
+    target_set = set(targets)  # unlike the node variant, s is not removed here
+    while S:
+        w = S.pop()  # reverse order of non-decreasing distance from s
+        for v in P[w]:
+            if w in target_set:
+                c = (sigma[v] / sigma[w]) * (1.0 + delta[w])  # w is a target: count paths ending at w
+            else:
+                c = delta[w] / len(P[w])  # split dependency evenly among predecessors
+            if (v, w) not in betweenness:
+                betweenness[(w, v)] += c  # edge stored under the opposite orientation
+            else:
+                betweenness[(v, w)] += c
+            delta[v] += c
+        if w != s:
+            betweenness[w] += delta[w]  # node entries are deleted by the caller afterwards
+    return betweenness
+
+
+def _rescale(betweenness, n, normalized, directed=False):
+    """betweenness_centrality_subset helper."""
+    if normalized:
+        if n <= 2:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1.0 / ((n - 1) * (n - 2))  # divide by ordered pairs excluding the node itself
+    else:  # rescale by 2 for undirected graphs
+        if not directed:
+            scale = 0.5  # each undirected path was counted once per direction
+        else:
+            scale = None
+    if scale is not None:
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
+
+
+def _rescale_e(betweenness, n, normalized, directed=False):
+    """edge_betweenness_centrality_subset helper."""
+    if normalized:
+        if n <= 1:
+            scale = None  # no normalization b=0 for all nodes
+        else:
+            scale = 1.0 / (n * (n - 1))  # divide by the number of ordered node pairs
+    else:  # rescale by 2 for undirected graphs
+        if not directed:
+            scale = 0.5  # each undirected path was counted once per direction
+        else:
+            scale = None
+    if scale is not None:
+        for v in betweenness:
+            betweenness[v] *= scale
+    return betweenness
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/closeness.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/closeness.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c1722d4ed4cd5681867a1c738da529db1dece9b
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/closeness.py
@@ -0,0 +1,281 @@
+"""
+Closeness centrality measures.
+"""
+import functools
+
+import networkx as nx
+from networkx.exception import NetworkXError
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["closeness_centrality", "incremental_closeness_centrality"]
+
+
+@nx._dispatchable(edge_attrs="distance")
+def closeness_centrality(G, u=None, distance=None, wf_improved=True):
+    r"""Compute closeness centrality for nodes.
+
+    Closeness centrality [1]_ of a node `u` is the reciprocal of the
+    average shortest path distance to `u` over all `n-1` reachable nodes.
+
+    .. math::
+
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    where `d(v, u)` is the shortest-path distance between `v` and `u`,
+    and `n-1` is the number of nodes reachable from `u`. Notice that the
+    closeness distance function computes the incoming distance to `u`
+    for directed graphs. To use outward distance, act on `G.reverse()`.
+
+    Notice that higher values of closeness indicate higher centrality.
+
+    Wasserman and Faust propose an improved formula for graphs with
+    more than one connected component. The result is "a ratio of the
+    fraction of actors in the group who are reachable, to the average
+    distance" from the reachable actors [2]_. You might think this
+    scale factor is inverted but it is not. As is, nodes from small
+    components receive a smaller closeness value. Letting `N` denote
+    the number of nodes in the graph,
+
+    .. math::
+
+        C_{WF}(u) = \frac{n-1}{N-1} \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    u : node, optional
+      Return only the value for node u
+
+    distance : edge attribute key, optional (default=None)
+      Use the specified edge attribute as the edge distance in shortest
+      path calculations. If `None` (the default) all edges have a distance of 1.
+      Absent edge attributes are assigned a distance of 1. Note that no check
+      is performed to ensure that edges have the provided attribute.
+
+    wf_improved : bool, optional (default=True)
+      If True, scale by the fraction of nodes reachable. This gives the
+      Wasserman and Faust improved formula. For single component graphs
+      it is the same as the original formula.
+
+    Returns
+    -------
+    nodes : dictionary
+      Dictionary of nodes with closeness centrality as the value.
+
+    Examples
+    --------
+    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
+    >>> nx.closeness_centrality(G)
+    {0: 1.0, 1: 1.0, 2: 0.75, 3: 0.75}
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, incremental_closeness_centrality
+
+    Notes
+    -----
+    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+    `n` is the number of nodes in the connected part of graph
+    containing the node. If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately scaled by that parts size.
+
+    If the 'distance' keyword is set to an edge attribute key then the
+    shortest-path length will be computed using Dijkstra's algorithm with
+    that edge attribute as the edge weight.
+
+    The closeness centrality uses *inward* distance to a node, not outward.
+    If you want to use outward distances apply the function to `G.reverse()`
+
+    In NetworkX 2.2 and earlier a bug caused Dijkstra's algorithm to use the
+    outward distance rather than the inward distance. If you use a 'distance'
+    keyword and a DiGraph, your results will change between v2.2 and v2.3.
+
+    References
+    ----------
+    .. [1] Linton C. Freeman: Centrality in networks: I.
+       Conceptual clarification. Social Networks 1:215-239, 1979.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] pg. 201 of Wasserman, S. and Faust, K.,
+       Social Network Analysis: Methods and Applications, 1994,
+       Cambridge University Press.
+    """
+    if G.is_directed():
+        G = G.reverse()  # create a reversed graph view
+
+    if distance is not None:
+        # use Dijkstra's algorithm with specified attribute as edge weight
+        path_length = functools.partial(
+            nx.single_source_dijkstra_path_length, weight=distance
+        )
+    else:
+        path_length = nx.single_source_shortest_path_length
+
+    if u is None:
+        nodes = G.nodes
+    else:
+        nodes = [u]  # restrict computation to the single requested node
+    closeness_dict = {}
+    for n in nodes:
+        sp = path_length(G, n)  # inward distances: digraphs were reversed above
+        totsp = sum(sp.values())  # total distance over reachable nodes (d(n,n)=0 included)
+        len_G = len(G)
+        _closeness_centrality = 0.0  # default for isolated nodes / trivial graphs
+        if totsp > 0.0 and len_G > 1:
+            _closeness_centrality = (len(sp) - 1.0) / totsp
+            # normalize to number of nodes-1 in connected part
+            if wf_improved:
+                s = (len(sp) - 1.0) / (len_G - 1)  # fraction of other nodes reachable
+                _closeness_centrality *= s
+        closeness_dict[n] = _closeness_centrality
+    if u is not None:
+        return closeness_dict[u]  # scalar result when a single node was requested
+    return closeness_dict
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(mutates_input=True)
+def incremental_closeness_centrality(
+    G, edge, prev_cc=None, insertion=True, wf_improved=True
+):
+    r"""Incremental closeness centrality for nodes.
+
+    Compute closeness centrality for nodes using level-based work filtering
+    as described in Incremental Algorithms for Closeness Centrality by Sariyuce et al.
+
+    Level-based work filtering detects unnecessary updates to the closeness
+    centrality and filters them out.
+
+    ---
+    From "Incremental Algorithms for Closeness Centrality":
+
+    Theorem 1: Let :math:`G = (V, E)` be a graph and u and v be two vertices in V
+    such that there is no edge (u, v) in E. Let :math:`G' = (V, E \cup uv)`
+    Then :math:`cc[s] = cc'[s]` if and only if :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`.
+
+    Where :math:`dG(u, v)` denotes the length of the shortest path between
+    two vertices u, v in a graph G, cc[s] is the closeness centrality for a
+    vertex s in V, and cc'[s] is the closeness centrality for a
+    vertex s in V, with the (u, v) edge added.
+    ---
+
+    We use Theorem 1 to filter out updates when adding or removing an edge.
+    When adding an edge (u, v), we compute the shortest path lengths from all
+    other nodes to u and to v before the node is added. When removing an edge,
+    we compute the shortest path lengths after the edge is removed. Then we
+    apply Theorem 1 to use previously computed closeness centrality for nodes
+    where :math:`\left|dG(s, u) - dG(s, v)\right| \leq 1`. This works only for
+    undirected, unweighted graphs; the distance argument is not supported.
+
+    Closeness centrality [1]_ of a node `u` is the reciprocal of the
+    sum of the shortest path distances from `u` to all `n-1` other nodes.
+    Since the sum of distances depends on the number of nodes in the
+    graph, closeness is normalized by the sum of minimum possible
+    distances `n-1`.
+
+    .. math::
+
+        C(u) = \frac{n - 1}{\sum_{v=1}^{n-1} d(v, u)},
+
+    where `d(v, u)` is the shortest-path distance between `v` and `u`,
+    and `n` is the number of nodes in the graph.
+
+    Notice that higher values of closeness indicate higher centrality.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    edge : tuple
+      The modified edge (u, v) in the graph.
+
+    prev_cc : dictionary
+      The previous closeness centrality for all nodes in the graph.
+
+    insertion : bool, optional
+      If True (default) the edge was inserted, otherwise it was deleted from the graph.
+
+    wf_improved : bool, optional (default=True)
+      If True, scale by the fraction of nodes reachable. This gives the
+      Wasserman and Faust improved formula. For single component graphs
+      it is the same as the original formula.
+
+    Returns
+    -------
+    nodes : dictionary
+      Dictionary of nodes with closeness centrality as the value.
+
+    See Also
+    --------
+    betweenness_centrality, load_centrality, eigenvector_centrality,
+    degree_centrality, closeness_centrality
+
+    Notes
+    -----
+    The closeness centrality is normalized to `(n-1)/(|G|-1)` where
+    `n` is the number of nodes in the connected part of graph
+    containing the node. If the graph is not completely connected,
+    this algorithm computes the closeness centrality for each
+    connected part separately.
+
+    References
+    ----------
+    .. [1] Freeman, L.C., 1979. Centrality in networks: I.
+       Conceptual clarification.  Social Networks 1, 215--239.
+       https://doi.org/10.1016/0378-8733(78)90021-7
+    .. [2] Sariyuce, A.E. ; Kaya, K. ; Saule, E. ; Catalyurek, U.V. Incremental
+       Algorithms for Closeness Centrality. 2013 IEEE International Conference on Big Data
+       http://sariyuce.com/papers/bigdata13.pdf
+    """
+    if prev_cc is not None and set(prev_cc.keys()) != set(G.nodes()):
+        raise NetworkXError("prev_cc and G do not have the same nodes")
+
+    # Unpack edge
+    (u, v) = edge
+    path_length = nx.single_source_shortest_path_length
+
+    if insertion:
+        # For edge insertion, we want shortest paths before the edge is inserted
+        du = path_length(G, u)
+        dv = path_length(G, v)
+
+        G.add_edge(u, v)
+    else:
+        G.remove_edge(u, v)
+
+        # For edge removal, we want shortest paths after the edge is removed
+        du = path_length(G, u)
+        dv = path_length(G, v)
+
+    if prev_cc is None:
+        return nx.closeness_centrality(G)  # NOTE(review): returns without restoring the edge change made above
+
+    nodes = G.nodes()
+    closeness_dict = {}
+    for n in nodes:
+        if n in du and n in dv and abs(du[n] - dv[n]) <= 1:
+            closeness_dict[n] = prev_cc[n]  # Theorem 1: closeness unchanged for this node
+        else:
+            sp = path_length(G, n)  # recompute from scratch for affected nodes
+            totsp = sum(sp.values())
+            len_G = len(G)
+            _closeness_centrality = 0.0
+            if totsp > 0.0 and len_G > 1:
+                _closeness_centrality = (len(sp) - 1.0) / totsp
+                # normalize to number of nodes-1 in connected part
+                if wf_improved:
+                    s = (len(sp) - 1.0) / (len_G - 1)  # fraction of other nodes reachable
+                    _closeness_centrality *= s
+            closeness_dict[n] = _closeness_centrality
+
+    # Leave the graph as we found it
+    if insertion:
+        G.remove_edge(u, v)
+    else:
+        G.add_edge(u, v)
+
+    return closeness_dict
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py
new file mode 100644
index 0000000000000000000000000000000000000000..b79a4c801e887d0466348d9c5782ca1a763eee66
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness.py
@@ -0,0 +1,341 @@
+"""Current-flow betweenness centrality measures."""
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import (
+ CGInverseLaplacian,
+ FullInverseLaplacian,
+ SuperLUInverseLaplacian,
+ flow_matrix_row,
+)
+from networkx.utils import (
+ not_implemented_for,
+ py_random_state,
+ reverse_cuthill_mckee_ordering,
+)
+
+__all__ = [
+ "current_flow_betweenness_centrality",
+ "approximate_current_flow_betweenness_centrality",
+ "edge_current_flow_betweenness_centrality",
+]
+
+
+@not_implemented_for("directed")
+@py_random_state(7)
+@nx._dispatchable(edge_attrs="weight")
+def approximate_current_flow_betweenness_centrality(
+    G,
+    normalized=True,
+    weight=None,
+    dtype=float,
+    solver="full",
+    epsilon=0.5,
+    kmax=10000,
+    seed=None,
+):
+    r"""Compute the approximate current-flow betweenness centrality for nodes.
+
+    Approximates the current-flow betweenness centrality within absolute
+    error of epsilon with high probability [1]_.
+
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    epsilon: float
+        Absolute error tolerance.
+
+    kmax: int
+       Maximum number of sample node pairs to use for approximation.
+
+    seed : integer, random_state, or None (default)
+        Indicator of random number generation state.
+        See :ref:`Randomness`.
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$
+    and the space required is $O(m)$ for $n$ nodes and $m$ edges.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Ulrik Brandes and Daniel Fleischer:
+       Centrality Measures Based on Current Flow.
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+    """
+    import numpy as np
+
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    solvername = {
+        "full": FullInverseLaplacian,
+        "lu": SuperLUInverseLaplacian,
+        "cg": CGInverseLaplacian,
+    }
+    n = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
+    L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc")
+    L = L.astype(dtype)
+    C = solvername[solver](L, dtype=dtype)  # initialize solver
+    betweenness = dict.fromkeys(H, 0.0)
+    nb = (n - 1.0) * (n - 2.0)  # normalization factor
+    cstar = n * (n - 1) / nb
+    l = 1  # parameter in approximation, adjustable
+    k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n)))  # number of sampled pairs
+    if k > kmax:
+        msg = f"Number random pairs k>kmax ({k}>{kmax}) "
+        raise nx.NetworkXError(msg, "Increase kmax or epsilon")
+    cstar2k = cstar / (2 * k)  # per-sample scaling factor
+    for _ in range(k):
+        s, t = pair = seed.sample(range(n), 2)  # random distinct source/sink pair
+        b = np.zeros(n, dtype=dtype)
+        b[s] = 1
+        b[t] = -1
+        p = C.solve(b)  # node potentials for a unit s-t current
+        for v in H:
+            if v in pair:
+                continue  # endpoints do not accumulate betweenness
+            for nbr in H[v]:
+                w = H[v][nbr].get(weight, 1.0)
+                betweenness[v] += float(w * np.abs(p[v] - p[nbr]) * cstar2k)
+    if normalized:
+        factor = 1.0
+    else:
+        factor = nb / 2.0
+    # remap to original node names and "unnormalize" if required
+    return {ordering[k]: v * factor for k, v in betweenness.items()}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def current_flow_betweenness_centrality(
+    G, normalized=True, weight=None, dtype=float, solver="full"
+):
+    r"""Compute current-flow betweenness centrality for nodes.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of nodes with betweenness centrality as the value.
+
+    See Also
+    --------
+    approximate_current_flow_betweenness_centrality
+    betweenness_centrality
+    edge_betweenness_centrality
+    edge_current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix.  Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    betweenness = dict.fromkeys(H, 0.0)  # b[n]=0 for n in H
+    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(N)))  # rank of each node's current value
+        for i in range(N):
+            betweenness[s] += (i - pos[i]) * row.item(i)
+            betweenness[t] += (N - i - 1 - pos[i]) * row.item(i)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    return {ordering[n]: (b - n) * 2.0 / nb for n, b in betweenness.items()}
+
+
+@not_implemented_for("directed")
+@nx._dispatchable(edge_attrs="weight")
+def edge_current_flow_betweenness_centrality(
+    G, normalized=True, weight=None, dtype=float, solver="full"
+):
+    r"""Compute current-flow betweenness centrality for edges.
+
+    Current-flow betweenness centrality uses an electrical current
+    model for information spreading in contrast to betweenness
+    centrality which uses shortest paths.
+
+    Current-flow betweenness centrality is also known as
+    random-walk betweenness centrality [2]_.
+
+    Parameters
+    ----------
+    G : graph
+      A NetworkX graph
+
+    normalized : bool, optional (default=True)
+      If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
+      n is the number of nodes in G.
+
+    weight : string or None, optional (default=None)
+      Key for edge data used as the edge weight.
+      If None, then use 1 as each edge weight.
+      The weight reflects the capacity or the strength of the
+      edge.
+
+    dtype : data type (default=float)
+      Default data type for internal matrices.
+      Set to np.float32 for lower memory consumption.
+
+    solver : string (default='full')
+       Type of linear solver to use for computing the flow matrix.
+       Options are "full" (uses most memory), "lu" (recommended), and
+       "cg" (uses least memory).
+
+    Returns
+    -------
+    nodes : dictionary
+       Dictionary of edge tuples with betweenness centrality as the value.
+
+    Raises
+    ------
+    NetworkXError
+        The algorithm does not support DiGraphs.
+        If the input graph is an instance of DiGraph class, NetworkXError
+        is raised.
+
+    See Also
+    --------
+    betweenness_centrality
+    edge_betweenness_centrality
+    current_flow_betweenness_centrality
+
+    Notes
+    -----
+    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
+    time [1]_, where $I(n-1)$ is the time needed to compute the
+    inverse Laplacian.  For a full matrix this is $O(n^3)$ but using
+    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
+    Laplacian matrix condition number.
+
+    The space required is $O(nw)$ where $w$ is the width of the sparse
+    Laplacian matrix.  Worst case is $w=n$ for $O(n^2)$.
+
+    If the edges have a 'weight' attribute they will be used as
+    weights in this algorithm.  Unspecified weights are set to 1.
+
+    References
+    ----------
+    .. [1] Centrality Measures Based on Current Flow.
+       Ulrik Brandes and Daniel Fleischer,
+       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
+       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
+       https://doi.org/10.1007/978-3-540-31856-9_44
+
+    .. [2] A measure of betweenness centrality based on random walks,
+       M. E. J. Newman, Social Networks 27, 39-54 (2005).
+    """
+    if not nx.is_connected(G):
+        raise nx.NetworkXError("Graph not connected.")
+    N = G.number_of_nodes()
+    ordering = list(reverse_cuthill_mckee_ordering(G))
+    # make a copy with integer labels according to rcm ordering
+    # this could be done without a copy if we really wanted to
+    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
+    edges = (tuple(sorted((u, v))) for u, v in H.edges())  # canonical (min, max) edge keys
+    betweenness = dict.fromkeys(edges, 0.0)
+    if normalized:
+        nb = (N - 1.0) * (N - 2.0)  # normalization factor
+    else:
+        nb = 2.0
+    for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
+        pos = dict(zip(row.argsort()[::-1], range(1, N + 1)))  # 1-based rank of current values
+        for i in range(N):
+            betweenness[e] += (i + 1 - pos[i]) * row.item(i)
+            betweenness[e] += (N - i - pos[i]) * row.item(i)
+        betweenness[e] /= nb
+    return {(ordering[s], ordering[t]): b for (s, t), b in betweenness.items()}
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6790b218e9d2e64b5f51d1858b05aa78144ba7d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_betweenness_subset.py
@@ -0,0 +1,226 @@
+"""Current-flow betweenness centrality measures for subsets of nodes."""
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import flow_matrix_row
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = [
+ "current_flow_betweenness_centrality_subset",
+ "edge_current_flow_betweenness_centrality_subset",
+]
+
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
):
    r"""Compute current-flow betweenness centrality for subsets of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    sources: list of nodes
        Nodes to use as sources for current

    targets: list of nodes
        Nodes to use as sinks for current

    normalized : bool, optional (default=True)
        If True the betweenness values are normalized by b=b/(n-1)(n-2) where
        n is the number of nodes in G.

    weight : string or None, optional (default=None)
        Key for edge data used as the edge weight.
        If None, then use 1 as each edge weight.
        The weight reflects the capacity or the strength of the
        edge.

    dtype: data type (float)
        Default data type for internal matrices.
        Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
        Type of linear solver to use for computing the flow matrix.
        Options are "full" (uses most memory), "lu" (recommended), and
        "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with betweenness centrality as the value.

    See Also
    --------
    approximate_current_flow_betweenness_centrality
    betweenness_centrality
    edge_betweenness_centrality
    edge_current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
    time [1]_, where $I(n-1)$ is the time needed to compute the
    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
    Laplacian matrix condition number.

    The space required is $O(nw)$ where $w$ is the width of the sparse
    Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel nodes to integers 0..N-1 following the RCM ordering; this keeps
    # the bandwidth of the Laplacian small for the sparse solvers used by
    # flow_matrix_row. (A copy is made; this could be avoided in principle.)
    mapping = dict(zip(ordering, range(N)))
    H = nx.relabel_nodes(G, mapping)
    betweenness = dict.fromkeys(H, 0.0)  # b[n] = 0 for n in H
    for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                # Current through edge (s, t) for source/sink pair (ss, tt);
                # each endpoint accumulates half of the absolute flow.
                betweenness[s] += 0.5 * abs(row.item(i) - row.item(j))
                betweenness[t] += 0.5 * abs(row.item(i) - row.item(j))
    if normalized:
        nb = (N - 1.0) * (N - 2.0)  # normalization factor
    else:
        nb = 2.0
    for node in H:
        betweenness[node] = betweenness[node] / nb + 1.0 / (2 - N)
    # Translate the integer labels back to the original node identifiers.
    return {ordering[node]: value for node, value in betweenness.items()}
+
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def edge_current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, dtype=float, solver="lu"
):
    r"""Compute current-flow betweenness centrality for edges using subsets
    of nodes.

    Current-flow betweenness centrality uses an electrical current
    model for information spreading in contrast to betweenness
    centrality which uses shortest paths.

    Current-flow betweenness centrality is also known as
    random-walk betweenness centrality [2]_.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    sources: list of nodes
        Nodes to use as sources for current

    targets: list of nodes
        Nodes to use as sinks for current

    normalized : bool, optional (default=True)
        If True the betweenness values are normalized by b=b/(n-1)(n-2) where
        n is the number of nodes in G.

    weight : string or None, optional (default=None)
        Key for edge data used as the edge weight.
        If None, then use 1 as each edge weight.
        The weight reflects the capacity or the strength of the
        edge.

    dtype: data type (float)
        Default data type for internal matrices.
        Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
        Type of linear solver to use for computing the flow matrix.
        Options are "full" (uses most memory), "lu" (recommended), and
        "cg" (uses least memory).

    Returns
    -------
    edges : dict
        Dictionary of edge tuples with betweenness centrality as the value.

    See Also
    --------
    betweenness_centrality
    edge_betweenness_centrality
    current_flow_betweenness_centrality

    Notes
    -----
    Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
    time [1]_, where $I(n-1)$ is the time needed to compute the
    inverse Laplacian. For a full matrix this is $O(n^3)$ but using
    sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
    Laplacian matrix condition number.

    The space required is $O(nw)$ where $w$ is the width of the sparse
    Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.

    If the edges have a 'weight' attribute they will be used as
    weights in this algorithm. Unspecified weights are set to 1.

    References
    ----------
    .. [1] Centrality Measures Based on Current Flow.
       Ulrik Brandes and Daniel Fleischer,
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] A measure of betweenness centrality based on random walks,
       M. E. J. Newman, Social Networks 27, 39-54 (2005).
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel nodes to integers 0..N-1 following the RCM ordering; this keeps
    # the bandwidth of the Laplacian small for the sparse solvers used by
    # flow_matrix_row. (A copy is made; this could be avoided in principle.)
    mapping = dict(zip(ordering, range(N)))
    H = nx.relabel_nodes(G, mapping)
    # Canonical (sorted) orientation for each undirected edge.
    edges = (tuple(sorted((u, v))) for u, v in H.edges())
    betweenness = dict.fromkeys(edges, 0.0)
    if normalized:
        nb = (N - 1.0) * (N - 2.0)  # normalization factor
    else:
        nb = 2.0
    for row, e in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
        for ss in sources:
            i = mapping[ss]
            for tt in targets:
                j = mapping[tt]
                # Current through edge e for source/sink pair (ss, tt).
                betweenness[e] += 0.5 * abs(row.item(i) - row.item(j))
        betweenness[e] /= nb
    # Translate the integer labels back to the original node identifiers.
    return {(ordering[s], ordering[t]): value for (s, t), value in betweenness.items()}
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
new file mode 100644
index 0000000000000000000000000000000000000000..92c892f74494bcd32ae82943c8c0afb8bc041685
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/current_flow_closeness.py
@@ -0,0 +1,95 @@
+"""Current-flow closeness centrality measures."""
+import networkx as nx
+from networkx.algorithms.centrality.flow_matrix import (
+ CGInverseLaplacian,
+ FullInverseLaplacian,
+ SuperLUInverseLaplacian,
+)
+from networkx.utils import not_implemented_for, reverse_cuthill_mckee_ordering
+
+__all__ = ["current_flow_closeness_centrality", "information_centrality"]
+
+
@not_implemented_for("directed")
@nx._dispatchable(edge_attrs="weight")
def current_flow_closeness_centrality(G, weight=None, dtype=float, solver="lu"):
    """Compute current-flow closeness centrality for nodes.

    Current-flow closeness centrality is variant of closeness
    centrality based on effective resistance between nodes in
    a network. This metric is also known as information centrality.

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    weight : None or string, optional (default=None)
        If None, all edge weights are considered equal.
        Otherwise holds the name of the edge attribute used as weight.
        The weight reflects the capacity or the strength of the
        edge.

    dtype: data type (default=float)
        Default data type for internal matrices.
        Set to np.float32 for lower memory consumption.

    solver: string (default='lu')
        Type of linear solver to use for computing the flow matrix.
        Options are "full" (uses most memory), "lu" (recommended), and
        "cg" (uses least memory).

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with current flow closeness centrality as the value.

    See Also
    --------
    closeness_centrality

    Notes
    -----
    The algorithm is from Brandes [1]_.

    See also [2]_ for the original definition of information centrality.

    References
    ----------
    .. [1] Ulrik Brandes and Daniel Fleischer,
       Centrality Measures Based on Current Flow.
       Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
       LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
       https://doi.org/10.1007/978-3-540-31856-9_44

    .. [2] Karen Stephenson and Marvin Zelen:
       Rethinking centrality: Methods and examples.
       Social Networks 11(1):1-37, 1989.
       https://doi.org/10.1016/0378-8733(89)90016-6
    """
    if not nx.is_connected(G):
        raise nx.NetworkXError("Graph not connected.")
    # Map solver names to the corresponding inverse-Laplacian implementations.
    solvername = {
        "full": FullInverseLaplacian,
        "lu": SuperLUInverseLaplacian,
        "cg": CGInverseLaplacian,
    }
    N = G.number_of_nodes()
    ordering = list(reverse_cuthill_mckee_ordering(G))
    # Relabel nodes to integers 0..N-1 following the RCM ordering; this keeps
    # the bandwidth of the Laplacian small for the sparse solvers below.
    # (Relabeling preserves the node count, so N stays valid for H.)
    H = nx.relabel_nodes(G, dict(zip(ordering, range(N))))
    # Per-node "farness": accumulated effective-resistance terms; the
    # centrality returned is the reciprocal of this sum.
    farness = dict.fromkeys(H, 0.0)
    L = nx.laplacian_matrix(H, nodelist=range(N), weight=weight).asformat("csc")
    L = L.astype(dtype)
    C2 = solvername[solver](L, width=1, dtype=dtype)  # initialize solver
    for v in H:
        col = C2.get_row(v)
        for w in H:
            farness[v] += col.item(v) - 2 * col.item(w)
            farness[w] += col.item(v)
    # Translate the integer labels back to the original node identifiers.
    return {ordering[node]: 1 / value for node, value in farness.items()}
+
+
# Alias: information centrality (Stephenson & Zelen, see reference [2] in the
# docstring above) is computed by the same routine.
information_centrality = current_flow_closeness_centrality
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea53f41ea3e64112b31c140eadc9353b84663207
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/centrality/degree_alg.py
@@ -0,0 +1,149 @@
+"""Degree centrality measures."""
+import networkx as nx
+from networkx.utils.decorators import not_implemented_for
+
+__all__ = ["degree_centrality", "in_degree_centrality", "out_degree_centrality"]
+
+
@nx._dispatchable
def degree_centrality(G):
    """Compute the degree centrality for nodes.

    The degree centrality of a node is the fraction of the other nodes
    it is connected to, i.e. its degree divided by ``n - 1``.

    Parameters
    ----------
    G : graph
        A networkx graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with degree centrality as the value.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.degree_centrality(G)
    {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666}

    See Also
    --------
    betweenness_centrality, load_centrality, eigenvector_centrality

    Notes
    -----
    Values are normalized by the maximum possible degree in a simple
    graph, ``n - 1``. For multigraphs or graphs with self loops the
    degree can exceed ``n - 1``, so centralities greater than 1 are
    possible.
    """
    n_nodes = len(G)
    if n_nodes <= 1:
        # With at most one node there is nothing to normalize by.
        return {node: 1 for node in G}

    scale = 1.0 / (n_nodes - 1.0)
    return {node: deg * scale for node, deg in G.degree()}
+
+
@not_implemented_for("undirected")
@nx._dispatchable
def in_degree_centrality(G):
    """Compute the in-degree centrality for nodes.

    The in-degree centrality of a node is the fraction of the other
    nodes its incoming edges are connected to, i.e. its in-degree
    divided by ``n - 1``.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with in-degree centrality as values.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.in_degree_centrality(G)
    {0: 0.0, 1: 0.3333333333333333, 2: 0.6666666666666666, 3: 0.6666666666666666}

    See Also
    --------
    degree_centrality, out_degree_centrality

    Notes
    -----
    Values are normalized by the maximum possible degree in a simple
    graph, ``n - 1``. For multigraphs or graphs with self loops the
    degree can exceed ``n - 1``, so centralities greater than 1 are
    possible.
    """
    n_nodes = len(G)
    if n_nodes <= 1:
        # With at most one node there is nothing to normalize by.
        return {node: 1 for node in G}

    scale = 1.0 / (n_nodes - 1.0)
    return {node: deg * scale for node, deg in G.in_degree()}
+
+
@not_implemented_for("undirected")
@nx._dispatchable
def out_degree_centrality(G):
    """Compute the out-degree centrality for nodes.

    The out-degree centrality of a node is the fraction of the other
    nodes its outgoing edges are connected to, i.e. its out-degree
    divided by ``n - 1``.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    nodes : dictionary
        Dictionary of nodes with out-degree centrality as values.

    Raises
    ------
    NetworkXNotImplemented
        If G is undirected.

    Examples
    --------
    >>> G = nx.DiGraph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])
    >>> nx.out_degree_centrality(G)
    {0: 1.0, 1: 0.6666666666666666, 2: 0.0, 3: 0.0}

    See Also
    --------
    degree_centrality, in_degree_centrality

    Notes
    -----
    Values are normalized by the maximum possible degree in a simple
    graph, ``n - 1``. For multigraphs or graphs with self loops the
    degree can exceed ``n - 1``, so centralities greater than 1 are
    possible.
    """
    n_nodes = len(G)
    if n_nodes <= 1:
        # With at most one node there is nothing to normalize by.
        return {node: 1 for node in G}

    scale = 1.0 / (n_nodes - 1.0)
    return {node: deg * scale for node, deg in G.out_degree()}
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/chains.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/chains.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae342d9c8669acd832a3bdb4fe8eecf3e300464f
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/chains.py
@@ -0,0 +1,172 @@
+"""Functions for finding chains in a graph."""
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = ["chain_decomposition"]
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def chain_decomposition(G, root=None):
    """Returns the chain decomposition of a graph.

    The *chain decomposition* of a graph with respect a depth-first
    search tree is a set of cycles or paths derived from the set of
    fundamental cycles of the tree in the following manner. Consider
    each fundamental cycle with respect to the given tree, represented
    as a list of edges beginning with the nontree edge oriented away
    from the root of the tree. For each fundamental cycle, if it
    overlaps with any previous fundamental cycle, just take the initial
    non-overlapping segment, which is a path instead of a cycle. Each
    cycle or path is called a *chain*. For more information, see [1]_.

    Parameters
    ----------
    G : undirected graph

    root : node (optional)
        A node in the graph `G`. If specified, only the chain
        decomposition for the connected component containing this node
        will be returned. This node indicates the root of the depth-first
        search tree.

    Yields
    ------
    chain : list
        A list of edges representing a chain. There is no guarantee on
        the orientation of the edges in each chain (for example, if a
        chain includes the edge joining nodes 1 and 2, the chain may
        include either (1, 2) or (2, 1)).

    Raises
    ------
    NodeNotFound
        If `root` is not in the graph `G`.

    Examples
    --------
    >>> G = nx.Graph([(0, 1), (1, 4), (3, 4), (3, 5), (4, 5)])
    >>> list(nx.chain_decomposition(G))
    [[(4, 5), (5, 3), (3, 4)]]

    Notes
    -----
    The worst-case running time of this implementation is linear in the
    number of nodes and number of edges [1]_.

    References
    ----------
    .. [1] Jens M. Schmidt (2013). "A simple test on 2-vertex-
       and 2-edge-connectivity." *Information Processing Letters*,
       113, 241–244. Elsevier.

    """

    def _dfs_cycle_forest(G, root=None):
        """Builds a directed graph composed of cycles from the given graph.

        `G` is an undirected simple graph. `root` is a node in the graph
        from which the depth-first search is started.

        This function returns both the depth-first search cycle graph
        (as a :class:`~networkx.DiGraph`) and the list of nodes in
        depth-first preorder. The depth-first search cycle graph is a
        directed graph whose edges are the edges of `G` oriented toward
        the root if the edge is a tree edge and away from the root if
        the edge is a non-tree edge. If `root` is not specified, this
        performs a depth-first search on each connected component of `G`
        and returns a directed forest instead.

        If `root` is not in the graph, this raises :exc:`KeyError`.

        """
        # Create a directed graph from the depth-first search tree with
        # root node `root` in which tree edges are directed toward the
        # root and nontree edges are directed away from the root. For
        # each node with an incident nontree edge, this creates a
        # directed cycle starting with the nontree edge and returning to
        # that node.
        #
        # The `parent` node attribute stores the parent of each node in
        # the DFS tree. The `nontree` edge attribute indicates whether
        # the edge is a tree edge or a nontree edge.
        #
        # We also store the order of the nodes found in the depth-first
        # search in the `nodes` list.
        H = nx.DiGraph()
        nodes = []
        for u, v, d in nx.dfs_labeled_edges(G, source=root):
            if d == "forward":
                # `dfs_labeled_edges()` yields (root, root, 'forward')
                # if it is beginning the search on a new connected
                # component.
                if u == v:
                    H.add_node(v, parent=None)
                    nodes.append(v)
                else:
                    H.add_node(v, parent=u)
                    # Tree edge: oriented child -> parent, i.e. toward
                    # the root of its component.
                    H.add_edge(v, u, nontree=False)
                    nodes.append(v)
            # `dfs_labeled_edges` considers nontree edges in both
            # orientations, so we need to not add the edge if its
            # other orientation has been added.
            elif d == "nontree" and v not in H[u]:
                H.add_edge(v, u, nontree=True)
            else:
                # Do nothing on 'reverse' edges; we only care about
                # forward and nontree edges.
                pass
        return H, nodes

    def _build_chain(G, u, v, visited):
        """Generate the chain starting from the given nontree edge.

        `G` is a DFS cycle graph as constructed by
        :func:`_dfs_cycle_graph`. The edge (`u`, `v`) is a nontree edge
        that begins a chain. `visited` is a set representing the nodes
        in `G` that have already been visited.

        This function yields the edges in an initial segment of the
        fundamental cycle of `G` starting with the nontree edge (`u`,
        `v`) that includes all the edges up until the first node that
        appears in `visited`. The tree edges are given by the 'parent'
        node attribute. The `visited` set is updated to add each node in
        an edge yielded by this function.

        """
        while v not in visited:
            yield u, v
            visited.add(v)
            # Walk up the DFS tree toward the root via the parent links.
            u, v = v, G.nodes[v]["parent"]
        yield u, v

    # Check if the root is in the graph G. If not, raise NodeNotFound
    if root is not None and root not in G:
        raise nx.NodeNotFound(f"Root node {root} is not in graph")

    # Create a directed version of H that has the DFS edges directed
    # toward the root and the nontree edges directed away from the root
    # (in each connected component).
    H, nodes = _dfs_cycle_forest(G, root)

    # Visit the nodes again in DFS order. For each node, and for each
    # nontree edge leaving that node, compute the fundamental cycle for
    # that nontree edge starting with that edge. If the fundamental
    # cycle overlaps with any visited nodes, just take the prefix of the
    # cycle up to the point of visited nodes.
    #
    # We repeat this process for each connected component (implicitly,
    # since `nodes` already has a list of the nodes grouped by connected
    # component).
    visited = set()
    for u in nodes:
        visited.add(u)
        # For each nontree edge going out of node u...
        edges = ((u, v) for u, v, d in H.out_edges(u, data="nontree") if d)
        for u, v in edges:
            # Create the cycle or cycle prefix starting with the
            # nontree edge.
            chain = list(_build_chain(H, u, v, visited))
            yield chain
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/chordal.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/chordal.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bd3ccd2ea3eb3bbca170313dd6ec02c433d6a38
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/chordal.py
@@ -0,0 +1,442 @@
+"""
+Algorithms for chordal graphs.
+
+A graph is chordal if every cycle of length at least 4 has a chord
+(an edge joining two nodes not adjacent in the cycle).
+https://en.wikipedia.org/wiki/Chordal_graph
+"""
+import sys
+
+import networkx as nx
+from networkx.algorithms.components import connected_components
+from networkx.utils import arbitrary_element, not_implemented_for
+
+__all__ = [
+ "is_chordal",
+ "find_induced_nodes",
+ "chordal_graph_cliques",
+ "chordal_graph_treewidth",
+ "NetworkXTreewidthBoundExceeded",
+ "complete_to_chordal_graph",
+]
+
+
class NetworkXTreewidthBoundExceeded(nx.NetworkXException):
    """Exception raised when a treewidth bound has been provided and it has
    been exceeded.

    Raised while searching for chordality breakers (see
    ``_find_chordality_breaker`` below) when the running treewidth
    estimate exceeds the caller-supplied ``treewidth_bound``.
    """
+
+
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatchable
def is_chordal(G):
    """Checks whether G is a chordal graph.

    A graph is chordal if every cycle of length at least 4 has a chord
    (an edge joining two nodes not adjacent in the cycle).

    Parameters
    ----------
    G : graph
        A NetworkX graph.

    Returns
    -------
    chordal : bool
        True if G is a chordal graph and False otherwise.

    Raises
    ------
    NetworkXNotImplemented
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.

    Examples
    --------
    >>> e = [
    ...     (1, 2),
    ...     (1, 3),
    ...     (2, 3),
    ...     (2, 4),
    ...     (3, 4),
    ...     (3, 5),
    ...     (3, 6),
    ...     (4, 5),
    ...     (4, 6),
    ...     (5, 6),
    ... ]
    >>> G = nx.Graph(e)
    >>> nx.is_chordal(G)
    True

    Notes
    -----
    The routine tries to go through every node following maximum cardinality
    search. It returns False when it finds that the separator for any node
    is not a clique. Based on the algorithms in [1]_.

    Self loops are ignored.

    References
    ----------
    .. [1] R. E. Tarjan and M. Yannakakis, Simple linear-time algorithms
       to test chordality of graphs, test acyclicity of hypergraphs, and
       selectively reduce acyclic hypergraphs, SIAM J. Comput., 13 (1984),
       pp. 566–579.
    """
    # A graph on fewer than four nodes cannot contain a chordless cycle of
    # length >= 4, so it is chordal by definition.
    if len(G.nodes) <= 3:
        return True
    # Chordal iff maximum cardinality search finds no chordality breaker
    # (an empty tuple is returned in that case).
    return not _find_chordality_breaker(G)
+
+
@nx._dispatchable
def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
    """Returns the set of induced nodes in the path from s to t.

    Parameters
    ----------
    G : graph
        A chordal NetworkX graph
    s : node
        Source node to look for induced nodes
    t : node
        Destination node to look for induced nodes
    treewidth_bound: float
        Maximum treewidth acceptable for the graph H. The search
        for induced nodes will end as soon as the treewidth_bound is exceeded.

    Returns
    -------
    induced_nodes : Set of nodes
        The set of induced nodes in the path from s to t in G

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
        If the input graph is an instance of one of these classes, a
        :exc:`NetworkXError` is raised.
        The algorithm can only be applied to chordal graphs. If the input
        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G = nx.generators.classic.path_graph(10)
    >>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
    >>> sorted(induced_nodes)
    [1, 2, 3, 4, 5, 6, 7, 8, 9]

    Notes
    -----
    G must be a chordal graph and (s,t) an edge that is not in G.

    If a treewidth_bound is provided, the search for induced nodes will end
    as soon as the treewidth_bound is exceeded.

    The algorithm is inspired by Algorithm 4 in [1]_.
    A formal definition of induced node can also be found on that reference.

    Self Loops are ignored

    References
    ----------
    .. [1] Learning Bounded Treewidth Bayesian Networks.
       Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
       http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
    """
    if not is_chordal(G):
        raise nx.NetworkXError("Input graph is not chordal.")

    # Work on a copy of G with the extra edge (s, t); repeatedly connect s to
    # every chordality breaker this edge creates until the copy is chordal
    # again. All nodes touched along the way are induced nodes.
    H = nx.Graph(G)
    H.add_edge(s, t)
    induced_nodes = set()
    triplet = _find_chordality_breaker(H, s, treewidth_bound)
    while triplet:
        induced_nodes.update(triplet)
        for node in triplet:
            if node != s:
                H.add_edge(s, node)
        triplet = _find_chordality_breaker(H, s, treewidth_bound)
    if induced_nodes:
        # Add t and the second node in the induced path from s to t.
        induced_nodes.add(t)
        for u in G[s]:
            if len(induced_nodes & set(G[u])) == 2:
                induced_nodes.add(u)
                break
    return induced_nodes
+
+
@nx._dispatchable
def chordal_graph_cliques(G):
    """Returns all maximal cliques of a chordal graph.

    The algorithm breaks the graph in connected components and performs a
    maximum cardinality search in each component to get the cliques.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Yields
    ------
    frozenset of nodes
        Maximal cliques, each of which is a frozenset of
        nodes in `G`. The order of cliques is arbitrary.

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
        The algorithm can only be applied to chordal graphs. If the input
        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.

    Examples
    --------
    >>> e = [
    ...     (1, 2),
    ...     (1, 3),
    ...     (2, 3),
    ...     (2, 4),
    ...     (3, 4),
    ...     (3, 5),
    ...     (3, 6),
    ...     (4, 5),
    ...     (4, 6),
    ...     (5, 6),
    ...     (7, 8),
    ... ]
    >>> G = nx.Graph(e)
    >>> G.add_node(9)
    >>> cliques = [c for c in chordal_graph_cliques(G)]
    >>> cliques[0]
    frozenset({1, 2, 3})
    """
    for C in (G.subgraph(c).copy() for c in connected_components(G)):
        if C.number_of_nodes() == 1:
            # A single node with a self loop is rejected; otherwise the
            # isolated node is itself a (trivial) maximal clique.
            if nx.number_of_selfloops(C) > 0:
                raise nx.NetworkXError("Input graph is not chordal.")
            yield frozenset(C.nodes())
        else:
            unnumbered = set(C.nodes())
            v = arbitrary_element(C)
            unnumbered.remove(v)
            numbered = {v}
            # Candidate clique being grown during the search.
            clique_wanna_be = {v}
            while unnumbered:
                # Maximum cardinality search: pick the unnumbered node with
                # the most already-numbered neighbors.
                v = _max_cardinality_node(C, unnumbered, numbered)
                unnumbered.remove(v)
                numbered.add(v)
                new_clique_wanna_be = set(C.neighbors(v)) & numbered
                sg = C.subgraph(clique_wanna_be)
                if _is_complete_graph(sg):
                    new_clique_wanna_be.add(v)
                    # Yield the previous candidate once it stops growing;
                    # at that point it is maximal.
                    if not new_clique_wanna_be >= clique_wanna_be:
                        yield frozenset(clique_wanna_be)
                    clique_wanna_be = new_clique_wanna_be
                else:
                    # In a chordal graph the numbered neighbors of v must
                    # form a clique; otherwise G is not chordal.
                    raise nx.NetworkXError("Input graph is not chordal.")
            yield frozenset(clique_wanna_be)
+
+
@nx._dispatchable
def chordal_graph_treewidth(G):
    """Returns the treewidth of the chordal graph G.

    Parameters
    ----------
    G : graph
        A NetworkX graph

    Returns
    -------
    treewidth : int
        The size of the largest clique in the graph minus one.

    Raises
    ------
    NetworkXError
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
        The algorithm can only be applied to chordal graphs. If the input
        graph is found to be non-chordal, a :exc:`NetworkXError` is raised.

    Examples
    --------
    >>> e = [
    ...     (1, 2),
    ...     (1, 3),
    ...     (2, 3),
    ...     (2, 4),
    ...     (3, 4),
    ...     (3, 5),
    ...     (3, 6),
    ...     (4, 5),
    ...     (4, 6),
    ...     (5, 6),
    ...     (7, 8),
    ... ]
    >>> G = nx.Graph(e)
    >>> G.add_node(9)
    >>> nx.chordal_graph_treewidth(G)
    3
    >>> G = nx.Graph(e)
    >>> G.add_node(9)
    >>> nx.chordal_graph_treewidth(G)
    3

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Tree_decomposition#Treewidth
    """
    if not is_chordal(G):
        raise nx.NetworkXError("Input graph is not chordal.")

    # For a chordal graph the treewidth equals the size of the largest
    # maximal clique minus one. `default=-1` mirrors the original
    # accumulator's starting value for a graph yielding no cliques.
    clique_sizes = (len(clique) for clique in nx.chordal_graph_cliques(G))
    return max(clique_sizes, default=-1) - 1
+
+
def _is_complete_graph(G):
    """Returns True if G is a complete graph."""
    if nx.number_of_selfloops(G) > 0:
        raise nx.NetworkXError("Self loop found in _is_complete_graph()")
    n = G.number_of_nodes()
    if n < 2:
        # Zero or one node: trivially complete.
        return True
    # A complete simple graph on n nodes has n * (n - 1) / 2 edges.
    return G.number_of_edges() == (n * (n - 1)) / 2
+
+
+def _find_missing_edge(G):
+ """Given a non-complete graph G, returns a missing edge."""
+ nodes = set(G)
+ for u in G:
+ missing = nodes - set(list(G[u].keys()) + [u])
+ if missing:
+ return (u, missing.pop())
+
+
+def _max_cardinality_node(G, choices, wanna_connect):
+ """Returns a the node in choices that has more connections in G
+ to nodes in wanna_connect.
+ """
+ max_number = -1
+ for x in choices:
+ number = len([y for y in G[x] if y in wanna_connect])
+ if number > max_number:
+ max_number = number
+ max_cardinality_node = x
+ return max_cardinality_node
+
+
def _find_chordality_breaker(G, s=None, treewidth_bound=sys.maxsize):
    """Given a graph G, starts a max cardinality search
    (starting from s if s is given and from an arbitrary node otherwise)
    trying to find a non-chordal cycle.

    If it does find one, it returns (u,v,w) where u,v,w are the three
    nodes that together with s are involved in the cycle.

    Returns an empty tuple when no breaker is found (G is chordal from
    this search). Raises NetworkXTreewidthBoundExceeded when the running
    treewidth estimate exceeds `treewidth_bound`.

    It ignores any self loops.
    """
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("Graph has no nodes.")
    unnumbered = set(G)
    if s is None:
        s = arbitrary_element(G)
    unnumbered.remove(s)
    numbered = {s}
    current_treewidth = -1
    # NOTE: the bound is enforced by the raise inside the loop rather than
    # by the (commented-out) loop condition.
    while unnumbered:  # and current_treewidth <= treewidth_bound:
        # Maximum cardinality search: next node is the unnumbered one with
        # the most already-numbered neighbors.
        v = _max_cardinality_node(G, unnumbered, numbered)
        unnumbered.remove(v)
        numbered.add(v)
        clique_wanna_be = set(G[v]) & numbered
        sg = G.subgraph(clique_wanna_be)
        if _is_complete_graph(sg):
            # The graph seems to be chordal by now. We update the treewidth
            current_treewidth = max(current_treewidth, len(clique_wanna_be))
            if current_treewidth > treewidth_bound:
                raise nx.NetworkXTreewidthBoundExceeded(
                    f"treewidth_bound exceeded: {current_treewidth}"
                )
        else:
            # sg is not a clique,
            # look for an edge that is not included in sg
            (u, w) = _find_missing_edge(sg)
            return (u, v, w)
    return ()
+
+
@not_implemented_for("directed")
@nx._dispatchable(returns_graph=True)
def complete_to_chordal_graph(G):
    """Return a copy of G completed to a chordal graph

    Adds edges to a copy of G to create a chordal graph. A graph G=(V,E) is
    called chordal if for each cycle with length bigger than 3, there exist
    two non-adjacent nodes connected by an edge (called a chord).

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    H : NetworkX graph
        The chordal enhancement of G
    alpha : Dictionary
        The elimination ordering of nodes of G

    Notes
    -----
    There are different approaches to calculate the chordal
    enhancement of a graph. The algorithm used here is called
    MCS-M and gives at least minimal (local) triangulation of graph. Note
    that this triangulation is not necessarily a global minimum.

    https://en.wikipedia.org/wiki/Chordal_graph

    References
    ----------
    .. [1] Berry, Anne & Blair, Jean & Heggernes, Pinar & Peyton, Barry. (2004)
       Maximum Cardinality Search for Computing Minimal Triangulations of
       Graphs. Algorithmica. 39. 287-298. 10.1007/s00453-004-1084-3.

    Examples
    --------
    >>> from networkx.algorithms.chordal import complete_to_chordal_graph
    >>> G = nx.wheel_graph(10)
    >>> H, alpha = complete_to_chordal_graph(G)
    """
    H = G.copy()
    alpha = {node: 0 for node in H}
    # Already chordal: nothing to add; all elimination numbers stay 0.
    if nx.is_chordal(H):
        return H, alpha
    chords = set()
    # MCS-M: `weight` counts, per node, how many times it was reachable from
    # a numbered node through lower-weight paths; nodes are numbered from
    # len(G) down to 1 in `alpha`.
    weight = {node: 0 for node in H.nodes()}
    unnumbered_nodes = list(H.nodes())
    for i in range(len(H.nodes()), 0, -1):
        # get the node in unnumbered_nodes with the maximum weight
        z = max(unnumbered_nodes, key=lambda node: weight[node])
        unnumbered_nodes.remove(z)
        alpha[z] = i
        update_nodes = []
        for y in unnumbered_nodes:
            if G.has_edge(y, z):
                update_nodes.append(y)
            else:
                # y_weight will be bigger than node weights between y and z
                y_weight = weight[y]
                lower_nodes = [
                    node for node in unnumbered_nodes if weight[node] < y_weight
                ]
                # A path from y to z through strictly lower-weight nodes
                # forces the chord (z, y).
                if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z):
                    update_nodes.append(y)
                    chords.add((z, y))
        # during calculation of paths the weights should not be updated
        for node in update_nodes:
            weight[node] += 1
    H.add_edges_from(chords)
    return H, alpha
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/clique.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/clique.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f959dd46582346735c03128767f11aa5d13f808
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/clique.py
@@ -0,0 +1,754 @@
+"""Functions for finding and manipulating cliques.
+
+Finding the largest clique in a graph is NP-complete problem, so most of
+these algorithms have an exponential running time; for more information,
+see the Wikipedia article on the clique problem [1]_.
+
+.. [1] clique problem: https://en.wikipedia.org/wiki/Clique_problem
+
+"""
+from collections import defaultdict, deque
+from itertools import chain, combinations, islice
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "find_cliques",
+ "find_cliques_recursive",
+ "make_max_clique_graph",
+ "make_clique_bipartite",
+ "node_clique_number",
+ "number_of_cliques",
+ "enumerate_all_cliques",
+ "max_weight_clique",
+]
+
+
@not_implemented_for("directed")
@nx._dispatchable
def enumerate_all_cliques(G):
    """Yield every clique of an undirected graph, smallest first.

    The cliques are produced in order of non-decreasing cardinality:
    all cliques of size one, then all cliques of size two, and so on.
    Each clique is a list of nodes of `G`.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    Returns
    -------
    iterator
        An iterator over cliques (lists of nodes in `G`), ordered by
        size.

    Notes
    -----
    The number of cliques can be exponential in the number of nodes
    (e.g. for the complete graph), so only materialize the iterator
    with ``list(...)`` when that is known to be safe.  Only the
    candidate node lists currently being extended are kept in memory.

    Self-loops and parallel edges are ignored, since cliques are not
    conventionally defined with such edges.

    The search is adapted from the algorithm of Zhang et al. (2005)
    [1]_ so that every clique discovered along the way is reported.

    References
    ----------
    .. [1] Yun Zhang, Abu-Khzam, F.N., Baldwin, N.E., Chesler, E.J.,
       Langston, M.A., Samatova, N.F.,
       "Genome-Scale Computational Approaches to Memory-Intensive
       Applications in Systems Biology".
       *Supercomputing*, 2005. Proceedings of the ACM/IEEE SC 2005
       Conference, pp. 12, 12--18 Nov. 2005.

    """
    rank = {}
    later_nbrs = {}
    for node in G:
        rank[node] = len(rank)
        # Keep only neighbors not yet ranked, i.e. those that appear
        # after `node` in the iteration order of G.
        later_nbrs[node] = {nbr for nbr in G[node] if nbr not in rank}

    work = deque(
        ([node], sorted(later_nbrs[node], key=rank.__getitem__)) for node in G
    )
    # Invariants while the queue is drained:
    #   1. clique sizes leave the queue in non-decreasing order;
    #   2. clique + extensions is sorted by the iteration order of G;
    #   3. every node in `extensions` is adjacent to all of `clique`.
    while work:
        clique, extensions = map(list, work.popleft())
        yield clique
        for pos, node in enumerate(extensions):
            # Enqueue lazy iterables to reduce memory consumption.
            work.append(
                (
                    chain(clique, [node]),
                    filter(
                        later_nbrs[node].__contains__,
                        islice(extensions, pos + 1, None),
                    ),
                )
            )
+
+
@not_implemented_for("directed")
@nx._dispatchable
def find_cliques(G, nodes=None):
    """Yield all maximal cliques of an undirected graph.

    For each node *n*, a *maximal clique for n* is a largest complete
    subgraph containing *n*.  The largest maximal clique is sometimes
    called the *maximum clique*.

    This is an iterative (explicit-stack) implementation of the
    Bron--Kerbosch algorithm with the pivoting strategy of Tomita,
    Tanaka and Takahashi [2]_ (see also Cazals and Karande [3]_), so it
    does not suffer from recursion depth issues.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nodes : list, optional (default=None)
        If provided, only yield *maximal cliques* containing all nodes
        in `nodes`.  Restricting the search this way can considerably
        speed up the running time.  If `nodes` isn't a clique itself,
        a ValueError is raised.

    Returns
    -------
    iterator
        An iterator over maximal cliques, each of which is a list of
        nodes in `G`.  The order of cliques is arbitrary.

    Raises
    ------
    ValueError
        If `nodes` is not a clique.

    See Also
    --------
    find_cliques_recursive
        A recursive version of the same algorithm.

    Notes
    -----
    In the worst case the number of maximal cliques is exponential in
    the number of nodes; only the current candidate node sets are held
    in memory during the search.  Self-loops and parallel edges are
    ignored, since cliques are not conventionally defined with such
    edges.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J.
       "Algorithm 457: finding all cliques of an undirected graph".
       *Communications of the ACM* 16, 9 (Sep. 1973), 575--577.

    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       "The worst-case time complexity for generating all maximal
       cliques and computational experiments",
       *Theoretical Computer Science*, Volume 363, Issue 1,
       25 October 2006, Pages 28--42.

    .. [3] F. Cazals, C. Karande,
       "A note on the problem of reporting maximal cliques",
       *Theoretical Computer Science*,
       Volume 407, Issues 1--3, 6 November 2008, Pages 564--568.

    """
    if len(G) == 0:
        return

    # Adjacency sets with any self-loops stripped.
    adjacency = {node: {nbr for nbr in G[node] if nbr != node} for node in G}

    # Seed the clique with the required nodes (if any) and restrict the
    # candidate set to their common neighborhood.
    clique = [] if nodes is None else nodes[:]
    candidates = set(G)
    for required in clique:
        if required not in candidates:
            raise ValueError(f"The given `nodes` {nodes} do not form a clique")
        candidates &= adjacency[required]

    if not candidates:
        yield clique[:]
        return

    remaining = candidates.copy()
    saved_state = []
    clique.append(None)

    # Pivot on the node with the most candidate neighbors, so that as
    # few branches as possible need to be explored.
    pivot = max(remaining, key=lambda n: len(candidates & adjacency[n]))
    to_extend = candidates - adjacency[pivot]

    try:
        while True:
            if to_extend:
                chosen = to_extend.pop()
                candidates.remove(chosen)
                clique[-1] = chosen
                chosen_adj = adjacency[chosen]
                new_remaining = remaining & chosen_adj
                if not new_remaining:
                    # `clique` cannot be extended further: it is maximal.
                    yield clique[:]
                else:
                    new_candidates = candidates & chosen_adj
                    if new_candidates:
                        # Descend one level: save this level's state.
                        saved_state.append((remaining, candidates, to_extend))
                        clique.append(None)
                        remaining = new_remaining
                        candidates = new_candidates
                        pivot = max(
                            remaining,
                            key=lambda n: len(candidates & adjacency[n]),
                        )
                        to_extend = candidates - adjacency[pivot]
            else:
                # This level is exhausted; backtrack to the previous one.
                clique.pop()
                remaining, candidates, to_extend = saved_state.pop()
    except IndexError:
        # The state stack underflowed: the search space is fully explored.
        pass
+
+
# TODO Should this also be not implemented for directed graphs?
@nx._dispatchable
def find_cliques_recursive(G, nodes=None):
    """Yield all maximal cliques of a graph, recursively.

    Behaves exactly like :func:`find_cliques` but uses the recursive
    formulation of the Bron--Kerbosch algorithm with pivoting [1]_
    [2]_ [3]_.  It is included mainly for pedagogical reasons and may
    suffer from recursion depth issues on graphs with very large
    cliques; for a non-recursive implementation, see
    :func:`find_cliques`.

    Parameters
    ----------
    G : NetworkX graph

    nodes : list, optional (default=None)
        If provided, only yield *maximal cliques* containing all nodes
        in `nodes`.  Restricting the search this way can considerably
        speed up the running time.  If `nodes` isn't a clique itself,
        a ValueError is raised.

    Returns
    -------
    iterator
        An iterator over maximal cliques, each of which is a list of
        nodes in `G`.  The order of cliques is arbitrary.

    Raises
    ------
    ValueError
        If `nodes` is not a clique.

    See Also
    --------
    find_cliques
        An iterative version of the same algorithm. See docstring for
        examples.

    Notes
    -----
    To obtain a list of all maximal cliques, use
    ``list(find_cliques_recursive(G))``; be aware the result can be
    exponentially large in the number of nodes.  Self-loops and
    parallel edges are ignored, since cliques are not conventionally
    defined with such edges.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J.
       "Algorithm 457: finding all cliques of an undirected graph".
       *Communications of the ACM* 16, 9 (Sep. 1973), 575--577.

    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       "The worst-case time complexity for generating all maximal
       cliques and computational experiments",
       *Theoretical Computer Science*, Volume 363, Issue 1,
       25 October 2006, Pages 28--42.

    .. [3] F. Cazals, C. Karande,
       "A note on the problem of reporting maximal cliques",
       *Theoretical Computer Science*,
       Volume 407, Issues 1--3, 6 November 2008, Pages 564--568.

    """
    if len(G) == 0:
        return iter([])

    # Adjacency sets with any self-loops stripped.
    adjacency = {node: {nbr for nbr in G[node] if nbr != node} for node in G}

    # Seed the clique with the required nodes (if any) and shrink the
    # candidate set to their common neighborhood.
    clique = [] if nodes is None else nodes[:]
    candidates = set(G)
    for required in clique:
        if required not in candidates:
            raise ValueError(f"The given `nodes` {nodes} do not form a clique")
        candidates &= adjacency[required]

    if not candidates:
        return iter([clique])

    def expand(subgraph_nodes, cand):
        # Pivot on the node with the most candidate neighbors so that
        # as few branches as possible are explored.
        pivot = max(subgraph_nodes, key=lambda n: len(cand & adjacency[n]))
        for node in cand - adjacency[pivot]:
            cand.remove(node)
            clique.append(node)
            node_adj = adjacency[node]
            new_subgraph = subgraph_nodes & node_adj
            if not new_subgraph:
                # `clique` is maximal: report a copy of it.
                yield clique[:]
            else:
                new_cand = cand & node_adj
                if new_cand:
                    yield from expand(new_subgraph, new_cand)
            clique.pop()

    return expand(candidates.copy(), candidates)
+
+
@nx._dispatchable(returns_graph=True)
def make_max_clique_graph(G, create_using=None):
    """Returns the maximal clique graph of the given graph.

    Each node of the returned graph represents one maximal clique of
    `G`; two clique-nodes are joined by an edge exactly when the two
    cliques share at least one node of `G`.

    Parameters
    ----------
    G : NetworkX graph

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before
        populated.

    Returns
    -------
    NetworkX graph
        A graph whose nodes are the (0-indexed) maximal cliques of `G`
        and whose edges join pairs of overlapping cliques.

    Notes
    -----
    This produces the same result as building the clique bipartite
    graph (:func:`make_clique_bipartite`), projecting it onto the
    clique side, and relabeling — but skips the intermediate steps and
    is therefore faster.

    """
    out = G.__class__() if create_using is None else nx.empty_graph(0, create_using)
    indexed = list(enumerate(set(c) for c in find_cliques(G)))
    # One node per maximal clique, labeled by its index.
    out.add_nodes_from(idx for idx, _ in indexed)
    # Connect every pair of cliques that intersect.
    out.add_edges_from(
        (i, j)
        for (i, members_i), (j, members_j) in combinations(indexed, 2)
        if members_i & members_j
    )
    return out
+
+
@nx._dispatchable(returns_graph=True)
def make_clique_bipartite(G, fpos=None, create_using=None, name=None):
    """Returns the bipartite node/clique incidence graph of `G`.

    The "bottom" nodes of the returned graph are the nodes of `G`
    (node attribute ``bipartite=1``) and the "top" nodes are its
    maximal cliques, labeled by the negative integers -1, -2, ...
    (node attribute ``bipartite=0``), following the NetworkX bipartite
    convention.  There is an edge from node *v* to clique *C* if and
    only if *v* is an element of *C*.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    fpos : bool
        If True or not None, the returned graph will have an
        additional attribute, `pos`, a dictionary mapping node to
        position in the Euclidean plane.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before
        populated.

    Returns
    -------
    NetworkX graph
        The bipartite incidence graph described above.

    """
    bipartite_graph = nx.empty_graph(0, create_using)
    bipartite_graph.clear()
    # The original nodes of G form the "bottom" set.
    bipartite_graph.add_nodes_from(G, bipartite=1)
    for idx, members in enumerate(find_cliques(G)):
        # Cliques form the "top" set and are labeled -1, -2, ...
        label = -idx - 1
        bipartite_graph.add_node(label, bipartite=0)
        bipartite_graph.add_edges_from((member, label) for member in members)
    return bipartite_graph
+
+
@nx._dispatchable
def node_clique_number(G, nodes=None, cliques=None, separate_nodes=False):
    """Returns the size of the largest maximal clique containing each given node.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nodes : node or iterable of nodes, optional (default=None)
        A single node, an iterable of nodes, or None for all nodes.

    cliques : list, optional (default=None)
        A list of cliques, each of which is itself a list of nodes.
        If not specified, the list of all cliques will be computed
        using :func:`find_cliques`.

    Returns
    -------
    int or dict
        If `nodes` is a single node, the size of the largest maximal
        clique in `G` containing that node; otherwise a dict keyed by
        node giving that size for each requested node.

    See Also
    --------
    find_cliques
        find_cliques yields the maximal cliques of G.
        It accepts a `nodes` argument which restricts consideration to
        maximal cliques containing all the given `nodes`.
        The search for the cliques is optimized for `nodes`.
    """
    if cliques is None:
        if nodes is not None:
            # Restrict the search to each requested node's ego graph;
            # much cheaper than enumerating all cliques of G.
            if nodes in G:
                # A single node was given.
                return max(len(c) for c in find_cliques(nx.ego_graph(G, nodes)))
            return {
                node: max(len(c) for c in find_cliques(nx.ego_graph(G, node)))
                for node in nodes
            }
        # All nodes were requested: enumerate every maximal clique once.
        cliques = list(find_cliques(G))

    if nodes in G:
        # A single node was given.
        return max(len(c) for c in cliques if nodes in c)

    # Several nodes (or all of them): one sweep over the cliques is
    # faster than a per-node scan, even for just two nodes.
    best = defaultdict(int)
    for members in cliques:
        size = len(members)
        for member in members:
            if best[member] < size:
                best[member] = size
    if nodes is None:
        return best
    return {node: best[node] for node in nodes}
+
+
def number_of_cliques(G, nodes=None, cliques=None):
    """Returns the number of maximal cliques each given node belongs to.

    Parameters
    ----------
    G : NetworkX graph
        An undirected graph.

    nodes : node or list of nodes, optional (default=None)
        A single node, a list of nodes, or None for all nodes of `G`.

    cliques : list, optional (default=None)
        A precomputed list of maximal cliques, each a list of nodes.
        If None, the cliques are computed with :func:`find_cliques`.

    Returns
    -------
    int or dict
        If `nodes` is a single node, the number of maximal cliques
        containing it.  Otherwise a dict keyed by node giving the
        number of maximal cliques containing each requested node.
        (The previous docstring claimed a list was returned; it has
        always been a dict.)
    """
    if cliques is None:
        cliques = list(find_cliques(G))

    if nodes is None:
        nodes = list(G.nodes())  # None means the entire graph.

    if not isinstance(nodes, list):
        # A single node was given: return a bare count.  Use a
        # generator sum instead of building a throwaway list.
        return sum(1 for c in cliques if nodes in c)

    # A list of nodes was given: return a count per node.
    return {v: sum(1 for c in cliques if v in c) for v in nodes}
+
+
class MaxWeightClique:
    """A class for the maximum weight clique algorithm.

    This class is a helper for the `max_weight_clique` function. The class
    should not normally be used directly.

    Parameters
    ----------
    G : NetworkX graph
        The undirected graph for which a maximum weight clique is sought
    weight : string or None, optional (default='weight')
        The node attribute that holds the integer value used as a weight.
        If None, then each node has weight 1.

    Attributes
    ----------
    G : NetworkX graph
        The undirected graph for which a maximum weight clique is sought
    node_weights: dict
        The weight of each node
    incumbent_nodes : list
        The nodes of the incumbent clique (the best clique found so far)
    incumbent_weight: int
        The weight of the incumbent clique
    """

    def __init__(self, G, weight):
        self.G = G
        self.incumbent_nodes = []
        self.incumbent_weight = 0

        if weight is None:
            # Unweighted: every node counts 1.
            self.node_weights = dict.fromkeys(G.nodes(), 1)
        else:
            # Validate that every node carries an integer weight.
            for node in G.nodes():
                if weight not in G.nodes[node]:
                    raise KeyError(
                        f"Node {node!r} does not have the requested weight field."
                    )
                if not isinstance(G.nodes[node][weight], int):
                    raise ValueError(
                        f"The {weight!r} field of node {node!r} is not an integer."
                    )
            self.node_weights = {node: G.nodes[node][weight] for node in G.nodes()}

    def update_incumbent_if_improved(self, C, C_weight):
        """Record C as the new incumbent if it outweighs the current one.

        C is assumed to be a clique.
        """
        if C_weight > self.incumbent_weight:
            self.incumbent_weight = C_weight
            self.incumbent_nodes = C[:]

    def greedily_find_independent_set(self, P):
        """Greedily pick a maximal independent set from the nodes in P."""
        chosen = []
        pool = P[:]
        while pool:
            head = pool[0]
            chosen.append(head)
            # Keep only nodes distinct from, and non-adjacent to, the
            # node just chosen.
            pool = [n for n in pool if head != n and not self.G.has_edge(head, n)]
        return chosen

    def find_branching_nodes(self, P, target):
        """Find a set of nodes to branch on.

        Repeatedly peels greedy weighted independent sets off P; nodes
        fully covered before the accumulated bound exceeds `target`
        cannot improve the incumbent and are pruned.
        """
        residual_wt = {node: self.node_weights[node] for node in P}
        accumulated = 0
        pool = P[:]
        while pool:
            indep = self.greedily_find_independent_set(pool)
            class_wt = min(residual_wt[node] for node in indep)
            accumulated += class_wt
            if accumulated > target:
                break
            for node in indep:
                residual_wt[node] -= class_wt
            pool = [node for node in pool if residual_wt[node] != 0]
        return pool

    def expand(self, C, C_weight, P):
        """Grow clique C with nodes from P, pruning via the incumbent.

        Looks for the best clique containing all the nodes in C and
        zero or more of the nodes in P, backtracking whenever the bound
        shows no such clique can outweigh the incumbent.
        """
        self.update_incumbent_if_improved(C, C_weight)
        branch = self.find_branching_nodes(P, self.incumbent_weight - C_weight)
        while branch:
            node = branch.pop()
            P.remove(node)
            self.expand(
                C + [node],
                C_weight + self.node_weights[node],
                [w for w in P if self.G.has_edge(node, w)],
            )

    def find_max_weight_clique(self):
        """Run the branch-and-bound search for a maximum weight clique."""
        # Visiting high-degree nodes first tends to tighten the bound early.
        by_degree = sorted(self.G.nodes(), key=lambda v: self.G.degree(v), reverse=True)
        positive = [v for v in by_degree if self.node_weights[v] > 0]
        self.expand([], 0, positive)
+
+
@not_implemented_for("directed")
@nx._dispatchable(node_attrs="weight")
def max_weight_clique(G, weight="weight"):
    """Find a maximum weight clique in G.

    A *clique* in a graph is a set of nodes such that every two distinct
    nodes are adjacent; its *weight* is the sum of the weights of its
    nodes.  This returns a clique of `G` whose weight is maximum over
    all cliques of `G`.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph
    weight : string or None, optional (default='weight')
        The node attribute that holds the integer value used as a weight.
        If None, then each node has weight 1.

    Returns
    -------
    clique : list
        the nodes of a maximum weight clique
    weight : int
        the weight of a maximum weight clique

    Notes
    -----
    The implementation is recursive, and therefore it may run into
    recursion depth issues if G contains a clique whose number of nodes
    is close to the recursion depth limit.

    At each search node the algorithm greedily constructs a weighted
    independent set cover of part of the graph in order to find a small
    set of nodes on which to branch.  It closely follows Tavares et
    al. [1]_, except that it does not use bitsets; see also Algorithm B
    of Warren and Hicks [2]_ and the references in that paper.

    References
    ----------
    .. [1] Tavares, W.A., Neto, M.B.C., Rodrigues, C.D., Michelon, P.: Um
           algoritmo de branch and bound para o problema da clique máxima
           ponderada. Proceedings of XLVII SBPO 1 (2015).

    .. [2] Warren, Jeffrey S, Hicks, Illya V.: Combinatorial Branch-and-Bound
           for the Maximum Weight Independent Set Problem. Technical Report,
           Texas A&M University (2016).
    """
    searcher = MaxWeightClique(G, weight)
    searcher.find_max_weight_clique()
    return searcher.incumbent_nodes, searcher.incumbent_weight
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/triads.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/triads.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e67c145362bb14b9cc35770232d0bf1f97a611a
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/triads.py
@@ -0,0 +1,604 @@
+# See https://github.com/networkx/networkx/pull/1474
+# Copyright 2011 Reya Group
+# Copyright 2011 Alex Levenson
+# Copyright 2011 Diederik van Liere
+"""Functions for analyzing triads of a graph."""
+
+from collections import defaultdict
+from itertools import combinations, permutations
+
+import networkx as nx
+from networkx.utils import not_implemented_for, py_random_state
+
+__all__ = [
+ "triadic_census",
+ "is_triad",
+ "all_triplets",
+ "all_triads",
+ "triads_by_type",
+ "triad_type",
+ "random_triad",
+]
+
#: The integer codes representing each type of triad.
#:
#: Triads that are the same up to symmetry have the same code.
# fmt: off
TRICODES = (
    1, 2, 2, 3, 2, 4, 6, 8, 2, 6, 5, 7, 3, 8, 7, 11,
    2, 6, 4, 8, 5, 9, 9, 13, 6, 10, 9, 14, 7, 14, 12, 15,
    2, 5, 6, 7, 6, 9, 10, 14, 4, 9, 9, 12, 8, 13, 14, 15,
    3, 7, 8, 11, 7, 12, 14, 15, 8, 14, 13, 15, 11, 15, 15, 16,
)
# fmt: on

#: The names of each type of triad. The order of the elements is
#: important: it corresponds to the tricodes given in :data:`TRICODES`.
# fmt: off
TRIAD_NAMES = (
    "003", "012", "102", "021D", "021U", "021C", "111D", "111U",
    "030T", "030C", "201", "120D", "120U", "120C", "210", "300",
)
# fmt: on


#: A dictionary mapping triad code to triad name.
TRICODE_TO_NAME = {num: TRIAD_NAMES[code - 1] for num, code in enumerate(TRICODES)}
+
+
+def _tricode(G, v, u, w):
+ """Returns the integer code of the given triad.
+
+ This is some fancy magic that comes from Batagelj and Mrvar's paper. It
+ treats each edge joining a pair of `v`, `u`, and `w` as a bit in
+ the binary representation of an integer.
+
+ """
+ combos = ((v, u, 1), (u, v, 2), (v, w, 4), (w, v, 8), (u, w, 16), (w, u, 32))
+ return sum(x for u, v, x in combos if v in G[u])
+
+
@not_implemented_for("undirected")
@nx._dispatchable
def triadic_census(G, nodelist=None):
    """Determines the triadic census of a directed graph.

    The triadic census is a count of how many of the 16 possible types of
    triads are present in a directed graph. If a list of nodes is passed,
    then only those triads are taken into account which have elements of
    nodelist in them.

    Parameters
    ----------
    G : digraph
        A NetworkX DiGraph
    nodelist : list
        List of nodes for which you want to calculate triadic census

    Returns
    -------
    census : dict
        Dictionary with triad type as keys and number of occurrences as
        values.

    Notes
    -----
    This algorithm has complexity $O(m)$ where $m$ is the number of edges in
    the graph.

    For undirected graphs, the triadic census can be computed by first
    converting the graph into a directed graph using the
    ``G.to_directed()`` method. After this conversion, only the triad
    types 003, 102, 201 and 300 will be present in the undirected
    scenario.

    Raises
    ------
    ValueError
        If `nodelist` contains duplicate nodes or nodes not in `G`.
        If you want to ignore this you can preprocess with
        ``set(nodelist) & G.nodes``.

    See also
    --------
    triad_graph

    References
    ----------
    .. [1] Vladimir Batagelj and Andrej Mrvar, A subquadratic triad census
        algorithm for large sparse networks with small maximum degree,
        University of Ljubljana,
        http://vlado.fmf.uni-lj.si/pub/networks/doc/triads/triads.pdf

    """
    node_set = set(G.nbunch_iter(nodelist))
    if nodelist is not None and len(nodelist) != len(node_set):
        raise ValueError("nodelist includes duplicate nodes or nodes not in G")

    N = len(G)
    # Number of nodes outside `node_set`; nonzero triggers the special
    # bookkeeping for censusing only a subset of the nodes.
    N_outside = N - len(node_set)

    # Order the nodes so that members of node_set come first.
    pos = {n: i for i, n in enumerate(node_set)}
    if N_outside:
        outside = G.nodes - node_set
        pos.update((n, i + N) for i, n in enumerate(outside))

    # Precomputed neighbor views for fast counting.
    adj_all = {n: G.pred[n].keys() | G.succ[n].keys() for n in G}
    adj_mutual = {n: G.pred[n].keys() & G.succ[n].keys() for n in G}

    if N_outside:
        adj_oneway = {n: G.pred[n].keys() ^ G.succ[n].keys() for n in outside}
        # Count edges with neither endpoint in node_set.
        sgl = sum(1 for n in outside for nbr in adj_oneway[n] if nbr not in node_set)
        sgl_edges_outside = sgl // 2
        dbl = sum(1 for n in outside for nbr in adj_mutual[n] if nbr not in node_set)
        dbl_edges_outside = dbl // 2

    # Initialize the count for each triad type to zero.
    census = dict.fromkeys(TRIAD_NAMES, 0)
    # Main loop over the nodes of interest.
    for v in node_set:
        v_adj = adj_all[v]
        v_mutual = adj_mutual[v]
        if N_outside:
            # Per-v edge tallies used below to derive triads in which
            # v is the isolated node.
            sgl_u_bdy = sgl_u_out = dbl_u_bdy = dbl_u_out = 0
        for u in v_adj:
            if pos[u] <= pos[v]:
                continue
            u_adj = adj_all[u]
            shared = (v_adj | u_adj) - {u, v}
            # Count connected triads once each via the ordering rule.
            for w in shared:
                if pos[u] < pos[w] or (pos[v] < pos[w] < pos[u] and v not in adj_all[w]):
                    census[TRICODE_TO_NAME[_tricode(G, v, u, w)]] += 1

            # Dyadic triads containing the edge (v, u): counted by formula.
            if u in v_mutual:
                census["102"] += N - len(shared) - 2
            else:
                census["012"] += N - len(shared) - 2

            # Tally edges (u, x) with x outside node_set; subtracted later
            # to get triads with v isolated.  _out are neighbors outside
            # v's neighborhood, _bdy on its boundary (double counted).
            if N_outside and u not in node_set:
                sgl_u = adj_oneway[u]
                sgl_u_bdy += len(sgl_u & v_adj - node_set)
                sgl_u_out += len(sgl_u - v_adj - node_set)
                dbl_u = adj_mutual[u]
                dbl_u_bdy += len(dbl_u & v_adj - node_set)
                dbl_u_out += len(dbl_u - v_adj - node_set)
        # If node_set == G.nodes, skip this b/c the edge is found later.
        if N_outside:
            # Dyadic triads whose edge lies outside node_set and does not
            # touch v (v is the isolated node of the triad).
            census["012"] += sgl_edges_outside - (sgl_u_out + sgl_u_bdy // 2)
            census["102"] += dbl_edges_outside - (dbl_u_out + dbl_u_bdy // 2)

    # Null triads ("003") = total possible triads - every triad found.
    total_triads = (N * (N - 1) * (N - 2)) // 6
    triads_without_nodeset = (N_outside * (N_outside - 1) * (N_outside - 2)) // 6
    census["003"] = total_triads - triads_without_nodeset - sum(census.values())

    return census
+
+
@nx._dispatchable
def is_triad(G):
    """Returns True if the graph G is a triad, else False.

    A triad is a directed graph on exactly three nodes with no
    self-loops.

    Parameters
    ----------
    G : graph
        A NetworkX Graph

    Returns
    -------
    istriad : boolean
        Whether G is a valid triad

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
    >>> nx.is_triad(G)
    True
    >>> G.add_edge(0, 1)
    >>> nx.is_triad(G)
    False
    """
    # Fix: a non-Graph input previously fell through the nested ifs and
    # returned None implicitly; always return an explicit bool.
    if not isinstance(G, nx.Graph):
        return False
    if G.order() != 3 or not nx.is_directed(G):
        return False
    # A triad must not contain self-loops.
    return not any((n, n) in G.edges() for n in G.nodes())
+
+
@not_implemented_for("undirected")
@nx._dispatchable
def all_triplets(G):
    """Returns a generator of all possible sets of 3 nodes in a DiGraph.

    .. deprecated:: 3.3

       all_triplets is deprecated and will be removed in NetworkX version 3.5.
       Use `itertools.combinations` instead::

          all_triplets = itertools.combinations(G, 3)

    Parameters
    ----------
    G : digraph
        A NetworkX DiGraph

    Returns
    -------
    triplets : generator of 3-tuples
        Generator of tuples of 3 nodes

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])
    >>> list(nx.all_triplets(G))
    [(1, 2, 3), (1, 2, 4), (1, 3, 4), (2, 3, 4)]

    """
    import warnings

    # Fix: the warning message misspelled "removed" as "rmoved".
    warnings.warn(
        (
            "\n\nall_triplets is deprecated and will be removed in v3.5.\n"
            "Use `itertools.combinations(G, 3)` instead."
        ),
        category=DeprecationWarning,
        stacklevel=4,
    )
    return combinations(G.nodes(), 3)
+
+
@not_implemented_for("undirected")
@nx._dispatchable(returns_graph=True)
def all_triads(G):
    """A generator of all possible triads in G.

    Yields, for every combination of three nodes of `G`, an independent
    copy of the subgraph induced on those nodes.

    Parameters
    ----------
    G : digraph
        A NetworkX DiGraph

    Returns
    -------
    all_triads : generator of DiGraphs
        Generator of triads (order-3 DiGraphs)

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)])
    >>> for triad in nx.all_triads(G):
    ...     print(triad.edges)
    [(1, 2), (2, 3), (3, 1)]
    [(1, 2), (4, 1), (4, 2)]
    [(3, 1), (3, 4), (4, 1)]
    [(2, 3), (3, 4), (4, 2)]

    """
    for trio in combinations(G.nodes(), 3):
        yield G.subgraph(trio).copy()
+
+
@not_implemented_for("undirected")
@nx._dispatchable
def triads_by_type(G):
    """Returns a list of all triads for each triad type in a directed graph.
    There are exactly 16 different types of triads possible. Suppose 1, 2, 3 are three
    nodes, they will be classified as a particular triad type if their connections
    are as follows:

    - 003: 1, 2, 3
    - 012: 1 -> 2, 3
    - 102: 1 <-> 2, 3
    - 021D: 1 <- 2 -> 3
    - 021U: 1 -> 2 <- 3
    - 021C: 1 -> 2 -> 3
    - 111D: 1 <-> 2 <- 3
    - 111U: 1 <-> 2 -> 3
    - 030T: 1 -> 2 -> 3, 1 -> 3
    - 030C: 1 <- 2 <- 3, 1 -> 3
    - 201: 1 <-> 2 <-> 3
    - 120D: 1 <- 2 -> 3, 1 <-> 3
    - 120U: 1 -> 2 <- 3, 1 <-> 3
    - 120C: 1 -> 2 -> 3, 1 <-> 3
    - 210: 1 -> 2 <-> 3, 1 <-> 3
    - 300: 1 <-> 2 <-> 3, 1 <-> 3

    Refer to the example gallery for visual examples of the triad types.

    Parameters
    ----------
    G : digraph
        A NetworkX DiGraph

    Returns
    -------
    tri_by_type : dict
        Dictionary with triad types as keys and lists of triads as values.

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
    >>> dict = nx.triads_by_type(G)
    >>> dict["120C"][0].edges()
    OutEdgeView([(1, 2), (1, 3), (2, 3), (3, 1)])
    >>> dict["012"][0].edges()
    OutEdgeView([(1, 2)])

    References
    ----------
    .. [1] Snijders, T. (2012). "Transitivity and triads." University of
        Oxford.
        https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
    """
    # Classify every order-3 subgraph by its sociological triad type.
    classified = defaultdict(list)
    for triad in all_triads(G):
        classified[triad_type(triad)].append(triad)
    return classified
+
+
@not_implemented_for("undirected")
@nx._dispatchable
def triad_type(G):
    """Returns the sociological triad type for a triad.

    Parameters
    ----------
    G : digraph
        A NetworkX DiGraph with 3 nodes

    Returns
    -------
    triad_type : str
        A string identifying the triad type

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
    >>> nx.triad_type(G)
    '030C'
    >>> G.add_edge(1, 3)
    >>> nx.triad_type(G)
    '120C'

    Raises
    ------
    NetworkXAlgorithmError
        If `G` is not a triad (see :func:`is_triad`).

    Notes
    -----
    There can be 6 unique edges in a triad (order-3 DiGraph) (so 2^^6=64 unique
    triads given 3 nodes). These 64 triads each display exactly 1 of 16
    topologies of triads (topologies can be permuted). These topologies are
    identified by the following notation:

    {m}{a}{n}{type} (for example: 111D, 210, 102)

    Here:

    {m}     = number of mutual ties (takes 0, 1, 2, 3); a mutual tie is (0,1)
              AND (1,0)
    {a}     = number of asymmetric ties (takes 0, 1, 2, 3); an asymmetric tie
              is (0,1) BUT NOT (1,0) or vice versa
    {n}     = number of null ties (takes 0, 1, 2, 3); a null tie is NEITHER
              (0,1) NOR (1,0)
    {type}  = a letter (takes U, D, C, T) corresponding to up, down, cyclical
              and transitive. This is only used for topologies that can have
              more than one form (eg: 021D and 021U).

    References
    ----------
    .. [1] Snijders, T. (2012). "Transitivity and triads." University of
        Oxford.
        https://web.archive.org/web/20170830032057/http://www.stats.ox.ac.uk/~snijders/Trans_Triads_ha.pdf
    """
    if not is_triad(G):
        raise nx.NetworkXAlgorithmError("G is not a triad (order-3 DiGraph)")
    num_edges = len(G.edges())
    if num_edges == 0:
        return "003"
    elif num_edges == 1:
        return "012"
    elif num_edges == 2:
        e1, e2 = G.edges()
        if set(e1) == set(e2):
            # Same node pair in both directions: one mutual tie.
            return "102"
        elif e1[0] == e2[0]:
            return "021D"
        elif e1[1] == e2[1]:
            return "021U"
        elif e1[1] == e2[0] or e2[1] == e1[0]:
            return "021C"
    elif num_edges == 3:
        for e1, e2, e3 in permutations(G.edges(), 3):
            if set(e1) == set(e2):
                # e1/e2 form the mutual tie; e3 is the asymmetric one.
                if e3[0] in e1:
                    return "111U"
                # e3[1] in e1:
                return "111D"
            elif set(e1).symmetric_difference(set(e2)) == set(e3):
                # Fix: dropped the tautological self-comparison
                # ``X == X == set(G.nodes())`` from the original; only the
                # comparison against the node set matters. In a 3-cycle
                # every node is the source of exactly one edge.
                if {e1[0], e2[0], e3[0]} == set(G.nodes()):
                    return "030C"
                # e3 == (e1[0], e2[1]) and e2 == (e1[1], e3[1]):
                return "030T"
    elif num_edges == 4:
        for e1, e2, e3, e4 in permutations(G.edges(), 4):
            if set(e1) == set(e2):
                # identify pair of symmetric edges (which necessarily exists)
                if set(e3) == set(e4):
                    return "201"
                if {e3[0]} == {e4[0]} == set(e3).intersection(set(e4)):
                    return "120D"
                if {e3[1]} == {e4[1]} == set(e3).intersection(set(e4)):
                    return "120U"
                if e3[1] == e4[0]:
                    return "120C"
    elif num_edges == 5:
        return "210"
    elif num_edges == 6:
        return "300"
+
+
@not_implemented_for("undirected")
@py_random_state(1)
@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
def random_triad(G, seed=None):
    """Returns a random triad from a directed graph.

    .. deprecated:: 3.3

       random_triad is deprecated and will be removed in version 3.5.
       Use random sampling directly instead::

           G.subgraph(random.sample(list(G), 3))

    Parameters
    ----------
    G : digraph
        A NetworkX DiGraph
    seed : integer, random_state, or None (default)
        Indicator of random number generation state.
        See :ref:`Randomness`.

    Returns
    -------
    G2 : subgraph
        A randomly selected triad (order-3 NetworkX DiGraph)

    Raises
    ------
    NetworkXError
        If the input Graph has less than 3 nodes.

    Examples
    --------
    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, 3), (3, 1), (5, 6), (5, 4), (6, 7)])
    >>> triad = nx.random_triad(G, seed=1)
    >>> triad.edges
    OutEdgeView([(1, 2)])

    """
    import warnings

    # Emit the deprecation notice first, even when the size check below fails.
    warnings.warn(
        (
            "\n\nrandom_triad is deprecated and will be removed in NetworkX v3.5.\n"
            "Use random.sample instead, e.g.::\n\n"
            "\tG.subgraph(random.sample(list(G), 3))\n"
        ),
        category=DeprecationWarning,
        stacklevel=5,
    )
    if len(G) < 3:
        raise nx.NetworkXError(
            f"G needs at least 3 nodes to form a triad; (it has {len(G)} nodes)"
        )
    # Draw 3 distinct nodes via the provided random state and induce a subgraph.
    chosen = seed.sample(list(G.nodes()), 3)
    return G.subgraph(chosen)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/vitality.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/vitality.py
new file mode 100644
index 0000000000000000000000000000000000000000..29f98fd1bae5fcbba01d2827d21380098cf9fb5c
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/vitality.py
@@ -0,0 +1,76 @@
+"""
+Vitality measures.
+"""
+from functools import partial
+
+import networkx as nx
+
+__all__ = ["closeness_vitality"]
+
+
@nx._dispatchable(edge_attrs="weight")
def closeness_vitality(G, node=None, weight=None, wiener_index=None):
    """Returns the closeness vitality for nodes in the graph.

    The *closeness vitality* of a node, defined in Section 3.6.2 of [1],
    is the change in the sum of distances between all node pairs when
    excluding that node.

    Parameters
    ----------
    G : NetworkX graph
        A strongly-connected graph.

    weight : string
        The name of the edge attribute used as weight. This is passed
        directly to the :func:`~networkx.wiener_index` function.

    node : object
        If specified, only the closeness vitality for this node will be
        returned. Otherwise, a dictionary mapping each node to its
        closeness vitality will be returned.

    Other parameters
    ----------------
    wiener_index : number
        If you have already computed the Wiener index of the graph
        `G`, you can provide that value here. Otherwise, it will be
        computed for you.

    Returns
    -------
    dictionary or float
        If `node` is None, this function returns a dictionary
        with nodes as keys and closeness vitality as the
        value. Otherwise, it returns only the closeness vitality for the
        specified `node`.

        The closeness vitality of a node may be negative infinity if
        removing that node would disconnect the graph.

    Examples
    --------
    >>> G = nx.cycle_graph(3)
    >>> nx.closeness_vitality(G)
    {0: 2.0, 1: 2.0, 2: 2.0}

    See Also
    --------
    closeness_centrality

    References
    ----------
    .. [1] Ulrik Brandes, Thomas Erlebach (eds.).
           *Network Analysis: Methodological Foundations*.
           Springer, 2005.

    """
    if wiener_index is None:
        wiener_index = nx.wiener_index(G, weight=weight)
    if node is not None:
        # Vitality = Wiener index of G minus Wiener index of G without `node`.
        remaining = set(G) - {node}
        return wiener_index - nx.wiener_index(G.subgraph(remaining), weight=weight)
    # One recursive call per node, all sharing the precomputed Wiener index.
    # TODO This can be trivially parallelized.
    return {
        v: closeness_vitality(G, node=v, weight=weight, wiener_index=wiener_index)
        for v in G
    }
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/voronoi.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/voronoi.py
new file mode 100644
index 0000000000000000000000000000000000000000..60c453323394e41f4d98cd0fd94396439cc7d5c4
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/voronoi.py
@@ -0,0 +1,85 @@
+"""Functions for computing the Voronoi cells of a graph."""
+import networkx as nx
+from networkx.utils import groups
+
+__all__ = ["voronoi_cells"]
+
+
@nx._dispatchable(edge_attrs="weight")
def voronoi_cells(G, center_nodes, weight="weight"):
    """Returns the Voronoi cells centered at `center_nodes` with respect
    to the shortest-path distance metric.

    If $C$ is a set of nodes in the graph and $c$ is an element of $C$,
    the *Voronoi cell* centered at a node $c$ is the set of all nodes
    $v$ that are closer to $c$ than to any other center node in $C$ with
    respect to the shortest-path distance metric. [1]_

    For directed graphs, this will compute the "outward" Voronoi cells,
    as defined in [1]_, in which distance is measured from the center
    nodes to the target node. For the "inward" Voronoi cells, use the
    :meth:`DiGraph.reverse` method to reverse the orientation of the
    edges before invoking this function on the directed graph.

    Parameters
    ----------
    G : NetworkX graph

    center_nodes : set
        A nonempty set of nodes in the graph `G` that represent the
        center of the Voronoi cells.

    weight : string or function
        The edge attribute (or an arbitrary function) representing the
        weight of an edge. This keyword argument is as described in the
        documentation for :func:`~networkx.multi_source_dijkstra_path`,
        for example.

    Returns
    -------
    dictionary
        A mapping from center node to set of all nodes in the graph
        closer to that center node than to any other center node. The
        keys of the dictionary are the element of `center_nodes`, and
        the values of the dictionary form a partition of the nodes of
        `G`.

    Examples
    --------
    To get only the partition of the graph induced by the Voronoi cells,
    take the collection of all values in the returned dictionary::

        >>> G = nx.path_graph(6)
        >>> center_nodes = {0, 3}
        >>> cells = nx.voronoi_cells(G, center_nodes)
        >>> partition = set(map(frozenset, cells.values()))
        >>> sorted(map(sorted, partition))
        [[0, 1], [2, 3, 4, 5]]

    Raises
    ------
    ValueError
        If `center_nodes` is empty.

    References
    ----------
    .. [1] Erwig, Martin. (2000),"The graph Voronoi diagram with applications."
           *Networks*, 36: 156--163.
           https://doi.org/10.1002/1097-0037(200010)36:3<156::AID-NET2>3.0.CO;2-L

    """
    # Shortest paths from the nearest center to every reachable node.
    # `multi_source_dijkstra_path` raises ValueError for an empty source set.
    shortest_paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight)
    # The first node on each shortest path is the center it originates from.
    nearest_center = {node: path[0] for node, path in shortest_paths.items()}
    # Invert the mapping: center node -> set of nodes it "owns".
    cells = groups(nearest_center)
    # Nodes unreachable from every center go under a sentinel key.
    unreachable = set(G) - set(nearest_center)
    if unreachable:
        cells["unreachable"] = unreachable
    return cells
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/walks.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/walks.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe341757750dd36163a9f972ae195489f118d84d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/walks.py
@@ -0,0 +1,80 @@
+"""Function for computing walks in a graph.
+"""
+
+import networkx as nx
+
+__all__ = ["number_of_walks"]
+
+
@nx._dispatchable
def number_of_walks(G, walk_length):
    """Returns the number of walks connecting each pair of nodes in `G`

    A *walk* is a sequence of nodes in which each adjacent pair of nodes
    in the sequence is adjacent in the graph. A walk can repeat the same
    edge and go in the opposite direction just as people can walk on a
    set of paths, but standing still is not counted as part of the walk.

    This function only counts the walks with `walk_length` edges. Note that
    the number of nodes in the walk sequence is one more than `walk_length`.
    The number of walks can grow very quickly on a larger graph
    and with a larger walk length.

    Parameters
    ----------
    G : NetworkX graph

    walk_length : int
        A nonnegative integer representing the length of a walk.

    Returns
    -------
    dict
        A dictionary of dictionaries in which outer keys are source
        nodes, inner keys are target nodes, and inner values are the
        number of walks of length `walk_length` connecting those nodes.

    Raises
    ------
    ValueError
        If `walk_length` is negative

    Examples
    --------

    >>> G = nx.Graph([(0, 1), (1, 2)])
    >>> walks = nx.number_of_walks(G, 2)
    >>> walks
    {0: {0: 1, 1: 0, 2: 1}, 1: {0: 0, 1: 2, 2: 0}, 2: {0: 1, 1: 0, 2: 1}}
    >>> total_walks = sum(sum(tgts.values()) for _, tgts in walks.items())

    You can also get the number of walks from a specific source node using the
    returned dictionary. For example, number of walks of length 1 from node 0
    can be found as follows:

    >>> walks = nx.number_of_walks(G, 1)
    >>> walks[0]
    {0: 0, 1: 1, 2: 0}
    >>> sum(walks[0].values())  # walks from 0 of length 1
    1

    Similarly, a target node can also be specified:

    >>> walks[0][1]
    1

    """
    import numpy as np

    if walk_length < 0:
        raise ValueError(f"`walk_length` cannot be negative: {walk_length}")

    # Entry (i, j) of A**k counts the walks of length k from node i to node j.
    A = nx.adjacency_matrix(G, weight=None)
    # TODO: Use matrix_power from scipy.sparse when available
    walk_counts = np.linalg.matrix_power(A.toarray(), walk_length)
    nodes = list(G)
    return {
        source: {target: int(walk_counts[i, j]) for j, target in enumerate(nodes)}
        for i, source in enumerate(nodes)
    }
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/algorithms/wiener.py b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/wiener.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb55d609f7d9f2013b4b0b7738e7477018342c20
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/algorithms/wiener.py
@@ -0,0 +1,226 @@
+"""Functions related to the Wiener Index of a graph.
+
+The Wiener Index is a topological measure of a graph
+related to the distance between nodes and their degree.
+The Schultz Index and Gutman Index are similar measures.
+They are used to categorize molecules via the network of
+atoms connected by chemical bonds. The indices are
+correlated with functional aspects of the molecules.
+
+References
+----------
+.. [1] `Wikipedia: Wiener Index <https://en.wikipedia.org/wiki/Wiener_index>`_
+.. [2] M.V. Diudeaa and I. Gutman, Wiener-Type Topological Indices,
+ Croatica Chemica Acta, 71 (1998), 21-51.
+ https://hrcak.srce.hr/132323
+"""
+
+import itertools as it
+
+import networkx as nx
+
+__all__ = ["wiener_index", "schultz_index", "gutman_index"]
+
+
@nx._dispatchable(edge_attrs="weight")
def wiener_index(G, weight=None):
    """Returns the Wiener index of the given graph.

    The *Wiener index* of a graph is the sum of the shortest-path
    (weighted) distances between each pair of reachable nodes.
    For pairs of nodes in undirected graphs, only one orientation
    of the pair is counted.

    Parameters
    ----------
    G : NetworkX graph

    weight : string or None, optional (default: None)
        If None, every edge has weight 1.
        If a string, use this edge attribute as the edge weight.
        Any edge attribute not present defaults to 1.
        The edge weights are used to computing shortest-path distances.

    Returns
    -------
    number
        The Wiener index of the graph `G`.

    Raises
    ------
    NetworkXError
        If the graph `G` is not connected.

    Notes
    -----
    If a pair of nodes is not reachable, the distance is assumed to be
    infinity. This means that for graphs that are not
    strongly-connected, this function returns ``inf``.

    The Wiener index is not usually defined for directed graphs, however
    this function uses the natural generalization of the Wiener index to
    directed graphs.

    Examples
    --------
    The Wiener index of the (unweighted) complete graph on *n* nodes
    equals the number of pairs of the *n* nodes, since each pair of
    nodes is at distance one::

        >>> n = 10
        >>> G = nx.complete_graph(n)
        >>> nx.wiener_index(G) == n * (n - 1) / 2
        True

    Graphs that are not strongly-connected have infinite Wiener index::

        >>> G = nx.empty_graph(2)
        >>> nx.wiener_index(G)
        inf

    References
    ----------
    .. [1] `Wikipedia: Wiener Index <https://en.wikipedia.org/wiki/Wiener_index>`_
    """
    # Unreachable pairs are at infinite distance, so a graph that is not
    # (strongly) connected has infinite Wiener index.
    if G.is_directed():
        connected = nx.is_strongly_connected(G)
    else:
        connected = nx.is_connected(G)
    if not connected:
        return float("inf")

    distances = nx.shortest_path_length(G, weight=weight)
    total = sum(d for _, targets in distances for d in targets.values())
    # Undirected graphs count each unordered pair twice above, so halve.
    return total if G.is_directed() else total / 2
+
+
@nx.utils.not_implemented_for("directed")
@nx.utils.not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight")
def schultz_index(G, weight=None):
    r"""Returns the Schultz Index (of the first kind) of `G`

    The *Schultz Index* [3]_ of a graph is the sum over all node pairs of
    distances times the sum of degrees. Consider an undirected graph `G`.
    For each node pair ``(u, v)`` compute ``dist(u, v) * (deg(u) + deg(v))``
    where ``dist`` is the shortest path length between two nodes and ``deg``
    is the degree of a node.

    The Schultz Index is the sum of these quantities over all (unordered)
    pairs of nodes.

    Parameters
    ----------
    G : NetworkX graph
        The undirected graph of interest.
    weight : string or None, optional (default: None)
        If None, every edge has weight 1.
        If a string, use this edge attribute as the edge weight.
        Any edge attribute not present defaults to 1.
        The edge weights are used to computing shortest-path distances.

    Returns
    -------
    number
        The first kind of Schultz Index of the graph `G`.

    Examples
    --------
    The Schultz Index of the (unweighted) complete graph on *n* nodes
    equals the number of pairs of the *n* nodes times ``2 * (n - 1)``,
    since each pair of nodes is at distance one and the sum of degree
    of two nodes is ``2 * (n - 1)``.

    >>> n = 10
    >>> G = nx.complete_graph(n)
    >>> nx.schultz_index(G) == (n * (n - 1) / 2) * (2 * (n - 1))
    True

    Graph that is disconnected

    >>> nx.schultz_index(nx.empty_graph(2))
    inf

    References
    ----------
    .. [1] I. Gutman, Selected properties of the Schultz molecular topological index,
           J. Chem. Inf. Comput. Sci. 34 (1994), 1087–1089.
           https://doi.org/10.1021/ci00021a009
    .. [2] M.V. Diudeaa and I. Gutman, Wiener-Type Topological Indices,
           Croatica Chemica Acta, 71 (1998), 21-51.
           https://hrcak.srce.hr/132323
    .. [3] H. P. Schultz, Topological organic chemistry. 1.
           Graph theory and topological indices of alkanes,
           J. Chem. Inf. Comput. Sci. 29 (1989), 239–257.

    """
    if not nx.is_connected(G):
        return float("inf")

    spl = nx.shortest_path_length(G, weight=weight)
    # Fix: the original called ``dict(G.degree, weight=weight)``, which adds a
    # spurious literal "weight" *key* to the mapping (clobbering any node
    # actually named "weight") and ignores edge weights when computing degrees.
    # Pass ``weight`` to the degree view instead.
    d = dict(G.degree(weight=weight))
    return sum(dist * (d[u] + d[v]) for u, info in spl for v, dist in info.items()) / 2
+
+
@nx.utils.not_implemented_for("directed")
@nx.utils.not_implemented_for("multigraph")
@nx._dispatchable(edge_attrs="weight")
def gutman_index(G, weight=None):
    r"""Returns the Gutman Index for the graph `G`.

    The *Gutman Index* measures the topology of networks, especially for molecule
    networks of atoms connected by bonds [1]_. It is also called the Schultz Index
    of the second kind [2]_.

    Consider an undirected graph `G` with node set ``V``.
    The Gutman Index of a graph is the sum over all (unordered) pairs of nodes
    of nodes ``(u, v)``, with distance ``dist(u, v)`` and degrees ``deg(u)``
    and ``deg(v)``, of ``dist(u, v) * deg(u) * deg(v)``

    Parameters
    ----------
    G : NetworkX graph

    weight : string or None, optional (default: None)
        If None, every edge has weight 1.
        If a string, use this edge attribute as the edge weight.
        Any edge attribute not present defaults to 1.
        The edge weights are used to computing shortest-path distances.

    Returns
    -------
    number
        The Gutman Index of the graph `G`.

    Examples
    --------
    The Gutman Index of the (unweighted) complete graph on *n* nodes
    equals the number of pairs of the *n* nodes times ``(n - 1) * (n - 1)``,
    since each pair of nodes is at distance one and the product of degree of two
    vertices is ``(n - 1) * (n - 1)``.

    >>> n = 10
    >>> G = nx.complete_graph(n)
    >>> nx.gutman_index(G) == (n * (n - 1) / 2) * ((n - 1) * (n - 1))
    True

    Graphs that are disconnected

    >>> G = nx.empty_graph(2)
    >>> nx.gutman_index(G)
    inf

    References
    ----------
    .. [1] M.V. Diudeaa and I. Gutman, Wiener-Type Topological Indices,
           Croatica Chemica Acta, 71 (1998), 21-51.
           https://hrcak.srce.hr/132323
    .. [2] I. Gutman, Selected properties of the Schultz molecular topological index,
           J. Chem. Inf. Comput. Sci. 34 (1994), 1087–1089.
           https://doi.org/10.1021/ci00021a009

    """
    if not nx.is_connected(G):
        return float("inf")

    spl = nx.shortest_path_length(G, weight=weight)
    # Fix: same defect as in ``schultz_index`` — ``dict(G.degree, weight=weight)``
    # injected a literal "weight" key and ignored edge weights; pass ``weight``
    # to the degree view instead.
    d = dict(G.degree(weight=weight))
    return sum(dist * d[u] * d[v] for u, vinfo in spl for v, dist in vinfo.items()) / 2
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/conftest.py b/pythonProject/.venv/Lib/site-packages/networkx/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8d6e158124b2fb56f8510a029b935b3397369c5
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/conftest.py
@@ -0,0 +1,289 @@
+"""
+Testing
+=======
+
+General guidelines for writing good tests:
+
+- doctests always assume ``import networkx as nx`` so don't add that
+- prefer pytest fixtures over classes with setup methods.
+- use the ``@pytest.mark.parametrize`` decorator
+- use ``pytest.importorskip`` for numpy, scipy, pandas, and matplotlib b/c of PyPy.
+ and add the module to the relevant entries below.
+
+"""
+import os
+import sys
+import warnings
+from importlib.metadata import entry_points
+
+import pytest
+
+import networkx
+
+
def pytest_addoption(parser):
    """Register the networkx-specific command line options with pytest."""
    # (option name, addoption keyword arguments), registered in order.
    option_specs = (
        (
            "--runslow",
            {"action": "store_true", "default": False, "help": "run slow tests"},
        ),
        (
            "--backend",
            {
                "action": "store",
                "default": None,
                "help": "Run tests with a backend by auto-converting nx graphs to backend graphs",
            },
        ),
        (
            "--fallback-to-nx",
            {
                "action": "store_true",
                "default": False,
                "help": (
                    "Run nx function if a backend doesn't implement a dispatchable function"
                    " (use with --backend)"
                ),
            },
        ),
    )
    for name, kwargs in option_specs:
        parser.addoption(name, **kwargs)
+
+
def pytest_configure(config):
    """Configure backend dispatch for the test session.

    Runs once at session start. The statements below mutate global
    networkx state (backend registry, config, dispatch flags) in order,
    so their sequence matters.
    """
    config.addinivalue_line("markers", "slow: mark test as slow to run")
    # The CLI flag wins; otherwise fall back to the environment variable.
    backend = config.getoption("--backend")
    if backend is None:
        backend = os.environ.get("NETWORKX_TEST_BACKEND")
    # nx-loopback backend is only available when testing
    backends = entry_points(name="nx-loopback", group="networkx.backends")
    if backends:
        # Register the loopback backend so dispatch round-trips can be tested.
        networkx.utils.backends.backends["nx-loopback"] = next(iter(backends))
    else:
        warnings.warn(
            "\n\n WARNING: Mixed NetworkX configuration! \n\n"
            " This environment has mixed configuration for networkx.\n"
            " The test object nx-loopback is not configured correctly.\n"
            " You should not be seeing this message.\n"
            " Try `pip install -e .`, or change your PYTHONPATH\n"
            " Make sure python finds the networkx repo you are testing\n\n"
        )
    if backend:
        # Auto-convert graphs to the requested backend during tests.
        networkx.config["backend_priority"] = [backend]
        fallback_to_nx = config.getoption("--fallback-to-nx")
        if not fallback_to_nx:
            fallback_to_nx = os.environ.get("NETWORKX_FALLBACK_TO_NX")
        # Run the pure-Python implementation when the backend lacks a function.
        networkx.utils.backends._dispatchable._fallback_to_nx = bool(fallback_to_nx)
+
+
def pytest_collection_modifyitems(config, items):
    """Let the active backend annotate collected tests and honor --runslow."""
    # Setting this to True here allows tests to be set up before dispatching
    # any function call to a backend.
    networkx.utils.backends._dispatchable._is_testing = True
    backend_priority = networkx.config["backend_priority"]
    if backend_priority:
        # Allow pluggable backends to add markers to tests (such as skip or
        # xfail) when running in auto-conversion test mode.
        backend = networkx.utils.backends.backends[backend_priority[0]].load()
        if hasattr(backend, "on_start_tests"):
            backend.on_start_tests(items)

    # --runslow given on the command line: do not skip slow tests.
    if config.getoption("--runslow"):
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
+
+
+# TODO: The warnings below need to be dealt with, but for now we silence them.
# TODO: The warnings below need to be dealt with, but for now we silence them.
@pytest.fixture(autouse=True)
def set_warnings():
    """Silence known deprecation/future warnings for the duration of a test."""
    # (category, message-prefix regex) pairs, registered in the original order.
    ignored = (
        (FutureWarning, "\n\nsingle_target_shortest_path_length"),
        (FutureWarning, "\n\nshortest_path"),
        (DeprecationWarning, "\nforest_str is deprecated"),
        (DeprecationWarning, "\n\nrandom_tree"),
        (DeprecationWarning, "Edmonds has been deprecated"),
        (DeprecationWarning, "MultiDiGraph_EdgeKey has been deprecated"),
        (DeprecationWarning, "\n\nThe `normalized`"),
        (DeprecationWarning, "The function `join` is deprecated"),
        (DeprecationWarning, "\n\nstrongly_connected_components_recursive"),
        (DeprecationWarning, "\n\nall_triplets"),
        (DeprecationWarning, "\n\nrandom_triad"),
        (DeprecationWarning, "minimal_d_separator"),
        (DeprecationWarning, "d_separated"),
        (DeprecationWarning, "\n\nk_core"),
        (DeprecationWarning, "\n\nk_shell"),
        (DeprecationWarning, "\n\nk_crust"),
        (DeprecationWarning, "\n\nk_corona"),
        (DeprecationWarning, "\n\ntotal_spanning_tree_weight"),
        (DeprecationWarning, r"\n\nThe 'create=matrix'"),
    )
    for category, message in ignored:
        warnings.filterwarnings("ignore", category=category, message=message)
+
+
@pytest.fixture(autouse=True)
def add_nx(doctest_namespace):
    """Make ``nx`` available in the namespace used to execute doctests."""
    doctest_namespace.update(nx=networkx)
+
+
# What dependencies are installed?
# Probe each optional dependency once; the has_* flags drive test collection
# below. The imported module names are intentionally left bound at module
# level, matching the original behavior.

try:
    import numpy
except ImportError:
    has_numpy = False
else:
    has_numpy = True

try:
    import scipy
except ImportError:
    has_scipy = False
else:
    has_scipy = True

try:
    import matplotlib
except ImportError:
    has_matplotlib = False
else:
    has_matplotlib = True

try:
    import pandas
except ImportError:
    has_pandas = False
else:
    has_pandas = True

try:
    import pygraphviz
except ImportError:
    has_pygraphviz = False
else:
    has_pygraphviz = True

try:
    import pydot
except ImportError:
    has_pydot = False
else:
    has_pydot = True

try:
    import sympy
except ImportError:
    has_sympy = False
else:
    has_sympy = True
+
+
# List of files that pytest should ignore

collect_ignore = []

# Modules that cannot even be collected without the named optional dependency.
needs_numpy = [
    "algorithms/approximation/traveling_salesman.py",
    "algorithms/centrality/current_flow_closeness.py",
    "algorithms/node_classification.py",
    "algorithms/non_randomness.py",
    "algorithms/shortest_paths/dense.py",
    "algorithms/tree/mst.py",
    "generators/expanders.py",
    "linalg/bethehessianmatrix.py",
    "linalg/laplacianmatrix.py",
    "utils/misc.py",
    "algorithms/centrality/laplacian.py",
]
needs_scipy = [
    "algorithms/approximation/traveling_salesman.py",
    "algorithms/assortativity/correlation.py",
    "algorithms/assortativity/mixing.py",
    "algorithms/assortativity/pairs.py",
    "algorithms/bipartite/matrix.py",
    "algorithms/bipartite/spectral.py",
    "algorithms/centrality/current_flow_betweenness.py",
    "algorithms/centrality/current_flow_betweenness_subset.py",
    "algorithms/centrality/eigenvector.py",
    "algorithms/centrality/katz.py",
    "algorithms/centrality/laplacian.py",
    "algorithms/centrality/second_order.py",
    "algorithms/centrality/subgraph_alg.py",
    "algorithms/communicability_alg.py",
    "algorithms/community/divisive.py",
    "algorithms/distance_measures.py",
    "algorithms/link_analysis/hits_alg.py",
    "algorithms/link_analysis/pagerank_alg.py",
    "algorithms/node_classification.py",
    "algorithms/similarity.py",
    "algorithms/tree/mst.py",
    "algorithms/walks.py",
    "convert_matrix.py",
    "drawing/layout.py",
    "drawing/nx_pylab.py",
    "generators/spectral_graph_forge.py",
    "generators/expanders.py",
    "linalg/algebraicconnectivity.py",
    "linalg/attrmatrix.py",
    "linalg/bethehessianmatrix.py",
    "linalg/graphmatrix.py",
    "linalg/laplacianmatrix.py",
    "linalg/modularitymatrix.py",
    "linalg/spectrum.py",
    "utils/rcm.py",
]
needs_matplotlib = ["drawing/nx_pylab.py"]
needs_pandas = ["convert_matrix.py"]
needs_pygraphviz = ["drawing/nx_agraph.py"]
needs_pydot = ["drawing/nx_pydot.py"]
needs_sympy = ["algorithms/polynomials.py"]

# Skip collecting any module whose optional dependency is missing.
for _available, _files in (
    (has_numpy, needs_numpy),
    (has_scipy, needs_scipy),
    (has_matplotlib, needs_matplotlib),
    (has_pandas, needs_pandas),
    (has_pygraphviz, needs_pygraphviz),
    (has_pydot, needs_pydot),
    (has_sympy, needs_sympy),
):
    if not _available:
        collect_ignore += _files
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/convert.py b/pythonProject/.venv/Lib/site-packages/networkx/convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cc8fe401261a0af9b8dc8ad261293a735782272
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/convert.py
@@ -0,0 +1,494 @@
+"""Functions to convert NetworkX graphs to and from other formats.
+
+The preferred way of converting data to a NetworkX graph is through the
+graph constructor. The constructor calls the to_networkx_graph() function
+which attempts to guess the input type and convert it automatically.
+
+Examples
+--------
+Create a graph with a single edge from a dictionary of dictionaries
+
+>>> d = {0: {1: 1}} # dict-of-dicts single edge (0,1)
+>>> G = nx.Graph(d)
+
+See Also
+--------
+nx_agraph, nx_pydot
+"""
+import warnings
+from collections.abc import Collection, Generator, Iterator
+
+import networkx as nx
+
+__all__ = [
+ "to_networkx_graph",
+ "from_dict_of_dicts",
+ "to_dict_of_dicts",
+ "from_dict_of_lists",
+ "to_dict_of_lists",
+ "from_edgelist",
+ "to_edgelist",
+]
+
+
def to_networkx_graph(data, create_using=None, multigraph_input=False):
    """Make a NetworkX graph from a known data structure.

    The preferred way to call this is automatically
    from the class constructor

    >>> d = {0: {1: {"weight": 1}}}  # dict-of-dicts single edge (0,1)
    >>> G = nx.Graph(d)

    instead of the equivalent

    >>> G = nx.from_dict_of_dicts(d)

    Parameters
    ----------
    data : object to be converted

        Current known types are:
         any NetworkX graph
         dict-of-dicts
         dict-of-lists
         container (e.g. set, list, tuple) of edges
         iterator (e.g. itertools.chain) that produces edges
         generator of edges
         Pandas DataFrame (row per edge)
         2D numpy array
         scipy sparse array
         pygraphviz agraph

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    multigraph_input : bool (default False)
        If True and data is a dict_of_dicts,
        try to create a multigraph assuming dict_of_dict_of_lists.
        If data and create_using are both multigraphs then create
        a multigraph from a multigraph.

    Raises
    ------
    nx.NetworkXError
        If `data` looks like a known type but cannot be converted, or is
        not a known type at all.
    """
    # Candidate types are detected by duck-typing (cheap attribute checks)
    # and attempted in order of decreasing specificity; the generic
    # edge-list fallback must remain last.

    # NX graph (anything exposing an `adj` attribute is treated as one)
    if hasattr(data, "adj"):
        try:
            result = from_dict_of_dicts(
                data.adj,
                create_using=create_using,
                multigraph_input=data.is_multigraph(),
            )
            # data.graph should be dict-like
            result.graph.update(data.graph)
            # data.nodes should be dict-like
            # result.add_node_from(data.nodes.items()) possible but
            # for custom node_attr_dict_factory which may be hashable
            # will be unexpected behavior
            for n, dd in data.nodes.items():
                result._node[n].update(dd)
            return result
        except Exception as err:
            raise nx.NetworkXError("Input is not a correct NetworkX graph.") from err

    # pygraphviz agraph (AGraph objects expose `is_strict`)
    if hasattr(data, "is_strict"):
        try:
            return nx.nx_agraph.from_agraph(data, create_using=create_using)
        except Exception as err:
            raise nx.NetworkXError("Input is not a correct pygraphviz graph.") from err

    # dict of dicts/lists
    if isinstance(data, dict):
        try:
            return from_dict_of_dicts(
                data, create_using=create_using, multigraph_input=multigraph_input
            )
        except Exception as err1:
            if multigraph_input is True:
                # The caller explicitly asked for multigraph interpretation,
                # so do not silently fall back to dict-of-lists.
                raise nx.NetworkXError(
                    f"converting multigraph_input raised:\n{type(err1)}: {err1}"
                ) from err1
            try:
                return from_dict_of_lists(data, create_using=create_using)
            except Exception as err2:
                raise TypeError("Input is not known type.") from err2

    # Pandas DataFrame: square frames are adjacency matrices, anything
    # else is treated as an edge list with one row per edge.
    try:
        import pandas as pd

        if isinstance(data, pd.DataFrame):
            if data.shape[0] == data.shape[1]:
                try:
                    return nx.from_pandas_adjacency(data, create_using=create_using)
                except Exception as err:
                    msg = "Input is not a correct Pandas DataFrame adjacency matrix."
                    raise nx.NetworkXError(msg) from err
            else:
                try:
                    return nx.from_pandas_edgelist(
                        data, edge_attr=True, create_using=create_using
                    )
                except Exception as err:
                    msg = "Input is not a correct Pandas DataFrame edge-list."
                    raise nx.NetworkXError(msg) from err
    except ImportError:
        warnings.warn("pandas not found, skipping conversion test.", ImportWarning)

    # numpy array
    try:
        import numpy as np

        if isinstance(data, np.ndarray):
            try:
                return nx.from_numpy_array(data, create_using=create_using)
            except Exception as err:
                raise nx.NetworkXError(
                    "Failed to interpret array as an adjacency matrix."
                ) from err
    except ImportError:
        warnings.warn("numpy not found, skipping conversion test.", ImportWarning)

    # scipy sparse array - any format
    try:
        import scipy  # imported only to check availability

        if hasattr(data, "format"):
            try:
                return nx.from_scipy_sparse_array(data, create_using=create_using)
            except Exception as err:
                raise nx.NetworkXError(
                    "Input is not a correct scipy sparse array type."
                ) from err
    except ImportError:
        warnings.warn("scipy not found, skipping conversion test.", ImportWarning)

    # Note: most general check - should remain last in order of execution
    # Includes containers (e.g. list, set, dict, etc.), generators, and
    # iterators (e.g. itertools.chain) of edges

    if isinstance(data, Collection | Generator | Iterator):
        try:
            return from_edgelist(data, create_using=create_using)
        except Exception as err:
            raise nx.NetworkXError("Input is not a valid edge list") from err

    raise nx.NetworkXError("Input is not a known data type for conversion.")
+
+
@nx._dispatchable
def to_dict_of_lists(G, nodelist=None):
    """Return the adjacency of ``G`` as a dictionary of lists.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    nodelist : list, optional
        Restrict the output to the nodes in ``nodelist``; neighbors
        outside the list are omitted.

    Returns
    -------
    dict
        Maps each node to the list of its neighbors.

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    if nodelist is None:
        nodelist = G
    return {
        node: [nbr for nbr in G.neighbors(node) if nbr in nodelist]
        for node in nodelist
    }
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_dict_of_lists(d, create_using=None):
    """Build a graph from a dict-of-lists adjacency representation.

    Parameters
    ----------
    d : dictionary of lists
        A dictionary of lists adjacency representation.
    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before
        populated.

    Examples
    --------
    >>> dol = {0: [1]}  # single edge (0,1)
    >>> G = nx.from_dict_of_lists(dol)

    or

    >>> G = nx.Graph(dol)  # use Graph constructor
    """
    G = nx.empty_graph(0, create_using)
    G.add_nodes_from(d)
    if G.is_multigraph() and not G.is_directed():
        # A dict-of-lists cannot encode multiedges, and for undirected
        # graphs each edge appears twice (u->v and v->u); without this
        # bookkeeping the second appearance would add a parallel edge.
        completed = set()
        for source, neighbors in d.items():
            for target in neighbors:
                if target not in completed:
                    G.add_edge(source, target)
                # Mark `source` done so the reversed entry is skipped later.
                completed.add(source)
        return G
    G.add_edges_from(
        (source, target)
        for source, neighbors in d.items()
        for target in neighbors
    )
    return G
+
+
def to_dict_of_dicts(G, nodelist=None, edge_data=None):
    """Return the adjacency of ``G`` as a nested dictionary.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    nodelist : list, optional
        Restrict the output to the nodes in ``nodelist``; edges to nodes
        outside the list are dropped.
    edge_data : scalar, optional
        If provided, every inner value is set to ``edge_data`` (usual
        values are ``1`` or ``True``). If ``None`` (the default) the edge
        data stored in ``G`` is used, producing a dict-of-dict-of-dicts
        (one level deeper again for a MultiGraph). ``edge_data`` should
        *not* be a container: the same object is shared by every entry.

    Returns
    -------
    dod : dict
        A nested dictionary representation of ``G``; the nesting depth
        depends on the type of ``G`` and on ``edge_data``.

    See Also
    --------
    from_dict_of_dicts, to_dict_of_lists

    Notes
    -----
    For custom per-edge values, build the dict directly::

        dod = {
            n: {nbr: custom(n, nbr, dd) for nbr, dd in nbrdict.items()}
            for n, nbrdict in G.adj.items()
        }

    Examples
    --------
    >>> G = nx.path_graph(3)
    >>> nx.to_dict_of_dicts(G)
    {0: {1: {}}, 1: {0: {}, 2: {}}, 2: {1: {}}}
    >>> nx.to_dict_of_dicts(G, edge_data=1)
    {0: {1: 1}, 1: {0: 1, 2: 1}, 2: {1: 1}}
    """
    if nodelist is None:
        if edge_data is None:
            # Shallow-copy each neighbor dict so callers can mutate the
            # mapping itself without touching G's internal structure.
            return {node: nbrs.copy() for node, nbrs in G.adjacency()}
        # fromkeys shares the single edge_data object across all entries.
        return {
            node: dict.fromkeys(nbrs, edge_data) for node, nbrs in G.adjacency()
        }
    if edge_data is None:
        return {
            node: {nbr: dd for nbr, dd in G[node].items() if nbr in nodelist}
            for node in nodelist
        }
    return {
        node: {nbr: edge_data for nbr in G[node] if nbr in nodelist}
        for node in nodelist
    }
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_dict_of_dicts(d, create_using=None, multigraph_input=False):
    """Returns a graph from a dictionary of dictionaries.

    Parameters
    ----------
    d : dictionary of dictionaries
        A dictionary of dictionaries adjacency representation.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    multigraph_input : bool (default False)
        When True, the dict `d` is assumed
        to be a dict-of-dict-of-dict-of-dict structure keyed by
        node to neighbor to edge keys to edge data for multi-edges.
        Otherwise this routine assumes dict-of-dict-of-dict keyed by
        node to neighbor to edge data.

    Examples
    --------
    >>> dod = {0: {1: {"weight": 1}}}  # single edge (0,1)
    >>> G = nx.from_dict_of_dicts(dod)

    or

    >>> G = nx.Graph(dod)  # use Graph constructor

    """
    G = nx.empty_graph(0, create_using)
    # Register isolated nodes too; add_edges_from alone would miss them.
    G.add_nodes_from(d)
    # does dict d represent a MultiGraph or MultiDiGraph?
    if multigraph_input:
        # d is node -> neighbor -> edge-key -> edge-data.
        if G.is_directed():
            if G.is_multigraph():
                # multigraph -> multigraph: the original edge keys survive.
                G.add_edges_from(
                    (u, v, key, data)
                    for u, nbrs in d.items()
                    for v, datadict in nbrs.items()
                    for key, data in datadict.items()
                )
            else:
                # multigraph -> plain digraph: parallel edges collapse and
                # the data of the last key iterated wins.
                G.add_edges_from(
                    (u, v, data)
                    for u, nbrs in d.items()
                    for v, datadict in nbrs.items()
                    for key, data in datadict.items()
                )
        else:  # Undirected
            if G.is_multigraph():
                seen = set()  # don't add both directions of undirected graph
                for u, nbrs in d.items():
                    for v, datadict in nbrs.items():
                        if (u, v) not in seen:
                            G.add_edges_from(
                                (u, v, key, data) for key, data in datadict.items()
                            )
                            # Record the reversed pair so v's entry is skipped.
                            seen.add((v, u))
            else:
                seen = set()  # don't add both directions of undirected graph
                for u, nbrs in d.items():
                    for v, datadict in nbrs.items():
                        if (u, v) not in seen:
                            G.add_edges_from(
                                (u, v, data) for key, data in datadict.items()
                            )
                            seen.add((v, u))

    else:  # not a multigraph to multigraph transfer
        if G.is_multigraph() and not G.is_directed():
            # d can have both representations u-v, v-u in dict.  Only add one.
            # We don't need this check for digraphs since we add both directions,
            # or for Graph() since it is done implicitly (parallel edges not allowed)
            seen = set()
            for u, nbrs in d.items():
                for v, data in nbrs.items():
                    if (u, v) not in seen:
                        # Pin key=0 and update in place so the single edge's
                        # attribute dict matches the input data.
                        G.add_edge(u, v, key=0)
                        G[u][v][0].update(data)
                        seen.add((v, u))
        else:
            G.add_edges_from(
                ((u, v, data) for u, nbrs in d.items() for v, data in nbrs.items())
            )
    return G
+
+
@nx._dispatchable(preserve_edge_attrs=True)
def to_edgelist(G, nodelist=None):
    """Return the edges of ``G``, with their data, as an edge list.

    Parameters
    ----------
    G : graph
        A NetworkX graph.
    nodelist : list, optional
        Report only edges incident to nodes in ``nodelist``.

    Returns
    -------
    An edge view of ``(u, v, data_dict)`` triples.
    """
    if nodelist is not None:
        return G.edges(nodelist, data=True)
    return G.edges(data=True)
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_edgelist(edgelist, create_using=None):
    """Build a graph from an iterable of edge tuples.

    Parameters
    ----------
    edgelist : list or iterator
        Edge tuples, e.g. ``(u, v)`` or ``(u, v, data_dict)``.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before
        populated.

    Examples
    --------
    >>> edgelist = [(0, 1)]  # single edge (0,1)
    >>> G = nx.from_edgelist(edgelist)

    or

    >>> G = nx.Graph(edgelist)  # use Graph constructor
    """
    graph = nx.empty_graph(0, create_using)
    graph.add_edges_from(edgelist)
    return graph
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/convert_matrix.py b/pythonProject/.venv/Lib/site-packages/networkx/convert_matrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..6165ac18e31e1aadb85676095e1110889dfead51
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/convert_matrix.py
@@ -0,0 +1,1202 @@
+"""Functions to convert NetworkX graphs to and from common data containers
+like numpy arrays, scipy sparse arrays, and pandas DataFrames.
+
+The preferred way of converting data to a NetworkX graph is through the
+graph constructor. The constructor calls the `~networkx.convert.to_networkx_graph`
+function which attempts to guess the input type and convert it automatically.
+
+Examples
+--------
+Create a 10 node random graph from a numpy array
+
+>>> import numpy as np
+>>> rng = np.random.default_rng()
+>>> a = rng.integers(low=0, high=2, size=(10, 10))
+>>> DG = nx.from_numpy_array(a, create_using=nx.DiGraph)
+
+or equivalently:
+
+>>> DG = nx.DiGraph(a)
+
+which calls `from_numpy_array` internally based on the type of ``a``.
+
+See Also
+--------
+nx_agraph, nx_pydot
+"""
+
+import itertools
+from collections import defaultdict
+
+import networkx as nx
+from networkx.utils import not_implemented_for
+
+__all__ = [
+ "from_pandas_adjacency",
+ "to_pandas_adjacency",
+ "from_pandas_edgelist",
+ "to_pandas_edgelist",
+ "from_scipy_sparse_array",
+ "to_scipy_sparse_array",
+ "from_numpy_array",
+ "to_numpy_array",
+]
+
+
@nx._dispatchable(edge_attrs="weight")
def to_pandas_adjacency(
    G,
    nodelist=None,
    dtype=None,
    order=None,
    multigraph_weight=sum,
    weight="weight",
    nonedge=0.0,
):
    """Return the adjacency matrix of ``G`` as a Pandas DataFrame.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the Pandas DataFrame.

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in
        `nodelist`. If `nodelist` is None, then the ordering is produced
        by G.nodes(). When `nodelist` does not contain every node in `G`,
        the matrix is built from the induced subgraph.

    multigraph_weight : {sum, min, max}, optional
        An operator deciding how parallel-edge weights are combined; the
        default sums them.

    weight : string or None, optional
        The edge attribute holding the numerical edge weight. Edges
        missing the attribute contribute the value 1.

    nonedge : float, optional
        Value written for absent edges (default 0.0). Use e.g. ``nan``
        when real edges may legitimately carry weight zero.

    Returns
    -------
    df : Pandas DataFrame
        Graph adjacency matrix. For directed graphs, entry i,j
        corresponds to an edge from i to j. Self-loops appear once on the
        diagonal (not doubled); scale the diagonal afterwards if the
        doubled convention is required.

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0, 1, weight=2)
    0
    >>> G.add_edge(1, 0)
    0
    >>> G.add_edge(2, 2, weight=3)
    0
    >>> G.add_edge(2, 2)
    1
    >>> nx.to_pandas_adjacency(G, nodelist=[0, 1, 2], dtype=int)
       0  1  2
    0  0  2  0
    1  1  0  0
    2  0  0  4
    """
    import pandas as pd

    # All numeric work is delegated to to_numpy_array; this wrapper only
    # attaches node labels as the DataFrame index/columns.
    matrix = to_numpy_array(
        G,
        nodelist=nodelist,
        dtype=dtype,
        order=order,
        multigraph_weight=multigraph_weight,
        weight=weight,
        nonedge=nonedge,
    )
    labels = list(G) if nodelist is None else nodelist
    return pd.DataFrame(data=matrix, index=labels, columns=labels)
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_pandas_adjacency(df, create_using=None):
    r"""Returns a graph from Pandas DataFrame.

    The Pandas DataFrame is interpreted as an adjacency matrix for the graph.

    Parameters
    ----------
    df : Pandas DataFrame
        An adjacency matrix representation of a graph

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    Raises
    ------
    nx.NetworkXError
        If the DataFrame's columns do not match its index.

    Notes
    -----
    For directed graphs, explicitly mention create_using=nx.DiGraph,
    and entry i,j of df corresponds to an edge from i to j.

    If `df` has a single data type for each entry it will be converted to an
    appropriate Python data type.

    If you have node attributes stored in a separate dataframe `df_nodes`,
    you can load those attributes to the graph `G` using the following code:

    ```
    df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]})
    G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows())
    ```

    If `df` has a user-specified compound data type the names
    of the data fields will be used as attribute keys in the resulting
    NetworkX graph.

    See Also
    --------
    to_pandas_adjacency

    Examples
    --------
    Simple integer weights on edges:

    >>> import pandas as pd
    >>> pd.options.display.max_columns = 20
    >>> df = pd.DataFrame([[1, 1], [2, 1]])
    >>> df
       0  1
    0  1  1
    1  2  1
    >>> G = nx.from_pandas_adjacency(df)
    >>> G.name = "Graph from pandas adjacency matrix"
    >>> print(G)
    Graph named 'Graph from pandas adjacency matrix' with 2 nodes and 3 edges
    """
    try:
        # Reorder columns to match the index so positions line up; raises
        # when some index labels are missing from the columns.
        df = df[df.index]
    except Exception as err:
        missing = list(set(df.index).difference(set(df.columns)))
        # Single formatted message: the original passed `msg` as a second
        # positional argument, which made str(err) a tuple repr.
        raise nx.NetworkXError(
            f"Columns must match Indices. {missing} not in columns."
        ) from err

    A = df.values
    G = from_numpy_array(A, create_using=create_using)

    # from_numpy_array labels nodes 0..n-1; restore the DataFrame labels.
    nx.relabel.relabel_nodes(G, dict(enumerate(df.columns)), copy=False)
    return G
+
+
@nx._dispatchable(preserve_edge_attrs=True)
def to_pandas_edgelist(
    G,
    source="source",
    target="target",
    nodelist=None,
    dtype=None,
    edge_key=None,
):
    """Return the edges of ``G`` as a Pandas DataFrame, one row per edge.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the Pandas DataFrame.

    source : str or int, optional
        Column name for the source nodes (for the directed case).

    target : str or int, optional
        Column name for the target nodes (for the directed case).

    nodelist : list, optional
        Use only nodes specified in nodelist.

    dtype : dtype, default None
        Data type to force on the DataFrame; only a single dtype is
        allowed. If None, infer.

    edge_key : str or int or None, optional (default=None)
        Column name for the edge keys (for the multigraph case). If None,
        edge keys are not stored in the DataFrame.

    Returns
    -------
    df : Pandas DataFrame
        Graph edge list. Attributes missing on some edges appear as NaN.

    Raises
    ------
    nx.NetworkXError
        If `source`, `target` or `edge_key` collides with an edge
        attribute name.

    Examples
    --------
    >>> G = nx.Graph(
    ...     [
    ...         ("A", "B", {"cost": 1, "weight": 7}),
    ...         ("C", "E", {"cost": 9, "weight": 10}),
    ...     ]
    ... )
    >>> df = nx.to_pandas_edgelist(G, nodelist=["A", "C"])
    >>> df[["source", "target", "cost", "weight"]]
      source target  cost  weight
    0      A      B     1       7
    1      C      E     9      10
    """
    import pandas as pd

    edges = G.edges(data=True) if nodelist is None else G.edges(nodelist, data=True)
    srcs = [u for u, _, _ in edges]
    dsts = [v for _, v, _ in edges]

    # The requested source/target column names must not shadow attributes.
    attr_names = set().union(*(d.keys() for _, _, d in edges))
    if source in attr_names:
        raise nx.NetworkXError(f"Source name {source!r} is an edge attr name")
    if target in attr_names:
        raise nx.NetworkXError(f"Target name {target!r} is an edge attr name")

    # One column per attribute; edges lacking the attribute get NaN.
    nan = float("nan")
    attr_columns = {
        name: [d.get(name, nan) for _, _, d in edges] for name in attr_names
    }

    if G.is_multigraph() and edge_key is not None:
        if edge_key in attr_names:
            raise nx.NetworkXError(f"Edge key name {edge_key!r} is an edge attr name")
        keys = [k for _, _, k in G.edges(keys=True)]
        frame = {source: srcs, target: dsts, edge_key: keys}
    else:
        frame = {source: srcs, target: dsts}

    frame.update(attr_columns)
    return pd.DataFrame(frame, dtype=dtype)
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_pandas_edgelist(
    df,
    source="source",
    target="target",
    edge_attr=None,
    create_using=None,
    edge_key=None,
):
    """Returns a graph from Pandas DataFrame containing an edge list.

    The Pandas DataFrame should contain at least two columns of node names and
    zero or more columns of edge attributes. Each row will be processed as one
    edge instance.

    Note: This function iterates over DataFrame.values, which is not
    guaranteed to retain the data type across columns in the row. This is only
    a problem if your row is entirely numeric and a mix of ints and floats. In
    that case, all values will be returned as floats. See the
    DataFrame.iterrows documentation for an example.

    Parameters
    ----------
    df : Pandas DataFrame
        An edge list representation of a graph

    source : str or int
        A valid column name (string or integer) for the source nodes (for the
        directed case).

    target : str or int
        A valid column name (string or integer) for the target nodes (for the
        directed case).

    edge_attr : str or int, iterable, True, or None
        A valid column name (str or int) or iterable of column names that are
        used to retrieve items and add them to the graph as edge attributes.
        If `True`, all of the remaining columns will be added.
        If `None`, no edge attributes are added to the graph.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    edge_key : str or None, optional (default=None)
        A valid column name for the edge keys (for a MultiGraph). The values in
        this column are used for the edge keys when adding edges if create_using
        is a multigraph.

    If you have node attributes stored in a separate dataframe `df_nodes`,
    you can load those attributes to the graph `G` using the following code:

    ```
    df_nodes = pd.DataFrame({"node_id": [1, 2, 3], "attribute1": ["A", "B", "C"]})
    G.add_nodes_from((n, dict(d)) for n, d in df_nodes.iterrows())
    ```

    See Also
    --------
    to_pandas_edgelist

    Examples
    --------
    Simple integer weights on edges:

    >>> import pandas as pd
    >>> pd.options.display.max_columns = 20
    >>> import numpy as np
    >>> rng = np.random.RandomState(seed=5)
    >>> ints = rng.randint(1, 11, size=(3, 2))
    >>> a = ["A", "B", "C"]
    >>> b = ["D", "A", "E"]
    >>> df = pd.DataFrame(ints, columns=["weight", "cost"])
    >>> df[0] = a
    >>> df["b"] = b
    >>> df[["weight", "cost", 0, "b"]]
       weight  cost  0  b
    0       4     7  A  D
    1       7     1  B  A
    2      10     9  C  E
    >>> G = nx.from_pandas_edgelist(df, 0, "b", ["weight", "cost"])
    >>> G["E"]["C"]["weight"]
    10
    >>> G["E"]["C"]["cost"]
    9
    >>> edges = pd.DataFrame(
    ...     {
    ...         "source": [0, 1, 2],
    ...         "target": [2, 2, 3],
    ...         "weight": [3, 4, 5],
    ...         "color": ["red", "blue", "blue"],
    ...     }
    ... )
    >>> G = nx.from_pandas_edgelist(edges, edge_attr=True)
    >>> G[0][2]["color"]
    'red'

    Build multigraph with custom keys:

    >>> edges = pd.DataFrame(
    ...     {
    ...         "source": [0, 1, 2, 0],
    ...         "target": [2, 2, 3, 2],
    ...         "my_edge_key": ["A", "B", "C", "D"],
    ...         "weight": [3, 4, 5, 6],
    ...         "color": ["red", "blue", "blue", "blue"],
    ...     }
    ... )
    >>> G = nx.from_pandas_edgelist(
    ...     edges,
    ...     edge_key="my_edge_key",
    ...     edge_attr=["weight", "color"],
    ...     create_using=nx.MultiGraph(),
    ... )
    >>> G[0][2]
    AtlasView({'A': {'weight': 3, 'color': 'red'}, 'D': {'weight': 6, 'color': 'blue'}})


    """
    g = nx.empty_graph(0, create_using)

    # Fast path: no attributes requested, just pair up the two node columns.
    if edge_attr is None:
        g.add_edges_from(zip(df[source], df[target]))
        return g

    reserved_columns = [source, target]

    # Additional columns requested
    attr_col_headings = []
    attribute_data = []
    if edge_attr is True:
        # Every column except the node columns becomes an attribute.
        attr_col_headings = [c for c in df.columns if c not in reserved_columns]
    elif isinstance(edge_attr, list | tuple):
        attr_col_headings = edge_attr
    else:
        # Single column name (str or int).
        attr_col_headings = [edge_attr]
    if len(attr_col_headings) == 0:
        raise nx.NetworkXError(
            f"Invalid edge_attr argument: No columns found with name: {attr_col_headings}"
        )

    try:
        # One tuple of attribute values per row, in attr_col_headings order.
        attribute_data = zip(*[df[col] for col in attr_col_headings])
    except (KeyError, TypeError) as err:
        msg = f"Invalid edge_attr argument: {edge_attr}"
        raise nx.NetworkXError(msg) from err

    if g.is_multigraph():
        # => append the edge keys from the df to the bundled data
        if edge_key is not None:
            try:
                multigraph_edge_keys = df[edge_key]
                attribute_data = zip(attribute_data, multigraph_edge_keys)
            except (KeyError, TypeError) as err:
                msg = f"Invalid edge_key argument: {edge_key}"
                raise nx.NetworkXError(msg) from err

        for s, t, attrs in zip(df[source], df[target], attribute_data):
            if edge_key is not None:
                # attrs was re-zipped above into (attr_tuple, key) pairs.
                attrs, multigraph_edge_key = attrs
                key = g.add_edge(s, t, key=multigraph_edge_key)
            else:
                key = g.add_edge(s, t)

            g[s][t][key].update(zip(attr_col_headings, attrs))
    else:
        # Non-multigraph: repeated (s, t) rows overwrite earlier attributes.
        for s, t, attrs in zip(df[source], df[target], attribute_data):
            g.add_edge(s, t)
            g[s][t].update(zip(attr_col_headings, attrs))

    return g
+
+
@nx._dispatchable(edge_attrs="weight")
def to_scipy_sparse_array(G, nodelist=None, dtype=None, weight="weight", format="csr"):
    """Returns the graph adjacency matrix as a SciPy sparse array.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the sparse matrix.

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in `nodelist`.
        If `nodelist` is None, then the ordering is produced by G.nodes().

    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. If None, then the
        NumPy default is used.

    weight : string or None optional (default='weight')
        The edge attribute that holds the numerical value used for
        the edge weight. If None then all edge weights are 1.

    format : str in {'bsr', 'csr', 'csc', 'coo', 'lil', 'dia', 'dok'}
        The type of the matrix to be returned (default 'csr'). For
        some algorithms different implementations of sparse matrices
        can perform better. See [1]_ for details.

    Returns
    -------
    A : SciPy sparse array
        Graph adjacency matrix.

    Notes
    -----
    For directed graphs, matrix entry i,j corresponds to an edge from i to j.

    The matrix entries are populated using the edge attribute held in
    parameter weight. When an edge does not have that attribute, the
    value of the entry is 1.

    For multiple edges the matrix values are the sums of the edge weights.

    When `nodelist` does not contain every node in `G`, the adjacency matrix
    is built from the subgraph of `G` that is induced by the nodes in
    `nodelist`.

    The convention used for self-loop edges in graphs is to assign the
    diagonal matrix entry value to the weight attribute of the edge
    (or the number 1 if the edge has no weight attribute). If the
    alternate convention of doubling the edge weight is desired the
    resulting SciPy sparse array can be modified as follows:

    >>> G = nx.Graph([(1, 1)])
    >>> A = nx.to_scipy_sparse_array(G)
    >>> print(A.todense())
    [[1]]
    >>> A.setdiag(A.diagonal() * 2)
    >>> print(A.toarray())
    [[2]]

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0, 1, weight=2)
    0
    >>> G.add_edge(1, 0)
    0
    >>> G.add_edge(2, 2, weight=3)
    0
    >>> G.add_edge(2, 2)
    1
    >>> S = nx.to_scipy_sparse_array(G, nodelist=[0, 1, 2])
    >>> print(S.toarray())
    [[0 2 0]
     [1 0 0]
     [0 0 4]]

    References
    ----------
    .. [1] Scipy Dev. References, "Sparse Matrices",
       https://docs.scipy.org/doc/scipy/reference/sparse.html
    """
    import scipy as sp

    if len(G) == 0:
        raise nx.NetworkXError("Graph has no nodes or edges")

    if nodelist is None:
        nodelist = list(G)
        nlen = len(G)
    else:
        nlen = len(nodelist)
        if nlen == 0:
            raise nx.NetworkXError("nodelist has no nodes")
        # Validate nodelist: every entry must exist in G, with no repeats.
        nodeset = set(G.nbunch_iter(nodelist))
        if nlen != len(nodeset):
            for n in nodelist:
                if n not in G:
                    raise nx.NetworkXError(f"Node {n} in nodelist is not in G")
            raise nx.NetworkXError("nodelist contains duplicates.")
        if nlen < len(G):
            G = G.subgraph(nodelist)

    # Map each node to its row/column position in the matrix.
    index = dict(zip(nodelist, range(nlen)))
    coefficients = zip(
        *((index[u], index[v], wt) for u, v, wt in G.edges(data=weight, default=1))
    )
    try:
        # zip(*...) yields three parallel tuples: rows, cols, weights.
        row, col, data = coefficients
    except ValueError:
        # there is no edge in the subgraph
        row, col, data = [], [], []

    if G.is_directed():
        A = sp.sparse.coo_array((data, (row, col)), shape=(nlen, nlen), dtype=dtype)
    else:
        # symmetrize matrix
        # (COO construction sums duplicate (r, c) entries, so listing each
        # edge in both orientations produces the symmetric matrix.)
        d = data + data
        r = row + col
        c = col + row
        # selfloop entries get double counted when symmetrizing
        # so we subtract the data on the diagonal
        selfloops = list(nx.selfloop_edges(G, data=weight, default=1))
        if selfloops:
            diag_index, diag_data = zip(*((index[u], -wt) for u, v, wt in selfloops))
            d += diag_data
            r += diag_index
            c += diag_index
        A = sp.sparse.coo_array((d, (r, c)), shape=(nlen, nlen), dtype=dtype)
    try:
        return A.asformat(format)
    except ValueError as err:
        raise nx.NetworkXError(f"Unknown sparse matrix format: {format}") from err
+
+
def _csr_gen_triples(A):
    """Convert a SciPy sparse array in **Compressed Sparse Row** format to
    an iterable of weighted edge triples ``(row, col, weight)``.

    Parameters
    ----------
    A : scipy sparse array in CSR format

    Returns
    -------
    iterator of triples
        One ``(row, col, weight)`` triple per stored entry, with plain
        Python scalars (via ``.tolist()``).
    """
    import numpy as np

    nrows = A.shape[0]
    indptr, dst_indices, data = A.indptr, A.indices, A.data
    # Row i owns the stored entries indptr[i]:indptr[i+1], so repeating i
    # that many times yields the row index of every stored entry.
    src_indices = np.repeat(np.arange(nrows), np.diff(indptr))
    # Use the already-unpacked `data` rather than re-reading A.data.
    return zip(src_indices.tolist(), dst_indices.tolist(), data.tolist())
+
+
+def _csc_gen_triples(A):
+ """Converts a SciPy sparse array in **Compressed Sparse Column** format to
+ an iterable of weighted edge triples.
+
+ """
+ ncols = A.shape[1]
+ indptr, src_indices, data = A.indptr, A.indices, A.data
+ import numpy as np
+
+ dst_indices = np.repeat(np.arange(ncols), np.diff(indptr))
+ return zip(src_indices.tolist(), dst_indices.tolist(), A.data.tolist())
+
+
+def _coo_gen_triples(A):
+ """Converts a SciPy sparse array in **Coordinate** format to an iterable
+ of weighted edge triples.
+
+ """
+ return zip(A.row.tolist(), A.col.tolist(), A.data.tolist())
+
+
+def _dok_gen_triples(A):
+ """Converts a SciPy sparse array in **Dictionary of Keys** format to an
+ iterable of weighted edge triples.
+
+ """
+ for (r, c), v in A.items():
+ # Use `v.item()` to convert a NumPy scalar to the appropriate Python scalar
+ yield int(r), int(c), v.item()
+
+
def _generate_weighted_edges(A):
    """Returns an iterable over (u, v, w) triples, where u and v are adjacent
    vertices and w is the weight of the edge joining u and v.

    `A` is a SciPy sparse array (in any format).

    """
    # Dispatch on the storage format; every layout without a dedicated
    # handler (including COO itself) is routed through the COO converter.
    handler = {
        "csr": _csr_gen_triples,
        "csc": _csc_gen_triples,
        "dok": _dok_gen_triples,
    }.get(A.format)
    if handler is not None:
        return handler(A)
    return _coo_gen_triples(A.tocoo())
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_scipy_sparse_array(
    A, parallel_edges=False, create_using=None, edge_attribute="weight"
):
    """Creates a new graph from an adjacency matrix given as a SciPy sparse
    array.

    Parameters
    ----------
    A: scipy.sparse array
        An adjacency matrix representation of a graph

    parallel_edges : Boolean
        If this is True, `create_using` is a multigraph, and `A` is an
        integer matrix, then entry *(i, j)* in the matrix is interpreted as the
        number of parallel edges joining vertices *i* and *j* in the graph.
        If it is False, then the entries in the matrix are interpreted as
        the weight of a single edge joining the vertices.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    edge_attribute: string
        Name of edge attribute to store matrix numeric value. The data will
        have the same type as the matrix entry (int, float, (real,imag)).

    Returns
    -------
    G : graph
        A NetworkX graph of the type given by `create_using`, with one node
        per row/column of `A`.

    Notes
    -----
    For directed graphs, explicitly mention create_using=nx.DiGraph,
    and entry i,j of A corresponds to an edge from i to j.

    If `create_using` is :class:`networkx.MultiGraph` or
    :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
    entries of `A` are of type :class:`int`, then this function returns a
    multigraph (constructed from `create_using`) with parallel edges.
    In this case, `edge_attribute` will be ignored.

    If `create_using` indicates an undirected multigraph, then only the edges
    indicated by the upper triangle of the matrix `A` will be added to the
    graph.

    Examples
    --------
    >>> import scipy as sp
    >>> A = sp.sparse.eye(2, 2, 1)
    >>> G = nx.from_scipy_sparse_array(A)

    If `create_using` indicates a multigraph and the matrix has only integer
    entries and `parallel_edges` is False, then the entries will be treated
    as weights for edges joining the nodes (without creating parallel edges):

    >>> A = sp.sparse.csr_array([[1, 1], [1, 2]])
    >>> G = nx.from_scipy_sparse_array(A, create_using=nx.MultiGraph)
    >>> G[1][1]
    AtlasView({0: {'weight': 2}})

    If `create_using` indicates a multigraph and the matrix has only integer
    entries and `parallel_edges` is True, then the entries will be treated
    as the number of parallel edges joining those two vertices:

    >>> A = sp.sparse.csr_array([[1, 1], [1, 2]])
    >>> G = nx.from_scipy_sparse_array(A, parallel_edges=True, create_using=nx.MultiGraph)
    >>> G[1][1]
    AtlasView({0: {'weight': 1}, 1: {'weight': 1}})

    """
    # Fresh graph of the requested type; a graph instance passed in as
    # `create_using` is cleared before being populated.
    G = nx.empty_graph(0, create_using)
    n, m = A.shape
    if n != m:
        raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}")
    # Make sure we get even the isolated nodes of the graph.
    G.add_nodes_from(range(n))
    # Create an iterable over (u, v, w) triples and for each triple, add an
    # edge from u to v with weight w.
    triples = _generate_weighted_edges(A)
    # If the entries in the adjacency matrix are integers, the graph is a
    # multigraph, and parallel_edges is True, then create parallel edges, each
    # with weight 1, for each entry in the adjacency matrix. Otherwise, create
    # one edge for each positive entry in the adjacency matrix and set the
    # weight of that edge to be the entry in the matrix.
    if A.dtype.kind in ("i", "u") and G.is_multigraph() and parallel_edges:
        chain = itertools.chain.from_iterable
        # The following line is equivalent to:
        #
        #     for (u, v) in edges:
        #         for d in range(A[u, v]):
        #             G.add_edge(u, v, weight=1)
        #
        triples = chain(((u, v, 1) for d in range(w)) for (u, v, w) in triples)
    # If we are creating an undirected multigraph, only add the edges from the
    # upper triangle of the matrix. Otherwise, add all the edges. This relies
    # on the fact that the vertices created in the
    # `_generated_weighted_edges()` function are actually the row/column
    # indices for the matrix `A`.
    #
    # Without this check, we run into a problem where each edge is added twice
    # when `G.add_weighted_edges_from()` is invoked below.
    if G.is_multigraph() and not G.is_directed():
        triples = ((u, v, d) for u, v, d in triples if u <= v)
    G.add_weighted_edges_from(triples, weight=edge_attribute)
    return G
+
+
@nx._dispatchable(edge_attrs="weight")  # edge attrs may also be obtained from `dtype`
def to_numpy_array(
    G,
    nodelist=None,
    dtype=None,
    order=None,
    multigraph_weight=sum,
    weight="weight",
    nonedge=0.0,
):
    """Returns the graph adjacency matrix as a NumPy array.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy array.

    nodelist : list, optional
        The rows and columns are ordered according to the nodes in `nodelist`.
        If `nodelist` is ``None``, then the ordering is produced by ``G.nodes()``.

    dtype : NumPy data type, optional
        A NumPy data type used to initialize the array. If None, then the NumPy
        default is used. The dtype can be structured if `weight=None`, in which
        case the dtype field names are used to look up edge attributes. The
        result is a structured array where each named field in the dtype
        corresponds to the adjacency for that edge attribute. See examples for
        details.

    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. If None, then the NumPy default
        is used.

    multigraph_weight : callable, optional
        A function that determines how weights in multigraphs are handled.
        The function should accept a sequence of weights and return a single
        value. The default is to sum the weights of the multiple edges.

    weight : string or None optional (default = 'weight')
        The edge attribute that holds the numerical value used for
        the edge weight. If an edge does not have that attribute, then the
        value 1 is used instead. `weight` must be ``None`` if a structured
        dtype is used.

    nonedge : array_like (default = 0.0)
        The value used to represent non-edges in the adjacency matrix.
        The array values corresponding to nonedges are typically set to zero.
        However, this could be undesirable if there are array values
        corresponding to actual edges that also have the value zero. If so,
        one might prefer nonedges to have some other value, such as ``nan``.

    Returns
    -------
    A : NumPy ndarray
        Graph adjacency matrix

    Raises
    ------
    NetworkXError
        If `dtype` is a structured dtype and `G` is a multigraph
    ValueError
        If `dtype` is a structured dtype and `weight` is not `None`

    See Also
    --------
    from_numpy_array

    Notes
    -----
    For directed graphs, entry ``i, j`` corresponds to an edge from ``i`` to ``j``.

    Entries in the adjacency matrix are given by the `weight` edge attribute.
    When an edge does not have a weight attribute, the value of the entry is
    set to the number 1. For multiple (parallel) edges, the values of the
    entries are determined by the `multigraph_weight` parameter. The default is
    to sum the weight attributes for each of the parallel edges.

    When `nodelist` does not contain every node in `G`, the adjacency matrix is
    built from the subgraph of `G` that is induced by the nodes in `nodelist`.

    The convention used for self-loop edges in graphs is to assign the
    diagonal array entry value to the weight attribute of the edge
    (or the number 1 if the edge has no weight attribute). If the
    alternate convention of doubling the edge weight is desired the
    resulting NumPy array can be modified as follows:

    >>> import numpy as np
    >>> G = nx.Graph([(1, 1)])
    >>> A = nx.to_numpy_array(G)
    >>> A
    array([[1.]])
    >>> A[np.diag_indices_from(A)] *= 2
    >>> A
    array([[2.]])

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0, 1, weight=2)
    0
    >>> G.add_edge(1, 0)
    0
    >>> G.add_edge(2, 2, weight=3)
    0
    >>> G.add_edge(2, 2)
    1
    >>> nx.to_numpy_array(G, nodelist=[0, 1, 2])
    array([[0., 2., 0.],
           [1., 0., 0.],
           [0., 0., 4.]])

    When `nodelist` argument is used, nodes of `G` which do not appear in the `nodelist`
    and their edges are not included in the adjacency matrix. Here is an example:

    >>> G = nx.Graph()
    >>> G.add_edge(3, 1)
    >>> G.add_edge(2, 0)
    >>> G.add_edge(2, 1)
    >>> G.add_edge(3, 0)
    >>> nx.to_numpy_array(G, nodelist=[1, 2, 3])
    array([[0., 1., 1.],
           [1., 0., 0.],
           [1., 0., 0.]])

    This function can also be used to create adjacency matrices for multiple
    edge attributes with structured dtypes:

    >>> G = nx.Graph()
    >>> G.add_edge(0, 1, weight=10)
    >>> G.add_edge(1, 2, cost=5)
    >>> G.add_edge(2, 3, weight=3, cost=-4.0)
    >>> dtype = np.dtype([("weight", int), ("cost", float)])
    >>> A = nx.to_numpy_array(G, dtype=dtype, weight=None)
    >>> A["weight"]
    array([[ 0, 10,  0,  0],
           [10,  0,  1,  0],
           [ 0,  1,  0,  3],
           [ 0,  0,  3,  0]])
    >>> A["cost"]
    array([[ 0.,  1.,  0.,  0.],
           [ 1.,  0.,  5.,  0.],
           [ 0.,  5.,  0., -4.],
           [ 0.,  0., -4.,  0.]])

    As stated above, the argument "nonedge" is useful especially when there are
    actually edges with weight 0 in the graph. Setting a nonedge value different than 0,
    makes it much clearer to differentiate such 0-weighted edges and actual nonedge values.

    >>> G = nx.Graph()
    >>> G.add_edge(3, 1, weight=2)
    >>> G.add_edge(2, 0, weight=0)
    >>> G.add_edge(2, 1, weight=0)
    >>> G.add_edge(3, 0, weight=1)
    >>> nx.to_numpy_array(G, nonedge=-1.0)
    array([[-1.,  2., -1.,  1.],
           [ 2., -1.,  0., -1.],
           [-1.,  0., -1.,  0.],
           [ 1., -1.,  0., -1.]])
    """
    import numpy as np

    if nodelist is None:
        nodelist = list(G)
    nlen = len(nodelist)

    # Input validation
    nodeset = set(nodelist)
    if nodeset - set(G):
        raise nx.NetworkXError(f"Nodes {nodeset - set(G)} in nodelist is not in G")
    if len(nodeset) < nlen:
        raise nx.NetworkXError("nodelist contains duplicates.")

    A = np.full((nlen, nlen), fill_value=nonedge, dtype=dtype, order=order)

    # Corner cases: empty nodelist or graph without any edges
    if nlen == 0 or G.number_of_edges() == 0:
        return A

    # If dtype is structured and weight is None, use dtype field names as
    # edge attributes
    edge_attrs = None  # Only single edge attribute by default
    if A.dtype.names:
        if weight is None:
            edge_attrs = dtype.names
        else:
            raise ValueError(
                "Specifying `weight` not supported for structured dtypes.\n"
                "To create adjacency matrices from structured dtypes, use `weight=None`."
            )

    # Map nodes to row/col in matrix
    idx = dict(zip(nodelist, range(nlen)))
    if len(nodelist) < len(G):
        G = G.subgraph(nodelist).copy()

    # Collect all edge weights and reduce with `multigraph_weights`
    if G.is_multigraph():
        if edge_attrs:
            raise nx.NetworkXError("Structured arrays are not supported for MultiGraphs")
        d = defaultdict(list)
        for u, v, wt in G.edges(data=weight, default=1.0):
            d[(idx[u], idx[v])].append(wt)
        i, j = np.array(list(d.keys())).T  # indices
        wts = [multigraph_weight(ws) for ws in d.values()]  # reduced weights
    else:
        # Non-multigraph: collect edges directly. NOTE: this branch (and the
        # two loops below) must stay inside the `else`; the multigraph branch
        # above already produces final `i`, `j`, `wts` as arrays/lists that
        # must not be appended to again.
        i, j, wts = [], [], []

        # Special branch: multi-attr adjacency from structured dtypes
        if edge_attrs:
            # Extract edges with all data
            for u, v, data in G.edges(data=True):
                i.append(idx[u])
                j.append(idx[v])
                wts.append(data)
            # Map each attribute to the appropriate named field in the
            # structured dtype
            for attr in edge_attrs:
                attr_data = [wt.get(attr, 1.0) for wt in wts]
                A[attr][i, j] = attr_data
                if not G.is_directed():
                    A[attr][j, i] = attr_data
            return A

        for u, v, wt in G.edges(data=weight, default=1.0):
            i.append(idx[u])
            j.append(idx[v])
            wts.append(wt)

    # Set array values with advanced indexing
    A[i, j] = wts
    if not G.is_directed():
        A[j, i] = wts

    return A
+
+
@nx._dispatchable(graphs=None, returns_graph=True)
def from_numpy_array(A, parallel_edges=False, create_using=None, edge_attr="weight"):
    """Returns a graph from a 2D NumPy array.

    The 2D NumPy array is interpreted as an adjacency matrix for the graph.

    Parameters
    ----------
    A : a 2D numpy.ndarray
        An adjacency matrix representation of a graph

    parallel_edges : Boolean
        If this is True, `create_using` is a multigraph, and `A` is an
        integer array, then entry *(i, j)* in the array is interpreted as the
        number of parallel edges joining vertices *i* and *j* in the graph.
        If it is False, then the entries in the array are interpreted as
        the weight of a single edge joining the vertices.

    create_using : NetworkX graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    edge_attr : String, optional (default="weight")
        The attribute to which the array values are assigned on each edge. If
        it is None, edge attributes will not be assigned.

    Notes
    -----
    For directed graphs, explicitly mention create_using=nx.DiGraph,
    and entry i,j of A corresponds to an edge from i to j.

    If `create_using` is :class:`networkx.MultiGraph` or
    :class:`networkx.MultiDiGraph`, `parallel_edges` is True, and the
    entries of `A` are of type :class:`int`, then this function returns a
    multigraph (of the same type as `create_using`) with parallel edges.

    If `create_using` indicates an undirected multigraph, then only the edges
    indicated by the upper triangle of the array `A` will be added to the
    graph.

    If `edge_attr` is Falsy (False or None), edge attributes will not be
    assigned, and the array data will be treated like a binary mask of
    edge presence or absence. Otherwise, the attributes will be assigned
    as follows:

    If the NumPy array has a single data type for each array entry it
    will be converted to an appropriate Python data type.

    If the NumPy array has a user-specified compound data type the names
    of the data fields will be used as attribute keys in the resulting
    NetworkX graph.

    See Also
    --------
    to_numpy_array

    Examples
    --------
    Simple integer weights on edges:

    >>> import numpy as np
    >>> A = np.array([[1, 1], [2, 1]])
    >>> G = nx.from_numpy_array(A)
    >>> G.edges(data=True)
    EdgeDataView([(0, 0, {'weight': 1}), (0, 1, {'weight': 2}), (1, 1, {'weight': 1})])

    If `create_using` indicates a multigraph and the array has only integer
    entries and `parallel_edges` is False, then the entries will be treated
    as weights for edges joining the nodes (without creating parallel edges):

    >>> A = np.array([[1, 1], [1, 2]])
    >>> G = nx.from_numpy_array(A, create_using=nx.MultiGraph)
    >>> G[1][1]
    AtlasView({0: {'weight': 2}})

    If `create_using` indicates a multigraph and the array has only integer
    entries and `parallel_edges` is True, then the entries will be treated
    as the number of parallel edges joining those two vertices:

    >>> A = np.array([[1, 1], [1, 2]])
    >>> temp = nx.MultiGraph()
    >>> G = nx.from_numpy_array(A, parallel_edges=True, create_using=temp)
    >>> G[1][1]
    AtlasView({0: {'weight': 1}, 1: {'weight': 1}})

    User defined compound data type on edges:

    >>> dt = [("weight", float), ("cost", int)]
    >>> A = np.array([[(1.0, 2)]], dtype=dt)
    >>> G = nx.from_numpy_array(A)
    >>> G.edges()
    EdgeView([(0, 0)])
    >>> G[0][0]["cost"]
    2
    >>> G[0][0]["weight"]
    1.0

    """
    # Mapping from NumPy dtype "kind" codes to the Python scalar type used for
    # edge attribute values. "V" (void) marks structured/compound dtypes.
    kind_to_python_type = {
        "f": float,
        "i": int,
        "u": int,
        "b": bool,
        "c": complex,
        "S": str,
        "U": str,
        "V": "void",
    }
    G = nx.empty_graph(0, create_using)
    if A.ndim != 2:
        raise nx.NetworkXError(f"Input array must be 2D, not {A.ndim}")
    n, m = A.shape
    if n != m:
        raise nx.NetworkXError(f"Adjacency matrix not square: nx,ny={A.shape}")
    dt = A.dtype
    try:
        python_type = kind_to_python_type[dt.kind]
    except KeyError as err:
        # Only the kind lookup can fail here; keep the except narrow so real
        # bugs are not masked as "unknown data type".
        raise TypeError(f"Unknown numpy data type: {dt}") from err

    # Make sure we get even the isolated nodes of the graph.
    G.add_nodes_from(range(n))
    # Get a list of all the entries in the array with nonzero entries. These
    # coordinates become edges in the graph. (convert to int from np.int64)
    edges = ((int(e[0]), int(e[1])) for e in zip(*A.nonzero()))
    # handle numpy constructed data type
    if python_type == "void":
        # Sort the fields by their offset, then by dtype, then by name.
        fields = sorted(
            (offset, dtype, name) for name, (dtype, offset) in A.dtype.fields.items()
        )
        triples = (
            (
                u,
                v,
                {}
                if edge_attr in [False, None]
                else {
                    name: kind_to_python_type[dtype.kind](val)
                    for (_, dtype, name), val in zip(fields, A[u, v])
                },
            )
            for u, v in edges
        )
    # If the entries in the adjacency matrix are integers, the graph is a
    # multigraph, and parallel_edges is True, then create parallel edges, each
    # with weight 1, for each entry in the adjacency matrix. Otherwise, create
    # one edge for each positive entry in the adjacency matrix and set the
    # weight of that edge to be the entry in the matrix.
    elif python_type is int and G.is_multigraph() and parallel_edges:
        chain = itertools.chain.from_iterable
        # The following line is equivalent to:
        #
        #     for (u, v) in edges:
        #         for d in range(A[u, v]):
        #             G.add_edge(u, v, weight=1)
        #
        if edge_attr in [False, None]:
            triples = chain(((u, v, {}) for d in range(A[u, v])) for (u, v) in edges)
        else:
            triples = chain(
                ((u, v, {edge_attr: 1}) for d in range(A[u, v])) for (u, v) in edges
            )
    else:  # basic data type
        if edge_attr in [False, None]:
            triples = ((u, v, {}) for u, v in edges)
        else:
            triples = ((u, v, {edge_attr: python_type(A[u, v])}) for u, v in edges)
    # If we are creating an undirected multigraph, only add the edges from the
    # upper triangle of the matrix. Otherwise, add all the edges. This relies
    # on the fact that the vertices created in the
    # `_generated_weighted_edges()` function are actually the row/column
    # indices for the matrix `A`.
    #
    # Without this check, we run into a problem where each edge is added twice
    # when `G.add_edges_from()` is invoked below.
    if G.is_multigraph() and not G.is_directed():
        triples = ((u, v, d) for u, v, d in triples if u <= v)
    G.add_edges_from(triples)
    return G
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/exception.py b/pythonProject/.venv/Lib/site-packages/networkx/exception.py
new file mode 100644
index 0000000000000000000000000000000000000000..96694cc32dcfbb8307cf99b0fa939e2fa0f5a46d
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/exception.py
@@ -0,0 +1,125 @@
+"""
+**********
+Exceptions
+**********
+
+Base exceptions and errors for NetworkX.
+"""
+
# Public exception names exported via ``from networkx.exception import *``.
__all__ = [
    "HasACycle",
    "NodeNotFound",
    "PowerIterationFailedConvergence",
    "ExceededMaxIterations",
    "AmbiguousSolution",
    "NetworkXAlgorithmError",
    "NetworkXException",
    "NetworkXError",
    "NetworkXNoCycle",
    "NetworkXNoPath",
    "NetworkXNotImplemented",
    "NetworkXPointlessConcept",
    "NetworkXUnbounded",
    "NetworkXUnfeasible",
]
+
+
class NetworkXException(Exception):
    """Root of the NetworkX exception hierarchy.

    Every error raised by NetworkX derives from this class, so callers can
    catch it to handle any NetworkX-specific failure.
    """
+
+
class NetworkXError(NetworkXException):
    """Exception for a serious error in NetworkX.

    The general-purpose error raised for invalid input, e.g. a ``nodelist``
    argument containing duplicates or nodes missing from the graph.
    """
+
+
class NetworkXPointlessConcept(NetworkXException):
    """Raised when a null graph (a graph with no nodes) is provided as input
    to an algorithm that cannot use it.

    The null graph is sometimes considered a pointless concept [1]_,
    thus the name of the exception.

    References
    ----------
    .. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless
       Concept?" In Graphs and Combinatorics Conference, George
       Washington University. New York: Springer-Verlag, 1973.

    """
+
+
class NetworkXAlgorithmError(NetworkXException):
    """Exception for unexpected termination of algorithms.

    Base class for the more specific ``NetworkXUnfeasible`` and
    ``NetworkXUnbounded`` errors.
    """
+
+
class NetworkXUnfeasible(NetworkXAlgorithmError):
    """Exception raised by algorithms trying to solve a problem
    instance that has no feasible solution."""
+
+
class NetworkXNoPath(NetworkXUnfeasible):
    """Exception for algorithms that should return a path when running
    on graphs where such a path does not exist (e.g. shortest-path queries
    between disconnected nodes)."""
+
+
class NetworkXNoCycle(NetworkXUnfeasible):
    """Exception for algorithms that should return a cycle when running
    on graphs where such a cycle does not exist."""
+
+
class HasACycle(NetworkXException):
    """Raised if a graph has a cycle when an algorithm expects that it
    will have no cycles (the converse of ``NetworkXNoCycle``).

    """
+
+
class NetworkXUnbounded(NetworkXAlgorithmError):
    """Exception raised by algorithms trying to solve a maximization
    or a minimization problem instance that is unbounded."""
+
+
class NetworkXNotImplemented(NetworkXException):
    """Exception raised by algorithms not implemented for a type of graph."""
+
+
class NodeNotFound(NetworkXException):
    """Exception raised if a requested node is not present in the graph."""
+
+
class AmbiguousSolution(NetworkXException):
    """Raised if more than one valid solution exists for an intermediary step
    of an algorithm.

    In the face of ambiguity, refuse the temptation to guess.
    This may occur, for example, when trying to determine the
    bipartite node sets in a disconnected bipartite graph when
    computing bipartite matchings.

    """
+
+
class ExceededMaxIterations(NetworkXException):
    """Raised if a loop iterates too many times without breaking.

    This may occur, for example, in an algorithm that computes
    progressively better approximations to a value but exceeds an
    iteration bound specified by the user.

    """
+
+
class PowerIterationFailedConvergence(ExceededMaxIterations):
    """Raised when the power iteration method fails to converge within a
    specified iteration limit.

    `num_iterations` is the number of iterations that have been
    completed when this exception was raised; it is also stored on the
    instance as ``self.num_iterations``.

    """

    def __init__(self, num_iterations, *args, **kw):
        # Keep the iteration count available to callers that catch the error.
        self.num_iterations = num_iterations
        msg = f"power iteration failed to converge within {num_iterations} iterations"
        # Bug fix: the original called the *bound* ``super().__init__`` with an
        # explicit ``self``, so ``args`` began with the exception object itself
        # instead of the message, garbling ``str(exc)``.
        super().__init__(msg, *args, **kw)
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/lazy_imports.py b/pythonProject/.venv/Lib/site-packages/networkx/lazy_imports.py
new file mode 100644
index 0000000000000000000000000000000000000000..396404ba38f5885bfcc65af36d7b4655e94ccc27
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/lazy_imports.py
@@ -0,0 +1,188 @@
+import importlib
+import importlib.util
+import inspect
+import os
+import sys
+import types
+
# Names exported via ``from networkx.lazy_imports import *``; note that
# ``_lazy_import`` is underscore-prefixed yet deliberately listed here.
__all__ = ["attach", "_lazy_import"]
+
+
def attach(module_name, submodules=None, submod_attrs=None):
    """Attach lazily loaded submodules, and functions or other attributes.

    Typically, modules import submodules and attributes as follows::

        import mysubmodule
        import anothersubmodule

        from .foo import someattr

    The idea of this function is to replace the `__init__.py`
    module's `__getattr__`, `__dir__`, and `__all__` attributes such that
    all imports work exactly the way they normally would, except that the
    actual import is delayed until the resulting module object is first used.

    The typical way to call this function, replacing the above imports, is::

        __getattr__, __lazy_dir__, __all__ = lazy.attach(
            __name__, ["mysubmodule", "anothersubmodule"], {"foo": "someattr"}
        )

    This functionality requires Python 3.7 or higher.

    Parameters
    ----------
    module_name : str
        Typically use __name__.
    submodules : set
        List of submodules to lazily import.
    submod_attrs : dict
        Dictionary of submodule -> list of attributes / functions.
        These attributes are imported as they are used.

    Returns
    -------
    __getattr__, __dir__, __all__

    """
    submodules = set() if submodules is None else set(submodules)

    # Invert {submodule: [attr, ...]} into {attr: submodule} for O(1) lookup.
    attr_to_modules = {}
    for mod, attrs in (submod_attrs or {}).items():
        for attr in attrs:
            attr_to_modules[attr] = mod

    __all__ = list(submodules | attr_to_modules.keys())

    def __getattr__(name):
        if name in submodules:
            return importlib.import_module(f"{module_name}.{name}")
        if name in attr_to_modules:
            owner = importlib.import_module(f"{module_name}.{attr_to_modules[name]}")
            return getattr(owner, name)
        raise AttributeError(f"No {module_name} attribute {name}")

    def __dir__():
        return __all__

    # Escape hatch: resolve everything immediately when EAGER_IMPORT is set.
    if os.environ.get("EAGER_IMPORT", ""):
        for attr in submodules | set(attr_to_modules):
            __getattr__(attr)

    return __getattr__, __dir__, list(__all__)
+
+
class DelayedImportErrorModule(types.ModuleType):
    """Stand-in module returned when a lazily requested module cannot be found.

    Instead of failing inside ``_lazy_import``, the ``ModuleNotFoundError`` is
    deferred until the first attribute access, and its message points back at
    the call site captured in ``frame_data``.
    """

    def __init__(self, frame_data, *args, **kwargs):
        # Name-mangled to ``_DelayedImportErrorModule__frame_data``; holds the
        # caller's filename/lineno/function/code_context for the error message.
        self.__frame_data = frame_data
        super().__init__(*args, **kwargs)

    def __getattr__(self, x):
        if x in ("__class__", "__file__", "__frame_data"):
            # NOTE(review): ``types.ModuleType`` defines no ``__getattr__``, so
            # this super() call raises AttributeError for these names rather
            # than returning a value — presumably intended to let ordinary
            # introspection fail quietly instead of raising the error below.
            super().__getattr__(x)
        else:
            fd = self.__frame_data
            raise ModuleNotFoundError(
                f"No module named '{fd['spec']}'\n\n"
                "This error is lazily reported, having originally occurred in\n"
                f'  File {fd["filename"]}, line {fd["lineno"]}, in {fd["function"]}\n\n'
                f'----> {"".join(fd["code_context"] or "").strip()}'
            )
+
+
+def _lazy_import(fullname):
+ """Return a lazily imported proxy for a module or library.
+
+ Warning
+ -------
+ Importing using this function can currently cause trouble
+ when the user tries to import from a subpackage of a module before
+ the package is fully imported. In particular, this idiom may not work:
+
+ np = lazy_import("numpy")
+ from numpy.lib import recfunctions
+
+ This is due to a difference in the way Python's LazyLoader handles
+ subpackage imports compared to the normal import process. Hopefully
+ we will get Python's LazyLoader to fix this, or find a workaround.
+ In the meantime, this is a potential problem.
+
+ The workaround is to import numpy before importing from the subpackage.
+
+ Notes
+ -----
+ We often see the following pattern::
+
+ def myfunc():
+ import scipy as sp
+ sp.argmin(...)
+ ....
+
+ This is to prevent a library, in this case `scipy`, from being
+ imported at function definition time, since that can be slow.
+
+ This function provides a proxy module that, upon access, imports
+ the actual module. So the idiom equivalent to the above example is::
+
+ sp = lazy.load("scipy")
+
+ def myfunc():
+ sp.argmin(...)
+ ....
+
+ The initial import time is fast because the actual import is delayed
+ until the first attribute is requested. The overall import time may
+ decrease as well for users that don't make use of large portions
+ of the library.
+
+ Parameters
+ ----------
+ fullname : str
+ The full name of the package or subpackage to import. For example::
+
+ sp = lazy.load("scipy") # import scipy as sp
+ spla = lazy.load("scipy.linalg") # import scipy.linalg as spla
+
+ Returns
+ -------
+ pm : importlib.util._LazyModule
+ Proxy module. Can be used like any regularly imported module.
+ Actual loading of the module occurs upon first attribute request.
+
+ """
+ try:
+ return sys.modules[fullname]
+ except:
+ pass
+
+ # Not previously loaded -- look it up
+ spec = importlib.util.find_spec(fullname)
+
+ if spec is None:
+ try:
+ parent = inspect.stack()[1]
+ frame_data = {
+ "spec": fullname,
+ "filename": parent.filename,
+ "lineno": parent.lineno,
+ "function": parent.function,
+ "code_context": parent.code_context,
+ }
+ return DelayedImportErrorModule(frame_data, "DelayedImportErrorModule")
+ finally:
+ del parent
+
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[fullname] = module
+
+ loader = importlib.util.LazyLoader(spec.loader)
+ loader.exec_module(module)
+
+ return module
diff --git a/pythonProject/.venv/Lib/site-packages/networkx/relabel.py b/pythonProject/.venv/Lib/site-packages/networkx/relabel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b870f726ef42e0bcaa7bf724e2ae6ab4145f288
--- /dev/null
+++ b/pythonProject/.venv/Lib/site-packages/networkx/relabel.py
@@ -0,0 +1,285 @@
+import networkx as nx
+
+__all__ = ["convert_node_labels_to_integers", "relabel_nodes"]
+
+
+@nx._dispatchable(
+ preserve_all_attrs=True, mutates_input={"not copy": 2}, returns_graph=True
+)
+def relabel_nodes(G, mapping, copy=True):
+ """Relabel the nodes of the graph G according to a given mapping.
+
+ The original node ordering may not be preserved if `copy` is `False` and the
+ mapping includes overlap between old and new labels.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ mapping : dictionary
+ A dictionary with the old labels as keys and new labels as values.
+ A partial mapping is allowed. Mapping 2 nodes to a single node is allowed.
+ Any non-node keys in the mapping are ignored.
+
+ copy : bool (optional, default=True)
+ If True return a copy, or if False relabel the nodes in place.
+
+ Examples
+ --------
+ To create a new graph with nodes relabeled according to a given
+ dictionary:
+
+ >>> G = nx.path_graph(3)
+ >>> sorted(G)
+ [0, 1, 2]
+ >>> mapping = {0: "a", 1: "b", 2: "c"}
+ >>> H = nx.relabel_nodes(G, mapping)
+ >>> sorted(H)
+ ['a', 'b', 'c']
+
+ Nodes can be relabeled with any hashable object, including numbers
+ and strings:
+
+ >>> import string
+ >>> G = nx.path_graph(26) # nodes are integers 0 through 25
+ >>> sorted(G)[:3]
+ [0, 1, 2]
+ >>> mapping = dict(zip(G, string.ascii_lowercase))
+ >>> G = nx.relabel_nodes(G, mapping) # nodes are characters a through z
+ >>> sorted(G)[:3]
+ ['a', 'b', 'c']
+ >>> mapping = dict(zip(G, range(1, 27)))
+ >>> G = nx.relabel_nodes(G, mapping) # nodes are integers 1 through 26
+ >>> sorted(G)[:3]
+ [1, 2, 3]
+
+ To perform a partial in-place relabeling, provide a dictionary
+ mapping only a subset of the nodes, and set the `copy` keyword
+ argument to False:
+
+ >>> G = nx.path_graph(3) # nodes 0-1-2
+ >>> mapping = {0: "a", 1: "b"} # 0->'a' and 1->'b'
+ >>> G = nx.relabel_nodes(G, mapping, copy=False)
+ >>> sorted(G, key=str)
+ [2, 'a', 'b']
+
+ A mapping can also be given as a function:
+
+ >>> G = nx.path_graph(3)
+ >>> H = nx.relabel_nodes(G, lambda x: x**2)
+ >>> list(H)
+ [0, 1, 4]
+
+ In a multigraph, relabeling two or more nodes to the same new node
+ will retain all edges, but may change the edge keys in the process:
+
+ >>> G = nx.MultiGraph()
+ >>> G.add_edge(0, 1, value="a") # returns the key for this edge
+ 0
+ >>> G.add_edge(0, 2, value="b")
+ 0
+ >>> G.add_edge(0, 3, value="c")
+ 0
+ >>> mapping = {1: 4, 2: 4, 3: 4}
+ >>> H = nx.relabel_nodes(G, mapping, copy=True)
+ >>> print(H[0])
+ {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}}
+
+ This works for in-place relabeling too:
+
+ >>> G = nx.relabel_nodes(G, mapping, copy=False)
+ >>> print(G[0])
+ {4: {0: {'value': 'a'}, 1: {'value': 'b'}, 2: {'value': 'c'}}}
+
+ Notes
+ -----
+ Only the nodes specified in the mapping will be relabeled.
+ Any non-node keys in the mapping are ignored.
+
+ The keyword setting copy=False modifies the graph in place.
+ Relabel_nodes avoids naming collisions by building a
+ directed graph from ``mapping`` which specifies the order of
+ relabelings. Naming collisions, such as a->b, b->c, are ordered
+ such that "b" gets renamed to "c" before "a" gets renamed "b".
+ In cases of circular mappings (e.g. a->b, b->a), modifying the
+ graph is not possible in-place and an exception is raised.
+ In that case, use copy=True.
+
+ If a relabel operation on a multigraph would cause two or more
+ edges to have the same source, target and key, the second edge must
+ be assigned a new key to retain all edges. The new key is set
+ to the lowest non-negative integer not already used as a key
+ for edges between these two nodes. Note that this means non-numeric
+ keys may be replaced by numeric keys.
+
+ See Also
+ --------
+ convert_node_labels_to_integers
+ """
+ # you can pass any callable e.g. f(old_label) -> new_label or
+ # e.g. str(old_label) -> new_label, but we'll just make a dictionary here regardless
+ m = {n: mapping(n) for n in G} if callable(mapping) else mapping
+
+ if copy:
+ return _relabel_copy(G, m)
+ else:
+ return _relabel_inplace(G, m)
+
+
+def _relabel_inplace(G, mapping):
+    """Relabel nodes of G in place per ``mapping`` (old -> new) and return G.
+
+    When the old and new label sets overlap, renames are applied in an
+    order derived from a topological sort of the mapping graph; a circular
+    mapping raises ``nx.NetworkXUnfeasible``. Multigraph edge keys may be
+    rewritten to avoid collisions (see ``relabel_nodes`` Notes).
+    """
+    if len(mapping.keys() & mapping.values()) > 0:
+        # labels sets overlap
+        # can we topological sort and still do the relabeling?
+        # Build old->new as edges; self-loops (identity renames) are harmless
+        # and are dropped so they don't break the topological sort.
+        D = nx.DiGraph(list(mapping.items()))
+        D.remove_edges_from(nx.selfloop_edges(D))
+        try:
+            # Reversed topological order renames "downstream" labels first,
+            # so a->b, b->c applies b->c before a->b.
+            nodes = reversed(list(nx.topological_sort(D)))
+        except nx.NetworkXUnfeasible as err:
+            raise nx.NetworkXUnfeasible(
+                "The node label sets are overlapping and no ordering can "
+                "resolve the mapping. Use copy=True."
+            ) from err
+    else:
+        # non-overlapping label sets, sort them in the order of G nodes
+        nodes = [n for n in G if n in mapping]
+
+    multigraph = G.is_multigraph()
+    directed = G.is_directed()
+
+    for old in nodes:
+        # Test that old is in both mapping and G, otherwise ignore.
+        try:
+            new = mapping[old]
+            # Carry the old node's attribute dict over to the new label.
+            G.add_node(new, **G.nodes[old])
+        except KeyError:
+            continue
+        if new == old:
+            continue
+        if multigraph:
+            # Rewrite every incident edge so endpoints named `old` become `new`.
+            new_edges = [
+                (new, new if old == target else target, key, data)
+                for (_, target, key, data) in G.edges(old, data=True, keys=True)
+            ]
+            if directed:
+                # For directed multigraphs, in-edges must be rewritten too.
+                new_edges += [
+                    (new if old == source else source, new, key, data)
+                    for (source, _, key, data) in G.in_edges(old, data=True, keys=True)
+                ]
+            # Ensure new edges won't overwrite existing ones
+            seen = set()
+            for i, (source, target, key, data) in enumerate(new_edges):
+                if target in G[source] and key in G[source][target]:
+                    # Non-numeric keys restart at 0; numeric keys count up from
+                    # their current value until a free key is found.
+                    new_key = 0 if not isinstance(key, int | float) else key
+                    while new_key in G[source][target] or (target, new_key) in seen:
+                        new_key += 1
+                    new_edges[i] = (source, target, new_key, data)
+                    seen.add((target, new_key))
+        else:
+            new_edges = [
+                (new, new if old == target else target, data)
+                for (_, target, data) in G.edges(old, data=True)
+            ]
+            if directed:
+                new_edges += [
+                    (new if old == source else source, new, data)
+                    for (source, _, data) in G.in_edges(old, data=True)
+                ]
+        # Removing `old` drops its incident edges; re-add them under `new`.
+        G.remove_node(old)
+        G.add_edges_from(new_edges)
+    return G
+
+
+def _relabel_copy(G, mapping):
+    """Return a relabeled copy of G; nodes missing from ``mapping`` keep
+    their labels. Node, edge and graph attribute dicts are shallow-copied.
+    """
+    H = G.__class__()
+    H.add_nodes_from(mapping.get(n, n) for n in G)
+    # Replace the (empty) attribute dicts created above with shallow copies
+    # of the originals, preserving G's node iteration order.
+    H._node.update((mapping.get(n, n), d.copy()) for n, d in G.nodes.items())
+    if G.is_multigraph():
+        new_edges = [
+            (mapping.get(n1, n1), mapping.get(n2, n2), k, d.copy())
+            for (n1, n2, k, d) in G.edges(keys=True, data=True)
+        ]
+
+        # check for conflicting edge-keys
+        undirected = not G.is_directed()
+        seen_edges = set()
+        for i, (source, target, key, data) in enumerate(new_edges):
+            # Collisions only arise when distinct old nodes were merged into
+            # one new node; bump the key until the triple is unique.
+            while (source, target, key) in seen_edges:
+                if not isinstance(key, int | float):
+                    key = 0
+                key += 1
+            seen_edges.add((source, target, key))
+            if undirected:
+                # An undirected edge occupies both orientations of the triple.
+                seen_edges.add((target, source, key))
+            new_edges[i] = (source, target, key, data)
+
+        H.add_edges_from(new_edges)
+    else:
+        H.add_edges_from(
+            (mapping.get(n1, n1), mapping.get(n2, n2), d.copy())
+            for (n1, n2, d) in G.edges(data=True)
+        )
+    H.graph.update(G.graph)
+    return H
+
+
+@nx._dispatchable(preserve_all_attrs=True, returns_graph=True)
+def convert_node_labels_to_integers(
+ G, first_label=0, ordering="default", label_attribute=None
+):
+ """Returns a copy of the graph G with the nodes relabeled using
+ consecutive integers.
+
+ Parameters
+ ----------
+ G : graph
+ A NetworkX graph
+
+ first_label : int, optional (default=0)
+ An integer specifying the starting offset in numbering nodes.
+ The new integer labels are numbered first_label, ..., n-1+first_label.
+
+ ordering : string
+ "default" : inherit node ordering from G.nodes()
+ "sorted" : inherit node ordering from sorted(G.nodes())
+ "increasing degree" : nodes are sorted by increasing degree
+ "decreasing degree" : nodes are sorted by decreasing degree
+
+ label_attribute : string, optional (default=None)
+ Name of node attribute to store old label. If None no attribute
+ is created.
+
+ Notes
+ -----
+ Node and edge attribute data are copied to the new (relabeled) graph.
+
+ There is no guarantee that the relabeling of nodes to integers will
+ give the same two integers for two (even identical graphs).
+ Use the `ordering` argument to try to preserve the order.
+
+ See Also
+ --------
+ relabel_nodes
+ """
+ N = G.number_of_nodes() + first_label
+ if ordering == "default":
+ mapping = dict(zip(G.nodes(), range(first_label, N)))
+ elif ordering == "sorted":
+ nlist = sorted(G.nodes())
+ mapping = dict(zip(nlist, range(first_label, N)))
+ elif ordering == "increasing degree":
+ dv_pairs = [(d, n) for (n, d) in G.degree()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
+ mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
+ elif ordering == "decreasing degree":
+ dv_pairs = [(d, n) for (n, d) in G.degree()]
+ dv_pairs.sort() # in-place sort from lowest to highest degree
+ dv_pairs.reverse()
+ mapping = dict(zip([n for d, n in dv_pairs], range(first_label, N)))
+ else:
+ raise nx.NetworkXError(f"Unknown node ordering: {ordering}")
+ H = relabel_nodes(G, mapping)
+ # create node attribute with the old label
+ if label_attribute is not None:
+ nx.set_node_attributes(H, {v: k for k, v in mapping.items()}, label_attribute)
+ return H