id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
168,490 | import typing as t
from threading import local
_local = local()
The provided code snippet includes necessary dependencies for implementing the `pop_context` function. Write a Python function `def pop_context() -> None` to solve the following problem:
Removes the top level from the stack.
Here is the function:
def pop_context() -> None:
    """Removes the top level from the stack."""
    # Pops the most recent entry pushed onto the thread-local context
    # stack; callers must balance pushes and pops (no empty-stack guard
    # here — an unbalanced pop raises from the stack itself).
    _local.stack.pop() | Removes the top level from the stack. |
168,491 | import codecs
import io
import os
import re
import sys
import typing as t
from weakref import WeakKeyDictionary
def _find_binary_writer(stream: t.IO) -> t.Optional[t.BinaryIO]:
    """Locate the binary writer behind ``stream``, if one exists.

    Returns the stream itself when it already writes bytes, otherwise
    its ``buffer`` attribute when that is a binary writer, else ``None``.
    """
    # The stream may already be binary — e.g. the official docs suggest
    # detaching stdio streams, and some code does exactly that.
    if _is_binary_writer(stream, False):
        return t.cast(t.BinaryIO, stream)

    # Otherwise look at the text stream's underlying buffer; a closed
    # buffer is assumed to be binary here.
    underlying = getattr(stream, "buffer", None)

    if underlying is None or not _is_binary_writer(underlying, True):
        return None

    return t.cast(t.BinaryIO, underlying)
if sys.platform.startswith("win") and WIN:
from ._winconsole import _get_windows_console_stream
_ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
else:
def get_binary_stderr() -> t.BinaryIO:
    """Return a binary writer wrapping ``sys.stderr``.

    :raises RuntimeError: if no binary stream can be located.
    """
    writer = _find_binary_writer(sys.stderr)

    if writer is not None:
        return writer

    raise RuntimeError("Was not able to determine binary stream for sys.stderr.")
168,492 | import codecs
import io
import os
import re
import sys
import typing as t
from weakref import WeakKeyDictionary
def _force_correct_text_writer(
    text_writer: t.IO,
    encoding: t.Optional[str],
    errors: t.Optional[str],
    force_writable: bool = False,
) -> t.TextIO:
    """Coerce ``text_writer`` into a correctly configured text writer.

    Thin wrapper around the generic stream fixer, specialised with the
    writer-side probe (``_is_binary_writer``) and finder
    (``_find_binary_writer``) helpers.
    """
    probe = _is_binary_writer
    finder = _find_binary_writer

    return _force_correct_text_stream(
        text_writer, encoding, errors, probe, finder, force_writable=force_writable
    )
if sys.platform.startswith("win") and WIN:
from ._winconsole import _get_windows_console_stream
_ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
else:
def _get_windows_console_stream(
f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> t.Optional[t.TextIO]:
return None
def _get_windows_console_stream(
    f: t.TextIO, encoding: t.Optional[str], errors: t.Optional[str]
) -> t.Optional[t.TextIO]:
    """Return a special Windows console stream for ``f`` when applicable.

    Yields ``None`` whenever the win32 buffer API is unavailable, the
    requested encoding/error mode is incompatible with the console, or
    ``f`` is not actually attached to a console.
    """
    # Guards mirror the original short-circuit order exactly.
    if get_buffer is None:
        return None
    if encoding not in {"utf-16-le", None}:
        return None
    if errors not in {"strict", None}:
        return None
    if not _is_console(f):
        return None

    factory = _stream_factories.get(f.fileno())
    if factory is None:
        return None

    underlying = getattr(f, "buffer", None)
    if underlying is None:
        return None
    return factory(underlying)
def get_text_stderr(
    encoding: t.Optional[str] = None, errors: t.Optional[str] = None
) -> t.TextIO:
    """Return a text writer for ``sys.stderr`` honouring *encoding*/*errors*.

    Prefers the special Windows console stream when one applies,
    otherwise forces a correctly configured text writer.
    """
    console_stream = _get_windows_console_stream(sys.stderr, encoding, errors)

    if console_stream is None:
        return _force_correct_text_writer(
            sys.stderr, encoding, errors, force_writable=True
        )

    return console_stream
168,493 | import codecs
import io
import os
import re
import sys
import typing as t
from weakref import WeakKeyDictionary
def _get_argv_encoding() -> str:
import locale
return locale.getpreferredencoding() | null |
168,494 | import codecs
import io
import os
import re
import sys
import typing as t
from weakref import WeakKeyDictionary
def get_filesystem_encoding() -> str:
    # NOTE(review): this body looks garbled by the dataset extraction —
    # instead of returning an encoding it contains an unrelated platform
    # branch (upstream Click's version returns
    # sys.getfilesystemencoding()); confirm against the original source.
    if sys.platform.startswith("win") and WIN:
        from ._winconsole import _get_windows_console_stream
        _ansi_stream_wrappers: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
    else:
        def _get_argv_encoding() -> str:
            # Falls back to the filesystem encoding when stdin reports none.
            return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding() | null |
168,495 | import codecs
import io
import os
import re
import sys
import typing as t
from weakref import WeakKeyDictionary
# NOTE(review): typeshed-style stub of ``weakref.WeakKeyDictionary``;
# the type variables (_KT, _VT) and names like Mapping/Iterator/ref come
# from the stub's own context and are not defined in this snippet.
class WeakKeyDictionary(MutableMapping[_KT, _VT]):
    def __init__(self, dict: None = ...) -> None: ...
    def __init__(self, dict: Union[Mapping[_KT, _VT], Iterable[Tuple[_KT, _VT]]]) -> None: ...
    def __len__(self) -> int: ...
    def __getitem__(self, k: _KT) -> _VT: ...
    def __setitem__(self, k: _KT, v: _VT) -> None: ...
    def __delitem__(self, v: _KT) -> None: ...
    # Python 2-only legacy API, kept behind a version guard.
    if sys.version_info < (3, 0):
        def has_key(self, key: object) -> bool: ...
    def __contains__(self, o: object) -> bool: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def __str__(self) -> str: ...
    def copy(self) -> WeakKeyDictionary[_KT, _VT]: ...
    if sys.version_info < (3, 0):
        def keys(self) -> List[_KT]: ...
        def values(self) -> List[_VT]: ...
        def items(self) -> List[Tuple[_KT, _VT]]: ...
        def iterkeys(self) -> Iterator[_KT]: ...
        def itervalues(self) -> Iterator[_VT]: ...
        def iteritems(self) -> Iterator[Tuple[_KT, _VT]]: ...
        def iterkeyrefs(self) -> Iterator[ref[_KT]]: ...
    else:
        # These are incompatible with Mapping
        def keys(self) -> Iterator[_KT]: ...  # type: ignore
        def values(self) -> Iterator[_VT]: ...  # type: ignore
        def items(self) -> Iterator[Tuple[_KT, _VT]]: ...  # type: ignore
        def keyrefs(self) -> List[ref[_KT]]: ...
def _make_cached_stream_func(
src_func: t.Callable[[], t.TextIO], wrapper_func: t.Callable[[], t.TextIO]
) -> t.Callable[[], t.TextIO]:
cache: t.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
def func() -> t.TextIO:
stream = src_func()
try:
rv = cache.get(stream)
except Exception:
rv = None
if rv is not None:
return rv
rv = wrapper_func()
try:
cache[stream] = rv
except Exception:
pass
return rv
return func | null |
168,496 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
visible_prompt_func: t.Callable[[str], str] = input
def _build_prompt(
    text: str,
    suffix: str,
    show_default: bool = False,
    default: t.Optional[t.Any] = None,
    show_choices: bool = True,
    type: t.Optional[ParamType] = None,
) -> str:
    """Assemble the prompt string shown to the user.

    Appends the choice list for ``Choice`` types (when requested), the
    formatted default (when shown and present), and the suffix.
    """
    prompt = text

    # Short-circuit order preserved: ``Choice`` is only touched when a
    # type was actually supplied.
    if type is not None and show_choices and isinstance(type, Choice):
        choice_list = ", ".join(str(choice) for choice in type.choices)
        prompt = f"{prompt} ({choice_list})"

    if show_default and default is not None:
        prompt = f"{prompt} [{_format_default(default)}]"

    return f"{prompt}{suffix}"
def prompt(
    text: str,
    default: t.Optional[t.Any] = None,
    hide_input: bool = False,
    confirmation_prompt: t.Union[bool, str] = False,
    type: t.Optional[t.Union[ParamType, t.Any]] = None,
    value_proc: t.Optional[t.Callable[[str], t.Any]] = None,
    prompt_suffix: str = ": ",
    show_default: bool = True,
    err: bool = False,
    show_choices: bool = True,
) -> t.Any:
    """Prompts a user for input. This is a convenience function that can
    be used to prompt a user for input later.
    If the user aborts the input by sending an interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.
    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens. If this
        is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
        be hidden.
    :param confirmation_prompt: Prompt a second time to confirm the
        value. Can be set to a string instead of ``True`` to customize
        the message.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
        is invoked instead of the type conversion to
        convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
        ``stdout``, the same as with echo.
    :param show_choices: Show or hide choices if the passed type is a Choice.
        For example if type is a Choice of either day or week,
        show_choices is true and text is "Group by" then the
        prompt will be "Group by (day, week): ".
    .. versionadded:: 8.0
        ``confirmation_prompt`` can be a custom string.
    .. versionadded:: 7.0
        Added the ``show_choices`` parameter.
    .. versionadded:: 6.0
        Added unicode support for cmd.exe on Windows.
    .. versionadded:: 4.0
        Added the `err` parameter.
    """
    # Reads one raw line, using the hidden (getpass-style) reader when
    # ``hide_input`` is set. Converts ^C/^D into Abort.
    def prompt_func(text: str) -> str:
        f = hidden_prompt_func if hide_input else visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text.rstrip(" "), nl=False, err=err)
            # Echo a space to stdout to work around an issue where
            # readline causes backspace to clear the whole line.
            return f(" ")
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort() from None

    # Without an explicit value_proc, derive a converter from the type.
    if value_proc is None:
        value_proc = convert_type(type, default)

    prompt = _build_prompt(
        text, prompt_suffix, show_default, default, show_choices, type
    )

    if confirmation_prompt:
        if confirmation_prompt is True:
            confirmation_prompt = _("Repeat for confirmation")
        confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)

    # Outer loop repeats the whole exchange until conversion succeeds
    # (and, when requested, the confirmation matches).
    while True:
        # Inner loop: read until non-empty input or fall back to the
        # default when one exists.
        while True:
            value = prompt_func(prompt)
            if value:
                break
            elif default is not None:
                value = default
                break
        try:
            result = value_proc(value)
        except UsageError as e:
            if hide_input:
                # Don't leak hidden input through the error message.
                echo(_("Error: The value you entered was invalid."), err=err)
            else:
                echo(_("Error: {e.message}").format(e=e), err=err)  # noqa: B306
            continue
        if not confirmation_prompt:
            return result
        # Confirmation pass: re-prompt and compare the raw values
        # (an empty pair counts as matching input).
        while True:
            value2 = prompt_func(confirmation_prompt)
            is_empty = not value and not value2
            if value2 or is_empty:
                break
        if value == value2:
            return result
        echo(_("Error: The two entered values do not match."), err=err)
class Abort(RuntimeError):
    """Internal signalling exception telling Click to abort execution."""
def echo(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.Any]] = None,
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
) -> None:
    """Print a message and newline to stdout or a file. This should be
    used instead of :func:`print` because it provides better support
    for different data, files, and environments.
    Compared to :func:`print`, this does the following:
    - Ensures that the output encoding is not misconfigured on Linux.
    - Supports Unicode in the Windows console.
    - Supports writing to binary outputs, and supports writing bytes
      to text outputs.
    - Supports colors and styles on Windows.
    - Removes ANSI color and style codes if the output does not look
      like an interactive terminal.
    - Always flushes the output.
    :param message: The string or bytes to output. Other objects are
        converted to strings.
    :param file: The file to write to. Defaults to ``stdout``.
    :param err: Write to ``stderr`` instead of ``stdout``.
    :param nl: Print a newline after the message. Enabled by default.
    :param color: Force showing or hiding colors and other styles. By
        default Click will remove color if the output does not look like
        an interactive terminal.
    .. versionchanged:: 6.0
        Support Unicode output on the Windows console. Click does not
        modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``
        will still not support Unicode.
    .. versionchanged:: 4.0
        Added the ``color`` parameter.
    .. versionadded:: 3.0
        Added the ``err`` parameter.
    .. versionchanged:: 2.0
        Support colors on Windows if colorama is installed.
    """
    # Resolve the default stream lazily so err/stdout selection happens
    # per call rather than at import time.
    if file is None:
        if err:
            file = _default_text_stderr()
        else:
            file = _default_text_stdout()
    # Convert non bytes/text into the native string type.
    if message is not None and not isinstance(message, (str, bytes, bytearray)):
        out: t.Optional[t.Union[str, bytes]] = str(message)
    else:
        out = message
    if nl:
        # Normalise None to "" so a bare echo() still emits the newline.
        out = out or ""
        if isinstance(out, str):
            out += "\n"
        else:
            out += b"\n"
    # Nothing to write: still flush so interleaved output stays ordered.
    if not out:
        file.flush()
        return
    # If there is a message and the value looks like bytes, we manually
    # need to find the binary stream and write the message in there.
    # This is done separately so that most stream types will work as you
    # would expect. Eg: you can write to StringIO for other cases.
    if isinstance(out, (bytes, bytearray)):
        binary_file = _find_binary_writer(file)
        if binary_file is not None:
            file.flush()
            binary_file.write(out)
            binary_file.flush()
            return
    # ANSI style code support. For no message or bytes, nothing happens.
    # When outputting to a file instead of a terminal, strip codes.
    else:
        color = resolve_color_default(color)
        if should_strip_ansi(file, color):
            out = strip_ansi(out)
        elif WIN:
            # On Windows, wrap the stream so ANSI codes are translated
            # (colorama) unless color was explicitly disabled.
            if auto_wrap_for_ansi is not None:
                file = auto_wrap_for_ansi(file)  # type: ignore
            elif not color:
                out = strip_ansi(out)
    file.write(out)  # type: ignore
    file.flush()
The provided code snippet includes necessary dependencies for implementing the `confirm` function. Write a Python function `def confirm( text: str, default: t.Optional[bool] = False, abort: bool = False, prompt_suffix: str = ": ", show_default: bool = True, err: bool = False, ) -> bool` to solve the following problem:
Prompts for confirmation (yes/no question). If the user aborts the input by sending an interrupt signal this function will catch it and raise a :exc:`Abort` exception. :param text: the question to ask. :param default: The default value to use when no input is given. If ``None``, repeat until input is given. :param abort: if this is set to `True` a negative answer aborts the exception by raising :exc:`Abort`. :param prompt_suffix: a suffix that should be added to the prompt. :param show_default: shows or hides the default value in the prompt. :param err: if set to true the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. .. versionchanged:: 8.0 Repeat until input is given if ``default`` is ``None``. .. versionadded:: 4.0 Added the ``err`` parameter.
Here is the function:
def confirm(
    text: str,
    default: t.Optional[bool] = False,
    abort: bool = False,
    prompt_suffix: str = ": ",
    show_default: bool = True,
    err: bool = False,
) -> bool:
    """Prompts for confirmation (yes/no question).
    If the user aborts the input by sending an interrupt signal this
    function will catch it and raise a :exc:`Abort` exception.
    :param text: the question to ask.
    :param default: The default value to use when no input is given. If
        ``None``, repeat until input is given.
    :param abort: if this is set to `True` a negative answer aborts the
        exception by raising :exc:`Abort`.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
        ``stdout``, the same as with echo.
    .. versionchanged:: 8.0
        Repeat until input is given if ``default`` is ``None``.
    .. versionadded:: 4.0
        Added the ``err`` parameter.
    """
    # "y/n" means no preferred answer; otherwise capitalise the default.
    prompt = _build_prompt(
        text,
        prompt_suffix,
        show_default,
        "y/n" if default is None else ("Y/n" if default else "y/N"),
    )
    # Loop until a recognisable answer (or an applicable default).
    while True:
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(prompt.rstrip(" "), nl=False, err=err)
            # Echo a space to stdout to work around an issue where
            # readline causes backspace to clear the whole line.
            value = visible_prompt_func(" ").lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort() from None
        if value in ("y", "yes"):
            rv = True
        elif value in ("n", "no"):
            rv = False
        elif default is not None and value == "":
            # Empty input falls back to the default when one exists.
            rv = default
        else:
            echo(_("Error: invalid input"), err=err)
            continue
        break
    if abort and not rv:
        raise Abort()
    return rv | Prompts for confirmation (yes/no question). If the user aborts the input by sending a interrupt signal this function will catch it and raise a :exc:`Abort` exception. :param text: the question to ask. :param default: The default value to use when no input is given. If ``None``, repeat until input is given. :param abort: if this is set to `True` a negative answer aborts the exception by raising :exc:`Abort`. :param prompt_suffix: a suffix that should be added to the prompt. :param show_default: shows or hides the default value in the prompt. :param err: if set to true the file defaults to ``stderr`` instead of ``stdout``, the same as with echo. .. versionchanged:: 8.0 Repeat until input is given if ``default`` is ``None``. .. versionadded:: 4.0 Added the ``err`` parameter. |
168,497 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]:
    """Resolve the effective value of the ``color`` flag.

    An explicitly passed value wins unchanged; otherwise the current
    context (if any) supplies its ``color`` attribute, else ``None``.
    """
    if color is None:
        ctx = get_current_context(silent=True)
        return ctx.color if ctx is not None else None

    return color
def pager(generator: t.Iterable[str], color: t.Optional[bool] = None) -> None:
    """Decide what method to use for paging through text.

    :param generator: iterable of text chunks to page.
    :param color: force colors on/off; ``None`` means autodetect.
    """
    stdout = _default_text_stdout()

    # Paging only makes sense when both ends are interactive terminals.
    if not isatty(sys.stdin) or not isatty(stdout):
        return _nullpager(stdout, generator, color)

    pager_cmd = (os.environ.get("PAGER", None) or "").strip()
    if pager_cmd:
        if WIN:
            return _tempfilepager(generator, pager_cmd, color)
        return _pipepager(generator, pager_cmd, color)

    if os.environ.get("TERM") in ("dumb", "emacs"):
        return _nullpager(stdout, generator, color)

    if WIN or sys.platform.startswith("os2"):
        return _tempfilepager(generator, "more <", color)

    if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
        return _pipepager(generator, "less", color)

    import tempfile

    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        # BUG FIX: probe ``more`` against the temp file we just created.
        # The previous code interpolated a literal "(unknown)" path, so
        # the probe never exercised the real file.
        if hasattr(os, "system") and os.system(f'more "{filename}"') == 0:
            return _pipepager(generator, "more", color)
        return _nullpager(stdout, generator, color)
    finally:
        # Always clean up the probe file, whatever pager was chosen.
        os.unlink(filename)
The provided code snippet includes necessary dependencies for implementing the `echo_via_pager` function. Write a Python function `def echo_via_pager( text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str], color: t.Optional[bool] = None, ) -> None` to solve the following problem:
This function takes a text and shows it via an environment specific pager on stdout. .. versionchanged:: 3.0 Added the `color` flag. :param text_or_generator: the text to page, or alternatively, a generator emitting the text to page. :param color: controls if the pager supports ANSI colors or not. The default is autodetection.
Here is the function:
def echo_via_pager(
    text_or_generator: t.Union[t.Iterable[str], t.Callable[[], t.Iterable[str]], str],
    color: t.Optional[bool] = None,
) -> None:
    """This function takes a text and shows it via an environment specific
    pager on stdout.
    .. versionchanged:: 3.0
        Added the `color` flag.
    :param text_or_generator: the text to page, or alternatively, a
        generator emitting the text to page.
    :param color: controls if the pager supports ANSI colors or not. The
        default is autodetection.
    """
    color = resolve_color_default(color)
    # Normalise the input into an iterable of strings: call a generator
    # function, wrap a plain string, or iterate anything else as-is.
    if inspect.isgeneratorfunction(text_or_generator):
        i = t.cast(t.Callable[[], t.Iterable[str]], text_or_generator)()
    elif isinstance(text_or_generator, str):
        i = [text_or_generator]
    else:
        i = iter(t.cast(t.Iterable[str], text_or_generator))
    # convert every element of i to a text type if necessary
    text_generator = (el if isinstance(el, str) else str(el) for el in i)
    from ._termui_impl import pager
    # chaining "\n" appends a trailing newline after the paged text.
    return pager(itertools.chain(text_generator, "\n"), color) | This function takes a text and shows it via an environment specific pager on stdout. .. versionchanged:: 3.0 Added the `color` flag. :param text_or_generator: the text to page, or alternatively, a generator emitting the text to page. :param color: controls if the pager supports ANSI colors or not. The default is autodetection. |
168,498 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
V = t.TypeVar("V")
def resolve_color_default(color: t.Optional[bool] = None) -> t.Optional[bool]:
    """Internal helper to get the default value of the color flag. If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.
    """
    # An explicit argument always wins.
    if color is not None:
        return color
    # presumably silent=True yields None instead of raising when no
    # context is active — confirm against the globals module.
    ctx = get_current_context(silent=True)
    if ctx is not None:
        return ctx.color
    return None
class ProgressBar(t.Generic[V]):
    """Renders a textual progress bar for an (optionally sized) iterable.

    Use as a context manager and iterate over the instance inside the
    ``with`` block, or drive it manually via :meth:`update`.

    BUG FIX: ``pct``, ``time_per_iteration`` and ``eta`` are consumed as
    plain attributes elsewhere in the class (``self.pct * 100`` in
    :meth:`format_pct`, ``int(self.eta)`` in :meth:`format_eta`,
    ``self.time_per_iteration != 0`` in :meth:`format_bar`), so they must
    be ``@property``-decorated; the decorators had been lost.
    """

    def __init__(
        self,
        iterable: t.Optional[t.Iterable[V]],
        length: t.Optional[int] = None,
        fill_char: str = "#",
        empty_char: str = " ",
        bar_template: str = "%(bar)s",
        info_sep: str = " ",
        show_eta: bool = True,
        show_percent: t.Optional[bool] = None,
        show_pos: bool = False,
        item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
        label: t.Optional[str] = None,
        file: t.Optional[t.TextIO] = None,
        color: t.Optional[bool] = None,
        update_min_steps: int = 1,
        width: int = 30,
    ) -> None:
        self.fill_char = fill_char
        self.empty_char = empty_char
        self.bar_template = bar_template
        self.info_sep = info_sep
        self.show_eta = show_eta
        self.show_percent = show_percent
        self.show_pos = show_pos
        self.item_show_func = item_show_func
        self.label = label or ""

        if file is None:
            file = _default_text_stdout()

        self.file = file
        self.color = color
        self.update_min_steps = update_min_steps
        # Steps accumulated since the last render (see update()).
        self._completed_intervals = 0
        self.width = width
        # width == 0 means "fit the terminal"; track that mode.
        self.autowidth = width == 0

        if length is None:
            from operator import length_hint

            # length_hint returns -1 when the iterable's size is unknown.
            length = length_hint(iterable, -1)

            if length == -1:
                length = None
        if iterable is None:
            if length is None:
                raise TypeError("iterable or length is required")
            iterable = t.cast(t.Iterable[V], range(length))

        self.iter = iter(iterable)
        self.length = length
        self.pos = 0
        # Rolling window of per-item timings used for the ETA estimate.
        self.avg: t.List[float] = []
        self.start = self.last_eta = time.time()
        self.eta_known = False
        self.finished = False
        self.max_width: t.Optional[int] = None
        self.entered = False
        self.current_item: t.Optional[V] = None
        # Non-TTY output only echoes the label, never the bar.
        self.is_hidden = not isatty(self.file)
        self._last_line: t.Optional[str] = None

    def __enter__(self) -> "ProgressBar":
        self.entered = True
        self.render_progress()
        return self

    def __exit__(self, exc_type, exc_value, tb):  # type: ignore
        self.render_finish()

    def __iter__(self) -> t.Iterator[V]:
        if not self.entered:
            raise RuntimeError("You need to use progress bars in a with block.")
        self.render_progress()
        return self.generator()

    def __next__(self) -> V:
        # Iteration is defined in terms of a generator function,
        # returned by iter(self); use that to define next(). This works
        # because `self.iter` is an iterable consumed by that generator,
        # so it is re-entry safe. Calling `next(self.generator())`
        # twice works and does "what you want".
        return next(iter(self))

    def render_finish(self) -> None:
        if self.is_hidden:
            return
        self.file.write(AFTER_BAR)
        self.file.flush()

    @property
    def pct(self) -> float:
        """Fractional progress in [0.0, 1.0]."""
        if self.finished:
            return 1.0
        return min(self.pos / (float(self.length or 1) or 1), 1.0)

    @property
    def time_per_iteration(self) -> float:
        """Average seconds per step over the rolling window."""
        if not self.avg:
            return 0.0
        return sum(self.avg) / float(len(self.avg))

    @property
    def eta(self) -> float:
        """Estimated seconds remaining (0.0 when unknown or finished)."""
        if self.length is not None and not self.finished:
            return self.time_per_iteration * (self.length - self.pos)
        return 0.0

    def format_eta(self) -> str:
        if self.eta_known:
            # Local ``t`` deliberately shadows the typing alias here:
            # it is the remaining time being decomposed into d/h/m/s.
            t = int(self.eta)
            seconds = t % 60
            t //= 60
            minutes = t % 60
            t //= 60
            hours = t % 24
            t //= 24
            if t > 0:
                return f"{t}d {hours:02}:{minutes:02}:{seconds:02}"
            else:
                return f"{hours:02}:{minutes:02}:{seconds:02}"
        return ""

    def format_pos(self) -> str:
        pos = str(self.pos)
        if self.length is not None:
            pos += f"/{self.length}"
        return pos

    def format_pct(self) -> str:
        # Slice strips the leading padding space from the format.
        return f"{int(self.pct * 100): 4}%"[1:]

    def format_bar(self) -> str:
        if self.length is not None:
            bar_length = int(self.pct * self.width)
            bar = self.fill_char * bar_length
            bar += self.empty_char * (self.width - bar_length)
        elif self.finished:
            bar = self.fill_char * self.width
        else:
            # Unknown length: render a single bouncing fill character.
            chars = list(self.empty_char * (self.width or 1))
            if self.time_per_iteration != 0:
                chars[
                    int(
                        (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
                        * self.width
                    )
                ] = self.fill_char
            bar = "".join(chars)
        return bar

    def format_progress_line(self) -> str:
        show_percent = self.show_percent

        info_bits = []
        if self.length is not None and show_percent is None:
            show_percent = not self.show_pos

        if self.show_pos:
            info_bits.append(self.format_pos())
        if show_percent:
            info_bits.append(self.format_pct())
        if self.show_eta and self.eta_known and not self.finished:
            info_bits.append(self.format_eta())
        if self.item_show_func is not None:
            item_info = self.item_show_func(self.current_item)
            if item_info is not None:
                info_bits.append(item_info)

        return (
            self.bar_template
            % {
                "label": self.label,
                "bar": self.format_bar(),
                "info": self.info_sep.join(info_bits),
            }
        ).rstrip()

    def render_progress(self) -> None:
        import shutil

        if self.is_hidden:
            # Only output the label as it changes if the output is not a
            # TTY. Use file=stderr if you expect to be piping stdout.
            if self._last_line != self.label:
                self._last_line = self.label
                echo(self.label, file=self.file, color=self.color)
            return

        buf = []
        # Update width in case the terminal has been resized
        if self.autowidth:
            old_width = self.width
            self.width = 0
            clutter_length = term_len(self.format_progress_line())
            new_width = max(0, shutil.get_terminal_size().columns - clutter_length)
            if new_width < old_width:
                # Terminal shrank: blank out the old, wider line first.
                buf.append(BEFORE_BAR)
                buf.append(" " * self.max_width)  # type: ignore
                self.max_width = new_width
            self.width = new_width

        clear_width = self.width
        if self.max_width is not None:
            clear_width = self.max_width

        buf.append(BEFORE_BAR)
        line = self.format_progress_line()
        line_len = term_len(line)
        if self.max_width is None or self.max_width < line_len:
            self.max_width = line_len

        buf.append(line)
        # Pad to the widest line rendered so far to erase leftovers.
        buf.append(" " * (clear_width - line_len))
        line = "".join(buf)
        # Render the line only if it changed.
        if line != self._last_line:
            self._last_line = line
            echo(line, file=self.file, color=self.color, nl=False)
            self.file.flush()

    def make_step(self, n_steps: int) -> None:
        self.pos += n_steps
        if self.length is not None and self.pos >= self.length:
            self.finished = True

        # Re-estimate the ETA at most once per second.
        if (time.time() - self.last_eta) < 1.0:
            return

        self.last_eta = time.time()

        # self.avg is a rolling list of length <= 7 of steps where steps are
        # defined as time elapsed divided by the total progress through
        # self.length.
        if self.pos:
            step = (time.time() - self.start) / self.pos
        else:
            step = time.time() - self.start

        self.avg = self.avg[-6:] + [step]

        self.eta_known = self.length is not None

    def update(self, n_steps: int, current_item: t.Optional[V] = None) -> None:
        """Update the progress bar by advancing a specified number of
        steps, and optionally set the ``current_item`` for this new
        position.
        :param n_steps: Number of steps to advance.
        :param current_item: Optional item to set as ``current_item``
            for the updated position.
        .. versionchanged:: 8.0
            Added the ``current_item`` optional parameter.
        .. versionchanged:: 8.0
            Only render when the number of steps meets the
            ``update_min_steps`` threshold.
        """
        if current_item is not None:
            self.current_item = current_item

        self._completed_intervals += n_steps

        if self._completed_intervals >= self.update_min_steps:
            self.make_step(self._completed_intervals)
            self.render_progress()
            self._completed_intervals = 0

    def finish(self) -> None:
        self.eta_known = False
        self.current_item = None
        self.finished = True

    def generator(self) -> t.Iterator[V]:
        """Return a generator which yields the items added to the bar
        during construction, and updates the progress bar *after* the
        yielded block returns.
        """
        # WARNING: the iterator interface for `ProgressBar` relies on
        # this and only works because this is a simple generator which
        # doesn't create or manage additional state. If this function
        # changes, the impact should be evaluated both against
        # `iter(bar)` and `next(bar)`. `next()` in particular may call
        # `self.generator()` repeatedly, and this must remain safe in
        # order for that interface to work.
        if not self.entered:
            raise RuntimeError("You need to use progress bars in a with block.")

        if self.is_hidden:
            yield from self.iter
        else:
            for rv in self.iter:
                self.current_item = rv

                # This allows show_item_func to be updated before the
                # item is processed. Only trigger at the beginning of
                # the update interval.
                if self._completed_intervals == 0:
                    self.render_progress()

                yield rv
                self.update(1)

            self.finish()
            self.render_progress()
The provided code snippet includes necessary dependencies for implementing the `progressbar` function. Write a Python function `def progressbar( iterable: t.Optional[t.Iterable[V]] = None, length: t.Optional[int] = None, label: t.Optional[str] = None, show_eta: bool = True, show_percent: t.Optional[bool] = None, show_pos: bool = False, item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None, fill_char: str = "#", empty_char: str = "-", bar_template: str = "%(label)s [%(bar)s] %(info)s", info_sep: str = " ", width: int = 36, file: t.Optional[t.TextIO] = None, color: t.Optional[bool] = None, update_min_steps: int = 1, ) -> "ProgressBar[V]"` to solve the following problem:
This function creates an iterable context manager that can be used to iterate over something while showing a progress bar. It will either iterate over the `iterable` or `length` items (that are counted up). While iteration happens, this function will print a rendered progress bar to the given `file` (defaults to stdout) and will attempt to calculate remaining time and more. By default, this progress bar will not be rendered if the file is not a terminal. The context manager creates the progress bar. When the context manager is entered the progress bar is already created. With every iteration over the progress bar, the iterable passed to the bar is advanced and the bar is updated. When the context manager exits, a newline is printed and the progress bar is finalized on screen. Note: The progress bar is currently designed for use cases where the total progress can be expected to take at least several seconds. Because of this, the ProgressBar class object won't display progress that is considered too fast, and progress where the time between steps is less than a second. No printing must happen or the progress bar will be unintentionally destroyed. Example usage:: with progressbar(items) as bar: for item in bar: do_something_with(item) Alternatively, if no iterable is specified, one can manually update the progress bar through the `update()` method instead of directly iterating over the progress bar. The update method accepts the number of steps to increment the bar with:: with progressbar(length=chunks.total_bytes) as bar: for chunk in chunks: process_chunk(chunk) bar.update(chunks.bytes) The ``update()`` method also takes an optional value specifying the ``current_item`` at the new position. 
This is useful when used together with ``item_show_func`` to customize the output for each manual step:: with click.progressbar( length=total_size, label='Unzipping archive', item_show_func=lambda a: a.filename ) as bar: for archive in zip_file: archive.extract() bar.update(archive.size, archive) :param iterable: an iterable to iterate over. If not provided the length is required. :param length: the number of items to iterate over. By default the progressbar will attempt to ask the iterator about its length, which might or might not work. If an iterable is also provided this parameter can be used to override the length. If an iterable is not provided the progress bar will iterate over a range of that length. :param label: the label to show next to the progress bar. :param show_eta: enables or disables the estimated time display. This is automatically disabled if the length cannot be determined. :param show_percent: enables or disables the percentage display. The default is `True` if the iterable has a length or `False` if not. :param show_pos: enables or disables the absolute position display. The default is `False`. :param item_show_func: A function called with the current item which can return a string to show next to the progress bar. If the function returns ``None`` nothing is shown. The current item can be ``None``, such as when entering and exiting the bar. :param fill_char: the character to use to show the filled part of the progress bar. :param empty_char: the character to use to show the non-filled part of the progress bar. :param bar_template: the format string to use as template for the bar. The parameters in it are ``label`` for the label, ``bar`` for the progress bar and ``info`` for the info section. :param info_sep: the separator between multiple info items (eta etc.) :param width: the width of the progress bar in characters, 0 means full terminal width :param file: The file to write to. If this is not a terminal then only the label is printed. 
:param color: controls if the terminal supports ANSI colors or not. The default is autodetection. This is only needed if ANSI codes are included anywhere in the progress bar output which is not the case by default. :param update_min_steps: Render only when this many updates have completed. This allows tuning for very fast iterators. .. versionchanged:: 8.0 Output is shown even if execution time is less than 0.5 seconds. .. versionchanged:: 8.0 ``item_show_func`` shows the current item, not the previous one. .. versionchanged:: 8.0 Labels are echoed if the output is not a TTY. Reverts a change in 7.0 that removed all output. .. versionadded:: 8.0 Added the ``update_min_steps`` parameter. .. versionchanged:: 4.0 Added the ``color`` parameter. Added the ``update`` method to the object. .. versionadded:: 2.0
Here is the function:
def progressbar(
    iterable: t.Optional[t.Iterable[V]] = None,
    length: t.Optional[int] = None,
    label: t.Optional[str] = None,
    show_eta: bool = True,
    show_percent: t.Optional[bool] = None,
    show_pos: bool = False,
    item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None,
    fill_char: str = "#",
    empty_char: str = "-",
    bar_template: str = "%(label)s [%(bar)s] %(info)s",
    info_sep: str = " ",
    width: int = 36,
    file: t.Optional[t.TextIO] = None,
    color: t.Optional[bool] = None,
    update_min_steps: int = 1,
) -> "ProgressBar[V]":
    """Create an iterable context manager that renders a progress bar
    while iterating.

    Either iterate over ``iterable``, or give only ``length`` and call
    :meth:`update` manually with the number of steps completed.  The bar
    is rendered to ``file`` (default stdout); when that is not a
    terminal, only the label is printed.  The bar is designed for work
    taking at least several seconds; progress considered too fast may
    not be rendered.

    No other printing may happen while the bar is active, or the bar's
    output will be corrupted.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Manual updates (``update`` also accepts the new current item, which
    is forwarded to ``item_show_func``)::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    :param iterable: an iterable to iterate over.  Required unless
        ``length`` is given.
    :param length: the number of items to iterate over.  Overrides the
        iterable's own length; without an iterable, the bar iterates
        over a range of this length.
    :param label: the label shown next to the progress bar.
    :param show_eta: show the estimated remaining time.  Automatically
        disabled when the length cannot be determined.
    :param show_percent: show the percentage; defaults to `True` when
        the iterable has a length, `False` otherwise.
    :param show_pos: show the absolute position (default `False`).
    :param item_show_func: called with the current item (which may be
        ``None``, e.g. on entering/exiting the bar); a non-``None``
        return value is shown next to the bar.
    :param fill_char: character for the filled part of the bar.
    :param empty_char: character for the unfilled part of the bar.
    :param bar_template: format template with ``label``, ``bar`` and
        ``info`` placeholders.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: bar width in characters; 0 means full terminal width.
    :param file: the file to write to.  If this is not a terminal then
        only the label is printed.
    :param color: force ANSI colors on or off (auto-detected by
        default).  Only needed when ANSI codes appear in the output,
        which is not the case by default.
    :param update_min_steps: render only after this many update steps
        have completed; a tuning knob for very fast iterators.

    .. versionchanged:: 8.0
        Output is shown even if execution time is less than 0.5 seconds.

    .. versionchanged:: 8.0
        ``item_show_func`` shows the current item, not the previous one.

    .. versionchanged:: 8.0
        Labels are echoed if the output is not a TTY.  Reverts a change
        in 7.0 that removed all output.

    .. versionadded:: 8.0
        Added the ``update_min_steps`` parameter.

    .. versionchanged:: 4.0
        Added the ``color`` parameter.  Added the ``update`` method to
        the object.

    .. versionadded:: 2.0
    """
    from ._termui_impl import ProgressBar

    # Normalize ``color`` through the context-aware default resolver,
    # then hand everything to the implementation class in one go.
    bar_kwargs: t.Dict[str, t.Any] = dict(
        iterable=iterable,
        length=length,
        show_eta=show_eta,
        show_percent=show_percent,
        show_pos=show_pos,
        item_show_func=item_show_func,
        fill_char=fill_char,
        empty_char=empty_char,
        bar_template=bar_template,
        info_sep=info_sep,
        file=file,
        label=label,
        width=width,
        color=resolve_color_default(color),
        update_min_steps=update_min_steps,
    )
    return ProgressBar(**bar_kwargs)
This is useful when used together with ``item_show_func`` to customize the output for each manual step:: with click.progressbar( length=total_size, label='Unzipping archive', item_show_func=lambda a: a.filename ) as bar: for archive in zip_file: archive.extract() bar.update(archive.size, archive) :param iterable: an iterable to iterate over. If not provided the length is required. :param length: the number of items to iterate over. By default the progressbar will attempt to ask the iterator about its length, which might or might not work. If an iterable is also provided this parameter can be used to override the length. If an iterable is not provided the progress bar will iterate over a range of that length. :param label: the label to show next to the progress bar. :param show_eta: enables or disables the estimated time display. This is automatically disabled if the length cannot be determined. :param show_percent: enables or disables the percentage display. The default is `True` if the iterable has a length or `False` if not. :param show_pos: enables or disables the absolute position display. The default is `False`. :param item_show_func: A function called with the current item which can return a string to show next to the progress bar. If the function returns ``None`` nothing is shown. The current item can be ``None``, such as when entering and exiting the bar. :param fill_char: the character to use to show the filled part of the progress bar. :param empty_char: the character to use to show the non-filled part of the progress bar. :param bar_template: the format string to use as template for the bar. The parameters in it are ``label`` for the label, ``bar`` for the progress bar and ``info`` for the info section. :param info_sep: the separator between multiple info items (eta etc.) :param width: the width of the progress bar in characters, 0 means full terminal width :param file: The file to write to. If this is not a terminal then only the label is printed. 
:param color: controls if the terminal supports ANSI colors or not. The default is autodetection. This is only needed if ANSI codes are included anywhere in the progress bar output which is not the case by default. :param update_min_steps: Render only when this many updates have completed. This allows tuning for very fast iterators. .. versionchanged:: 8.0 Output is shown even if execution time is less than 0.5 seconds. .. versionchanged:: 8.0 ``item_show_func`` shows the current item, not the previous one. .. versionchanged:: 8.0 Labels are echoed if the output is not a TTY. Reverts a change in 7.0 that removed all output. .. versionadded:: 8.0 Added the ``update_min_steps`` parameter. .. versionchanged:: 4.0 Added the ``color`` parameter. Added the ``update`` method to the object. .. versionadded:: 2.0 |
168,499 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
# True only on a native Windows interpreter; the App Engine and MSYS2
# environments are excluded.  NOTE(review): APP_ENGINE and MSYS2 are
# flags defined elsewhere in this module — presumably environment
# sniffs; confirm there.
WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2
def isatty(stream: t.IO) -> bool:
    """Return ``True`` if *stream* reports being attached to a terminal.

    Any exception raised while asking the stream (including a missing
    ``isatty`` method) is treated as "not a tty".
    """
    try:
        result = stream.isatty()
    except Exception:
        return False
    return result
The provided code snippet includes necessary dependencies for implementing the `clear` function. Write a Python function `def clear() -> None` to solve the following problem:
Clears the terminal screen. This will have the effect of clearing the whole visible space of the terminal and moving the cursor to the top left. This does not do anything if not connected to a terminal. .. versionadded:: 2.0
Here is the function:
def clear() -> None:
    """Clear the visible terminal screen and move the cursor to the
    top-left corner.

    This is a no-op when stdout is not connected to a terminal.

    .. versionadded:: 2.0
    """
    if isatty(sys.stdout):
        if WIN:
            # Windows consoles may not honor ANSI codes; use the shell.
            os.system("cls")
        else:
            # ANSI: erase the entire display, then home the cursor.
            sys.stdout.write("\033[2J\033[1;1H")
168,500 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
def strip_ansi(value: str) -> str:
    """Return ``value`` with every ANSI escape sequence matched by the
    module-level ``_ansi_re`` pattern removed."""
    cleaned = _ansi_re.sub("", value)
    return cleaned
The provided code snippet includes necessary dependencies for implementing the `unstyle` function. Write a Python function `def unstyle(text: str) -> str` to solve the following problem:
Removes ANSI styling information from a string. Usually it's not necessary to use this function as Click's echo function will automatically remove styling if necessary. .. versionadded:: 2.0 :param text: the text to remove style information from.
Here is the function:
def unstyle(text: str) -> str:
    """Remove ANSI styling information from a string.

    Usually unnecessary, since :func:`echo` already strips styling when
    the output does not look like an interactive terminal.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    plain = strip_ansi(text)
    return plain
168,501 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
def style(
    text: t.Any,
    fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
    bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
    bold: t.Optional[bool] = None,
    dim: t.Optional[bool] = None,
    underline: t.Optional[bool] = None,
    overline: t.Optional[bool] = None,
    italic: t.Optional[bool] = None,
    blink: t.Optional[bool] = None,
    reverse: t.Optional[bool] = None,
    strikethrough: t.Optional[bool] = None,
    reset: bool = True,
) -> str:
    """Return *text* wrapped in ANSI escape codes implementing the
    requested styles.  By default a reset-all code is appended so the
    styling does not leak into subsequent output; pass ``reset=False``
    to compose styles across calls.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))
        click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))

    Supported color names: ``black``, ``red``, ``green``, ``yellow``,
    ``blue``, ``magenta``, ``cyan``, ``white`` (some may render as
    nearby shades), their ``bright_`` variants, and ``reset`` (resets
    the color code only).  If the terminal supports it, a color may also
    be an integer in [0, 255] (8-bit/256-color mode) or an RGB tuple of
    three integers in [0, 255] (24-bit/true-color mode).  See
    https://en.wikipedia.org/wiki/ANSI_color and
    https://gist.github.com/XVilka/8346728 for more information.

    :param text: the string to style with ansi codes.  Non-strings are
        converted with :class:`str`.
    :param fg: foreground color.
    :param bg: background color.
    :param bold: enable or disable bold mode.
    :param dim: enable or disable dim mode (badly supported).
    :param underline: enable or disable underline.
    :param overline: enable or disable overline.
    :param italic: enable or disable italic.
    :param blink: enable or disable blinking.
    :param reverse: enable or disable inverse rendering (foreground and
        background swapped).
    :param strikethrough: enable or disable striking through text.
    :param reset: append a reset-all code at the end (the default) so
        styles do not carry over; disable to compose styles.

    .. versionchanged:: 8.0
        A non-string ``message`` is converted to a string.

    .. versionchanged:: 8.0
        Added support for 256 and RGB color codes.

    .. versionchanged:: 8.0
        Added the ``strikethrough``, ``italic``, and ``overline``
        parameters.

    .. versionchanged:: 7.0
        Added support for bright colors.

    .. versionadded:: 2.0
    """
    if not isinstance(text, str):
        text = str(text)

    parts = []

    # Colors go through _interpret_color, which raises KeyError for an
    # unknown name; surface that as a TypeError naming the bad color.
    if fg:
        try:
            parts.append(f"\033[{_interpret_color(fg)}m")
        except KeyError:
            raise TypeError(f"Unknown color {fg!r}") from None

    if bg:
        try:
            parts.append(f"\033[{_interpret_color(bg, 10)}m")
        except KeyError:
            raise TypeError(f"Unknown color {bg!r}") from None

    # Each tri-state attribute maps to an (enable, disable) SGR pair;
    # ``None`` means "leave unspecified" and emits nothing.  The tuple
    # order matches the order in which codes are emitted.
    toggles = (
        (bold, 1, 22),
        (dim, 2, 22),
        (underline, 4, 24),
        (overline, 53, 55),
        (italic, 3, 23),
        (blink, 5, 25),
        (reverse, 7, 27),
        (strikethrough, 9, 29),
    )
    for flag, on_code, off_code in toggles:
        if flag is not None:
            parts.append(f"\033[{on_code if flag else off_code}m")

    parts.append(text)
    if reset:
        parts.append(_ansi_reset_all)
    return "".join(parts)
def echo(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.Any]] = None,
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
) -> None:
    """Print a message and newline to stdout or a file. This should be
    used instead of :func:`print` because it provides better support
    for different data, files, and environments.
    Compared to :func:`print`, this does the following:
    - Ensures that the output encoding is not misconfigured on Linux.
    - Supports Unicode in the Windows console.
    - Supports writing to binary outputs, and supports writing bytes
      to text outputs.
    - Supports colors and styles on Windows.
    - Removes ANSI color and style codes if the output does not look
      like an interactive terminal.
    - Always flushes the output.
    :param message: The string or bytes to output. Other objects are
        converted to strings.
    :param file: The file to write to. Defaults to ``stdout``.
    :param err: Write to ``stderr`` instead of ``stdout``.
    :param nl: Print a newline after the message. Enabled by default.
    :param color: Force showing or hiding colors and other styles. By
        default Click will remove color if the output does not look like
        an interactive terminal.
    .. versionchanged:: 6.0
        Support Unicode output on the Windows console. Click does not
        modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``
        will still not support Unicode.
    .. versionchanged:: 4.0
        Added the ``color`` parameter.
    .. versionadded:: 3.0
        Added the ``err`` parameter.
    .. versionchanged:: 2.0
        Support colors on Windows if colorama is installed.
    """
    # Resolve the default stream lazily.  _default_text_stderr and
    # _default_text_stdout are helpers defined elsewhere in this module —
    # presumably encoding-corrected wrappers around the sys streams;
    # confirm there.
    if file is None:
        if err:
            file = _default_text_stderr()
        else:
            file = _default_text_stdout()
    # Convert non bytes/text into the native string type.
    if message is not None and not isinstance(message, (str, bytes, bytearray)):
        out: t.Optional[t.Union[str, bytes]] = str(message)
    else:
        out = message
    # Append the newline in the same type (str vs bytes) as the payload;
    # a ``None`` message still produces a bare newline.
    if nl:
        out = out or ""
        if isinstance(out, str):
            out += "\n"
        else:
            out += b"\n"
    # Nothing to write: flush anyway so earlier buffered output appears.
    if not out:
        file.flush()
        return
    # If there is a message and the value looks like bytes, we manually
    # need to find the binary stream and write the message in there.
    # This is done separately so that most stream types will work as you
    # would expect. Eg: you can write to StringIO for other cases.
    if isinstance(out, (bytes, bytearray)):
        binary_file = _find_binary_writer(file)
        if binary_file is not None:
            file.flush()
            binary_file.write(out)
            binary_file.flush()
            return
    # ANSI style code support. For no message or bytes, nothing happens.
    # When outputting to a file instead of a terminal, strip codes.
    else:
        color = resolve_color_default(color)
        if should_strip_ansi(file, color):
            out = strip_ansi(out)
        elif WIN:
            # auto_wrap_for_ansi wraps the stream for ANSI emulation on
            # Windows — presumably via colorama when available; confirm
            # in _compat.  Without it, strip codes unless color is
            # explicitly forced on.
            if auto_wrap_for_ansi is not None:
                file = auto_wrap_for_ansi(file)  # type: ignore
            elif not color:
                out = strip_ansi(out)
    file.write(out)  # type: ignore
    file.flush()
The provided code snippet includes necessary dependencies for implementing the `secho` function. Write a Python function `def secho( message: t.Optional[t.Any] = None, file: t.Optional[t.IO[t.AnyStr]] = None, nl: bool = True, err: bool = False, color: t.Optional[bool] = None, **styles: t.Any, ) -> None` to solve the following problem:
This function combines :func:`echo` and :func:`style` into one call. As such the following two calls are the same:: click.secho('Hello World!', fg='green') click.echo(click.style('Hello World!', fg='green')) All keyword arguments are forwarded to the underlying functions depending on which one they go with. Non-string types will be converted to :class:`str`. However, :class:`bytes` are passed directly to :meth:`echo` without applying style. If you want to style bytes that represent text, call :meth:`bytes.decode` first. .. versionchanged:: 8.0 A non-string ``message`` is converted to a string. Bytes are passed through without style applied. .. versionadded:: 2.0
Here is the function:
def secho(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.AnyStr]] = None,
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
    **styles: t.Any,
) -> None:
    """Shorthand combining :func:`echo` and :func:`style` into one call,
    so these two lines are equivalent::

        click.secho('Hello World!', fg='green')
        click.echo(click.style('Hello World!', fg='green'))

    Keyword arguments are forwarded to whichever of the two functions
    they belong to.  Non-string messages are converted to :class:`str`;
    :class:`bytes` are passed straight to :func:`echo` with no styling
    applied — call :meth:`bytes.decode` first if you want styled text.

    .. versionchanged:: 8.0
        A non-string ``message`` is converted to a string.  Bytes are
        passed through without style applied.

    .. versionadded:: 2.0
    """
    styled = message
    if styled is not None and not isinstance(styled, (bytes, bytearray)):
        styled = style(styled, **styles)
    return echo(styled, file=file, nl=nl, err=err, color=color)
168,502 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
class Editor:
    """Drives an external text editor for :func:`click.edit`.

    :param editor: explicit editor command.  If ``None``, the editor is
        detected from ``VISUAL``/``EDITOR`` or platform defaults.
    :param env: extra environment variables for the editor process.
    :param require_save: if true, :meth:`edit` returns ``None`` unless
        the file's mtime changed (i.e. the user actually saved).
    :param extension: suffix for the temporary file; editors may use it
        for syntax highlighting.
    """

    def __init__(
        self,
        editor: t.Optional[str] = None,
        env: t.Optional[t.Mapping[str, str]] = None,
        require_save: bool = True,
        extension: str = ".txt",
    ) -> None:
        self.editor = editor
        self.env = env
        self.require_save = require_save
        self.extension = extension

    def get_editor(self) -> str:
        """Return the editor command to run.

        Preference order: the explicit ``editor``, then the ``VISUAL``
        and ``EDITOR`` environment variables, then platform fallbacks
        (``notepad`` on Windows; the first of ``sensible-editor``,
        ``vim``, ``nano`` found on PATH; finally ``vi``).
        """
        if self.editor is not None:
            return self.editor

        for key in "VISUAL", "EDITOR":
            rv = os.environ.get(key)

            if rv:
                return rv

        if WIN:
            return "notepad"

        for editor in "sensible-editor", "vim", "nano":
            if os.system(f"which {editor} >/dev/null 2>&1") == 0:
                return editor

        return "vi"

    def edit_file(self, filename: str) -> None:
        """Open *filename* in the configured editor and block until the
        editor exits.

        :raises ClickException: if the editor cannot be launched or
            exits with a non-zero status.
        """
        import subprocess

        editor = self.get_editor()
        environ: t.Optional[t.Dict[str, str]] = None

        if self.env:
            environ = os.environ.copy()
            environ.update(self.env)

        try:
            # BUG FIX: the command previously interpolated a hard-coded
            # placeholder string instead of the ``filename`` argument,
            # so the requested file was never opened.
            c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True)
            exit_code = c.wait()

            if exit_code != 0:
                raise ClickException(
                    _("{editor}: Editing failed").format(editor=editor)
                )
        except OSError as e:
            raise ClickException(
                _("{editor}: Editing failed: {e}").format(editor=editor, e=e)
            ) from e

    def edit(self, text: t.Optional[t.AnyStr]) -> t.Optional[t.AnyStr]:
        """Round-trip *text* through a temporary file opened in the
        editor and return the edited contents.

        Returns ``None`` when ``require_save`` is set and the user did
        not save.  ``bytes`` input yields ``bytes`` output; ``str``
        input is encoded as UTF-8 (UTF-8-sig with CRLF newlines on
        Windows) and decoded back on return.
        """
        import tempfile

        if not text:
            data = b""
        elif isinstance(text, (bytes, bytearray)):
            data = text
        else:
            # Make sure the user gets a trailing newline to edit behind.
            if text and not text.endswith("\n"):
                text += "\n"

            if WIN:
                data = text.replace("\n", "\r\n").encode("utf-8-sig")
            else:
                data = text.encode("utf-8")

        fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
        f: t.BinaryIO

        try:
            with os.fdopen(fd, "wb") as f:
                f.write(data)

            # If the filesystem resolution is 1 second, like Mac OS
            # 10.12 Extended, or 2 seconds, like FAT32, and the editor
            # closes very fast, require_save can fail. Set the modified
            # time to be 2 seconds in the past to work around this.
            os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))
            # Depending on the resolution, the exact value might not be
            # recorded, so get the new recorded value.
            timestamp = os.path.getmtime(name)

            self.edit_file(name)

            if self.require_save and os.path.getmtime(name) == timestamp:
                return None

            with open(name, "rb") as f:
                rv = f.read()

            if isinstance(text, (bytes, bytearray)):
                return rv

            return rv.decode("utf-8-sig").replace("\r\n", "\n")  # type: ignore
        finally:
            os.unlink(name)
The provided code snippet includes necessary dependencies for implementing the `edit` function. Write a Python function `def edit( text: t.Optional[t.AnyStr] = None, editor: t.Optional[str] = None, env: t.Optional[t.Mapping[str, str]] = None, require_save: bool = True, extension: str = ".txt", filename: t.Optional[str] = None, ) -> t.Optional[t.AnyStr]` to solve the following problem:
r"""Edits the given text in the defined editor. If an editor is given (should be the full path to the executable but the regular operating system search path is used for finding the executable) it overrides the detected editor. Optionally, some environment variables can be used. If the editor is closed without changes, `None` is returned. In case a file is edited directly the return value is always `None` and `require_save` and `extension` are ignored. If the editor cannot be opened a :exc:`UsageError` is raised. Note for Windows: to simplify cross-platform usage, the newlines are automatically converted from POSIX to Windows and vice versa. As such, the message here will have ``\n`` as newline markers. :param text: the text to edit. :param editor: optionally the editor to use. Defaults to automatic detection. :param env: environment variables to forward to the editor. :param require_save: if this is true, then not saving in the editor will make the return value become `None`. :param extension: the extension to tell the editor about. This defaults to `.txt` but changing this might change syntax highlighting. :param filename: if provided it will edit this file instead of the provided text contents. It will not use a temporary file as an indirection in that case.
Here is the function:
def edit(
    text: t.Optional[t.AnyStr] = None,
    editor: t.Optional[str] = None,
    env: t.Optional[t.Mapping[str, str]] = None,
    require_save: bool = True,
    extension: str = ".txt",
    filename: t.Optional[str] = None,
) -> t.Optional[t.AnyStr]:
    r"""Edit the given text (or file) in the user's editor.

    If ``editor`` is given (full path, or a name resolved via the OS
    search path) it overrides the detected editor; ``env`` supplies
    extra environment variables for it.  If the editor is closed without
    changes, `None` is returned.  When ``filename`` is given, that file
    is edited directly, the return value is always `None`, and
    ``require_save`` and ``extension`` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, newlines are
    automatically converted from POSIX to Windows and back, so the text
    here always uses ``\n`` as its newline marker.

    :param text: the text to edit.
    :param editor: the editor to use; detected automatically by default.
    :param env: environment variables to forward to the editor.
    :param require_save: if true, not saving in the editor makes the
        return value `None`.
    :param extension: the file extension to tell the editor about
        (default ``.txt``); may affect syntax highlighting.
    :param filename: edit this file in place instead of the provided
        text contents, without a temporary-file indirection.
    """
    from ._termui_impl import Editor

    worker = Editor(
        editor=editor, env=env, require_save=require_save, extension=extension
    )

    if filename is not None:
        worker.edit_file(filename)
        return None

    return worker.edit(text)
168,503 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
def open_url(url: str, wait: bool = False, locate: bool = False) -> int:
    """Launch *url* (or locate the file it names) with the platform's
    default opener and return the launcher's exit code (0 usually means
    success).

    :param url: URL, ``file://`` URL, or plain path to open.
    :param wait: wait for the opener process to exit before returning.
    :param locate: open a file manager showing the file instead of
        launching the associated application.
    """
    import subprocess

    def _unquote_file(url: str) -> str:
        # Turn a ``file://`` URL back into a plain, percent-decoded path;
        # anything else is returned unchanged.
        from urllib.parse import unquote

        if url.startswith("file://"):
            url = unquote(url[7:])
        return url

    if sys.platform == "darwin":
        # macOS: use /usr/bin/open with list args (no shell involved).
        args = ["open"]
        if wait:
            args.append("-W")
        if locate:
            args.append("-R")
        args.append(_unquote_file(url))
        # Silence the opener's stderr; the handle is closed in `finally`.
        null = open("/dev/null", "w")
        try:
            return subprocess.Popen(args, stderr=null).wait()
        finally:
            null.close()
    elif WIN:
        # NOTE(review): the command string is built from ``url`` with only
        # double quotes stripped and run through the shell — treat ``url``
        # as trusted input here.
        if locate:
            url = _unquote_file(url.replace('"', ""))
            args = f'explorer /select,"{url}"'
        else:
            url = url.replace('"', "")
            wait_str = "/WAIT" if wait else ""
            args = f'start {wait_str} "" "{url}"'
        return os.system(args)
    elif CYGWIN:
        # CYGWIN is a module-level flag defined elsewhere in this file.
        # Same shell-string caveat as the Windows branch above.
        if locate:
            url = os.path.dirname(_unquote_file(url).replace('"', ""))
            args = f'cygstart "{url}"'
        else:
            url = url.replace('"', "")
            wait_str = "-w" if wait else ""
            args = f'cygstart {wait_str} "{url}"'
        return os.system(args)
    # Everything else (Linux/BSD): try xdg-open with list args.
    try:
        if locate:
            # Locating falls back to opening the containing directory.
            url = os.path.dirname(_unquote_file(url)) or "."
        else:
            url = _unquote_file(url)
        c = subprocess.Popen(["xdg-open", url])
        if wait:
            return c.wait()
        return 0
    except OSError:
        # xdg-open missing: last resort is the webbrowser module for
        # plain http(s) URLs; otherwise report failure with 1.
        if url.startswith(("http://", "https://")) and not locate and not wait:
            import webbrowser

            webbrowser.open(url)
            return 0
        return 1
The provided code snippet includes the necessary dependencies for implementing the `launch` function. Write a Python function `def launch(url: str, wait: bool = False, locate: bool = False) -> int` to solve the following problem:
This function launches the given URL (or filename) in the default viewer application for this file type. If this is an executable, it might launch the executable in a new session. The return value is the exit code of the launched application. Usually, ``0`` indicates success. Examples:: click.launch('https://click.palletsprojects.com/') click.launch('/my/downloaded/file', locate=True) .. versionadded:: 2.0 :param url: URL or filename of the thing to launch. :param wait: Wait for the program to exit before returning. This only works if the launched program blocks. In particular, ``xdg-open`` on Linux does not block. :param locate: if this is set to `True`, then instead of launching the application associated with the URL it will attempt to launch a file manager with the file located. This might have weird effects if the URL does not point to the filesystem.
Here is the function:
def launch(url: str, wait: bool = False, locate: bool = False) -> int:
    """Open *url* (a URL or a filename) with the operating system's
    default viewer application and report its exit code.

    For executables this may start a new session.  A return value of
    ``0`` normally indicates success.

    Examples::

        click.launch('https://click.palletsprojects.com/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: the URL or filename to hand to the OS.
    :param wait: block until the launched program terminates.  Only
        effective when the underlying launcher itself blocks; in
        particular ``xdg-open`` on Linux does not.
    :param locate: instead of opening the target, open a file manager
        with the file selected.  Only sensible when the URL points to
        the filesystem.
    """
    from ._termui_impl import open_url as _open_url

    return _open_url(url, wait=wait, locate=locate)
168,504 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
def raw_terminal() -> t.ContextManager[int]:
    """Return a context manager that switches the terminal into raw
    mode for the duration of the ``with`` block; the actual work is
    delegated to the platform-specific termui implementation."""
    from ._termui_impl import raw_terminal as _impl

    return _impl()
168,505 | import inspect
import io
import itertools
import os
import sys
import typing as t
from gettext import gettext as _
from ._compat import isatty
from ._compat import strip_ansi
from ._compat import WIN
from .exceptions import Abort
from .exceptions import UsageError
from .globals import resolve_color_default
from .types import Choice
from .types import convert_type
from .types import ParamType
from .utils import echo
from .utils import LazyFile
if t.TYPE_CHECKING:
from ._termui_impl import ProgressBar
def getchar(echo: bool = False) -> str:
    """Read a single character from the terminal and return it.

    The read always targets the real terminal, even when something is
    piped into standard input.  In rare situations more than one
    character is returned: multiple characters already sitting in the
    terminal buffer, standard input not actually being a terminal, or
    (on Windows) certain non-ASCII characters that look like
    special-key markers.

    .. versionadded:: 2.0

    :param echo: when ``True`` the character read is also echoed to
        the terminal; by default it is not shown.
    """
    global _getchar

    # Lazily import the platform-specific implementation and cache it
    # in the module-level ``_getchar`` slot for subsequent calls.
    if _getchar is None:
        from ._termui_impl import getchar as _impl

        _getchar = _impl

    return _getchar(echo)
def isatty(stream: t.IO) -> bool:
    """Best-effort check whether *stream* is attached to a terminal.

    Streams without a working ``isatty`` method — or whose ``isatty``
    raises — are reported as not being a terminal rather than letting
    the error propagate.
    """
    try:
        result = stream.isatty()
    except Exception:
        return False
    return result
def echo(
    message: t.Optional[t.Any] = None,
    file: t.Optional[t.IO[t.Any]] = None,
    nl: bool = True,
    err: bool = False,
    color: t.Optional[bool] = None,
) -> None:
    """Write *message* plus an optional newline to stdout or *file*.

    A more robust replacement for :func:`print`: it copes with
    misconfigured output encodings on Linux, supports Unicode on the
    Windows console, routes bytes to the underlying binary stream,
    handles colors/styles on Windows, strips ANSI codes when the
    target does not look like an interactive terminal, and always
    flushes.

    :param message: object to output; values that are not ``str`` or
        ``bytes`` are converted with :class:`str`.
    :param file: target stream; defaults to stdout (or stderr when
        *err* is set).
    :param nl: append a newline after the message (default).
    :param err: write to stderr instead of stdout.
    :param color: force color handling on or off instead of
        auto-detecting from the stream.

    .. versionchanged:: 6.0
        Support Unicode output on the Windows console.

    .. versionchanged:: 4.0
        Added the ``color`` parameter.

    .. versionadded:: 3.0
        Added the ``err`` parameter.

    .. versionchanged:: 2.0
        Support colors on Windows if colorama is installed.
    """
    if file is None:
        file = _default_text_stderr() if err else _default_text_stdout()

    # Anything that is not already text or bytes gets stringified.
    if message is None or isinstance(message, (str, bytes, bytearray)):
        out: t.Optional[t.Union[str, bytes]] = message
    else:
        out = str(message)

    if nl:
        out = out or ""
        if isinstance(out, str):
            out += "\n"
        else:
            out += b"\n"

    # Nothing to write: just make sure pending output is flushed.
    if not out:
        file.flush()
        return

    if isinstance(out, (bytes, bytearray)):
        # Bytes go straight to the underlying binary stream when one
        # can be located; the text layer is flushed first so ordering
        # between text and binary writes is preserved.  Other stream
        # types (e.g. StringIO) fall through to the plain write below.
        binary_file = _find_binary_writer(file)

        if binary_file is not None:
            file.flush()
            binary_file.write(out)
            binary_file.flush()
            return
    else:
        # Text path: decide whether ANSI codes are kept, stripped, or
        # (on Windows) translated by wrapping the stream.
        color = resolve_color_default(color)

        if should_strip_ansi(file, color):
            out = strip_ansi(out)
        elif WIN:
            if auto_wrap_for_ansi is not None:
                file = auto_wrap_for_ansi(file)  # type: ignore
            elif not color:
                out = strip_ansi(out)

    file.write(out)  # type: ignore
    file.flush()
The provided code snippet includes necessary dependencies for implementing the `pause` function. Write a Python function `def pause(info: t.Optional[str] = None, err: bool = False) -> None` to solve the following problem:
This command stops execution and waits for the user to press any key to continue. This is similar to the Windows batch "pause" command. If the program is not run through a terminal, this command will instead do nothing. .. versionadded:: 2.0 .. versionadded:: 4.0 Added the `err` parameter. :param info: The message to print before pausing. Defaults to ``"Press any key to continue..."``. :param err: if set to ``True``, the message goes to ``stderr`` instead of ``stdout``, the same as with echo.
Here is the function:
def pause(info: t.Optional[str] = None, err: bool = False) -> None:
    """Halt execution until the user presses any key, like the Windows
    batch ``pause`` command.

    When the program is not attached to a terminal on both stdin and
    stdout this is a no-op.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param info: message printed before pausing; defaults to
        ``"Press any key to continue..."``.
    :param err: if set to ``True``, the message goes to ``stderr``
        instead of ``stdout``, the same as with echo.
    """
    # Only meaningful when both ends are attached to a terminal.
    if not (isatty(sys.stdin) and isatty(sys.stdout)):
        return

    if info is None:
        info = _("Press any key to continue...")

    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            # The user aborting still counts as "key pressed".
            pass
    finally:
        # Terminate the prompt line even if getchar blew up.
        if info:
            echo(err=err)
168,506 | import re
import textwrap
constants = []
del constants, add_newdoc
def add_newdoc(module, name, doc):
    # Record the (name, doc) pair for later processing; *module* is
    # accepted for API symmetry but not stored.
    # NOTE(review): relies on a module-level ``constants`` list —
    # confirm it is (re)defined before the first call, since the
    # surrounding fragment also shows ``del constants``.
    constants.append((name, doc))
168,507 |
class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
    def warn(self, message):
        """Write a ``Warning:`` line for *message* to standard error."""
        sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
    def get_distribution(self):
        """Return the distutils distribution object for self.

        Returns None when no distribution has been initialized yet
        (callers in this class guard on that).
        """
        from numpy.distutils.core import get_distribution
        return get_distribution()
    def _wildcard_get_subpackage(self, subpackage_name,
                                 parent_name,
                                 caller_level = 1):
        """Expand a ``*`` wildcard in *subpackage_name* by scanning the
        filesystem under local_path and return the combined list of
        subpackage configurations."""
        l = subpackage_name.split('.')
        subpackage_path = njoin([self.local_path]+l)
        dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
        config_list = []
        for d in dirs:
            # Only real packages (containing __init__.py) count, and
            # build output directories are skipped.
            if not os.path.isfile(njoin(d, '__init__.py')):
                continue
            if 'build' in d.split(os.sep):
                continue
            # Reconstruct the dotted name from the trailing path parts.
            n = '.'.join(d.split(os.sep)[-len(l):])
            c = self.get_subpackage(n,
                                    parent_name = parent_name,
                                    caller_level = caller_level+1)
            config_list.extend(c)
        return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0, os.path.dirname(setup_py))
try:
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name, subpackage_name, setup_name)
setup_module = exec_mod_from_location(
'_'.join(n.split('.')), setup_py)
if not hasattr(setup_module, 'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name, subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name, subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name : str or None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path : str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name : str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name : str
name of the subpackage
subpackage_path : str
if given, the subpackage path such as the subpackage is in
subpackage_path / subpackage_name. If None,the subpackage is
assumed to be located in the local path / subpackage_name.
standalone : bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name, subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d, dict), repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path : seq or str
Argument can be either
* 2-sequence (<datadir suffix>, <path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths::
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat:
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d, p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping', path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list, path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list), path))
else:
for path in paths:
self.add_data_dir((d, path))
return
assert not is_glob_pattern(d), repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1, f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package, d, d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files : sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. ``*``.txt -> parent/a.txt, parent/b.txt
#. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
#. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
foo.dat
fun/
gun.dat
nun/
pun.dat
sun.dat
bar/
car.dat
can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage') or
'/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
\\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d, files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d, f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d, files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d, paths))
return
assert not is_glob_pattern(d), repr((d, filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
    def add_define_macros(self, macros):
        """Add define macros to configuration.

        Add the given sequence of macro (name, value) duples to the
        define_macros list. This list will be visible to all extension
        modules of the current package.

        NOTE(review): the historical docstring said "to the beginning"
        but ``extend`` appends — confirm intended ordering.
        """
        dist = self.get_distribution()
        if dist is not None:
            # Once the distribution exists, macros must be attached to
            # it directly so build commands pick them up.
            if not hasattr(dist, 'define_macros'):
                dist.define_macros = []
            dist.define_macros.extend(macros)
        else:
            self.define_macros.extend(macros)
    def add_include_dirs(self,*paths):
        """Add paths to configuration include directories.

        Add the given sequence of paths to the include_dirs list. This
        list will be visible to all extension modules of the current
        package.

        NOTE(review): the historical docstring said "to the beginning"
        but ``extend`` appends — confirm intended ordering.
        """
        # Resolve globs / make paths relative to local_path first.
        include_dirs = self.paths(paths)
        dist = self.get_distribution()
        if dist is not None:
            # Distribution already initialized: attach directly to it.
            if dist.include_dirs is None:
                dist.include_dirs = []
            dist.include_dirs.extend(include_dirs)
        else:
            self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
    def paths(self,*paths,**kws):
        """Apply glob to paths and prepend local_path if needed.

        Applies glob.glob(...) to each path in the sequence (if needed) and
        pre-pends the local_path if needed. Because this is called on all
        source lists, this allows wildcard characters to be specified in lists
        of sources for extension modules and libraries and scripts and allows
        path-names be relative to the source directory.

        Parameters
        ----------
        paths : sequence of str
            Path patterns; may contain glob wildcards.
        include_non_existing : bool, optional
            Keyword-only, defaults to True.  Forwarded to ``gpaths`` —
            presumably controls whether non-matching patterns are kept;
            TODO confirm against gpaths.
        """
        include_non_existing = kws.get('include_non_existing', True)
        return gpaths(paths,
                      local_path = self.local_path,
                      include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
def add_library(self, name, sources, **build_info):
    """Add a library to the configuration.

    Parameters
    ----------
    name : str
        Name of the library.
    sources : sequence
        List of source files or source-generating callables.  A callable
        receives the extension instance and a build directory and returns
        a source file, a list of source files, or None (meaning no sources
        are generated).
    build_info : dict, optional
        Allowed keys: depends, macros, include_dirs, extra_compiler_args,
        extra_f77_compile_args, extra_f90_compile_args, f2py_options,
        language.
    """
    # Delegate to the shared helper; install_dir=None means "not installed".
    self._add_library(name, sources, None, build_info)
    if self.get_distribution() is not None:
        self.warn('distutils distribution has been initialized,'
                  ' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
    """Shared implementation behind add_library/add_installed_library.

    Internal helper -- do not call directly.
    """
    # Work on a shallow copy so the caller's dict is left untouched.
    info = copy.copy(build_info)
    info['sources'] = sources
    # distutils barfs when 'depends' is missing (see gh-1134), so make
    # sure the key always exists before registering the library.
    info.setdefault('depends', [])
    self._fix_paths_dict(info)
    # Register with build_clib via the libraries list.
    self.libraries.append((name, info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
    """Like `add_library`, but the built library is also installed.

    Most C libraries used with distutils only serve to build python
    extensions; a library added through this method is installed as
    well, so third-party packages can link against it.

    Parameters
    ----------
    name : str
        Name of the installed library.
    sources : sequence
        The library's source files; see `add_library` for details.
    install_dir : str
        Installation path, relative to the current sub-package.
    build_info : dict, optional
        Allowed keys: depends, macros, include_dirs, extra_compiler_args,
        extra_f77_compile_args, extra_f90_compile_args, f2py_options,
        language.

    Returns
    -------
    None

    See Also
    --------
    add_library, add_npy_pkg_config, get_info

    Notes
    -----
    The best way to encode the options required to link against the
    specified C libraries is to use a "libname.ini" file and retrieve
    the options with `get_info` (see `add_npy_pkg_config` for more
    information).
    """
    info = build_info or {}
    # Resolve the install location relative to this package's directory.
    target_dir = os.path.join(self.package_path, install_dir)
    self._add_library(name, sources, target_dir, info)
    self.installed_libraries.append(InstallableLib(name, info, target_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
    """Generate and install a npy-pkg config file from a template.

    The config file produced from `template` is installed into
    `install_dir`, with `subst_dict` providing variable substitution.

    Parameters
    ----------
    template : str
        Path of the template, relative to the current package path.
    install_dir : str
        Where to install the npy-pkg config file, relative to the
        current package path.
    subst_dict : dict, optional
        When given, every occurrence of ``@key@`` in the template is
        replaced with ``subst_dict[key]`` at install time.  The install
        prefix is always available as ``@prefix@``, since it is hard to
        obtain reliably from setup.py.

    See also
    --------
    add_installed_library, get_info

    Notes
    -----
    Works for both standard installs and in-place builds; for in-place
    builds ``@prefix@`` refers to the source directory.

    Examples
    --------
    ::

        config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})

    Given a foo.ini.in template containing ``Name=@foo@`` and
    ``Cflags=-I@prefix@/include``, the installed lib/foo.ini carries
    ``Name=bar`` and ``Cflags=-Iprefix_dir/include``.

    When cross-compiling with numpy distutils, the default/generated
    files would link with the *host* libraries (e.g. libnpymath.a).
    Copy out the numpy/core/lib/npy-pkg-config directory, add a
    ``pkgdir`` value to the .ini files, and point the
    NPY_PKG_CONFIG_PATH environment variable at the directory holding
    the modified files so target libraries get linked instead.
    """
    substitutions = {} if subst_dict is None else subst_dict
    # Template paths are stored relative to this package's directory;
    # one list of pending (template, install_dir, subst) per package.
    entry = (os.path.join(self.package_path, template), install_dir,
             substitutions)
    self.installed_pkg_config.setdefault(self.name, []).append(entry)
def add_scripts(self, *files):
    """Add scripts to the configuration.

    The given files are appended to the scripts list; installed scripts
    end up under the <prefix>/bin/ directory.
    """
    resolved = self.paths(files)
    dist = self.get_distribution()
    if dist is None:
        # No distribution yet: keep the scripts on this configuration.
        self.scripts.extend(resolved)
        return
    if dist.scripts is None:
        dist.scripts = []
    dist.scripts.extend(resolved)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
    """Return a readable multi-line summary of all non-empty attributes."""
    from pprint import pformat
    keys = sorted(self.list_keys + self.dict_keys + self.extra_keys)
    lines = ['<-----', 'Configuration of '+self.name+':']
    for key in keys:
        value = getattr(self, key, None)
        if value:
            lines.append('%s = %s' % (key, pformat(value)))
    return '\n'.join(lines) + '\n----->'
def get_config_cmd(self):
    """Return the numpy.distutils config command instance.

    Side effect: when PATH is set, '.' is prepended to
    ``os.environ['PATH']`` (and left there) so config probes can find
    freshly built artifacts in the current directory.
    """
    cmd = get_cmd('config')
    cmd.ensure_finalized()
    cmd.dump_source = 0
    cmd.noisy = 0
    old_path = os.environ.get('PATH')
    if old_path:
        os.environ['PATH'] = os.pathsep.join(['.', old_path])
    return cmd
def get_build_temp_dir(self):
    """Return the directory where temporary build files should be placed."""
    build_cmd = get_cmd('build')
    build_cmd.ensure_finalized()
    return build_cmd.build_temp
def have_f77c(self):
    """Check for availability of a Fortran 77 compiler.

    Use it inside a source generating function to ensure that the setup
    distribution instance has been initialized.

    Notes
    -----
    True if a Fortran 77 compiler is available (i.e. a trivial Fortran
    77 subroutine could be compiled successfully).
    """
    code = '''
        subroutine simple
        end
        '''
    return self.get_config_cmd().try_compile(code, lang='f77')
def have_f90c(self):
    """Check for availability of a Fortran 90 compiler.

    Use it inside a source generating function to ensure that the setup
    distribution instance has been initialized.

    Notes
    -----
    True if a Fortran 90 compiler is available (i.e. a trivial Fortran
    90 subroutine could be compiled successfully).
    """
    code = '''
        subroutine simple
        end
        '''
    return self.get_config_cmd().try_compile(code, lang='f90')
def append_to(self, extlib):
    """Append this configuration's libraries and include_dirs to an
    extension or library item."""
    if not is_sequence(extlib):
        # Must be an Extension instance: extend its lists in place.
        from numpy.distutils.core import Extension
        assert isinstance(extlib, Extension), repr(extlib)
        extlib.libraries.extend(self.libraries)
        extlib.include_dirs.extend(self.include_dirs)
        return
    # A (name, build_info) pair as stored in the libraries list.
    _lib_name, build_info = extlib
    dict_append(build_info,
                libraries=self.libraries,
                include_dirs=self.include_dirs)
def _get_svn_revision(self, path):
    """Return path's SVN revision number, or None if undetermined.

    Tries the ``svnversion`` command first, then falls back to parsing
    the working copy's ``entries`` file (both the pre-1.4 XML layout
    and the later plain-text layout).
    """
    try:
        output = subprocess.check_output(['svnversion'], cwd=path)
    except (subprocess.CalledProcessError, OSError):
        pass
    else:
        # svnversion prints e.g. b'1234' or b'1234M'; keep the leading digits.
        m = re.match(rb'(?P<revision>\d+)', output)
        if m:
            return int(m.group('revision'))
    # Fallback: read the working-copy metadata directly.  With the
    # SVN_ASP_DOT_NET_HACK environment variable set on Windows, svn
    # names its admin directory '_svn' instead of '.svn'.
    if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
        entries = njoin(path, '_svn', 'entries')
    else:
        entries = njoin(path, '.svn', 'entries')
    if os.path.isfile(entries):
        with open(entries) as f:
            fstr = f.read()
        if fstr[:5] == '<?xml':  # pre 1.4
            m = re.search(r'revision="(?P<revision>\d+)"', fstr)
            if m:
                return int(m.group('revision'))
        else:  # non-xml entries file --- check to be sure that
            m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
            if m:
                return int(m.group('revision'))
    return None
def _get_hg_revision(self, path):
    """Return path's Mercurial revision number, or None if undetermined.

    Tries ``hg identify --num`` first, then falls back to reading the
    ``.hg/branch`` and ``.hg/branch.cache`` metadata files.
    """
    try:
        output = subprocess.check_output(
            ['hg', 'identify', '--num'], cwd=path)
    except (subprocess.CalledProcessError, OSError):
        pass
    else:
        # 'hg identify --num' prints the local revision number first.
        m = re.match(rb'(?P<revision>\d+)', output)
        if m:
            return int(m.group('revision'))
    branch_fn = njoin(path, '.hg', 'branch')
    branch_cache_fn = njoin(path, '.hg', 'branch.cache')
    if os.path.isfile(branch_fn):
        branch0 = None
        with open(branch_fn) as f:
            # NOTE(review): despite the variable name, .hg/branch holds
            # the current branch's *name*; it is matched against the
            # second column of branch.cache below -- confirm against
            # the hg cache-file format.
            revision0 = f.read().strip()

        branch_map = {}
        # NOTE(review): branch.cache is opened unconditionally; if the
        # file is absent this raises FileNotFoundError -- worth guarding.
        with open(branch_cache_fn, 'r') as f:
            for line in f:
                branch1, revision1 = line.split()[:2]
                if revision1==revision0:
                    branch0 = branch1
                try:
                    revision1 = int(revision1)
                except ValueError:
                    continue

                branch_map[branch1] = revision1

        return branch_map.get(branch0)
    return None
def get_version(self, version_file=None, version_variable=None):
    """Try to get version string of a package.

    Return a version string of the current package or None if the
    version information could not be detected.

    Notes
    -----
    This method scans files named __version__.py,
    <packagename>_version.py, version.py, and __svn_version__.py for
    string variables version, __version__, and <packagename>_version,
    until a version number is found.
    """
    # Cached from a previous call (or set directly on the instance).
    version = getattr(self, 'version', None)
    if version is not None:
        return version

    # Get version from version file.
    if version_file is None:
        files = ['__version__.py',
                 self.name.split('.')[-1]+'_version.py',
                 'version.py',
                 '__svn_version__.py',
                 '__hg_version__.py']
    else:
        files = [version_file]
    if version_variable is None:
        version_vars = ['version',
                        '__version__',
                        self.name.split('.')[-1]+'_version']
    else:
        version_vars = [version_variable]
    for f in files:
        fn = njoin(self.local_path, f)
        if os.path.isfile(fn):
            # Unused; retained from the old imp-based loader.
            info = ('.py', 'U', 1)
            name = os.path.splitext(os.path.basename(fn))[0]
            n = dot_join(self.name, name)
            try:
                version_module = exec_mod_from_location(
                    '_'.join(n.split('.')), fn)
            except ImportError as e:
                self.warn(str(e))
                version_module = None
            if version_module is None:
                continue

            # First matching variable in the loaded module wins.
            for a in version_vars:
                version = getattr(version_module, a, None)
                if version is not None:
                    break

            # Try if versioneer module
            try:
                version = version_module.get_versions()['version']
            except AttributeError:
                pass

            if version is not None:
                break

    if version is not None:
        self.version = version  # cache for subsequent calls
        return version

    # Get version as SVN or Mercurial revision number
    revision = self._get_svn_revision(self.local_path)
    if revision is None:
        revision = self._get_hg_revision(self.local_path)

    if revision is not None:
        version = str(revision)
        self.version = version

    return version
def make_svn_version_py(self, delete=True):
    """Appends a data function to the data_files list that will generate
    __svn_version__.py file to the current package directory.

    Generate package __svn_version__.py file from SVN revision number,
    it will be removed after python exits but will be available
    when sdist, etc commands are executed.

    Notes
    -----
    If __svn_version__.py existed before, nothing is done.

    This is intended for working with source directories that are
    in an SVN repository.
    """
    target = njoin(self.local_path, '__svn_version__.py')
    revision = self._get_svn_revision(self.local_path)
    if os.path.isfile(target) or revision is None:
        # File already present, or no SVN metadata to derive it from.
        return
    else:
        def generate_svn_version_py():
            # Write the version file once, then arrange for it (and its
            # compiled 'c' twin) to be removed at interpreter exit.
            if not os.path.isfile(target):
                version = str(revision)
                self.info('Creating %s (version=%r)' % (target, version))
                with open(target, 'w') as f:
                    f.write('version = %r\n' % (version))

            def rm_file(f=target, p=self.info):
                # `delete` is captured from the enclosing method call.
                if delete:
                    try: os.remove(f); p('removed '+f)
                    except OSError: pass
                    try: os.remove(f+'c'); p('removed '+f+'c')
                    except OSError: pass

            atexit.register(rm_file)

            return target

        self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
    """Appends a data function to the data_files list that will generate
    __hg_version__.py file to the current package directory.

    Generate package __hg_version__.py file from Mercurial revision,
    it will be removed after python exits but will be available
    when sdist, etc commands are executed.

    Notes
    -----
    If __hg_version__.py existed before, nothing is done.

    This is intended for working with source directories that are
    in an Mercurial repository.
    """
    target = njoin(self.local_path, '__hg_version__.py')
    revision = self._get_hg_revision(self.local_path)
    if os.path.isfile(target) or revision is None:
        # File already present, or no Mercurial metadata to derive it from.
        return
    else:
        def generate_hg_version_py():
            # Write the version file once, then arrange for it (and its
            # compiled 'c' twin) to be removed at interpreter exit.
            if not os.path.isfile(target):
                version = str(revision)
                self.info('Creating %s (version=%r)' % (target, version))
                with open(target, 'w') as f:
                    f.write('version = %r\n' % (version))

            def rm_file(f=target, p=self.info):
                # `delete` is captured from the enclosing method call.
                if delete:
                    try: os.remove(f); p('removed '+f)
                    except OSError: pass
                    try: os.remove(f+'c'); p('removed '+f+'c')
                    except OSError: pass

            atexit.register(rm_file)

            return target

        self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self, name='__config__'):
    """Generate a package __config__.py file exposing the system_info
    used while building the package.

    The generated module is installed into the package's installation
    directory.
    """
    # generate_config_py is a source-generating callable resolved at
    # build time by numpy.distutils.
    self.py_modules.append((self.name, name, generate_config_py))
def get_info(self, *names):
    """Get resources information.

    Collect the information (from system_info.get_info) for every name
    in `names` into a single dictionary and return it.
    """
    from .system_info import get_info, dict_append
    info_dict = {}
    for resource in names:
        dict_append(info_dict, **get_info(resource))
    return info_dict
def configuration(parent_package='', top_path=None):
    """Build the top-level numpy package configuration for setup()."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('numpy', parent_package, top_path)
    # Regular sub-packages, in the original registration order.
    for subpackage in ('array_api', 'compat', 'core', 'distutils', 'doc',
                       'f2py', 'fft', 'lib', 'linalg', 'ma', 'matrixlib',
                       'polynomial', 'random', 'testing', 'typing',
                       '_typing'):
        config.add_subpackage(subpackage)
    config.add_data_dir('doc')
    config.add_data_files('py.typed')
    config.add_data_files('*.pyi')
    config.add_subpackage('tests')
    config.add_subpackage('_pyinstaller')
    config.make_config_py()  # installs __config__.py
    return config
168,508 |
class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
             package_name=None,
             parent_name=None,
             top_path=None,
             package_path=None,
             caller_level=1,
             setup_name='setup.py',
             **attrs):
    """Construct configuration instance of a package.

    package_name -- name of the package
                    Ex.: 'distutils'
    parent_name  -- name of the parent package
                    Ex.: 'numpy'
    top_path     -- directory of the toplevel package
                    Ex.: the directory where the numpy package source sits
    package_path -- directory of package. Will be computed by magic from the
                    directory of the caller module if not specified
                    Ex.: the directory where numpy.distutils is
    caller_level -- frame level to caller namespace, internal parameter.
    """
    self.name = dot_join(parent_name, package_name)
    self.version = None

    caller_frame = get_frame(caller_level)
    self.local_path = get_path_from_frame(caller_frame, top_path)
    # local_path -- directory of a file (usually setup.py) that
    #               defines a configuration() function.
    if top_path is None:
        top_path = self.local_path
        self.local_path = ''
    if package_path is None:
        package_path = self.local_path
    elif os.path.isdir(njoin(self.local_path, package_path)):
        # A relative package_path is resolved against local_path.
        package_path = njoin(self.local_path, package_path)
    if not os.path.isdir(package_path or '.'):
        raise ValueError("%r is not a directory" % (package_path,))
    self.top_path = top_path
    self.package_path = package_path
    # this is the relative path in the installed package
    self.path_in_package = os.path.join(*self.name.split('.'))

    # Per-instance copies of the class-level key registries.
    self.list_keys = self._list_keys[:]
    self.dict_keys = self._dict_keys[:]

    for n in self.list_keys:
        v = copy.copy(attrs.get(n, []))
        setattr(self, n, as_list(v))

    for n in self.dict_keys:
        v = copy.copy(attrs.get(n, {}))
        setattr(self, n, v)

    known_keys = self.list_keys + self.dict_keys
    self.extra_keys = self._extra_keys[:]
    # Any remaining keyword becomes an attribute, classified by type so
    # later dict_append/todict calls know how to merge it.
    for n in attrs.keys():
        if n in known_keys:
            continue
        a = attrs[n]
        setattr(self, n, a)
        if isinstance(a, list):
            self.list_keys.append(n)
        elif isinstance(a, dict):
            self.dict_keys.append(n)
        else:
            self.extra_keys.append(n)

    if os.path.exists(njoin(package_path, '__init__.py')):
        # The directory is an importable package: register it.
        self.packages.append(self.name)
        self.package_dir[self.name] = package_path

    self.options = dict(
        ignore_setup_xxx_py = False,
        assume_default_configuration = False,
        delegate_options_to_subpackages = False,
        quiet = False,
    )

    # Walk up the call stack looking for a 'self' in the caller's
    # namespace; if the caller is itself a Configuration that delegates
    # options to subpackages, inherit its options.
    caller_instance = None
    for i in range(1, 3):
        try:
            f = get_frame(i)
        except ValueError:
            break
        try:
            caller_instance = eval('self', f.f_globals, f.f_locals)
            break
        except NameError:
            pass
    if isinstance(caller_instance, self.__class__):
        if caller_instance.options['delegate_options_to_subpackages']:
            self.set_options(**caller_instance.options)

    self.setup_name = setup_name
def todict(self):
    """Return a dictionary compatible with the keyword arguments of the
    distutils setup function.

    Examples
    --------
    >>> setup(**config.todict()) #doctest: +SKIP
    """
    # Deduplicate data_files before exporting them.
    self._optimize_data_files()
    known_keys = self.list_keys + self.dict_keys + self.extra_keys
    # Export only attributes carrying a truthy value.
    return {key: getattr(self, key)
            for key in known_keys if getattr(self, key)}
def info(self, message):
    """Print *message* unless the configuration is in quiet mode."""
    if self.options['quiet']:
        return
    print(message)
def warn(self, message):
    """Emit *message* on stderr, prefixed with 'Warning: '."""
    text = 'Warning: %s\n' % (message,)
    sys.stderr.write(text)
def set_options(self, **options):
    """Configure this Configuration instance.

    Recognized options:
      - ignore_setup_xxx_py
      - assume_default_configuration
      - delegate_options_to_subpackages
      - quiet

    Raises
    ------
    ValueError
        For any option name this configuration does not know about.
    """
    for key, value in options.items():
        if key not in self.options:
            raise ValueError('Unknown option: '+key)
        self.options[key] = value
def get_distribution(self):
    """Return the distutils distribution object for self."""
    from numpy.distutils.core import get_distribution as _core_get_distribution
    return _core_get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
                             parent_name,
                             caller_level = 1):
    """Expand a '*' wildcard in subpackage_name and return the list of
    configurations of all matching subdirectories (internal helper)."""
    l = subpackage_name.split('.')
    subpackage_path = njoin([self.local_path]+l)
    # Every directory matched by the glob is a candidate subpackage.
    dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
    config_list = []
    for d in dirs:
        if not os.path.isfile(njoin(d, '__init__.py')):
            # Not an importable package.
            continue
        if 'build' in d.split(os.sep):
            # Skip build artifacts.
            continue
        # Reconstruct the dotted name from the trailing path components.
        n = '.'.join(d.split(os.sep)[-len(l):])
        c = self.get_subpackage(n,
                                parent_name = parent_name,
                                caller_level = caller_level+1)
        config_list.extend(c)
    return config_list
def _get_configuration_from_setup_py(self, setup_py,
                                     subpackage_name,
                                     subpackage_path,
                                     parent_name,
                                     caller_level = 1):
    """Load a subpackage Configuration by executing its setup.py
    (internal helper)."""
    # In case setup_py imports local modules:
    sys.path.insert(0, os.path.dirname(setup_py))
    try:
        setup_name = os.path.splitext(os.path.basename(setup_py))[0]
        n = dot_join(self.name, subpackage_name, setup_name)
        setup_module = exec_mod_from_location(
            '_'.join(n.split('.')), setup_py)
        if not hasattr(setup_module, 'configuration'):
            # setup.py exists but defines no configuration(): fall back
            # to a default Configuration for the subpackage.
            if not self.options['assume_default_configuration']:
                self.warn('Assuming default configuration '\
                          '(%s does not define configuration())'\
                          % (setup_module))
            config = Configuration(subpackage_name, parent_name,
                                   self.top_path, subpackage_path,
                                   caller_level = caller_level + 1)
        else:
            pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
            args = (pn,)
            # Older configuration() signatures take only (parent_name);
            # newer ones also accept top_path.
            if setup_module.configuration.__code__.co_argcount > 1:
                args = args + (self.top_path,)
            config = setup_module.configuration(*args)
        if config.name!=dot_join(parent_name, subpackage_name):
            self.warn('Subpackage %r configuration returned as %r' % \
                      (dot_join(parent_name, subpackage_name), config.name))
    finally:
        # Undo the sys.path manipulation above.
        del sys.path[0]
    return config
def get_subpackage(self,subpackage_name,
                   subpackage_path=None,
                   parent_name=None,
                   caller_level = 1):
    """Return list of subpackage configurations.

    Parameters
    ----------
    subpackage_name : str or None
        Name of the subpackage to get the configuration. '*' in
        subpackage_name is handled as a wildcard.
    subpackage_path : str
        If None, then the path is assumed to be the local path plus the
        subpackage_name. If a setup.py file is not found in the
        subpackage_path, then a default configuration is used.
    parent_name : str
        Parent name.
    caller_level : int
        Frame level to the caller namespace, internal parameter.
    """
    if subpackage_name is None:
        if subpackage_path is None:
            raise ValueError(
                "either subpackage_name or subpackage_path must be specified")
        # Derive the name from the last path component.
        subpackage_name = os.path.basename(subpackage_path)

    # handle wildcards
    l = subpackage_name.split('.')
    if subpackage_path is None and '*' in subpackage_name:
        return self._wildcard_get_subpackage(subpackage_name,
                                             parent_name,
                                             caller_level = caller_level+1)
    assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
    if subpackage_path is None:
        subpackage_path = njoin([self.local_path] + l)
    else:
        # Only the leading components of a dotted name contribute to the
        # path; the final component is the package directory itself.
        subpackage_path = njoin([subpackage_path] + l[:-1])
        subpackage_path = self.paths([subpackage_path])[0]
    setup_py = njoin(subpackage_path, self.setup_name)
    if not self.options['ignore_setup_xxx_py']:
        if not os.path.isfile(setup_py):
            # Accept the legacy 'setup_<name>.py' naming as well.
            setup_py = njoin(subpackage_path,
                             'setup_%s.py' % (subpackage_name))
    if not os.path.isfile(setup_py):
        # No setup script: build a default Configuration instead.
        if not self.options['assume_default_configuration']:
            self.warn('Assuming default configuration '\
                      '(%s/{setup_%s,setup}.py was not found)' \
                      % (os.path.dirname(setup_py), subpackage_name))
        config = Configuration(subpackage_name, parent_name,
                               self.top_path, subpackage_path,
                               caller_level = caller_level+1)
    else:
        config = self._get_configuration_from_setup_py(
            setup_py,
            subpackage_name,
            subpackage_path,
            parent_name,
            caller_level = caller_level + 1)
    if config:
        return [config]
    else:
        return []
def add_subpackage(self,subpackage_name,
                   subpackage_path=None,
                   standalone = False):
    """Add a sub-package to the current Configuration instance.

    This is useful in a setup.py script for adding sub-packages to a
    package.

    Parameters
    ----------
    subpackage_name : str
        name of the subpackage
    subpackage_path : str
        if given, the subpackage path such as the subpackage is in
        subpackage_path / subpackage_name. If None,the subpackage is
        assumed to be located in the local path / subpackage_name.
    standalone : bool
        if True, the subpackage is configured without a parent name.
    """
    if standalone:
        parent_name = None
    else:
        parent_name = self.name
    # caller_level=2: the frame of interest is our caller's, not ours.
    config_list = self.get_subpackage(subpackage_name, subpackage_path,
                                      parent_name = parent_name,
                                      caller_level = 2)
    if not config_list:
        self.warn('No configuration returned, assuming unavailable.')
    for config in config_list:
        d = config
        if isinstance(config, Configuration):
            d = config.todict()
        assert isinstance(d, dict), repr(type(d))

        self.info('Appending %s configuration to %s' \
                  % (d.get('name'), self.name))
        self.dict_append(**d)

    dist = self.get_distribution()
    if dist is not None:
        self.warn('distutils distribution has been initialized,'\
                  ' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
    """Recursively add files under data_path to data_files list.

    Recursively add files under data_path to the list of data_files to be
    installed (and distributed). The data_path can be either a relative
    path-name, or an absolute path-name, or a 2-tuple where the first
    argument shows where in the install directory the data directory
    should be installed to.

    Parameters
    ----------
    data_path : seq or str
        Argument can be either

            * 2-sequence (<datadir suffix>, <path to data directory>)
            * path to data directory where python datadir suffix defaults
              to package dir.

    Notes
    -----
    Rules for installation paths::

        foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
        (gun, foo/bar) -> parent/gun
        foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
        (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
        (gun/*, foo/*) -> parent/gun/a, parent/gun/b
        /foo/bar -> (bar, /foo/bar) -> parent/bar
        (gun, /foo/bar) -> parent/gun
        (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar

    Examples
    --------
    For example suppose the source directory contains fun/foo.dat and
    fun/bar/car.dat:

    >>> self.add_data_dir('fun')                       #doctest: +SKIP
    >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
    >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP

    Will install data-files to the locations::

        <package install directory>/
          fun/
            foo.dat
            bar/
              car.dat
          sun/
            foo.dat
            bar/
              car.dat
          gun/
            foo.dat
            car.dat
    """
    # Normalize the argument into (install suffix d, source data_path).
    if is_sequence(data_path):
        d, data_path = data_path
    else:
        d = None
    if is_sequence(data_path):
        # Multiple source dirs sharing one suffix: recurse per dir.
        # (Side-effect comprehension retained verbatim.)
        [self.add_data_dir((d, p)) for p in data_path]
        return
    if not is_string(data_path):
        raise TypeError("not a string: %r" % (data_path,))
    if d is None:
        # No explicit suffix: derive it from the data path itself.
        if os.path.isabs(data_path):
            return self.add_data_dir((os.path.basename(data_path), data_path))
        return self.add_data_dir((data_path, data_path))
    paths = self.paths(data_path, include_non_existing=False)
    if is_glob_pattern(data_path):
        if is_glob_pattern(d):
            # Both suffix and path are patterns: fill the suffix's '*'
            # components with the matched path components, right-to-left.
            pattern_list = allpath(d).split(os.sep)
            pattern_list.reverse()
            # /a/*//b/ -> /a/*/b
            rl = list(range(len(pattern_list)-1)); rl.reverse()
            for i in rl:
                if not pattern_list[i]:
                    del pattern_list[i]
            #
            for path in paths:
                if not os.path.isdir(path):
                    print('Not a directory, skipping', path)
                    continue
                rpath = rel_path(path, self.local_path)
                path_list = rpath.split(os.sep)
                path_list.reverse()
                target_list = []
                i = 0
                for s in pattern_list:
                    if is_glob_pattern(s):
                        if i>=len(path_list):
                            raise ValueError('cannot fill pattern %r with %r' \
                                  % (d, path))
                        target_list.append(path_list[i])
                    else:
                        assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
                        target_list.append(s)
                    i += 1
                if path_list[i:]:
                    self.warn('mismatch of pattern_list=%s and path_list=%s'\
                              % (pattern_list, path_list))
                target_list.reverse()
                self.add_data_dir((os.sep.join(target_list), path))
        else:
            # Fixed suffix, pattern path: recurse per matched dir.
            for path in paths:
                self.add_data_dir((d, path))
        return
    assert not is_glob_pattern(d), repr(d)
    dist = self.get_distribution()
    if dist is not None and dist.data_files is not None:
        data_files = dist.data_files
    else:
        data_files = self.data_files
    # Base case: walk the directory and register every file found.
    for path in paths:
        for d1, f in list(general_source_directories_files(path)):
            target_path = os.path.join(self.path_in_package, d, d1)
            data_files.append((target_path, f))
def _optimize_data_files(self):
    """Collapse data_files so each install target appears once with a
    duplicate-free file list (internal helper)."""
    merged = {}
    for target, files in self.data_files:
        merged.setdefault(target, set()).update(files)
    # Rewrite the list in place so shared references stay valid.
    self.data_files[:] = [(target, list(files))
                          for target, files in merged.items()]
def add_data_files(self,*files):
    """Add data files to configuration data_files.

    Parameters
    ----------
    files : sequence
        Argument(s) can be either

            * 2-sequence (<datadir prefix>,<path to data file(s)>)
            * paths to data files where python datadir prefix defaults
              to package dir.

    Notes
    -----
    The form of each element of the files sequence is very flexible
    allowing many combinations of where to get the files from the package
    and where they should ultimately be installed on the system. The most
    basic usage is for an element of the files argument sequence to be a
    simple filename. This will cause that file from the local path to be
    installed to the installation path of the self.name package (package
    path). The file argument can also be a relative path in which case the
    entire relative path will be installed into the package directory.
    Finally, the file can be an absolute path name in which case the file
    will be found at the absolute path name but installed to the package
    path.

    This basic behavior can be augmented by passing a 2-tuple in as the
    file argument. The first element of the tuple should specify the
    relative path (under the package install directory) where the
    remaining sequence of files should be installed to (it has nothing to
    do with the file-names in the source distribution). The second element
    of the tuple is the sequence of files that should be installed. The
    files in this sequence can be filenames, relative paths, or absolute
    paths. For absolute paths the file will be installed in the top-level
    package installation directory (regardless of the first argument).
    Filenames and relative path names will be installed in the package
    install directory under the path name given as the first element of
    the tuple.

    Rules for installation paths:

    #. file.txt -> (., file.txt)-> parent/file.txt
    #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
    #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
    #. ``*``.txt -> parent/a.txt, parent/b.txt
    #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
    #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
    #. (sun, file.txt) -> parent/sun/file.txt
    #. (sun, bar/file.txt) -> parent/sun/file.txt
    #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
    #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
    #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
    #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt

    An additional feature is that the path to a data-file can actually be
    a function that takes no arguments and returns the actual path(s) to
    the data-files. This is useful when the data files are generated while
    building the package.

    Examples
    --------
    Add files to the list of data_files to be included with the package.

    >>> self.add_data_files('foo.dat',
    ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
    ...     'bar/cat.dat',
    ...     '/full/path/to/can.dat') #doctest: +SKIP

    will install these data files to::

        <package install directory>/
         foo.dat
         fun/
           gun.dat
           nun/
             pun.dat
         sun.dat
         bar/
           car.dat
         can.dat

    where <package install directory> is the package (or sub-package)
    directory such as '/usr/lib/python2.4/site-packages/mypackage'.
    """
    # Reduce multiple arguments to repeated single-argument calls.
    if len(files)>1:
        for f in files:
            self.add_data_files(f)
        return
    assert len(files)==1
    # Unpack an optional (install prefix d, file spec) pair.
    if is_sequence(files[0]):
        d, files = files[0]
    else:
        d = None
    if is_string(files):
        filepat = files
    elif is_sequence(files):
        if len(files)==1:
            filepat = files[0]
        else:
            # Several specs under one prefix: recurse per spec.
            for f in files:
                self.add_data_files((d, f))
            return
    else:
        raise TypeError(repr(type(files)))
    if d is None:
        # Derive the install prefix from the file spec itself.
        if hasattr(filepat, '__call__'):
            d = ''
        elif os.path.isabs(filepat):
            d = ''
        else:
            d = os.path.dirname(filepat)
        self.add_data_files((d, files))
        return
    paths = self.paths(filepat, include_non_existing=False)
    if is_glob_pattern(filepat):
        if is_glob_pattern(d):
            # Both prefix and file spec are patterns: fill each '*' in
            # the prefix with the corresponding matched path component.
            pattern_list = d.split(os.sep)
            pattern_list.reverse()
            for path in paths:
                path_list = path.split(os.sep)
                path_list.reverse()
                path_list.pop() # filename
                target_list = []
                i = 0
                for s in pattern_list:
                    if is_glob_pattern(s):
                        target_list.append(path_list[i])
                        i += 1
                    else:
                        target_list.append(s)
                target_list.reverse()
                self.add_data_files((os.sep.join(target_list), path))
        else:
            self.add_data_files((d, paths))
        return
    assert not is_glob_pattern(d), repr((d, filepat))
    dist = self.get_distribution()
    if dist is not None and dist.data_files is not None:
        data_files = dist.data_files
    else:
        data_files = self.data_files
    # Base case: register the resolved paths under the computed prefix.
    data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
    def add_define_macros(self, macros):
        """Add define macros to configuration

        Append the given sequence of (name, value) macro duples to the
        define_macros list.  This list will be visible to all extension
        modules of the current package.
        """
        dist = self.get_distribution()
        if dist is not None:
            # A distribution is already set up: record the macros on it so
            # they take effect for the current build.
            if not hasattr(dist, 'define_macros'):
                dist.define_macros = []
            dist.define_macros.extend(macros)
        else:
            # No distribution yet: keep them on this Configuration instance.
            self.define_macros.extend(macros)
    def add_include_dirs(self,*paths):
        """Add paths to configuration include directories.

        Append the given sequence of paths (after glob expansion via
        ``self.paths``) to the include_dirs list.  This list will be visible
        to all extension modules of the current package.
        """
        include_dirs = self.paths(paths)
        dist = self.get_distribution()
        if dist is not None:
            # A distribution is already set up: record the dirs on it directly.
            if dist.include_dirs is None:
                dist.include_dirs = []
            dist.include_dirs.extend(include_dirs)
        else:
            self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name : str
name of the extension
sources : seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs :
define_macros :
undef_macros :
library_dirs :
libraries :
runtime_library_dirs :
extra_objects :
extra_compile_args :
extra_link_args :
extra_f77_compile_args :
extra_f90_compile_args :
export_symbols :
swig_opts :
depends :
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language :
f2py_options :
module_dirs :
extra_info : dict or list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name, name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries', [])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname, tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname, lpath = libname.split('@', 1)
lpath = os.path.abspath(njoin(self.local_path, lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None, lpath,
caller_level = 2)
if isinstance(c, Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries', [])]:
llname = l.split('__OF__', 1)[0]
if llname == lname:
c.pop('name', None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
ext_args['define_macros'] = \
self.define_macros + ext_args.get('define_macros', [])
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
    def add_library(self,name,sources,**build_info):
        """
        Add library to configuration.

        Parameters
        ----------
        name : str
            Name of the extension.
        sources : sequence
            List of the sources. The list of sources may contain functions
            (called source generators) which must take an extension instance
            and a build directory as inputs and return a source file or list of
            source files or None. If None is returned then no sources are
            generated. If the Extension instance has no sources after
            processing all source generators, then no extension module is
            built.
        build_info : dict, optional
            The following keys are allowed:

            * depends
            * macros
            * include_dirs
            * extra_compiler_args
            * extra_f77_compile_args
            * extra_f90_compile_args
            * f2py_options
            * language
        """
        # install_dir=None marks this as a build-time helper library (built
        # by build_clib) rather than an installed one.
        self._add_library(name, sources, None, build_info)
        dist = self.get_distribution()
        if dist is not None:
            self.warn('distutils distribution has been initialized,'\
                      ' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
    def add_installed_library(self, name, sources, install_dir, build_info=None):
        """
        Similar to add_library, but the specified library is installed.

        Most C libraries used with `distutils` are only used to build python
        extensions, but libraries built through this method will be installed
        so that they can be reused by third-party packages.

        Parameters
        ----------
        name : str
            Name of the installed library.
        sources : sequence
            List of the library's source files. See `add_library` for details.
        install_dir : str
            Path to install the library, relative to the current sub-package.
        build_info : dict, optional
            The following keys are allowed:

            * depends
            * macros
            * include_dirs
            * extra_compiler_args
            * extra_f77_compile_args
            * extra_f90_compile_args
            * f2py_options
            * language

        Returns
        -------
        None

        See Also
        --------
        add_library, add_npy_pkg_config, get_info

        Notes
        -----
        The best way to encode the options required to link against the specified
        C libraries is to use a "libname.ini" file, and use `get_info` to
        retrieve the required options (see `add_npy_pkg_config` for more
        information).
        """
        if not build_info:
            build_info = {}
        # Resolve the install directory relative to this sub-package.
        install_dir = os.path.join(self.package_path, install_dir)
        self._add_library(name, sources, install_dir, build_info)
        self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
When cross-compiling with numpy distutils, it might be necessary to
use modified npy-pkg-config files. Using the default/generated files
will link with the host libraries (i.e. libnpymath.a). For
cross-compilation you of-course need to link with target libraries,
while using the host Python installation.
You can copy out the numpy/core/lib/npy-pkg-config directory, add a
pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
variable to point to the directory with the modified npy-pkg-config
files.
Example npymath.ini modified for cross-compilation::
[meta]
Name=npymath
Description=Portable, core math library implementing C99 standard
Version=0.1
[variables]
pkgname=numpy.core
pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
prefix=${pkgdir}
libdir=${prefix}/lib
includedir=${prefix}/include
[default]
Libs=-L${libdir} -lnpymath
Cflags=-I${includedir}
Requires=mlib
[msvc]
Libs=/LIBPATH:${libdir} npymath.lib
Cflags=/INCLUDE:${includedir}
Requires=mlib
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
    def dict_append(self,**dict):
        """Merge the given keyword attributes into this configuration.

        Known list-valued keys are extended, known dict-valued keys are
        updated, and unknown keys are inherited as extra attributes (with a
        warning).  Note: the parameter name deliberately shadows the builtin
        ``dict``; it is kept for backward compatibility.
        """
        for key in self.list_keys:
            a = getattr(self, key)
            a.extend(dict.get(key, []))
        for key in self.dict_keys:
            a = getattr(self, key)
            a.update(dict.get(key, {}))
        known_keys = self.list_keys + self.dict_keys + self.extra_keys
        for key in dict.keys():
            if key not in known_keys:
                a = getattr(self, key, None)
                # Already holds an equal value: nothing to inherit.
                if a and a==dict[key]: continue
                self.warn('Inheriting attribute %r=%r from %r' \
                          % (key, dict[key], dict.get('name', '?')))
                setattr(self, key, dict[key])
                self.extra_keys.append(key)
            elif key in self.extra_keys:
                # Extra keys are set once; later attempts are ignored.
                self.info('Ignoring attempt to set %r (from %r to %r)' \
                          % (key, getattr(self, key), dict[key]))
            elif key in known_keys:
                # key is already processed above
                pass
            else:
                raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
    def get_config_cmd(self):
        """
        Returns the numpy.distutils config command instance.
        """
        cmd = get_cmd('config')
        cmd.ensure_finalized()
        cmd.dump_source = 0
        cmd.noisy = 0
        old_path = os.environ.get('PATH')
        if old_path:
            # Prepend the current directory to PATH so helper executables
            # built by the config checks can be found.
            # NOTE(review): os.environ is modified globally and never
            # restored — confirm this is intentional.
            path = os.pathsep.join(['.', old_path])
            os.environ['PATH'] = path
        return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib, Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
    def _get_svn_revision(self, path):
        """Return path's SVN revision number.
        """
        # First try the `svnversion` command-line tool.
        try:
            output = subprocess.check_output(['svnversion'], cwd=path)
        except (subprocess.CalledProcessError, OSError):
            pass
        else:
            m = re.match(rb'(?P<revision>\d+)', output)
            if m:
                return int(m.group('revision'))
        # Fall back to parsing the .svn/entries file directly.
        if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
            entries = njoin(path, '_svn', 'entries')
        else:
            entries = njoin(path, '.svn', 'entries')
        if os.path.isfile(entries):
            with open(entries) as f:
                fstr = f.read()
            if fstr[:5] == '<?xml':  # pre 1.4: XML entries file
                m = re.search(r'revision="(?P<revision>\d+)"', fstr)
                if m:
                    return int(m.group('revision'))
            else:  # non-xml entries file --- check to be sure that
                m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
                if m:
                    return int(m.group('revision'))
        # No SVN metadata found.
        return None
    def _get_hg_revision(self, path):
        """Return path's Mercurial revision number.
        """
        # First try `hg identify --num`.
        try:
            output = subprocess.check_output(
                ['hg', 'identify', '--num'], cwd=path)
        except (subprocess.CalledProcessError, OSError):
            pass
        else:
            m = re.match(rb'(?P<revision>\d+)', output)
            if m:
                return int(m.group('revision'))
        # Fall back to parsing the .hg metadata files directly.
        branch_fn = njoin(path, '.hg', 'branch')
        branch_cache_fn = njoin(path, '.hg', 'branch.cache')
        if os.path.isfile(branch_fn):
            branch0 = None
            with open(branch_fn) as f:
                # NOTE(review): .hg/branch normally holds the branch *name*,
                # yet the value is compared against the second field of
                # branch.cache lines below — confirm against the
                # branch.cache format.
                revision0 = f.read().strip()
            branch_map = {}
            with open(branch_cache_fn, 'r') as f:
                for line in f:
                    branch1, revision1 = line.split()[:2]
                    if revision1==revision0:
                        branch0 = branch1
                    try:
                        revision1 = int(revision1)
                    except ValueError:
                        continue
                    branch_map[branch1] = revision1
            return branch_map.get(branch0)
        return None
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self, 'version', None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py',
'__hg_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path, f)
if os.path.isfile(fn):
info = ('.py', 'U', 1)
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name, name)
try:
version_module = exec_mod_from_location(
'_'.join(n.split('.')), fn)
except ImportError as e:
self.warn(str(e))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module, a, None)
if version is not None:
break
# Try if versioneer module
try:
version = version_module.get_versions()['version']
except AttributeError:
pass
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN or Mercurial revision number
revision = self._get_svn_revision(self.local_path)
if revision is None:
revision = self._get_hg_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
    def make_svn_version_py(self, delete=True):
        """Appends a data function to the data_files list that will generate
        __svn_version__.py file to the current package directory.

        Generate package __svn_version__.py file from SVN revision number,
        it will be removed after python exits but will be available
        when sdist, etc commands are executed.

        Notes
        -----
        If __svn_version__.py existed before, nothing is done.

        This is
        intended for working with source directories that are in an SVN
        repository.
        """
        target = njoin(self.local_path, '__svn_version__.py')
        revision = self._get_svn_revision(self.local_path)
        if os.path.isfile(target) or revision is None:
            return
        else:
            def generate_svn_version_py():
                # Write the version file once, on first call.
                if not os.path.isfile(target):
                    version = str(revision)
                    self.info('Creating %s (version=%r)' % (target, version))
                    with open(target, 'w') as f:
                        f.write('version = %r\n' % (version))
                # Remove the generated file (and its compiled form) when the
                # interpreter exits, unless `delete` is false.
                def rm_file(f=target,p=self.info):
                    if delete:
                        try: os.remove(f); p('removed '+f)
                        except OSError: pass
                        try: os.remove(f+'c'); p('removed '+f+'c')
                        except OSError: pass
                atexit.register(rm_file)
                return target
            self.add_data_files(('', generate_svn_version_py()))
    def make_hg_version_py(self, delete=True):
        """Appends a data function to the data_files list that will generate
        __hg_version__.py file to the current package directory.

        Generate package __hg_version__.py file from Mercurial revision,
        it will be removed after python exits but will be available
        when sdist, etc commands are executed.

        Notes
        -----
        If __hg_version__.py existed before, nothing is done.

        This is intended for working with source directories that are
        in an Mercurial repository.
        """
        target = njoin(self.local_path, '__hg_version__.py')
        revision = self._get_hg_revision(self.local_path)
        if os.path.isfile(target) or revision is None:
            return
        else:
            def generate_hg_version_py():
                # Write the version file once, on first call.
                if not os.path.isfile(target):
                    version = str(revision)
                    self.info('Creating %s (version=%r)' % (target, version))
                    with open(target, 'w') as f:
                        f.write('version = %r\n' % (version))
                # Remove the generated file (and its compiled form) when the
                # interpreter exits, unless `delete` is false.
                def rm_file(f=target,p=self.info):
                    if delete:
                        try: os.remove(f); p('removed '+f)
                        except OSError: pass
                        try: os.remove(f+'c'); p('removed '+f+'c')
                        except OSError: pass
                atexit.register(rm_file)
                return target
            self.add_data_files(('', generate_hg_version_py()))
    def make_config_py(self,name='__config__'):
        """Generate package __config__.py file containing system_info
        information used during building the package.

        This file is installed to the
        package installation directory.
        """
        # Registered as a (package, module name, generator callable) triple;
        # the actual file is produced at build time by generate_config_py.
        self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def configuration(parent_package='', top_path=None):
    """Build the distutils configuration for the ``numpy.typing`` package."""
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('typing', parent_package, top_path)
    cfg.add_subpackage('tests')
    cfg.add_data_dir('tests/data')
    return cfg
168,509 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
def _get_precision_dict() -> dict[str, str]:
names = [
("_NBitByte", np.byte),
("_NBitShort", np.short),
("_NBitIntC", np.intc),
("_NBitIntP", np.intp),
("_NBitInt", np.int_),
("_NBitLongLong", np.longlong),
("_NBitHalf", np.half),
("_NBitSingle", np.single),
("_NBitDouble", np.double),
("_NBitLongDouble", np.longdouble),
]
ret = {}
for name, typ in names:
n: int = 8 * typ().dtype.itemsize
ret[f'numpy._typing._nbit.{name}'] = f"numpy._{n}Bit"
return ret | null |
168,510 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
def _get_extended_precision_list() -> list[str]:
extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble]
extended_names = {
"uint128",
"uint256",
"int128",
"int256",
"float80",
"float96",
"float128",
"float256",
"complex160",
"complex192",
"complex256",
"complex512",
}
return [i.__name__ for i in extended_types if i.__name__ in extended_names] | null |
168,511 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
def _get_c_intp_name() -> str:
# Adapted from `np.core._internal._getintp_ctype`
char = np.dtype('p').char
if char == 'i':
return "c_int"
elif char == 'l':
return "c_long"
elif char == 'q':
return "c_longlong"
else:
return "c_long" | null |
168,512 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
_PRECISION_DICT: Final = _get_precision_dict()
The provided code snippet includes necessary dependencies for implementing the `_hook` function. Write a Python function `def _hook(ctx: AnalyzeTypeContext) -> Type` to solve the following problem:
Replace a type-alias with a concrete ``NBitBase`` subclass.
Here is the function:
def _hook(ctx: AnalyzeTypeContext) -> Type:
    """Replace a type-alias with a concrete ``NBitBase`` subclass."""
    typ, _, api = ctx
    # Strip the module path; look up the matching concrete type by full name.
    short_name = typ.name.rsplit(".", 1)[-1]
    concrete = _PRECISION_DICT[f"numpy._typing._nbit.{short_name}"]
    return api.named_type(concrete)
168,513 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
if TYPE_CHECKING or MYPY_EX is None:
def _index(iterable: Iterable[Statement], id: str) -> int:
"""Identify the first ``ImportFrom`` instance the specified `id`."""
for i, value in enumerate(iterable):
if getattr(value, "id", None) == id:
return i
raise ValueError("Failed to identify a `ImportFrom` instance "
f"with the following id: {id!r}")
else:
The provided code snippet includes necessary dependencies for implementing the `_override_imports` function. Write a Python function `def _override_imports( file: MypyFile, module: str, imports: list[tuple[str, None | str]], ) -> None` to solve the following problem:
Override the first `module`-based import with new `imports`.
Here is the function:
def _override_imports(
file: MypyFile,
module: str,
imports: list[tuple[str, None | str]],
) -> None:
"""Override the first `module`-based import with new `imports`."""
# Construct a new `from module import y` statement
import_obj = ImportFrom(module, 0, names=imports)
import_obj.is_top_level = True
# Replace the first `module`-based import statement with `import_obj`
for lst in [file.defs, file.imports]: # type: list[Statement]
i = _index(lst, module)
lst[i] = import_obj | Override the first `module`-based import with new `imports`. |
168,514 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
if TYPE_CHECKING or MYPY_EX is None:
    class _NumpyPlugin(Plugin):
        """A mypy plugin for handling versus numpy-specific typing tasks."""

        def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc:
            """Set the precision of platform-specific `numpy.number`
            subclasses.

            For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
            """
            # Only alias names registered in _PRECISION_DICT are rewritten.
            if fullname in _PRECISION_DICT:
                return _hook
            return None

        def get_additional_deps(
            self, file: MypyFile
        ) -> list[tuple[int, str, int]]:
            """Handle all import-based overrides.

            * Import platform-specific extended-precision `numpy.number`
              subclasses (*e.g.* `numpy.float96`, `numpy.float128` and
              `numpy.complex256`).
            * Import the appropriate `ctypes` equivalent to `numpy.intp`.
            """
            ret = [(PRI_MED, file.fullname, -1)]
            if file.fullname == "numpy":
                # Expose only the extended-precision scalars available on
                # this platform.
                _override_imports(
                    file, "numpy._typing._extended_precision",
                    imports=[(v, v) for v in _EXTENDED_PRECISION_LIST],
                )
            elif file.fullname == "numpy.ctypeslib":
                # Alias the platform-correct ctypes integer for numpy.intp.
                _override_imports(
                    file, "ctypes",
                    imports=[(_C_INTP, "_c_intp")],
                )
            return ret
else:
The provided code snippet includes necessary dependencies for implementing the `plugin` function. Write a Python function `def plugin(version: str) -> type[_NumpyPlugin]` to solve the following problem:
An entry-point for mypy.
Here is the function:
def plugin(version: str) -> type[_NumpyPlugin]:
    """An entry-point for mypy."""
    # `version` (the mypy version string) is accepted but not used.
    return _NumpyPlugin
168,515 | from __future__ import annotations
from collections.abc import Iterable
from typing import Final, TYPE_CHECKING, Callable
import numpy as np
if TYPE_CHECKING or MYPY_EX is None:
class _NumpyPlugin(Plugin):
"""A mypy plugin for handling versus numpy-specific typing tasks."""
def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc:
"""Set the precision of platform-specific `numpy.number`
subclasses.
For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
"""
if fullname in _PRECISION_DICT:
return _hook
return None
def get_additional_deps(
self, file: MypyFile
) -> list[tuple[int, str, int]]:
"""Handle all import-based overrides.
* Import platform-specific extended-precision `numpy.number`
subclasses (*e.g.* `numpy.float96`, `numpy.float128` and
`numpy.complex256`).
* Import the appropriate `ctypes` equivalent to `numpy.intp`.
"""
ret = [(PRI_MED, file.fullname, -1)]
if file.fullname == "numpy":
_override_imports(
file, "numpy._typing._extended_precision",
imports=[(v, v) for v in _EXTENDED_PRECISION_LIST],
)
elif file.fullname == "numpy.ctypeslib":
_override_imports(
file, "ctypes",
imports=[(_C_INTP, "_c_intp")],
)
return ret
else:
The provided code snippet includes necessary dependencies for implementing the `plugin` function. Write a Python function `def plugin(version: str) -> type[_NumpyPlugin]` to solve the following problem:
An entry-point for mypy.
Here is the function:
def plugin(version: str) -> type[_NumpyPlugin]:
    """An entry-point for mypy."""
    # mypy could not be imported: re-raise the captured ImportError.
    raise MYPY_EX
168,516 | import json
# Auto-generated release metadata, embedded as a JSON blob.
version_json = '''
{
 "date": "2023-02-05T11:25:52-0500",
 "dirty": false,
 "error": null,
 "full-revisionid": "85f38ab180ece5290f64e8ddbd9cf06ad8fa4a5e",
 "version": "1.24.2"
}
'''


def get_versions():
    """Return the embedded release metadata parsed into a dict."""
    return json.loads(version_json)
168,517 | from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from __version__ import version
class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
'installed_libraries', 'define_macros']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path, package_path)):
package_path = njoin(self.local_path, package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self, n, a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path, '__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1, 3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self', f.f_globals, f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
    def info(self, message):
        """Print an informational message unless the 'quiet' option is set."""
        if not self.options['quiet']:
            print(message)
    def warn(self, message):
        """Write a 'Warning: '-prefixed message to standard error."""
        sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
    def get_distribution(self):
        """Return the distutils distribution object for self."""
        # Delegates to numpy.distutils.core, which tracks the active
        # distribution (or None if setup() has not run yet).
        from numpy.distutils.core import get_distribution
        return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
                             parent_name,
                             caller_level = 1):
    """Expand a '*' wildcard in *subpackage_name* and collect the
    configurations of every matching subdirectory that is a Python
    package (has an __init__.py and is not under a 'build' directory).
    """
    parts = subpackage_name.split('.')
    candidate = njoin([self.local_path] + parts)
    configs = []
    for entry in sorted_glob(candidate):
        if not os.path.isdir(entry):
            continue
        if not os.path.isfile(njoin(entry, '__init__.py')):
            continue
        if 'build' in entry.split(os.sep):
            continue
        # Rebuild the dotted name from the trailing path components.
        dotted = '.'.join(entry.split(os.sep)[-len(parts):])
        configs.extend(self.get_subpackage(dotted,
                                           parent_name=parent_name,
                                           caller_level=caller_level + 1))
    return configs
def _get_configuration_from_setup_py(self, setup_py,
                                     subpackage_name,
                                     subpackage_path,
                                     parent_name,
                                     caller_level = 1):
    """Load a subpackage Configuration by executing its setup file.

    Falls back to a default Configuration when the executed module does
    not define a ``configuration()`` function.
    """
    # In case setup_py imports local modules:
    sys.path.insert(0, os.path.dirname(setup_py))
    try:
        setup_name = os.path.splitext(os.path.basename(setup_py))[0]
        n = dot_join(self.name, subpackage_name, setup_name)
        setup_module = exec_mod_from_location(
                        '_'.join(n.split('.')), setup_py)
        if not hasattr(setup_module, 'configuration'):
            if not self.options['assume_default_configuration']:
                self.warn('Assuming default configuration '\
                          '(%s does not define configuration())'\
                          % (setup_module))
            config = Configuration(subpackage_name, parent_name,
                                   self.top_path, subpackage_path,
                                   caller_level = caller_level + 1)
        else:
            # Parent dotted prefix for the subpackage's configuration().
            pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
            args = (pn,)
            # Two-argument configuration() signatures also take top_path.
            if setup_module.configuration.__code__.co_argcount > 1:
                args = args + (self.top_path,)
            config = setup_module.configuration(*args)
        if config.name!=dot_join(parent_name, subpackage_name):
            self.warn('Subpackage %r configuration returned as %r' % \
                      (dot_join(parent_name, subpackage_name), config.name))
    finally:
        # Always undo the sys.path manipulation, even on error.
        del sys.path[0]
    return config
def get_subpackage(self,subpackage_name,
                   subpackage_path=None,
                   parent_name=None,
                   caller_level = 1):
    """Return list of subpackage configurations.

    Parameters
    ----------
    subpackage_name : str or None
        Name of the subpackage to get the configuration. '*' in
        subpackage_name is handled as a wildcard.
    subpackage_path : str
        If None, then the path is assumed to be the local path plus the
        subpackage_name. If a setup.py file is not found in the
        subpackage_path, then a default configuration is used.
    parent_name : str
        Parent name.
    """
    if subpackage_name is None:
        if subpackage_path is None:
            raise ValueError(
                "either subpackage_name or subpackage_path must be specified")
        # Derive the package name from the last path component.
        subpackage_name = os.path.basename(subpackage_path)

    # handle wildcards
    l = subpackage_name.split('.')
    if subpackage_path is None and '*' in subpackage_name:
        return self._wildcard_get_subpackage(subpackage_name,
                                             parent_name,
                                             caller_level = caller_level+1)
    assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
    if subpackage_path is None:
        subpackage_path = njoin([self.local_path] + l)
    else:
        subpackage_path = njoin([subpackage_path] + l[:-1])
        subpackage_path = self.paths([subpackage_path])[0]
    setup_py = njoin(subpackage_path, self.setup_name)
    if not self.options['ignore_setup_xxx_py']:
        if not os.path.isfile(setup_py):
            # Fall back to the legacy setup_<name>.py naming convention.
            setup_py = njoin(subpackage_path,
                             'setup_%s.py' % (subpackage_name))
    if not os.path.isfile(setup_py):
        # No setup file at all: build a default Configuration in place.
        if not self.options['assume_default_configuration']:
            self.warn('Assuming default configuration '\
                      '(%s/{setup_%s,setup}.py was not found)' \
                      % (os.path.dirname(setup_py), subpackage_name))
        config = Configuration(subpackage_name, parent_name,
                               self.top_path, subpackage_path,
                               caller_level = caller_level+1)
    else:
        config = self._get_configuration_from_setup_py(
            setup_py,
            subpackage_name,
            subpackage_path,
            parent_name,
            caller_level = caller_level + 1)
    if config:
        return [config]
    else:
        return []
def add_subpackage(self,subpackage_name,
                   subpackage_path=None,
                   standalone = False):
    """Add a sub-package to the current Configuration instance.

    This is useful in a setup.py script for adding sub-packages to a
    package.

    Parameters
    ----------
    subpackage_name : str
        name of the subpackage
    subpackage_path : str
        if given, the subpackage path such as the subpackage is in
        subpackage_path / subpackage_name. If None, the subpackage is
        assumed to be located in the local path / subpackage_name.
    standalone : bool
        if True, the subpackage's configuration gets no parent name.
    """
    parent_name = None if standalone else self.name
    config_list = self.get_subpackage(subpackage_name, subpackage_path,
                                      parent_name = parent_name,
                                      caller_level = 2)
    if not config_list:
        self.warn('No configuration returned, assuming unavailable.')
    for config in config_list:
        # Normalize Configuration instances to plain dicts before merging.
        d = config.todict() if isinstance(config, Configuration) else config
        assert isinstance(d, dict), repr(type(d))
        self.info('Appending %s configuration to %s' \
                  % (d.get('name'), self.name))
        self.dict_append(**d)
    if self.get_distribution() is not None:
        self.warn('distutils distribution has been initialized,'\
                  ' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self, data_path):
    """Recursively add files under data_path to data_files list.

    Recursively add files under data_path to the list of data_files to be
    installed (and distributed). The data_path can be either a relative
    path-name, or an absolute path-name, or a 2-tuple where the first
    argument shows where in the install directory the data directory
    should be installed to.

    Parameters
    ----------
    data_path : seq or str
        Argument can be either

            * 2-sequence (<datadir suffix>, <path to data directory>)
            * path to data directory where python datadir suffix defaults
              to package dir.

    Notes
    -----
    Rules for installation paths::

        foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
        (gun, foo/bar) -> parent/gun
        foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
        (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
        (gun/*, foo/*) -> parent/gun/a, parent/gun/b
        /foo/bar -> (bar, /foo/bar) -> parent/bar
        (gun, /foo/bar) -> parent/gun
        (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar

    Examples
    --------
    For example suppose the source directory contains fun/foo.dat and
    fun/bar/car.dat:

    >>> self.add_data_dir('fun')                       #doctest: +SKIP
    >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
    >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP

    Will install data-files to the locations::

        <package install directory>/
          fun/
            foo.dat
            bar/
              car.dat
          sun/
            foo.dat
            bar/
              car.dat
          gun/
            foo.dat
            car.dat

    """
    # Normalize the argument to an (install suffix d, source path) pair.
    if is_sequence(data_path):
        d, data_path = data_path
    else:
        d = None
    if is_sequence(data_path):
        [self.add_data_dir((d, p)) for p in data_path]
        return
    if not is_string(data_path):
        raise TypeError("not a string: %r" % (data_path,))
    if d is None:
        if os.path.isabs(data_path):
            return self.add_data_dir((os.path.basename(data_path), data_path))
        return self.add_data_dir((data_path, data_path))
    paths = self.paths(data_path, include_non_existing=False)
    if is_glob_pattern(data_path):
        if is_glob_pattern(d):
            pattern_list = allpath(d).split(os.sep)
            pattern_list.reverse()
            # /a/*//b/ -> /a/*/b
            rl = list(range(len(pattern_list)-1)); rl.reverse()
            for i in rl:
                if not pattern_list[i]:
                    del pattern_list[i]
            #
            for path in paths:
                if not os.path.isdir(path):
                    print('Not a directory, skipping', path)
                    continue
                rpath = rel_path(path, self.local_path)
                path_list = rpath.split(os.sep)
                path_list.reverse()
                target_list = []
                i = 0
                # Substitute each glob component of d with the matching
                # component of the concrete path (matched right-to-left,
                # hence the reversed lists above).
                for s in pattern_list:
                    if is_glob_pattern(s):
                        if i>=len(path_list):
                            raise ValueError('cannot fill pattern %r with %r' \
                                  % (d, path))
                        target_list.append(path_list[i])
                    else:
                        assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
                        target_list.append(s)
                    i += 1
                if path_list[i:]:
                    self.warn('mismatch of pattern_list=%s and path_list=%s'\
                              % (pattern_list, path_list))
                target_list.reverse()
                self.add_data_dir((os.sep.join(target_list), path))
        else:
            for path in paths:
                self.add_data_dir((d, path))
        return
    assert not is_glob_pattern(d), repr(d)

    # Prefer the live distribution's data_files once setup() has run.
    dist = self.get_distribution()
    if dist is not None and dist.data_files is not None:
        data_files = dist.data_files
    else:
        data_files = self.data_files

    for path in paths:
        for d1, f in list(general_source_directories_files(path)):
            target_path = os.path.join(self.path_in_package, d, d1)
            data_files.append((target_path, f))
def _optimize_data_files(self):
    # Collapse duplicate install-path entries in data_files, merging
    # their file lists and dropping repeated files.
    merged = {}
    for prefix, files in self.data_files:
        bucket = merged.setdefault(prefix, set())
        bucket.update(files)
    self.data_files[:] = [(prefix, list(files))
                          for prefix, files in merged.items()]
def add_data_files(self,*files):
    """Add data files to configuration data_files.

    Parameters
    ----------
    files : sequence
        Argument(s) can be either

            * 2-sequence (<datadir prefix>,<path to data file(s)>)
            * paths to data files where python datadir prefix defaults
              to package dir.

    Notes
    -----
    The form of each element of the files sequence is very flexible
    allowing many combinations of where to get the files from the package
    and where they should ultimately be installed on the system. The most
    basic usage is for an element of the files argument sequence to be a
    simple filename. This will cause that file from the local path to be
    installed to the installation path of the self.name package (package
    path). The file argument can also be a relative path in which case the
    entire relative path will be installed into the package directory.
    Finally, the file can be an absolute path name in which case the file
    will be found at the absolute path name but installed to the package
    path.

    This basic behavior can be augmented by passing a 2-tuple in as the
    file argument. The first element of the tuple should specify the
    relative path (under the package install directory) where the
    remaining sequence of files should be installed to (it has nothing to
    do with the file-names in the source distribution). The second element
    of the tuple is the sequence of files that should be installed. The
    files in this sequence can be filenames, relative paths, or absolute
    paths. For absolute paths the file will be installed in the top-level
    package installation directory (regardless of the first argument).
    Filenames and relative path names will be installed in the package
    install directory under the path name given as the first element of
    the tuple.

    Rules for installation paths:

    #. file.txt -> (., file.txt)-> parent/file.txt
    #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
    #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
    #. ``*``.txt -> parent/a.txt, parent/b.txt
    #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
    #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
    #. (sun, file.txt) -> parent/sun/file.txt
    #. (sun, bar/file.txt) -> parent/sun/file.txt
    #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
    #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
    #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
    #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt

    An additional feature is that the path to a data-file can actually be
    a function that takes no arguments and returns the actual path(s) to
    the data-files. This is useful when the data files are generated while
    building the package.

    Examples
    --------
    Add files to the list of data_files to be included with the package.

    >>> self.add_data_files('foo.dat',
    ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
    ...     'bar/cat.dat',
    ...     '/full/path/to/can.dat') #doctest: +SKIP

    will install these data files to::

        <package install directory>/
         foo.dat
         fun/
           gun.dat
           nun/
             pun.dat
         sun.dat
         bar/
           car.dat
         can.dat

    where <package install directory> is the package (or sub-package)
    directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
    \\Python2.4 \\Lib \\site-packages \\mypackage') or
    '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
    \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
    """
    # More than one argument: handle each one independently.
    if len(files)>1:
        for f in files:
            self.add_data_files(f)
        return
    assert len(files)==1
    # Normalize to an (install prefix d, file pattern) pair.
    if is_sequence(files[0]):
        d, files = files[0]
    else:
        d = None
    if is_string(files):
        filepat = files
    elif is_sequence(files):
        if len(files)==1:
            filepat = files[0]
        else:
            for f in files:
                self.add_data_files((d, f))
            return
    else:
        raise TypeError(repr(type(files)))

    if d is None:
        # Derive the install prefix: callables and absolute paths land in
        # the package root; relative paths keep their directory part.
        if hasattr(filepat, '__call__'):
            d = ''
        elif os.path.isabs(filepat):
            d = ''
        else:
            d = os.path.dirname(filepat)
        self.add_data_files((d, files))
        return

    paths = self.paths(filepat, include_non_existing=False)
    if is_glob_pattern(filepat):
        if is_glob_pattern(d):
            # Fill each glob component of d from the matched path,
            # components compared right-to-left (filename dropped first).
            pattern_list = d.split(os.sep)
            pattern_list.reverse()
            for path in paths:
                path_list = path.split(os.sep)
                path_list.reverse()
                path_list.pop() # filename
                target_list = []
                i = 0
                for s in pattern_list:
                    if is_glob_pattern(s):
                        target_list.append(path_list[i])
                        i += 1
                    else:
                        target_list.append(s)
                target_list.reverse()
                self.add_data_files((os.sep.join(target_list), path))
        else:
            self.add_data_files((d, paths))
        return
    assert not is_glob_pattern(d), repr((d, filepat))

    # Prefer the live distribution's data_files once setup() has run.
    dist = self.get_distribution()
    if dist is not None and dist.data_files is not None:
        data_files = dist.data_files
    else:
        data_files = self.data_files

    data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
    """Add define macros to configuration

    Append the given sequence of (name, value) macro pairs to the
    define_macros list. This list will be visible to all extension
    modules of the current package.
    """
    dist = self.get_distribution()
    if dist is None:
        self.define_macros.extend(macros)
        return
    # After setup() has run, record macros on the distribution instead.
    if not hasattr(dist, 'define_macros'):
        dist.define_macros = []
    dist.define_macros.extend(macros)
def add_include_dirs(self,*paths):
    """Add paths to configuration include directories.

    Append the given sequence of paths to the include_dirs list. This
    list will be visible to all extension modules of the current package.
    """
    expanded = self.paths(paths)
    dist = self.get_distribution()
    if dist is None:
        self.include_dirs.extend(expanded)
        return
    # After setup() has run, record the dirs on the distribution instead.
    if dist.include_dirs is None:
        dist.include_dirs = []
    dist.include_dirs.extend(expanded)
def add_headers(self,*files):
    """Add installable headers to configuration.

    Append the given sequence of files to the headers list. By default,
    headers will be installed under the
    <python-include>/<self.name.replace('.','/')>/ directory. If an item
    of files is a tuple, then its first argument specifies the actual
    installation location relative to the <python-include> path.

    Parameters
    ----------
    files : str or seq
        Argument(s) can be either:

            * 2-sequence (<includedir suffix>,<path to header file(s)>)
            * path(s) to header file(s) where python includedir suffix
              will default to package name.
    """
    collected = []
    for path in files:
        if is_string(path):
            collected.extend((self.name, p) for p in self.paths(path))
        elif isinstance(path, (tuple, list)) and len(path) == 2:
            collected.extend((path[0], p) for p in self.paths(path[1]))
        else:
            raise TypeError(repr(path))
    dist = self.get_distribution()
    if dist is None:
        self.headers.extend(collected)
        return
    # After setup() has run, record headers on the distribution instead.
    if dist.headers is None:
        dist.headers = []
    dist.headers.extend(collected)
def paths(self,*paths,**kws):
    """Apply glob to paths and prepend local_path if needed.

    Applies glob.glob(...) to each path in the sequence (if needed) and
    pre-pends the local_path if needed. Because this is called on all
    source lists, this allows wildcard characters to be specified in
    lists of sources for extension modules and libraries and scripts and
    allows path-names be relative to the source directory.
    """
    keep_missing = kws.get('include_non_existing', True)
    return gpaths(paths, local_path=self.local_path,
                  include_non_existing=keep_missing)
def _fix_paths_dict(self, kw):
    # Normalize every path-bearing entry of kw in place via self.paths().
    path_keys = ('sources', 'depends', 'include_dirs', 'library_dirs',
                 'module_dirs', 'extra_objects')
    for key in list(kw):
        if key in path_keys:
            kw[key] = self.paths(kw[key])
def add_extension(self,name,sources,**kw):
    """Add extension to configuration.

    Create and add an Extension instance to the ext_modules list. This
    method also takes the following optional keyword arguments that are
    passed on to the Extension constructor.

    Parameters
    ----------
    name : str
        name of the extension
    sources : seq
        list of the sources. The list of sources may contain functions
        (called source generators) which must take an extension instance
        and a build directory as inputs and return a source file or list of
        source files or None. If None is returned then no sources are
        generated. If the Extension instance has no sources after
        processing all source generators, then no extension module is
        built.
    include_dirs :
    define_macros :
    undef_macros :
    library_dirs :
    libraries :
    runtime_library_dirs :
    extra_objects :
    extra_compile_args :
    extra_link_args :
    extra_f77_compile_args :
    extra_f90_compile_args :
    export_symbols :
    swig_opts :
    depends :
        The depends list contains paths to files or directories that the
        sources of the extension module depend on. If any path in the
        depends list is newer than the extension module, then the module
        will be rebuilt.
    language :
    f2py_options :
    module_dirs :
    extra_info : dict or list
        dict or list of dict of keywords to be appended to keywords.

    Notes
    -----
    The self.paths(...) method is applied to all lists that may contain
    paths.
    """
    ext_args = copy.copy(kw)
    ext_args['name'] = dot_join(self.name, name)
    ext_args['sources'] = sources

    # Fold extra_info dict(s) into the keyword arguments.
    if 'extra_info' in ext_args:
        extra_info = ext_args['extra_info']
        del ext_args['extra_info']
        if isinstance(extra_info, dict):
            extra_info = [extra_info]
        for info in extra_info:
            assert isinstance(info, dict), repr(info)
            dict_append(ext_args,**info)

    self._fix_paths_dict(ext_args)

    # Resolve out-of-tree dependencies
    libraries = ext_args.get('libraries', [])
    libnames = []
    ext_args['libraries'] = []
    for libname in libraries:
        if isinstance(libname, tuple):
            self._fix_paths_dict(libname[1])

        # Handle library names of the form libname@relative/path/to/library
        if '@' in libname:
            lname, lpath = libname.split('@', 1)
            lpath = os.path.abspath(njoin(self.local_path, lpath))
            if os.path.isdir(lpath):
                c = self.get_subpackage(None, lpath,
                                        caller_level = 2)
                # NOTE(review): get_subpackage returns a *list* of
                # configurations, yet the isinstance/.get usage below
                # treats c as a single Configuration/dict -- confirm
                # before relying on the '@' library-path feature.
                if isinstance(c, Configuration):
                    c = c.todict()
                for l in [l[0] for l in c.get('libraries', [])]:
                    llname = l.split('__OF__', 1)[0]
                    if llname == lname:
                        c.pop('name', None)
                        dict_append(ext_args,**c)
                        break
                continue
        libnames.append(libname)

    ext_args['libraries'] = libnames + ext_args['libraries']
    # Package-level define_macros take precedence (come first).
    ext_args['define_macros'] = \
        self.define_macros + ext_args.get('define_macros', [])

    from numpy.distutils.core import Extension
    ext = Extension(**ext_args)
    self.ext_modules.append(ext)

    dist = self.get_distribution()
    if dist is not None:
        self.warn('distutils distribution has been initialized,'\
                  ' it may be too late to add an extension '+name)
    return ext
def add_library(self,name,sources,**build_info):
    """
    Add library to configuration.

    Parameters
    ----------
    name : str
        Name of the extension.
    sources : sequence
        List of the sources. The list of sources may contain functions
        (called source generators) which must take an extension instance
        and a build directory as inputs and return a source file or list
        of source files or None. If None is returned then no sources are
        generated. If the Extension instance has no sources after
        processing all source generators, then no extension module is
        built.
    build_info : dict, optional
        Allowed keys: depends, macros, include_dirs,
        extra_compiler_args, extra_f77_compile_args,
        extra_f90_compile_args, f2py_options, language.
    """
    # install_dir=None means the library is build-only (not installed).
    self._add_library(name, sources, None, build_info)
    if self.get_distribution() is not None:
        self.warn('distutils distribution has been initialized,'\
                  ' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
    """Common implementation for add_library and add_installed_library.
    Do not use directly."""
    info = copy.copy(build_info)
    info['sources'] = sources
    # distutils barfs when 'depends' is missing (see numpy gh #1134),
    # so make sure it is always present.
    info.setdefault('depends', [])
    self._fix_paths_dict(info)
    # Registering under self.libraries makes build_clib pick it up.
    self.libraries.append((name, info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
    """
    Similar to add_library, but the specified library is installed.

    Most C libraries used with `distutils` are only used to build python
    extensions, but libraries built through this method will be installed
    so that they can be reused by third-party packages.

    Parameters
    ----------
    name : str
        Name of the installed library.
    sources : sequence
        List of the library's source files. See `add_library` for details.
    install_dir : str
        Path to install the library, relative to the current sub-package.
    build_info : dict, optional
        Allowed keys: depends, macros, include_dirs,
        extra_compiler_args, extra_f77_compile_args,
        extra_f90_compile_args, f2py_options, language.

    Returns
    -------
    None

    See Also
    --------
    add_library, add_npy_pkg_config, get_info

    Notes
    -----
    The best way to encode the options required to link against the
    specified C libraries is to use a "libname.ini" file, and use
    `get_info` to retrieve the required options (see `add_npy_pkg_config`
    for more information).
    """
    info = build_info or {}
    # The install location is resolved relative to this sub-package.
    destination = os.path.join(self.package_path, install_dir)
    self._add_library(name, sources, destination, info)
    self.installed_libraries.append(InstallableLib(name, info, destination))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
    """
    Generate and install a npy-pkg config file from a template.

    The config file generated from `template` is installed in the
    given install directory, using `subst_dict` for variable substitution.

    Parameters
    ----------
    template : str
        The path of the template, relatively to the current package path.
    install_dir : str
        Where to install the npy-pkg config file, relatively to the current
        package path.
    subst_dict : dict, optional
        If given, any string of the form ``@key@`` will be replaced by
        ``subst_dict[key]`` in the template file when installed. The install
        prefix is always available through the variable ``@prefix@``, since the
        install prefix is not easy to get reliably from setup.py.

    See also
    --------
    add_installed_library, get_info

    Notes
    -----
    This works for both standard installs and in-place builds, i.e. the
    ``@prefix@`` refer to the source directory for in-place builds.

    Examples
    --------
    ::

        config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})

    Assuming the foo.ini.in file has the following content::

        [meta]
        Name=@foo@
        Version=1.0
        Description=dummy description

        [default]
        Cflags=-I@prefix@/include
        Libs=

    The generated file will have the following content::

        [meta]
        Name=bar
        Version=1.0
        Description=dummy description

        [default]
        Cflags=-Iprefix_dir/include
        Libs=

    and will be installed as foo.ini in the 'lib' subpath.

    When cross-compiling with numpy distutils, it might be necessary to
    use modified npy-pkg-config files.  Using the default/generated files
    will link with the host libraries (i.e. libnpymath.a).  For
    cross-compilation you of-course need to link with target libraries,
    while using the host Python installation.

    You can copy out the numpy/core/lib/npy-pkg-config directory, add a
    pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
    variable to point to the directory with the modified npy-pkg-config
    files.

    Example npymath.ini modified for cross-compilation::

        [meta]
        Name=npymath
        Description=Portable, core math library implementing C99 standard
        Version=0.1

        [variables]
        pkgname=numpy.core
        pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
        prefix=${pkgdir}
        libdir=${prefix}/lib
        includedir=${prefix}/include

        [default]
        Libs=-L${libdir} -lnpymath
        Cflags=-I${includedir}
        Requires=mlib

        [msvc]
        Libs=/LIBPATH:${libdir} npymath.lib
        Cflags=/INCLUDE:${includedir}
        Requires=mlib
    """
    if subst_dict is None:
        subst_dict = {}
    template = os.path.join(self.package_path, template)
    # Queue the (template, destination, substitutions) triple; actual
    # generation happens later, keyed by this configuration's name.
    if self.name in self.installed_pkg_config:
        self.installed_pkg_config[self.name].append((template, install_dir,
                                                     subst_dict))
    else:
        self.installed_pkg_config[self.name] = [(template, install_dir,
                                                 subst_dict)]
def add_scripts(self,*files):
    """Add scripts to configuration.

    Append the sequence of files to the scripts list. Scripts will be
    installed under the <prefix>/bin/ directory.
    """
    expanded = self.paths(files)
    dist = self.get_distribution()
    if dist is None:
        self.scripts.extend(expanded)
        return
    # After setup() has run, record scripts on the distribution instead.
    if dist.scripts is None:
        dist.scripts = []
    dist.scripts.extend(expanded)
def dict_append(self, **attrs):
    """Merge distutils setup keyword arguments into this configuration.

    List-valued known keys are extended, dict-valued known keys are
    updated in place, and unknown keys become extra attributes (with a
    warning, unless the value is unchanged).

    Parameters
    ----------
    **attrs
        Keyword arguments in the same shape as returned by `todict`.
    """
    # Fixes: parameter renamed from ``dict`` (shadowed the builtin; not
    # caller-visible since it is the **kwargs catcher), and the formerly
    # unreachable final ``else: raise ValueError`` branch removed -- the
    # three conditions above it exhaust all cases.
    for key in self.list_keys:
        getattr(self, key).extend(attrs.get(key, []))
    for key in self.dict_keys:
        getattr(self, key).update(attrs.get(key, {}))
    known_keys = self.list_keys + self.dict_keys + self.extra_keys
    for key in attrs.keys():
        if key not in known_keys:
            a = getattr(self, key, None)
            # Silently keep identical values; otherwise inherit with a warning.
            if a and a == attrs[key]:
                continue
            self.warn('Inheriting attribute %r=%r from %r' \
                      % (key, attrs[key], attrs.get('name', '?')))
            setattr(self, key, attrs[key])
            self.extra_keys.append(key)
        elif key in self.extra_keys:
            self.info('Ignoring attempt to set %r (from %r to %r)' \
                      % (key, getattr(self, key), attrs[key]))
        # keys in list_keys/dict_keys were already merged above.
def __str__(self):
    """Human-readable dump of all non-empty configuration attributes."""
    from pprint import pformat
    keys = sorted(self.list_keys + self.dict_keys + self.extra_keys)
    lines = ['<' + 5 * '-']
    lines.append('Configuration of ' + self.name + ':')
    for key in keys:
        value = getattr(self, key, None)
        if value:
            lines.append('%s = %s' % (key, pformat(value)))
    return '\n'.join(lines) + '\n' + 5 * '-' + '>'
def get_config_cmd(self):
    """
    Returns the numpy.distutils config command instance.
    """
    cmd = get_cmd('config')
    cmd.ensure_finalized()
    cmd.dump_source = 0
    cmd.noisy = 0
    # Put '.' first on PATH so freshly built helpers are found.
    old_path = os.environ.get('PATH')
    if old_path:
        os.environ['PATH'] = os.pathsep.join(['.', old_path])
    return cmd
def get_build_temp_dir(self):
    """
    Return a path to a temporary directory where temporary files should
    be placed.
    """
    build_cmd = get_cmd('build')
    build_cmd.ensure_finalized()
    return build_cmd.build_temp
def have_f77c(self):
    """Check for availability of Fortran 77 compiler.

    Use it inside source generating function to ensure that
    setup distribution instance has been initialized.

    Notes
    -----
    True if a Fortran 77 compiler is available (because a simple Fortran 77
    code was able to be compiled successfully).
    """
    # Fixed-form F77: the leading spaces inside the literal are
    # significant (statements must start past column 6).
    simple_fortran_subroutine = '''
        subroutine simple
        end
        '''
    config_cmd = self.get_config_cmd()
    flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
    return flag
def have_f90c(self):
    """Check for availability of Fortran 90 compiler.

    Use it inside source generating function to ensure that
    setup distribution instance has been initialized.

    Notes
    -----
    True if a Fortran 90 compiler is available (because a simple Fortran
    90 code was able to be compiled successfully)
    """
    # Same probe source as have_f77c, compiled as f90 instead.
    simple_fortran_subroutine = '''
        subroutine simple
        end
        '''
    config_cmd = self.get_config_cmd()
    flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
    return flag
def append_to(self, extlib):
    """Append libraries, include_dirs to extension or library item.

    *extlib* may be either a (name, build_info) library tuple or an
    Extension instance.
    """
    if is_sequence(extlib):
        lib_name, build_info = extlib
        dict_append(build_info,
                    libraries=self.libraries,
                    include_dirs=self.include_dirs)
        return
    from numpy.distutils.core import Extension
    assert isinstance(extlib, Extension), repr(extlib)
    extlib.libraries.extend(self.libraries)
    extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
    """Return path's SVN revision number.

    Returns None when no revision can be determined.
    """
    # Prefer asking the svn client directly.
    try:
        output = subprocess.check_output(['svnversion'], cwd=path)
    except (subprocess.CalledProcessError, OSError):
        pass
    else:
        m = re.match(rb'(?P<revision>\d+)', output)
        if m:
            return int(m.group('revision'))

    # Fall back to parsing the working copy's 'entries' file.
    if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
        entries = njoin(path, '_svn', 'entries')
    else:
        entries = njoin(path, '.svn', 'entries')
    if os.path.isfile(entries):
        with open(entries) as f:
            fstr = f.read()
        if fstr[:5] == '<?xml':  # pre 1.4
            m = re.search(r'revision="(?P<revision>\d+)"', fstr)
            if m:
                return int(m.group('revision'))
        else:  # non-xml entries file --- check to be sure that
            m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
            if m:
                return int(m.group('revision'))
    return None
def _get_hg_revision(self, path):
    """Return path's Mercurial revision number.

    Tries ``hg identify --num`` first, then falls back to parsing the
    ``.hg/branch`` and ``.hg/branch.cache`` files. Returns None when no
    revision can be determined.
    """
    # Prefer asking the hg client directly.
    try:
        output = subprocess.check_output(
            ['hg', 'identify', '--num'], cwd=path)
    except (subprocess.CalledProcessError, OSError):
        pass
    else:
        m = re.match(rb'(?P<revision>\d+)', output)
        if m:
            return int(m.group('revision'))

    branch_fn = njoin(path, '.hg', 'branch')
    branch_cache_fn = njoin(path, '.hg', 'branch.cache')

    # Fallback: map the current branch (.hg/branch) to its revision via
    # .hg/branch.cache. Both files must exist -- previously a missing
    # branch.cache raised FileNotFoundError from open(), crashing this
    # best-effort probe.
    if os.path.isfile(branch_fn) and os.path.isfile(branch_cache_fn):
        branch0 = None
        with open(branch_fn) as f:
            revision0 = f.read().strip()

        branch_map = {}
        with open(branch_cache_fn, 'r') as f:
            for line in f:
                branch1, revision1 = line.split()[:2]
                if revision1==revision0:
                    branch0 = branch1
                try:
                    revision1 = int(revision1)
                except ValueError:
                    continue
                branch_map[branch1] = revision1

        return branch_map.get(branch0)
    return None
def get_version(self, version_file=None, version_variable=None):
    """Try to get version string of a package.

    Return a version string of the current package or None if the version
    information could not be detected.

    Notes
    -----
    This method scans files named
    __version__.py, <packagename>_version.py, version.py, and
    __svn_version__.py for string variables version, __version__, and
    <packagename>_version, until a version number is found.
    """
    # Fix: removed the dead local ``info = ('.py', 'U', 1)`` -- a leftover
    # from the removed imp.load_module() loading path; it was never read.

    # A previously assigned/cached version wins.
    version = getattr(self, 'version', None)
    if version is not None:
        return version

    # Get version from version file.
    if version_file is None:
        files = ['__version__.py',
                 self.name.split('.')[-1]+'_version.py',
                 'version.py',
                 '__svn_version__.py',
                 '__hg_version__.py']
    else:
        files = [version_file]
    if version_variable is None:
        version_vars = ['version',
                        '__version__',
                        self.name.split('.')[-1]+'_version']
    else:
        version_vars = [version_variable]
    for f in files:
        fn = njoin(self.local_path, f)
        if os.path.isfile(fn):
            name = os.path.splitext(os.path.basename(fn))[0]
            n = dot_join(self.name, name)
            try:
                version_module = exec_mod_from_location(
                                    '_'.join(n.split('.')), fn)
            except ImportError as e:
                self.warn(str(e))
                version_module = None
            if version_module is None:
                continue

            for a in version_vars:
                version = getattr(version_module, a, None)
                if version is not None:
                    break

            # Try if versioneer module
            try:
                version = version_module.get_versions()['version']
            except AttributeError:
                pass

            if version is not None:
                break

    if version is not None:
        # Cache for subsequent calls.
        self.version = version
        return version

    # Get version as SVN or Mercurial revision number
    revision = self._get_svn_revision(self.local_path)
    if revision is None:
        revision = self._get_hg_revision(self.local_path)

    if revision is not None:
        version = str(revision)
        self.version = version

    return version
def make_svn_version_py(self, delete=True):
    """Appends a data function to the data_files list that will generate
    __svn_version__.py file to the current package directory.

    Generate package __svn_version__.py file from SVN revision number,
    it will be removed after python exits but will be available
    when sdist, etc commands are executed.

    Notes
    -----
    If __svn_version__.py existed before, nothing is done.

    This is
    intended for working with source directories that are in an SVN
    repository.
    """
    target = njoin(self.local_path, '__svn_version__.py')
    revision = self._get_svn_revision(self.local_path)
    if os.path.isfile(target) or revision is None:
        return
    else:
        def generate_svn_version_py():
            if not os.path.isfile(target):
                version = str(revision)
                self.info('Creating %s (version=%r)' % (target, version))
                with open(target, 'w') as f:
                    f.write('version = %r\n' % (version))

            # Remove the generated file (and a stale 'c'-suffixed
            # byte-compiled sibling) at interpreter exit, unless
            # delete=False was requested.
            def rm_file(f=target,p=self.info):
                if delete:
                    try: os.remove(f); p('removed '+f)
                    except OSError: pass
                    try: os.remove(f+'c'); p('removed '+f+'c')
                    except OSError: pass

            atexit.register(rm_file)

            return target

        self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__hg_version__.py file to the current package directory.
Generate package __hg_version__.py file from Mercurial revision,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __hg_version__.py existed before, nothing is done.
This is intended for working with source directories that are
in an Mercurial repository.
"""
target = njoin(self.local_path, '__hg_version__.py')
revision = self._get_hg_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_hg_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
with open(target, 'w') as f:
f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name, name, generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from .system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def configuration(parent_package='', top_path=None):
config = Configuration('f2py', parent_package, top_path)
config.add_subpackage('tests')
config.add_data_dir('tests/src')
config.add_data_files(
'src/fortranobject.c',
'src/fortranobject.h')
config.add_data_files('*.pyi')
return config | null |
168,518 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isexternal(var):
return 'attrspec' in var and 'external' in var['attrspec']
def _isstring(var):
return 'typespec' in var and var['typespec'] == 'character' and \
not isexternal(var) | null |
168,519 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isscalar(var):
def isinteger(var):
return isscalar(var) and var.get('typespec') == 'integer' | null |
168,520 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def isreal(var):
return isscalar(var) and var.get('typespec') == 'real' | null |
168,521 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isint1(var):
return var.get('typespec') == 'integer' \
and get_kind(var) == '1' and not isarray(var) | null |
168,522 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isscalar(var):
def get_kind(var):
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-1' | null |
168,523 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isscalar(var):
def get_kind(var):
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-2' | null |
168,524 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-4' | null |
168,525 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var) == '-8' | null |
168,526 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def iscomplex(var):
return isscalar(var) and \
var.get('typespec') in ['complex', 'double complex']
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var) == '32' | null |
168,527 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isint1array(var):
return isarray(var) and var.get('typespec') == 'integer' \
and get_kind(var) == '1' | null |
168,528 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-1' | null |
168,529 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-2' | null |
168,530 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-4' | null |
168,531 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '-8' | null |
168,532 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '1' | null |
168,533 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '2' | null |
168,534 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '4' | null |
168,535 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isarray(var):
return 'dimension' in var and not isexternal(var)
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var) == '8' | null |
168,536 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isstring(var):
return isstring_or_stringarray(var) and not isarray(var)
def ismutable(var):
return not ('dimension' not in var or isstring(var)) | null |
168,537 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer', 'logical']:
return 0
return get_kind(var) == '8'
def isfunction(rout):
return 'block' in rout and 'function' == rout['block']
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0 | null |
168,538 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def islong_double(var):
def isfunction(rout):
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0 | null |
168,539 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def outmess(t):
if options.get('verbose', 1):
sys.stdout.write(t)
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a = rout['result']
else:
a = rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
When using GNU gcc/g77 compilers, codes should work
correctly for callbacks with:
f2py -c -DF2PY_CB_RETURNCOMPLEX
**************************************************************\n""")
return 1
return 0 | null |
168,540 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def hasexternals(rout):
return 'externals' in rout and rout['externals'] | null |
168,541 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and \
'threadsafe' in rout['f2pyenhancements'] | null |
168,542 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def hasvariables(rout):
return 'vars' in rout and rout['vars'] | null |
168,543 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isintent_cache(var):
return 'cache' in var.get('intent', []) | null |
168,544 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isintent_copy(var):
return 'copy' in var.get('intent', []) | null |
168,545 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isintent_overwrite(var):
return 'overwrite' in var.get('intent', []) | null |
168,546 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isintent_aligned4(var):
return 'aligned4' in var.get('intent', []) | null |
168,547 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isintent_aligned8(var):
return 'aligned8' in var.get('intent', []) | null |
168,548 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isintent_aligned16(var):
return 'aligned16' in var.get('intent', []) | null |
168,549 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"', "'"] | null |
168,550 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def hascommon(rout):
return 'common' in rout
def hasbody(rout):
return 'body' in rout
def containscommon(rout):
if hascommon(rout):
return 1
if hasbody(rout):
for b in rout['body']:
if containscommon(b):
return 1
return 0 | null |
168,551 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def ismodule(rout):
return 'block' in rout and 'module' == rout['block']
def hasbody(rout):
return 'body' in rout
def containsmodule(block):
if ismodule(block):
return 1
if not hasbody(block):
return 0
for b in block['body']:
if containsmodule(b):
return 1
return 0 | null |
168,552 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def istrue(var):
return 1 | null |
168,553 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isfalse(var):
return 0 | null |
168,554 | import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname'] == ''
except KeyError:
return 0 | null |
168,555 | import sys
import os
import pprint
import re
from pathlib import Path
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
from . import capi_maps
errmess = sys.stderr.write
outmess = auxfuncs.outmess
def scaninputline(inputline):
files, skipfuncs, onlyfuncs, debug = [], [], [], []
f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
verbose = 1
emptygen = True
dolc = -1
dolatexdoc = 0
dorestdoc = 0
wrapfuncs = 1
buildpath = '.'
include_paths = []
signsfile, modulename = None, None
options = {'buildpath': buildpath,
'coutput': None,
'f2py_wrapper_output': None}
for l in inputline:
if l == '':
pass
elif l == 'only:':
f = 0
elif l == 'skip:':
f = -1
elif l == ':':
f = 1
elif l[:8] == '--debug-':
debug.append(l[8:])
elif l == '--lower':
dolc = 1
elif l == '--build-dir':
f6 = 1
elif l == '--no-lower':
dolc = 0
elif l == '--quiet':
verbose = 0
elif l == '--verbose':
verbose += 1
elif l == '--latex-doc':
dolatexdoc = 1
elif l == '--no-latex-doc':
dolatexdoc = 0
elif l == '--rest-doc':
dorestdoc = 1
elif l == '--no-rest-doc':
dorestdoc = 0
elif l == '--wrap-functions':
wrapfuncs = 1
elif l == '--no-wrap-functions':
wrapfuncs = 0
elif l == '--short-latex':
options['shortlatex'] = 1
elif l == '--coutput':
f8 = 1
elif l == '--f2py-wrapper-output':
f9 = 1
elif l == '--f2cmap':
f10 = 1
elif l == '--overwrite-signature':
options['h-overwrite'] = 1
elif l == '-h':
f2 = 1
elif l == '-m':
f3 = 1
elif l[:2] == '-v':
print(f2py_version)
sys.exit()
elif l == '--show-compilers':
f5 = 1
elif l[:8] == '-include':
cfuncs.outneeds['userincludes'].append(l[9:-1])
cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
elif l[:15] in '--include_paths':
outmess(
'f2py option --include_paths is deprecated, use --include-paths instead.\n')
f7 = 1
elif l[:15] in '--include-paths':
f7 = 1
elif l == '--skip-empty-wrappers':
emptygen = False
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
sys.exit()
elif f2:
f2 = 0
signsfile = l
elif f3:
f3 = 0
modulename = l
elif f6:
f6 = 0
buildpath = l
elif f7:
f7 = 0
include_paths.extend(l.split(os.pathsep))
elif f8:
f8 = 0
options["coutput"] = l
elif f9:
f9 = 0
options["f2py_wrapper_output"] = l
elif f10:
f10 = 0
options["f2cmap_file"] = l
elif f == 1:
try:
with open(l):
pass
files.append(l)
except OSError as detail:
errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
elif f == -1:
skipfuncs.append(l)
elif f == 0:
onlyfuncs.append(l)
if not f5 and not files and not modulename:
print(__usage__)
sys.exit()
if not os.path.isdir(buildpath):
if not verbose:
outmess('Creating build directory %s\n' % (buildpath))
os.mkdir(buildpath)
if signsfile:
signsfile = os.path.join(buildpath, signsfile)
if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
errmess(
'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
sys.exit()
options['emptygen'] = emptygen
options['debug'] = debug
options['verbose'] = verbose
if dolc == -1 and not signsfile:
options['do-lower'] = 0
else:
options['do-lower'] = dolc
if modulename:
options['module'] = modulename
if signsfile:
options['signsfile'] = signsfile
if onlyfuncs:
options['onlyfuncs'] = onlyfuncs
if skipfuncs:
options['skipfuncs'] = skipfuncs
options['dolatexdoc'] = dolatexdoc
options['dorestdoc'] = dorestdoc
options['wrapfuncs'] = wrapfuncs
options['buildpath'] = buildpath
options['include_paths'] = include_paths
options.setdefault('f2cmap_file', None)
return files, options
def callcrackfortran(files, options):
rules.options = options
crackfortran.debug = options['debug']
crackfortran.verbose = options['verbose']
if 'module' in options:
crackfortran.f77modulename = options['module']
if 'skipfuncs' in options:
crackfortran.skipfuncs = options['skipfuncs']
if 'onlyfuncs' in options:
crackfortran.onlyfuncs = options['onlyfuncs']
crackfortran.include_paths[:] = options['include_paths']
crackfortran.dolowercase = options['do-lower']
postlist = crackfortran.crackfortran(files)
if 'signsfile' in options:
outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
pyf = crackfortran.crack2fortran(postlist)
if options['signsfile'][-6:] == 'stdout':
sys.stdout.write(pyf)
else:
with open(options['signsfile'], 'w') as f:
f.write(pyf)
if options["coutput"] is None:
for mod in postlist:
mod["coutput"] = "%smodule.c" % mod["name"]
else:
for mod in postlist:
mod["coutput"] = options["coutput"]
if options["f2py_wrapper_output"] is None:
for mod in postlist:
mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
else:
for mod in postlist:
mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
return postlist
def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules, mnames, isusedby = [], [], {}
for item in lst:
if '__user__' in item['name']:
cb_rules.buildcallbacks(item)
else:
if 'use' in item:
for u in item['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(item['name'])
modules.append(item)
mnames.append(item['name'])
ret = {}
for module, name in zip(modules, mnames):
if name in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n' % (
name, ','.join('"%s"' % s for s in isusedby[name])))
else:
um = []
if 'use' in module:
for u in module['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess(
f'\tModule "{name}" uses nonexisting "{u}" '
'which will be ignored.\n')
ret[name] = {}
dict_append(ret[name], rules.buildmodule(module, um))
return ret
def dict_append(d_out, d_in):
for (k, v) in d_in.items():
if k not in d_out:
d_out[k] = []
if isinstance(v, list):
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
def crackfortran(files):
global usermodules, post_processing_hooks
outmess('Reading fortran codes...\n', 0)
readfortrancode(files, crackline)
outmess('Post-processing...\n', 0)
usermodules = []
postlist = postcrack(grouplist[0])
outmess('Applying post-processing hooks...\n', 0)
for hook in post_processing_hooks:
outmess(f' {hook.__name__}\n', 0)
postlist = traverse(postlist, hook)
outmess('Post-processing (stage 2)...\n', 0)
postlist = postcrack2(postlist)
return usermodules + postlist
cfuncs = {'cfuncs': '/*need_cfuncs*/'}
cfuncs['calcarrindex'] = """\
static int calcarrindex(int *i,PyArrayObject *arr) {
int k,ii = i[0];
for (k=1; k < PyArray_NDIM(arr); k++)
ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
return ii;
}"""
cfuncs['calcarrindextr'] = """\
static int calcarrindextr(int *i,PyArrayObject *arr) {
int k,ii = i[PyArray_NDIM(arr)-1];
for (k=1; k < PyArray_NDIM(arr); k++)
ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
return ii;
}"""
cfuncs['forcomb'] = """\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
static int initforcomb(npy_intp *dims,int nd,int tr) {
int k;
if (dims==NULL) return 0;
if (nd<0) return 0;
forcombcache.nd = nd;
forcombcache.d = dims;
forcombcache.tr = tr;
if ((forcombcache.i = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
if ((forcombcache.i_tr = (int *)malloc(sizeof(int)*nd))==NULL) return 0;
for (k=1;k<nd;k++) {
forcombcache.i[k] = forcombcache.i_tr[nd-k-1] = 0;
}
forcombcache.i[0] = forcombcache.i_tr[nd-1] = -1;
return 1;
}
static int *nextforcomb(void) {
int j,*i,*i_tr,k;
int nd=forcombcache.nd;
if ((i=forcombcache.i) == NULL) return NULL;
if ((i_tr=forcombcache.i_tr) == NULL) return NULL;
if (forcombcache.d == NULL) return NULL;
i[0]++;
if (i[0]==forcombcache.d[0]) {
j=1;
while ((j<nd) && (i[j]==forcombcache.d[j]-1)) j++;
if (j==nd) {
free(i);
free(i_tr);
return NULL;
}
for (k=0;k<j;k++) i[k] = i_tr[nd-k-1] = 0;
i[j]++;
i_tr[nd-j-1]++;
} else
i_tr[nd-1]++;
if (forcombcache.tr) return i_tr;
return i;
}"""
cfuncs['try_pyarr_from_string'] = """\
/*
try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`.
If obj is an `ndarray`, it is assumed to be contiguous.
If the specified len==-1, str must be null-terminated.
*/
static int try_pyarr_from_string(PyObject *obj,
const string str, const int len) {
#ifdef DEBUGCFUNCS
fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n",
(char*)str,len, obj);
#endif
if (PyArray_Check(obj)) {
PyArrayObject *arr = (PyArrayObject *)obj;
assert(ISCONTIGUOUS(arr));
string buf = PyArray_DATA(arr);
npy_intp n = len;
if (n == -1) {
/* Assuming null-terminated str. */
n = strlen(str);
}
if (n > PyArray_NBYTES(arr)) {
n = PyArray_NBYTES(arr);
}
STRINGCOPYN(buf, str, n);
return 1;
}
capi_fail:
PRINTPYOBJERR(obj);
PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\");
return 0;
}
"""
cfuncs['string_from_pyobj'] = """\
/*
Create a new string buffer `str` of at most length `len` from a
Python string-like object `obj`.
The string buffer has given size (len) or the size of inistr when len==-1.
The string buffer is padded with blanks: in Fortran, trailing blanks
are insignificant contrary to C nulls.
*/
static int
string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj,
const char *errmess)
{
PyObject *tmp = NULL;
string buf = NULL;
npy_intp n = -1;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",
(char*)str, *len, (char *)inistr, obj);
#endif
if (obj == Py_None) {
n = strlen(inistr);
buf = inistr;
}
else if (PyArray_Check(obj)) {
PyArrayObject *arr = (PyArrayObject *)obj;
if (!ISCONTIGUOUS(arr)) {
PyErr_SetString(PyExc_ValueError,
\"array object is non-contiguous.\");
goto capi_fail;
}
n = PyArray_NBYTES(arr);
buf = PyArray_DATA(arr);
n = strnlen(buf, n);
}
else {
if (PyBytes_Check(obj)) {
tmp = obj;
Py_INCREF(tmp);
}
else if (PyUnicode_Check(obj)) {
tmp = PyUnicode_AsASCIIString(obj);
}
else {
PyObject *tmp2;
tmp2 = PyObject_Str(obj);
if (tmp2) {
tmp = PyUnicode_AsASCIIString(tmp2);
Py_DECREF(tmp2);
}
else {
tmp = NULL;
}
}
if (tmp == NULL) goto capi_fail;
n = PyBytes_GET_SIZE(tmp);
buf = PyBytes_AS_STRING(tmp);
}
if (*len == -1) {
/* TODO: change the type of `len` so that we can remove this */
if (n > NPY_MAX_INT) {
PyErr_SetString(PyExc_OverflowError,
"object too large for a 32-bit int");
goto capi_fail;
}
*len = n;
}
else if (*len < n) {
/* discard the last (len-n) bytes of input buf */
n = *len;
}
if (n < 0 || *len < 0 || buf == NULL) {
goto capi_fail;
}
STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1)
if (n < *len) {
/*
Pad fixed-width string with nulls. The caller will replace
nulls with blanks when the corresponding argument is not
intent(c).
*/
memset(*str + n, '\\0', *len - n);
}
STRINGCOPYN(*str, buf, n);
Py_XDECREF(tmp);
return 1;
capi_fail:
Py_XDECREF(tmp);
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
cfuncs['character_from_pyobj'] = """\
static int
character_from_pyobj(character* v, PyObject *obj, const char *errmess) {
if (PyBytes_Check(obj)) {
/* empty bytes has trailing null, so dereferencing is always safe */
*v = PyBytes_AS_STRING(obj)[0];
return 1;
} else if (PyUnicode_Check(obj)) {
PyObject* tmp = PyUnicode_AsASCIIString(obj);
if (tmp != NULL) {
*v = PyBytes_AS_STRING(tmp)[0];
Py_DECREF(tmp);
return 1;
}
} else if (PyArray_Check(obj)) {
PyArrayObject* arr = (PyArrayObject*)obj;
if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) {
*v = PyArray_BYTES(arr)[0];
return 1;
} else if (F2PY_IS_UNICODE_ARRAY(arr)) {
// TODO: update when numpy will support 1-byte and
// 2-byte unicode dtypes
PyObject* tmp = PyUnicode_FromKindAndData(
PyUnicode_4BYTE_KIND,
PyArray_BYTES(arr),
(PyArray_NBYTES(arr)>0?1:0));
if (tmp != NULL) {
if (character_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
} else if (PySequence_Check(obj)) {
PyObject* tmp = PySequence_GetItem(obj,0);
if (tmp != NULL) {
if (character_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
{
char mess[F2PY_MESSAGE_BUFFER_SIZE];
strcpy(mess, errmess);
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = PyExc_TypeError;
}
sprintf(mess + strlen(mess),
" -- expected str|bytes|sequence-of-str-or-bytes, got ");
f2py_describe(obj, mess + strlen(mess));
PyErr_SetString(err, mess);
}
return 0;
}
"""
cfuncs['char_from_pyobj'] = """\
static int
char_from_pyobj(char* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (char)i;
return 1;
}
return 0;
}
"""
cfuncs['signed_char_from_pyobj'] = """\
static int
signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (signed_char)i;
return 1;
}
return 0;
}
"""
cfuncs['short_from_pyobj'] = """\
static int
short_from_pyobj(short* v, PyObject *obj, const char *errmess) {
int i = 0;
if (int_from_pyobj(&i, obj, errmess)) {
*v = (short)i;
return 1;
}
return 0;
}
"""
cfuncs['int_from_pyobj'] = """\
static int
int_from_pyobj(int* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = Npy__PyLong_AsInt(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = Npy__PyLong_AsInt(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (int_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
cfuncs['long_from_pyobj'] = """\
static int
long_from_pyobj(long* v, PyObject *obj, const char *errmess) {
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = PyLong_AsLong(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLong(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err, errmess);
}
return 0;
}
"""
cfuncs['long_long_from_pyobj'] = """\
static int
long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyLong_Check(obj)) {
*v = PyLong_AsLongLong(obj);
return !(*v == -1 && PyErr_Occurred());
}
tmp = PyNumber_Long(obj);
if (tmp) {
*v = PyLong_AsLongLong(tmp);
Py_DECREF(tmp);
return !(*v == -1 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (long_long_from_pyobj(v, tmp, errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = #modulename#_error;
}
PyErr_SetString(err,errmess);
}
return 0;
}
"""
cfuncs['long_double_from_pyobj'] = """\
static int
long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess)
{
double d=0;
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, LongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
else if (PyArray_Check(obj) && PyArray_TYPE(obj) == NPY_LONGDOUBLE) {
(*v) = *((npy_longdouble *)PyArray_DATA(obj));
return 1;
}
}
if (double_from_pyobj(&d, obj, errmess)) {
*v = (long_double)d;
return 1;
}
return 0;
}
"""
cfuncs['double_from_pyobj'] = """\
static int
double_from_pyobj(double* v, PyObject *obj, const char *errmess)
{
PyObject* tmp = NULL;
if (PyFloat_Check(obj)) {
*v = PyFloat_AsDouble(obj);
return !(*v == -1.0 && PyErr_Occurred());
}
tmp = PyNumber_Float(obj);
if (tmp) {
*v = PyFloat_AsDouble(tmp);
Py_DECREF(tmp);
return !(*v == -1.0 && PyErr_Occurred());
}
if (PyComplex_Check(obj)) {
PyErr_Clear();
tmp = PyObject_GetAttrString(obj,\"real\");
}
else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) {
/*pass*/;
}
else if (PySequence_Check(obj)) {
PyErr_Clear();
tmp = PySequence_GetItem(obj, 0);
}
if (tmp) {
if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
Py_DECREF(tmp);
}
{
PyObject* err = PyErr_Occurred();
if (err==NULL) err = #modulename#_error;
PyErr_SetString(err,errmess);
}
return 0;
}
"""
cfuncs['float_from_pyobj'] = """\
static int
float_from_pyobj(float* v, PyObject *obj, const char *errmess)
{
double d=0.0;
if (double_from_pyobj(&d,obj,errmess)) {
*v = (float)d;
return 1;
}
return 0;
}
"""
cfuncs['complex_long_double_from_pyobj'] = """\
static int
complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess)
{
complex_double cd = {0.0,0.0};
if (PyArray_CheckScalar(obj)){
if PyArray_IsScalar(obj, CLongDouble) {
PyArray_ScalarAsCtype(obj, v);
return 1;
}
else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
return 1;
}
}
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (long_double)cd.r;
(*v).i = (long_double)cd.i;
return 1;
}
return 0;
}
"""
cfuncs['complex_double_from_pyobj'] = """\
static int
complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) {
Py_complex c;
if (PyComplex_Check(obj)) {
c = PyComplex_AsCComplex(obj);
(*v).r = c.real;
(*v).i = c.imag;
return 1;
}
if (PyArray_IsScalar(obj, ComplexFloating)) {
if (PyArray_IsScalar(obj, CFloat)) {
npy_cfloat new;
PyArray_ScalarAsCtype(obj, &new);
(*v).r = (double)new.real;
(*v).i = (double)new.imag;
}
else if (PyArray_IsScalar(obj, CLongDouble)) {
npy_clongdouble new;
PyArray_ScalarAsCtype(obj, &new);
(*v).r = (double)new.real;
(*v).i = (double)new.imag;
}
else { /* if (PyArray_IsScalar(obj, CDouble)) */
PyArray_ScalarAsCtype(obj, v);
}
return 1;
}
if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
PyArrayObject *arr;
if (PyArray_Check(obj)) {
arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
}
else {
arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
}
if (arr == NULL) {
return 0;
}
(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
Py_DECREF(arr);
return 1;
}
/* Python does not provide PyNumber_Complex function :-( */
(*v).i = 0.0;
if (PyFloat_Check(obj)) {
(*v).r = PyFloat_AsDouble(obj);
return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PyLong_Check(obj)) {
(*v).r = PyLong_AsDouble(obj);
return !((*v).r == -1.0 && PyErr_Occurred());
}
if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) {
PyObject *tmp = PySequence_GetItem(obj,0);
if (tmp) {
if (complex_double_from_pyobj(v,tmp,errmess)) {
Py_DECREF(tmp);
return 1;
}
Py_DECREF(tmp);
}
}
{
PyObject* err = PyErr_Occurred();
if (err==NULL)
err = PyExc_TypeError;
PyErr_SetString(err,errmess);
}
return 0;
}
"""
cfuncs['complex_float_from_pyobj'] = """\
static int
complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess)
{
complex_double cd={0.0,0.0};
if (complex_double_from_pyobj(&cd,obj,errmess)) {
(*v).r = (float)cd.r;
(*v).i = (float)cd.i;
return 1;
}
return 0;
}
"""
cfuncs['try_pyarr_from_character'] = """\
static int try_pyarr_from_character(PyObject* obj, character* v) {
PyArrayObject *arr = (PyArrayObject*)obj;
if (!obj) return -2;
if (PyArray_Check(obj)) {
if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) {
*(character *)(PyArray_DATA(arr)) = *v;
return 1;
}
}
{
char mess[F2PY_MESSAGE_BUFFER_SIZE];
PyObject* err = PyErr_Occurred();
if (err == NULL) {
err = PyExc_ValueError;
strcpy(mess, "try_pyarr_from_character failed"
" -- expected bytes array-scalar|array, got ");
f2py_describe(obj, mess + strlen(mess));
}
PyErr_SetString(err, mess);
}
return 0;
}
"""
cfuncs[
'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
cfuncs[
'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
cfuncs[
'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
cfuncs[
'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
cfuncs[
'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
cfuncs[
'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
cfuncs[
'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
cfuncs[
'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
cfuncs[
'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
cfuncs[
'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
cfuncs[
'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
cfuncs['create_cb_arglist'] = """\
static int
create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs,
const int nofoptargs, int *nofargs, PyTupleObject **args,
const char *errmess)
{
PyObject *tmp = NULL;
PyObject *tmp_fun = NULL;
Py_ssize_t tot, opt, ext, siz, i, di = 0;
CFUNCSMESS(\"create_cb_arglist\\n\");
tot=opt=ext=siz=0;
/* Get the total number of arguments */
if (PyFunction_Check(fun)) {
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
else {
di = 1;
if (PyObject_HasAttrString(fun,\"im_func\")) {
tmp_fun = PyObject_GetAttrString(fun,\"im_func\");
}
else if (PyObject_HasAttrString(fun,\"__call__\")) {
tmp = PyObject_GetAttrString(fun,\"__call__\");
if (PyObject_HasAttrString(tmp,\"im_func\"))
tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
else {
tmp_fun = fun; /* built-in function */
Py_INCREF(tmp_fun);
tot = maxnofargs;
if (PyCFunction_Check(fun)) {
/* In case the function has a co_argcount (like on PyPy) */
di = 0;
}
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
}
Py_XDECREF(tmp);
}
else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
tot = maxnofargs;
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
else if (F2PyCapsule_Check(fun)) {
tot = maxnofargs;
if (xa != NULL)
ext = PyTuple_Size((PyObject *)xa);
if(ext>0) {
fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\");
goto capi_fail;
}
tmp_fun = fun;
Py_INCREF(tmp_fun);
}
}
if (tmp_fun == NULL) {
fprintf(stderr,
\"Call-back argument must be function|instance|instance.__call__|f2py-function \"
\"but got %s.\\n\",
((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
goto capi_fail;
}
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
Py_DECREF(tmp);
if (tmp_argcount == NULL) {
goto capi_fail;
}
tot = PyLong_AsSsize_t(tmp_argcount) - di;
Py_DECREF(tmp_argcount);
}
}
/* Get the number of optional arguments */
if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
opt = PyTuple_Size(tmp);
Py_XDECREF(tmp);
}
/* Get the number of extra arguments */
if (xa != NULL)
ext = PyTuple_Size((PyObject *)xa);
/* Calculate the size of call-backs argument list */
siz = MIN(maxnofargs+ext,tot);
*nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
fprintf(stderr,
\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
\"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
#endif
if (siz < tot-opt) {
fprintf(stderr,
\"create_cb_arglist: Failed to build argument list \"
\"(siz) with enough arguments (tot-opt) required by \"
\"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
siz, tot, opt);
goto capi_fail;
}
/* Initialize argument list */
*args = (PyTupleObject *)PyTuple_New(siz);
for (i=0;i<*nofargs;i++) {
Py_INCREF(Py_None);
PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
}
if (xa != NULL)
for (i=(*nofargs);i<siz;i++) {
tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
Py_INCREF(tmp);
PyTuple_SET_ITEM(*args,i,tmp);
}
CFUNCSMESS(\"create_cb_arglist-end\\n\");
Py_DECREF(tmp_fun);
return 1;
capi_fail:
if (PyErr_Occurred() == NULL)
PyErr_SetString(#modulename#_error, errmess);
Py_XDECREF(tmp_fun);
return 0;
}
"""
def dict_append(d, **kws):
    """Merge keyword values into dict *d* in place.

    A key that is absent is simply set.  A key whose current value is a
    string is overwritten; any other existing value is assumed to be
    list-like and is extended with the new value.
    """
    for key, value in kws.items():
        if key not in d:
            d[key] = value
            continue
        current = d[key]
        if isinstance(current, str):
            # Strings are replaced wholesale rather than concatenated.
            d[key] = value
        else:
            current.extend(value)
The provided code snippet includes necessary dependencies for implementing the `run_main` function. Write a Python function `def run_main(comline_list)` to solve the following problem:
Equivalent to running:: f2py <args> where ``<args>=string.join(<list>,' ')``, but in Python. Unless ``-h`` is used, this function returns a dictionary containing information on generated modules and their dependencies on source files. You cannot build extension modules with this function, that is, using ``-c`` is not allowed. Use the ``compile`` command instead. Examples -------- The command ``f2py -m scalar scalar.f`` can be executed from Python as follows. .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat :language: python
Here is the function:
def run_main(comline_list):
    """
    Equivalent to running::
        f2py <args>
    where ``<args>=string.join(<list>,' ')``, but in Python. Unless
    ``-h`` is used, this function returns a dictionary containing
    information on generated modules and their dependencies on source
    files.
    You cannot build extension modules with this function, that is,
    using ``-c`` is not allowed. Use the ``compile`` command instead.
    Examples
    --------
    The command ``f2py -m scalar scalar.f`` can be executed from Python as
    follows.
    .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat
        :language: python
    """
    crackfortran.reset_global_f2py_vars()
    f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
    # fortranobject.{h,c} ship next to cfuncs.py; every generated module
    # depends on them (see the dict_append loop at the bottom).
    fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
    fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
    files, options = scaninputline(comline_list)
    # NOTE: several modules read their configuration from module-level
    # attributes, which are set here as a side effect.
    auxfuncs.options = options
    capi_maps.load_f2cmap_file(options['f2cmap_file'])
    postlist = callcrackfortran(files, options)
    # Map each 'use'd module name -> names of the blocks that use it.
    isusedby = {}
    for plist in postlist:
        if 'use' in plist:
            for u in plist['use'].keys():
                if u not in isusedby:
                    isusedby[u] = []
                isusedby[u].append(plist['name'])
    for plist in postlist:
        if plist['block'] == 'python module' and '__user__' in plist['name']:
            if plist['name'] in isusedby:
                # if not quiet:
                outmess(
                    f'Skipping Makefile build for module "{plist["name"]}" '
                    'which is used by {}\n'.format(
                        ','.join(f'"{s}"' for s in isusedby[plist['name']])))
    if 'signsfile' in options:
        # Signature-file mode (-h): stop after writing the .pyf file.
        if options['verbose'] > 1:
            outmess(
                'Stopping. Edit the signature file and then run f2py on the signature file: ')
            outmess('%s %s\n' %
                    (os.path.basename(sys.argv[0]), options['signsfile']))
        return
    for plist in postlist:
        if plist['block'] != 'python module':
            if 'python module' not in options:
                errmess(
                    'Tip: If your original code is Fortran source then you must use -m option.\n')
            raise TypeError('All blocks must be python module blocks but got %s' % (
                repr(plist['block'])))
    auxfuncs.debugoptions = options['debug']
    f90mod_rules.options = options
    auxfuncs.wrapfuncs = options['wrapfuncs']
    ret = buildmodules(postlist)
    for mn in ret.keys():
        # Record the fortranobject sources as dependencies of each module.
        dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
return ret | Equivalent to running:: f2py <args> where ``<args>=string.join(<list>,' ')``, but in Python. Unless ``-h`` is used, this function returns a dictionary containing information on generated modules and their dependencies on source files. You cannot build extension modules with this function, that is, using ``-c`` is not allowed. Use the ``compile`` command instead. Examples -------- The command ``f2py -m scalar scalar.f`` can be executed from Python as follows. .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat :language: python |
168,556 | import sys
import os
import pprint
import re
from pathlib import Path
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
from . import capi_maps
def get_prefix(module):
    """Return the directory two levels above *module*'s file."""
    p = os.path.dirname(os.path.dirname(module.__file__))
    return p | null |
168,557 | import sys
import os
import pprint
import re
from pathlib import Path
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
from . import capi_maps
outmess = auxfuncs.outmess
def dict_append(d_out, d_in):
    """Accumulate the items of *d_in* into lists stored in *d_out*.

    Every key in *d_out* maps to a list: list values from *d_in* are
    concatenated onto it (producing a fresh list), scalar values are
    appended in place.
    """
    for key, value in d_in.items():
        bucket = d_out.setdefault(key, [])
        if isinstance(value, list):
            d_out[key] = bucket + value
        else:
            bucket.append(value)
def filter_files(prefix, suffix, files, remove_prefix=None):
    """
    Filter files by prefix and suffix.

    Returns ``(filtered, rest)``: the stripped entries fully matching the
    regular expression ``prefix.*suffix`` and the remainder.  When
    *remove_prefix* is true, ``len(prefix)`` characters are chopped off
    the front of each returned match.
    """
    pattern = re.compile(prefix + r'.*' + suffix + r'\Z')
    start = len(prefix) if remove_prefix else 0
    filtered = []
    rest = []
    for entry in (item.strip() for item in files):
        if pattern.match(entry):
            filtered.append(entry[start:])
        else:
            rest.append(entry)
    return filtered, rest
def get_f2py_modulename(source):
    """Return the python-module name declared in file *source*, or None.

    The file is scanned line by line for the first module declaration
    that is not a ``*__user__*`` call-back signature module.
    """
    with open(source) as stream:
        for line in stream:
            match = _f2py_module_name_match(line)
            if not match:
                continue
            if _f2py_user_module_name_match(line):
                # *__user__* blocks describe call-back signatures, not the
                # wrapper module itself -- keep scanning.
                continue
            return match.group('name')
    return None
def get_info(name, notfound_action=0):
    """
    notfound_action:
      0 - do nothing
      1 - display warning message
      2 - raise error
    """
    # Map resource name -> system_info subclass; unknown names fall back to
    # the base system_info class.  Lookup is case-insensitive.
    cl = {'armpl': armpl_info,
          'blas_armpl': blas_armpl_info,
          'lapack_armpl': lapack_armpl_info,
          'fftw3_armpl': fftw3_armpl_info,
          'atlas': atlas_info,  # use lapack_opt or blas_opt instead
          'atlas_threads': atlas_threads_info,  # ditto
          'atlas_blas': atlas_blas_info,
          'atlas_blas_threads': atlas_blas_threads_info,
          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
          'atlas_3_10_threads': atlas_3_10_threads_info,  # ditto
          'atlas_3_10_blas': atlas_3_10_blas_info,
          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
          'flame': flame_info,  # use lapack_opt instead
          'mkl': mkl_info,
          # openblas which may or may not have embedded lapack
          'openblas': openblas_info,  # use blas_opt instead
          # openblas with embedded lapack
          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
          'openblas_clapack': openblas_clapack_info,  # use blas_opt instead
          'blis': blis_info,  # use blas_opt instead
          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
          'blas_mkl': blas_mkl_info,  # use blas_opt instead
          'accelerate': accelerate_info,  # use blas_opt instead
          'openblas64_': openblas64__info,
          'openblas64__lapack': openblas64__lapack_info,
          'openblas_ilp64': openblas_ilp64_info,
          'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
          'x11': x11_info,
          'fft_opt': fft_opt_info,
          'fftw': fftw_info,
          'fftw2': fftw2_info,
          'fftw3': fftw3_info,
          'dfftw': dfftw_info,
          'sfftw': sfftw_info,
          'fftw_threads': fftw_threads_info,
          'dfftw_threads': dfftw_threads_info,
          'sfftw_threads': sfftw_threads_info,
          'djbfft': djbfft_info,
          'blas': blas_info,  # use blas_opt instead
          'lapack': lapack_info,  # use lapack_opt instead
          'lapack_src': lapack_src_info,
          'blas_src': blas_src_info,
          'numpy': numpy_info,
          'f2py': f2py_info,
          'Numeric': Numeric_info,
          'numeric': Numeric_info,
          'numarray': numarray_info,
          'numerix': numerix_info,
          'lapack_opt': lapack_opt_info,
          'lapack_ilp64_opt': lapack_ilp64_opt_info,
          'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
          'lapack64__opt': lapack64__opt_info,
          'blas_opt': blas_opt_info,
          'blas_ilp64_opt': blas_ilp64_opt_info,
          'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
          'blas64__opt': blas64__opt_info,
          'boost_python': boost_python_info,
          'agg2': agg2_info,
          'wx': wx_info,
          'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
          'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
          'gdk_pixbuf_2': gdk_pixbuf_2_info,
          'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
          'gdk': gdk_info,
          'gdk_2': gdk_2_info,
          'gdk-2.0': gdk_2_info,
          'gdk_x11_2': gdk_x11_2_info,
          'gdk-x11-2.0': gdk_x11_2_info,
          'gtkp_x11_2': gtkp_x11_2_info,
          'gtk+-x11-2.0': gtkp_x11_2_info,
          'gtkp_2': gtkp_2_info,
          'gtk+-2.0': gtkp_2_info,
          'xft': xft_info,
          'freetype2': freetype2_info,
          'umfpack': umfpack_info,
          'amd': amd_info,
          }.get(name.lower(), system_info)
    return cl().get_info(notfound_action)
def setup(**attr):
    """Enhanced ``setup()`` wrapper around ``old_setup``.

    Installs the numpy command classes, expands a callable
    ``configuration`` keyword into concrete setup arguments (after a dry
    command-line parse so help/errors short-circuit), moves in-extension
    source libraries into the top-level ``libraries`` list, and forces the
    custom NumpyDistribution class.
    """
    cmdclass = numpy_cmdclass.copy()
    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass
    if 'configuration' in new_attr:
        # To avoid calling configuration if there are any errors
        # or help request in command in the line.
        configuration = new_attr.pop('configuration')
        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = "commandline"
        try:
            # Recursive call: parses the command line only, because
            # _setup_stop_after is set to "commandline" above.
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop
        if dist.help or not _command_line_ok():
            # probably displayed help, skip running any commands
            return dist
        # create setup dictionary and append to new_attr
        config = configuration()
        if hasattr(config, 'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)
    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules', []):
        new_libraries = []
        for item in ext.libraries:
            if is_sequence(item):
                lib_name, build_info = item
                _check_append_ext_library(libraries, lib_name, build_info)
                new_libraries.append(lib_name)
            elif is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError("invalid description of extension module "
                                "library %r" % (item,))
        ext.libraries = new_libraries
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)
    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
       and 'headers' not in new_attr:
        new_attr['headers'] = []
    # Use our custom NumpyDistribution class instead of distutils' one
    new_attr['distclass'] = NumpyDistribution
    return old_setup(**new_attr)
def dict_append(d, **kws):
    """Merge keyword arguments into *d* in place.

    New keys are inserted as-is.  For an existing key, a string value is
    replaced outright while any other value is treated as list-like and
    extended.
    """
    for name, addition in kws.items():
        existing = d.get(name)
        if name in d and not isinstance(existing, str):
            existing.extend(addition)
        else:
            d[name] = addition
The provided code snippet includes necessary dependencies for implementing the `run_compile` function. Write a Python function `def run_compile()` to solve the following problem:
Do it all in one call!
Here is the function:
def run_compile():
    """
    Do it all in one call!
    """
    import tempfile
    # '-c' got us here; remove it before further argv processing.
    i = sys.argv.index('-c')
    del sys.argv[i]
    remove_build_dir = 0
    try:
        i = sys.argv.index('--build-dir')
    except ValueError:
        i = None
    if i is not None:
        build_dir = sys.argv[i + 1]
        del sys.argv[i + 1]
        del sys.argv[i]
    else:
        # No --build-dir given: build in a temp dir and clean it up at the end.
        remove_build_dir = 1
        build_dir = tempfile.mkdtemp()
    # Partition sys.argv into flag groups, removing each group as it is found.
    _reg1 = re.compile(r'--link-')
    sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
    if sysinfo_flags:
        # strip the leading '--link-' to get the resource name
        sysinfo_flags = [f[7:] for f in sysinfo_flags]
    _reg2 = re.compile(
        r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include')
    f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
    # Collect 'only:'/'skip:' ... ':' sections verbatim for f2py.
    f2py_flags2 = []
    fl = 0
    for a in sys.argv[1:]:
        if a in ['only:', 'skip:']:
            fl = 1
        elif a == ':':
            fl = 0
        if fl or a == ':':
            f2py_flags2.append(a)
    if f2py_flags2 and f2py_flags2[-1] != ':':
        f2py_flags2.append(':')
    f2py_flags.extend(f2py_flags2)
    sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
    _reg3 = re.compile(
        r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)')
    flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
    _reg4 = re.compile(
        r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))')
    fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
    del_list = []
    for s in flib_flags:
        v = '--fcompiler='
        if s[:len(v)] == v:
            from numpy.distutils import fcompiler
            fcompiler.load_all_fcompiler_classes()
            allowed_keys = list(fcompiler.fcompiler_class.keys())
            nv = ov = s[len(v):].lower()
            if ov not in allowed_keys:
                vmap = {}  # XXX
                try:
                    nv = vmap[ov]
                except KeyError:
                    if ov not in vmap.values():
                        print('Unknown vendor: "%s"' % (s[len(v):]))
                    nv = ov
                i = flib_flags.index(s)
                flib_flags[i] = '--fcompiler=' + nv
                continue
    for s in del_list:
        i = flib_flags.index(s)
        del flib_flags[i]
    assert len(flib_flags) <= 2, repr(flib_flags)
    _reg5 = re.compile(r'--(verbose)')
    setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
    if '--quiet' in f2py_flags:
        setup_flags.append('--quiet')
    modulename = 'untitled'
    sources = sys.argv[1:]
    # Two-argument options that must be forwarded to f2py unchanged.
    for optname in ['--include_paths', '--include-paths', '--f2cmap']:
        if optname in sys.argv:
            i = sys.argv.index(optname)
            f2py_flags.extend(sys.argv[i:i + 2])
            del sys.argv[i + 1], sys.argv[i]
            sources = sys.argv[1:]
    if '-m' in sys.argv:
        i = sys.argv.index('-m')
        modulename = sys.argv[i + 1]
        del sys.argv[i + 1], sys.argv[i]
        sources = sys.argv[1:]
    else:
        # No -m: derive the module name from the first .pyf file.
        from numpy.distutils.command.build_src import get_f2py_modulename
        pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
        sources = pyf_files + sources
        for f in pyf_files:
            modulename = get_f2py_modulename(f)
            if modulename:
                break
    # Split the remaining positional arguments by kind.
    extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources)
    include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
    library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
    libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
    undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
    define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
    for i in range(len(define_macros)):
        name_value = define_macros[i].split('=', 1)
        if len(name_value) == 1:
            name_value.append(None)
        if len(name_value) == 2:
            define_macros[i] = tuple(name_value)
        else:
            print('Invalid use of -D:', name_value)
    from numpy.distutils.system_info import get_info
    num_info = {}
    if num_info:
        include_dirs.extend(num_info.get('include_dirs', []))
    from numpy.distutils.core import setup, Extension
    ext_args = {'name': modulename, 'sources': sources,
                'include_dirs': include_dirs,
                'library_dirs': library_dirs,
                'libraries': libraries,
                'define_macros': define_macros,
                'undef_macros': undef_macros,
                'extra_objects': extra_objects,
                'f2py_options': f2py_flags,
                }
    if sysinfo_flags:
        from numpy.distutils.misc_util import dict_append
        for n in sysinfo_flags:
            i = get_info(n)
            if not i:
                outmess('No %s resources found in system'
                        ' (try `f2py --help-link`)\n' % (repr(n)))
            dict_append(ext_args, **i)
    ext = Extension(**ext_args)
    # Rebuild argv for the distutils 'build' command driven by setup().
    sys.argv = [sys.argv[0]] + setup_flags
    sys.argv.extend(['build',
                     '--build-temp', build_dir,
                     '--build-base', build_dir,
                     '--build-platlib', '.',
                     # disable CCompilerOpt
                     '--disable-optimization'])
    if fc_flags:
        sys.argv.extend(['config_fc'] + fc_flags)
    if flib_flags:
        sys.argv.extend(['build_ext'] + flib_flags)
    setup(ext_modules=[ext])
    if remove_build_dir and os.path.exists(build_dir):
        import shutil
        outmess('Removing build directory %s\n' % (build_dir))
shutil.rmtree(build_dir) | Do it all in one call! |
168,558 | from . import __version__
import copy
import re
import os
from .crackfortran import markoutercomma
from . import cb_rules
from .auxfuncs import *
# Map of C type name -> NumPy type-number macro used in generated C API code.
# Some entries are lossy on purpose (marked "forced casting").
c2capi_map = {'double': 'NPY_DOUBLE',
              'float': 'NPY_FLOAT',
              'long_double': 'NPY_DOUBLE',           # forced casting
              'char': 'NPY_STRING',
              'unsigned_char': 'NPY_UBYTE',
              'signed_char': 'NPY_BYTE',
              'short': 'NPY_SHORT',
              'unsigned_short': 'NPY_USHORT',
              'int': 'NPY_INT',
              'unsigned': 'NPY_UINT',
              'long': 'NPY_LONG',
              'long_long': 'NPY_LONG',               # forced casting
              'complex_float': 'NPY_CFLOAT',
              'complex_double': 'NPY_CDOUBLE',
              'complex_long_double': 'NPY_CDOUBLE',  # forced casting
              'string': 'NPY_STRING',
              'character': 'NPY_CHAR'}
# Map of C type name -> printf-style format used when showing values in
# generated C code (escapes are doubled because they land in C strings).
cformat_map = {'double': '%g',
               'float': '%g',
               'long_double': '%Lg',
               'char': '%d',
               'signed_char': '%d',
               'unsigned_char': '%hhu',
               'short': '%hd',
               'unsigned_short': '%hu',
               'int': '%d',
               'unsigned': '%u',
               'long': '%ld',
               'unsigned_long': '%lu',
               'long_long': '%ld',
               'complex_float': '(%g,%g)',
               'complex_double': '(%g,%g)',
               'complex_long_double': '(%Lg,%Lg)',
               'string': '\\"%s\\"',
               'character': "'%c'",
               }
def getctype(var):
    """
    Determines C type
    """
    ctype = 'void'
    if isfunction(var):
        # Recurse into the function's result variable.
        if 'result' in var:
            a = var['result']
        else:
            a = var['name']
        if a in var['vars']:
            return getctype(var['vars'][a])
        else:
            errmess('getctype: function %s has no return value?!\n' % a)
    elif issubroutine(var):
        return ctype
    elif ischaracter_or_characterarray(var):
        return 'character'
    elif isstring_or_stringarray(var):
        return 'string'
    elif 'typespec' in var and var['typespec'].lower() in f2cmap_all:
        typespec = var['typespec'].lower()
        f2cmap = f2cmap_all[typespec]
        ctype = f2cmap['']  # default type
        if 'kindselector' in var:
            if '*' in var['kindselector']:
                # old-style 'real*8' kind declarations
                try:
                    ctype = f2cmap[var['kindselector']['*']]
                except KeyError:
                    errmess('getctype: "%s %s %s" not supported.\n' %
                            (var['typespec'], '*', var['kindselector']['*']))
            elif 'kind' in var['kindselector']:
                # kind=... declarations: try the '<typespec>kind' map first,
                # then fall back to the plain typespec map with a str() key.
                if typespec + 'kind' in f2cmap_all:
                    f2cmap = f2cmap_all[typespec + 'kind']
                try:
                    ctype = f2cmap[var['kindselector']['kind']]
                except KeyError:
                    if typespec in f2cmap_all:
                        f2cmap = f2cmap_all[typespec]
                    try:
                        ctype = f2cmap[str(var['kindselector']['kind'])]
                    except KeyError:
                        errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'
                                % (typespec, var['kindselector']['kind'], ctype,
                                   typespec, var['kindselector']['kind'], os.getcwd()))
    else:
        if not isexternal(var):
            errmess('getctype: No C-type found in "%s", assuming void.\n' % var)
    return ctype
def getstrlength(var):
    """
    Return the Fortran character length of *var* as a C expression string.

    For string-valued functions the result variable is inspected
    recursively.  A length that is not statically known (``(*)``, ``(:)``
    and bare ``*``/``:`` forms) is reported as ``'-1'``; ``intent(hide)``
    strings with such a length additionally trigger an error message.
    """
    if isstringfunction(var):
        if 'result' in var:
            a = var['result']
        else:
            a = var['name']
        if a in var['vars']:
            return getstrlength(var['vars'][a])
        else:
            errmess('getstrlength: function %s has no return value?!\n' % a)
    if not isstring(var):
        errmess(
            'getstrlength: expected a signature of a string but got: %s\n' % (repr(var)))
    # Fix: the original used a local named ``len``, shadowing the builtin.
    length = '1'
    if 'charselector' in var:
        a = var['charselector']
        if '*' in a:
            # old-style 'character*N' declaration
            length = a['*']
        elif 'len' in a:
            length = f2cexpr(a['len'])
    if re.match(r'\(\s*(\*|:)\s*\)', length) or re.match(r'(\*|:)', length):
        if isintent_hide(var):
            errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % (
                repr(var)))
        length = '-1'
    return length
def getarrdims(a, var, verbose=0):
    """Build the dimension-related rule entries for argument *a*.

    Returns a dict with 'dims', 'size', 'rank' and, for arrays, the
    '#varname#_Dims' assignment snippets 'setdims'/'cbsetdims'.
    """
    ret = {}
    if isstring(var) and not isarray(var):
        # Scalar strings are treated as rank-1 with the string length as size.
        ret['dims'] = getstrlength(var)
        ret['size'] = ret['dims']
        ret['rank'] = '1'
    elif isscalar(var):
        ret['size'] = '1'
        ret['rank'] = '0'
        ret['dims'] = ''
    elif isarray(var):
        dim = copy.copy(var['dimension'])
        ret['size'] = '*'.join(dim)
        try:
            # NOTE(review): eval() of the dimension product; inputs come from
            # parsed Fortran sources, not arbitrary user strings.
            ret['size'] = repr(eval(ret['size']))
        except Exception:
            pass
        ret['dims'] = ','.join(dim)
        ret['rank'] = repr(len(dim))
        ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1]
        for i in range(len(dim)):  # solve dim for dependencies
            v = []
            if dim[i] in depargs:
                v = [dim[i]]
            else:
                for va in depargs:
                    if re.match(r'.*?\b%s\b.*' % va, dim[i]):
                        v.append(va)
            for va in v:
                # A dimension that refers to an argument appearing later in
                # the dependency order cannot be computed yet: leave it open.
                if depargs.index(va) > depargs.index(a):
                    dim[i] = '*'
                    break
        ret['setdims'], i = '', -1
        for d in dim:
            i = i + 1
            if d not in ['*', ':', '(*)', '(:)']:
                ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % (
                    ret['setdims'], i, d)
        if ret['setdims']:
            ret['setdims'] = ret['setdims'][:-1]
        ret['cbsetdims'], i = '', -1
        for d in var['dimension']:
            i = i + 1
            if d not in ['*', ':', '(*)', '(:)']:
                ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (
                    ret['cbsetdims'], i, d)
            elif isintent_in(var):
                outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n'
                        % (d))
                ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % (
                    ret['cbsetdims'], i, 0)
            elif verbose:
                errmess(
                    'getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n' % (repr(a), repr(d)))
        if ret['cbsetdims']:
            ret['cbsetdims'] = ret['cbsetdims'][:-1]
    # if not isintent_c(var):
    #     var['dimension'].reverse()
    return ret
def getpydocsign(a, var):
    """Return (input-signature, output-signature) docstring lines for *a*."""
    global lcb_map
    if isfunction(var):
        # Document the function via its result variable.
        if 'result' in var:
            af = var['result']
        else:
            af = var['name']
        if af in var['vars']:
            return getpydocsign(af, var['vars'][af])
        else:
            errmess('getctype: function %s has no return value?!\n' % af)
        return '', ''
    sig, sigout = a, a
    opt = ''
    if isintent_in(var):
        opt = 'input'
    elif isintent_inout(var):
        opt = 'in/output'
    out_a = a
    if isintent_out(var):
        # intent(out=name) may rename the returned value.
        for k in var['intent']:
            if k[:4] == 'out=':
                out_a = k[4:]
                break
    init = ''
    ctype = getctype(var)
    if hasinitvalue(var):
        init, showinit = getinit(a, var)
        init = ', optional\\n    Default: %s' % showinit
    if isscalar(var):
        if isintent_inout(var):
            sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype],
                                                         c2pycode_map[ctype], init)
        else:
            sig = '%s : %s %s%s' % (a, opt, c2py_map[ctype], init)
        sigout = '%s : %s' % (out_a, c2py_map[ctype])
    elif isstring(var):
        if isintent_inout(var):
            sig = '%s : %s rank-0 array(string(len=%s),\'c\')%s' % (
                a, opt, getstrlength(var), init)
        else:
            sig = '%s : %s string(len=%s)%s' % (
                a, opt, getstrlength(var), init)
        sigout = '%s : string(len=%s)' % (out_a, getstrlength(var))
    elif isarray(var):
        dim = var['dimension']
        rank = repr(len(dim))
        sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank,
                                                                    c2pycode_map[
                                                                        ctype],
                                                                    ','.join(dim), init)
        if a == out_a:
            sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\
                % (a, rank, c2pycode_map[ctype], ','.join(dim))
        else:
            sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\
                % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a)
    elif isexternal(var):
        # Call-back argument: show the user-level argument name if the
        # call-back maps provide one.
        ua = ''
        if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]:
            ua = lcb2_map[lcb_map[a]]['argname']
            if not ua == a:
                ua = ' => %s' % ua
            else:
                ua = ''
        sig = '%s : call-back function%s' % (a, ua)
        sigout = sig
    else:
        errmess(
            'getpydocsign: Could not resolve docsignature for "%s".\n' % a)
    return sig, sigout
def getarrdocsign(a, var):
    """
    Return the docstring signature line describing argument *a*.

    Covers scalar strings, numeric scalars and arrays; any other kind of
    variable yields an empty signature.
    """
    ctype = getctype(var)
    # Fix: ``sig`` used to be assigned only inside the branches below, so a
    # variable that is neither a string, scalar nor array raised
    # UnboundLocalError at the return statement.
    sig = ''
    if isstring(var) and (not isarray(var)):
        sig = '%s : rank-0 array(string(len=%s),\'c\')' % (a,
                                                           getstrlength(var))
    elif isscalar(var):
        sig = '%s : rank-0 array(%s,\'%s\')' % (a, c2py_map[ctype],
                                                c2pycode_map[ctype],)
    elif isarray(var):
        dim = var['dimension']
        rank = repr(len(dim))
        sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank,
                                                               c2pycode_map[
                                                                   ctype],
                                                               ','.join(dim))
    return sig
def get_elsize(var):
    """Return the element size (as a string expression) for *var*."""
    if isstring(var) or isstringarray(var):
        elsize = getstrlength(var)
        # override with user-specified length when available:
        elsize = var['charselector'].get('f2py_len', elsize)
        return elsize
    if ischaracter(var) or ischaracterarray(var):
        return '1'
    # for numerical types, PyArray_New* functions ignore specified
    # elsize, so we just return 1 and let elsize be determined at
    # runtime, see fortranobject.c
    return '1'
# Scalar string (no dimension attribute).
def isstring(var):
    return isstring_or_stringarray(var) and not isarray(var)
# String with a dimension attribute.
def isstringarray(var):
    return isstring_or_stringarray(var) and isarray(var)
# Dimensioned, non-external variable.
def isarray(var):
    return 'dimension' in var and not isexternal(var)
# Variable carries a documentation note.
def hasnote(var):
    return 'note' in var
def dictappend(rd, ar):
    """Recursively merge *ar* (a dict or a list of dicts) into *rd*.

    Keys starting with ``_`` are skipped.  A string already stored in
    *rd* is first promoted to a one-element list; list values are then
    concatenated and scalars appended.  Dict values are merged
    recursively, except under ``separatorsfor`` where existing entries
    take precedence.  Returns the (mutated) *rd*.
    """
    if isinstance(ar, list):
        # A list of fragments: fold each one in turn.
        for fragment in ar:
            rd = dictappend(rd, fragment)
        return rd
    for key in ar.keys():
        if key[0] == '_':
            continue
        new = ar[key]
        if key not in rd:
            rd[key] = new
            continue
        old = rd[key]
        if isinstance(old, str):
            old = rd[key] = [old]
        if isinstance(old, list):
            if isinstance(new, list):
                rd[key] = old + new
            else:
                old.append(new)
        elif isinstance(old, dict):
            if isinstance(new, dict):
                if key == 'separatorsfor':
                    # Keep whatever separators are already configured.
                    for sub in new.keys():
                        if sub not in old:
                            old[sub] = new[sub]
                else:
                    rd[key] = dictappend(old, new)
        else:
            rd[key] = new
    return rd
def common_sign2map(a, var): # obsolete
    """Build the signature substitution map for common-block member ``a``
    described by ``var`` (kept for backward compatibility).

    Fills in ctype/atype/elsize, display format, array dimension entries,
    docstring signatures, and an optional note.
    """
    ret = {'varname': a, 'ctype': getctype(var)}
    if isstringarray(var):
        ret['ctype'] = 'char'
    if ret['ctype'] in c2capi_map:
        ret['atype'] = c2capi_map[ret['ctype']]
        ret['elsize'] = get_elsize(var)
    if ret['ctype'] in cformat_map:
        ret['showvalueformat'] = '%s' % (cformat_map[ret['ctype']])
    if isarray(var):
        ret = dictappend(ret, getarrdims(a, var))
    elif isstring(var):
        # strings are exposed as rank-1 char arrays of length size
        ret['size'] = getstrlength(var)
        ret['rank'] = '1'
    ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var)
    if hasnote(var):
        # move the note into the map and leave a pointer behind
        ret['note'] = var['note']
        var['note'] = ['See elsewhere.']
    # for strings this returns 0-rank but actually is 1-rank
    ret['arrdocstr'] = getarrdocsign(a, var)
    return ret
168,559 | import re
import warnings
from enum import Enum
from math import gcd
class ExprWarning(UserWarning):
    """Warning category emitted via :func:`ewarn`."""
    pass
def ewarn(message):
    """Emit an ExprWarning attributed to ewarn's caller (stacklevel=2)."""
    warnings.warn(message, ExprWarning, stacklevel=2)
168,560 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
    """
    Operation kind tag, used as the Expr ``op`` attribute.
    """
    # Literal constants and symbols
    INTEGER = 10
    REAL = 12
    COMPLEX = 15
    STRING = 20
    ARRAY = 30
    SYMBOL = 40
    # Compound expressions
    TERNARY = 100
    APPLY = 200
    INDEXING = 210
    CONCAT = 220
    RELATIONAL = 300
    # Normalized n-ary forms: TERMS is a sum of term:coefficient pairs,
    # FACTORS a product of base:exponent pairs
    TERMS = 1000
    FACTORS = 2000
    # C address-of ('&') and dereference ('*') operators
    REF = 3000
    DEREF = 3001
class Expr:
    """Represents a Fortran expression as an op-data pair.
    Expr instances are hashable and sortable.
    """
    # NOTE(review): ``parse`` takes the expression string as its first
    # argument and never uses ``self`` -- presumably intended to be a
    # @staticmethod; confirm before calling through an instance.
    def parse(s, language=Language.C):
        """Parse a Fortran expression to an Expr.
        """
        return fromstring(s, language=language)
    def __init__(self, op, data):
        assert isinstance(op, Op)
        # sanity checks
        if op is Op.INTEGER:
            # data is a 2-tuple of numeric object and a kind value
            # (default is 4)
            assert isinstance(data, tuple) and len(data) == 2
            assert isinstance(data[0], int)
            assert isinstance(data[1], (int, str)), data
        elif op is Op.REAL:
            # data is a 2-tuple of numeric object and a kind value
            # (default is 4)
            assert isinstance(data, tuple) and len(data) == 2
            assert isinstance(data[0], float)
            assert isinstance(data[1], (int, str)), data
        elif op is Op.COMPLEX:
            # data is a 2-tuple of constant expressions
            assert isinstance(data, tuple) and len(data) == 2
        elif op is Op.STRING:
            # data is a 2-tuple of quoted string and a kind value
            # (default is 1)
            assert isinstance(data, tuple) and len(data) == 2
            # data[0][::len(data[0])-1] picks the first and last character,
            # i.e. the pair of quotes around the string
            assert (isinstance(data[0], str)
                    and data[0][::len(data[0])-1] in ('""', "''", '@@'))
            assert isinstance(data[1], (int, str)), data
        elif op is Op.SYMBOL:
            # data is any hashable object
            assert hash(data) is not None
        elif op in (Op.ARRAY, Op.CONCAT):
            # data is a tuple of expressions
            assert isinstance(data, tuple)
            assert all(isinstance(item, Expr) for item in data), data
        elif op in (Op.TERMS, Op.FACTORS):
            # data is {<term|base>:<coeff|exponent>} where dict values
            # are nonzero Python integers
            assert isinstance(data, dict)
        elif op is Op.APPLY:
            # data is (<function>, <operands>, <kwoperands>) where
            # operands are Expr instances
            assert isinstance(data, tuple) and len(data) == 3
            # function is any hashable object
            assert hash(data[0]) is not None
            assert isinstance(data[1], tuple)
            assert isinstance(data[2], dict)
        elif op is Op.INDEXING:
            # data is (<object>, <indices>)
            assert isinstance(data, tuple) and len(data) == 2
            # function is any hashable object
            assert hash(data[0]) is not None
        elif op is Op.TERNARY:
            # data is (<cond>, <expr1>, <expr2>)
            assert isinstance(data, tuple) and len(data) == 3
        elif op in (Op.REF, Op.DEREF):
            # data is Expr instance
            assert isinstance(data, Expr)
        elif op is Op.RELATIONAL:
            # data is (<relop>, <left>, <right>)
            assert isinstance(data, tuple) and len(data) == 3
        else:
            raise NotImplementedError(
                f'unknown op or missing sanity check: {op}')
        self.op = op
        self.data = data
    def __eq__(self, other):
        return (isinstance(other, Expr)
                and self.op is other.op
                and self.data == other.data)
    def __hash__(self):
        # Dict-valued data is reduced to a sorted tuple so that equal
        # expressions hash equally regardless of insertion order.
        if self.op in (Op.TERMS, Op.FACTORS):
            data = tuple(sorted(self.data.items()))
        elif self.op is Op.APPLY:
            data = self.data[:2] + tuple(sorted(self.data[2].items()))
        else:
            data = self.data
        return hash((self.op, data))
    def __lt__(self, other):
        # Ordering: first by op value, then by (canonicalized) data.
        if isinstance(other, Expr):
            if self.op is not other.op:
                return self.op.value < other.op.value
            if self.op in (Op.TERMS, Op.FACTORS):
                return (tuple(sorted(self.data.items()))
                        < tuple(sorted(other.data.items())))
            if self.op is Op.APPLY:
                if self.data[:2] != other.data[:2]:
                    return self.data[:2] < other.data[:2]
                return tuple(sorted(self.data[2].items())) < tuple(
                    sorted(other.data[2].items()))
            return self.data < other.data
        return NotImplemented
    def __le__(self, other): return self == other or self < other
    def __gt__(self, other): return not (self <= other)
    def __ge__(self, other): return not (self < other)
    def __repr__(self):
        return f'{type(self).__name__}({self.op}, {self.data!r})'
    def __str__(self):
        return self.tostring()
    def tostring(self, parent_precedence=Precedence.NONE,
                 language=Language.Fortran):
        """Return a string representation of Expr.
        """
        if self.op in (Op.INTEGER, Op.REAL):
            precedence = (Precedence.SUM if self.data[0] < 0
                          else Precedence.ATOM)
            r = str(self.data[0]) + (f'_{self.data[1]}'
                                     if self.data[1] != 4 else '')
        elif self.op is Op.COMPLEX:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '(' + r + ')'
            precedence = Precedence.ATOM
        elif self.op is Op.SYMBOL:
            precedence = Precedence.ATOM
            r = str(self.data)
        elif self.op is Op.STRING:
            r = self.data[0]
            if self.data[1] != 1:
                # non-default kind is emitted as a prefix: <kind>_<string>
                r = self.data[1] + '_' + r
            precedence = Precedence.ATOM
        elif self.op is Op.ARRAY:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '[' + r + ']'
            precedence = Precedence.ATOM
        elif self.op is Op.TERMS:
            terms = []
            for term, coeff in sorted(self.data.items()):
                if coeff < 0:
                    op = ' - '
                    coeff = -coeff
                else:
                    op = ' + '
                if coeff == 1:
                    term = term.tostring(Precedence.SUM, language=language)
                else:
                    if term == as_number(1):
                        term = str(coeff)
                    else:
                        term = f'{coeff} * ' + term.tostring(
                            Precedence.PRODUCT, language=language)
                if terms:
                    terms.append(op)
                elif op == ' - ':
                    # leading minus on the very first term
                    terms.append('-')
                terms.append(term)
            r = ''.join(terms) or '0'
            precedence = Precedence.SUM if terms else Precedence.ATOM
        elif self.op is Op.FACTORS:
            factors = []
            tail = []
            for base, exp in sorted(self.data.items()):
                op = ' * '
                if exp == 1:
                    factor = base.tostring(Precedence.PRODUCT,
                                           language=language)
                elif language is Language.C:
                    if exp in range(2, 10):
                        # small positive powers: expand to repeated products
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        factor = ' * '.join([factor] * exp)
                    elif exp in range(-10, 0):
                        # small negative powers: collect into the divisor tail
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        tail += [factor] * -exp
                        continue
                    else:
                        factor = base.tostring(Precedence.TUPLE,
                                               language=language)
                        factor = f'pow({factor}, {exp})'
                else:
                    factor = base.tostring(Precedence.POWER,
                                           language=language) + f' ** {exp}'
                if factors:
                    factors.append(op)
                factors.append(factor)
            if tail:
                if not factors:
                    factors += ['1']
                factors += ['/', '(', ' * '.join(tail), ')']
            r = ''.join(factors) or '1'
            precedence = Precedence.PRODUCT if factors else Precedence.ATOM
        elif self.op is Op.APPLY:
            name, args, kwargs = self.data
            if name is ArithOp.DIV and language is Language.C:
                numer, denom = [arg.tostring(Precedence.PRODUCT,
                                             language=language)
                                for arg in args]
                r = f'{numer} / {denom}'
                precedence = Precedence.PRODUCT
            else:
                args = [arg.tostring(Precedence.TUPLE, language=language)
                        for arg in args]
                args += [k + '=' + v.tostring(Precedence.NONE)
                         for k, v in kwargs.items()]
                r = f'{name}({", ".join(args)})'
                precedence = Precedence.ATOM
        elif self.op is Op.INDEXING:
            name = self.data[0]
            args = [arg.tostring(Precedence.TUPLE, language=language)
                    for arg in self.data[1:]]
            r = f'{name}[{", ".join(args)}]'
            precedence = Precedence.ATOM
        elif self.op is Op.CONCAT:
            args = [arg.tostring(Precedence.PRODUCT, language=language)
                    for arg in self.data]
            r = " // ".join(args)
            precedence = Precedence.PRODUCT
        elif self.op is Op.TERNARY:
            cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
                                             language=language)
                                  for a in self.data]
            if language is Language.C:
                r = f'({cond}?{expr1}:{expr2})'
            elif language is Language.Python:
                r = f'({expr1} if {cond} else {expr2})'
            elif language is Language.Fortran:
                r = f'merge({expr1}, {expr2}, {cond})'
            else:
                raise NotImplementedError(
                    f'tostring for {self.op} and {language}')
            precedence = Precedence.ATOM
        elif self.op is Op.REF:
            r = '&' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.DEREF:
            r = '*' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
                          else Precedence.LT)
            left = left.tostring(precedence, language=language)
            right = right.tostring(precedence, language=language)
            rop = rop.tostring(language=language)
            r = f'{left} {rop} {right}'
        else:
            raise NotImplementedError(f'tostring for op {self.op}')
        if parent_precedence.value < precedence.value:
            # If parent precedence is higher than operand precedence,
            # operand will be enclosed in parenthesis.
            return '(' + r + ')'
        return r
    def __pos__(self):
        return self
    def __neg__(self):
        return self * -1
    def __add__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    # result kind is the larger of the two operand kinds
                    return as_number(
                        self.data[0] + other.data[0],
                        max(self.data[1], other.data[1]))
                if self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 + r2, i1 + i2)
                if self.op is Op.TERMS:
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self + as_complex(other)
            elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
                return as_complex(self) + other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self + as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) + other
            return as_terms(self) + as_terms(other)
        return NotImplemented
    def __radd__(self, other):
        if isinstance(other, number_types):
            return as_number(other) + self
        return NotImplemented
    def __sub__(self, other):
        return self + (-other)
    def __rsub__(self, other):
        if isinstance(other, number_types):
            return as_number(other) - self
        return NotImplemented
    def __mul__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    return as_number(self.data[0] * other.data[0],
                                     max(self.data[1], other.data[1]))
                elif self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
                if self.op is Op.FACTORS:
                    # multiplying products adds exponents of common bases
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
                elif self.op is Op.TERMS:
                    # distribute: (sum) * (sum)
                    r = Expr(self.op, {})
                    for t1, c1 in self.data.items():
                        for t2, c2 in other.data.items():
                            _pairs_add(r.data, t1 * t2, c1 * c2)
                    return normalize(r)
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self * as_complex(other)
            elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
                return as_complex(self) * other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self * as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) * other
            if self.op is Op.TERMS:
                return self * as_terms(other)
            elif other.op is Op.TERMS:
                return as_terms(self) * other
            return as_factors(self) * as_factors(other)
        return NotImplemented
    def __rmul__(self, other):
        if isinstance(other, number_types):
            return as_number(other) * self
        return NotImplemented
    def __pow__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            if other.op is Op.INTEGER:
                exponent = other.data[0]
                # TODO: other kind not used
                if exponent == 0:
                    return as_number(1)
                if exponent == 1:
                    return self
                if exponent > 0:
                    if self.op is Op.FACTORS:
                        r = Expr(self.op, {})
                        for k, v in self.data.items():
                            r.data[k] = v * exponent
                        return normalize(r)
                    return self * (self ** (exponent - 1))
                elif exponent != -1:
                    return (self ** (-exponent)) ** -1
                return Expr(Op.FACTORS, {self: exponent})
        return as_apply(ArithOp.POW, self, other)
        return NotImplemented
    def __truediv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            # Fortran / is different from Python /:
            # - `/` is a truncate operation for integer operands
            return normalize(as_apply(ArithOp.DIV, self, other))
        return NotImplemented
    def __rtruediv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            return other / self
        return NotImplemented
    def __floordiv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            # Fortran // is different from Python //:
            # - `//` is a concatenate operation for string operands
            return normalize(Expr(Op.CONCAT, (self, other)))
        return NotImplemented
    def __rfloordiv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            return other // self
        return NotImplemented
    def __call__(self, *args, **kwargs):
        # In Fortran, parenthesis () are use for both function call as
        # well as indexing operations.
        #
        # TODO: implement a method for deciding when __call__ should
        # return an INDEXING expression.
        return as_apply(self, *map(as_expr, args),
                        **dict((k, as_expr(v)) for k, v in kwargs.items()))
    def __getitem__(self, index):
        # Provided to support C indexing operations that .pyf files
        # may contain.
        index = as_expr(index)
        if not isinstance(index, tuple):
            index = index,
        if len(index) > 1:
            ewarn(f'C-index should be a single expression but got `{index}`')
        return Expr(Op.INDEXING, (self,) + index)
    def substitute(self, symbols_map):
        """Recursively substitute symbols with values in symbols map.
        Symbols map is a dictionary of symbol-expression pairs.
        """
        if self.op is Op.SYMBOL:
            value = symbols_map.get(self)
            if value is None:
                return self
            m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
            if m:
                # complement to fromstring method
                items, paren = m.groups()
                if paren in ['ROUNDDIV', 'SQUARE']:
                    return as_array(value)
                assert paren == 'ROUND', (paren, value)
                return value
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
            return self
        if self.op in (Op.ARRAY, Op.COMPLEX):
            return Expr(self.op, tuple(item.substitute(symbols_map)
                                       for item in self.data))
        if self.op is Op.CONCAT:
            return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
                                                 for item in self.data)))
        if self.op is Op.TERMS:
            r = None
            for term, coeff in self.data.items():
                if r is None:
                    r = term.substitute(symbols_map) * coeff
                else:
                    r += term.substitute(symbols_map) * coeff
            if r is None:
                ewarn('substitute: empty TERMS expression interpreted as'
                      ' int-literal 0')
                return as_number(0)
            return r
        if self.op is Op.FACTORS:
            r = None
            for base, exponent in self.data.items():
                if r is None:
                    r = base.substitute(symbols_map) ** exponent
                else:
                    r *= base.substitute(symbols_map) ** exponent
            if r is None:
                ewarn('substitute: empty FACTORS expression interpreted'
                      ' as int-literal 1')
                return as_number(1)
            return r
        if self.op is Op.APPLY:
            target, args, kwargs = self.data
            if isinstance(target, Expr):
                target = target.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in args)
            kwargs = dict((k, v.substitute(symbols_map))
                          for k, v in kwargs.items())
            return normalize(Expr(self.op, (target, args, kwargs)))
        if self.op is Op.INDEXING:
            func = self.data[0]
            if isinstance(func, Expr):
                func = func.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in self.data[1:])
            return normalize(Expr(self.op, (func,) + args))
        if self.op is Op.TERNARY:
            operands = tuple(a.substitute(symbols_map) for a in self.data)
            return normalize(Expr(self.op, operands))
        if self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op, self.data.substitute(symbols_map)))
        if self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.substitute(symbols_map)
            right = right.substitute(symbols_map)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
    def traverse(self, visit, *args, **kwargs):
        """Traverse expression tree with visit function.
        The visit function is applied to an expression with given args
        and kwargs.
        Traverse call returns an expression returned by visit when not
        None, otherwise return a new normalized expression with
        traverse-visit sub-expressions.
        """
        result = visit(self, *args, **kwargs)
        if result is not None:
            return result
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
            return self
        elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
            return normalize(Expr(self.op, tuple(
                item.traverse(visit, *args, **kwargs)
                for item in self.data)))
        elif self.op in (Op.TERMS, Op.FACTORS):
            data = {}
            for k, v in self.data.items():
                k = k.traverse(visit, *args, **kwargs)
                v = (v.traverse(visit, *args, **kwargs)
                     if isinstance(v, Expr) else v)
                if k in data:
                    v = data[k] + v
                data[k] = v
            return normalize(Expr(self.op, data))
        elif self.op is Op.APPLY:
            obj = self.data[0]
            func = (obj.traverse(visit, *args, **kwargs)
                    if isinstance(obj, Expr) else obj)
            operands = tuple(operand.traverse(visit, *args, **kwargs)
                             for operand in self.data[1])
            kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
                              for k, v in self.data[2].items())
            return normalize(Expr(self.op, (func, operands, kwoperands)))
        elif self.op is Op.INDEXING:
            obj = self.data[0]
            obj = (obj.traverse(visit, *args, **kwargs)
                   if isinstance(obj, Expr) else obj)
            indices = tuple(index.traverse(visit, *args, **kwargs)
                            for index in self.data[1:])
            return normalize(Expr(self.op, (obj,) + indices))
        elif self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op,
                                  self.data.traverse(visit, *args, **kwargs)))
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.traverse(visit, *args, **kwargs)
            right = right.traverse(visit, *args, **kwargs)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'traverse method for {self.op}')
    def contains(self, other):
        """Check if self contains other.
        """
        found = []
        def visit(expr, found=found):
            # returning expr short-circuits traversal once found
            if found:
                return expr
            elif expr == other:
                found.append(1)
                return expr
        self.traverse(visit)
        return len(found) != 0
    def symbols(self):
        """Return a set of symbols contained in self.
        """
        found = set()
        def visit(expr, found=found):
            if expr.op is Op.SYMBOL:
                found.add(expr)
        self.traverse(visit)
        return found
    def polynomial_atoms(self):
        """Return a set of expressions used as atoms in polynomial self.
        """
        found = set()
        def visit(expr, found=found):
            if expr.op is Op.FACTORS:
                for b in expr.data:
                    b.traverse(visit)
                return expr
            if expr.op in (Op.TERMS, Op.COMPLEX):
                return
            if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
                if expr.data[0] is ArithOp.POW:
                    expr.data[1][0].traverse(visit)
                    return expr
                return
            if expr.op in (Op.INTEGER, Op.REAL):
                return expr
            found.add(expr)
            if expr.op in (Op.INDEXING, Op.APPLY):
                return expr
        self.traverse(visit)
        return found
    def linear_solve(self, symbol):
        """Return a, b such that a * symbol + b == self.
        If self is not linear with respect to symbol, raise RuntimeError.
        """
        b = self.substitute({symbol: as_number(0)})
        ax = self - b
        a = ax.substitute({symbol: as_number(1)})
        # sanity check: a * symbol must reconstruct self - b exactly
        zero, _ = as_numer_denom(a * symbol - ax)
        if zero != as_number(0):
            raise RuntimeError(f'not a {symbol}-linear equation:'
                               f' {a} * {symbol} + {b} == {self}')
        return a, b
The provided code snippet includes the necessary dependencies for implementing the `as_array` function. Write a Python function `def as_array(obj)` to solve the following problem:
Return the given object as an ARRAY expression (array constant).
Here is the function:
def as_array(obj):
    """Return object as ARRAY expression (array constant)."""
    if isinstance(obj, Expr):
        # A single expression becomes a one-item array.
        return Expr(Op.ARRAY, (obj,))
    return Expr(Op.ARRAY, obj)
168,561 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
    """
    Operation kind tag, used as the Expr ``op`` attribute.
    """
    # Literal constants and symbols
    INTEGER = 10
    REAL = 12
    COMPLEX = 15
    STRING = 20
    ARRAY = 30
    SYMBOL = 40
    # Compound expressions
    TERNARY = 100
    APPLY = 200
    INDEXING = 210
    CONCAT = 220
    RELATIONAL = 300
    # Normalized n-ary forms: TERMS is a sum of term:coefficient pairs,
    # FACTORS a product of base:exponent pairs
    TERMS = 1000
    FACTORS = 2000
    # C address-of ('&') and dereference ('*') operators
    REF = 3000
    DEREF = 3001
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
Traverse call returns an expression returned by visit when not
None, otherwise return a new normalized expression with
traverse-visit sub-expressions.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
The provided code snippet includes necessary dependencies for implementing the `as_ternary` function. Write a Python function `def as_ternary(cond, expr1, expr2)` to solve the following problem:
Return object as TERNARY expression (cond?expr1:expr2).
Here is the function:
def as_ternary(cond, expr1, expr2):
    """Build a TERNARY expression equivalent to C's ``cond ? expr1 : expr2``.

    The three operands are stored as given — no normalization or
    validation happens here (``Expr.__init__`` only checks that the data
    is a 3-tuple).
    """
    operands = (cond, expr1, expr2)
    return Expr(Op.TERNARY, operands)
168,562 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
Traverse call returns an expression returned by visit when not
None, otherwise return a new normalized expression with
traverse-visit sub-expressions.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
The provided code snippet includes necessary dependencies for implementing the `as_ref` function. Write a Python function `def as_ref(expr)` to solve the following problem:
Return object as referencing expression.
Here is the function:
def as_ref(expr):
    """Wrap *expr* in a referencing (address-of) expression.

    Returns a new ``Expr`` with op ``Op.REF``; *expr* must itself be an
    ``Expr`` instance (enforced by the sanity check in ``Expr.__init__``).
    """
    ref_expr = Expr(Op.REF, expr)
    return ref_expr
168,563 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
    """Return self; unary plus is a no-op for expressions."""
    return self
def __neg__(self):
    """Return the negation of self, represented as ``self * -1``."""
    return self * -1
def __add__(self, other):
    """Return the sum ``self + other`` as a normalized Expr.

    Plain Python numbers/strings are first converted via ``as_expr``.
    Returns NotImplemented when the operand cannot be converted to an
    Expr, so Python can try the reflected operation.
    """
    other = as_expr(other)
    if isinstance(other, Expr):
        if self.op is other.op:
            if self.op in (Op.INTEGER, Op.REAL):
                # Numeric literals add directly; the result keeps the
                # larger of the two kind parameters.
                return as_number(
                    self.data[0] + other.data[0],
                    max(self.data[1], other.data[1]))
            if self.op is Op.COMPLEX:
                # Componentwise: (r1 + i1*j) + (r2 + i2*j).
                r1, i1 = self.data
                r2, i2 = other.data
                return as_complex(r1 + r2, i1 + i2)
            if self.op is Op.TERMS:
                # Merge term->coefficient maps, summing coefficients
                # of equal terms, then re-normalize.
                r = Expr(self.op, dict(self.data))
                for k, v in other.data.items():
                    _pairs_add(r.data, k, v)
                return normalize(r)
        # Mixed numeric kinds: promote the narrower operand first.
        if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
            return self + as_complex(other)
        elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
            return as_complex(self) + other
        elif self.op is Op.REAL and other.op is Op.INTEGER:
            return self + as_real(other, kind=self.data[1])
        elif self.op is Op.INTEGER and other.op is Op.REAL:
            return as_real(self, kind=other.data[1]) + other
    
def __radd__(self, other):
    """Support ``number + Expr`` by promoting the number to an Expr."""
    if not isinstance(other, number_types):
        return NotImplemented
    return as_number(other) + self
def __sub__(self, other):
    """Return the difference ``self - other``."""
    # Subtraction is addition of the negated operand.
    negated = -other
    return self + negated
def __rsub__(self, other):
    """Support ``number - Expr`` by promoting the number to an Expr."""
    if not isinstance(other, number_types):
        return NotImplemented
    return as_number(other) - self
def __mul__(self, other):
    """Return the product ``self * other`` as a normalized Expr.

    Returns NotImplemented when the operand cannot be converted to an
    Expr, so Python can try the reflected operation.
    """
    other = as_expr(other)
    if isinstance(other, Expr):
        if self.op is other.op:
            if self.op in (Op.INTEGER, Op.REAL):
                # Numeric literals multiply directly; the result keeps
                # the larger of the two kind parameters.
                return as_number(self.data[0] * other.data[0],
                                 max(self.data[1], other.data[1]))
            elif self.op is Op.COMPLEX:
                # (r1 + i1*j) * (r2 + i2*j)
                r1, i1 = self.data
                r2, i2 = other.data
                return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
            if self.op is Op.FACTORS:
                # Merge base->exponent maps, adding exponents of equal
                # bases, then re-normalize.
                r = Expr(self.op, dict(self.data))
                for k, v in other.data.items():
                    _pairs_add(r.data, k, v)
                return normalize(r)
            elif self.op is Op.TERMS:
                # Distribute: the product of two sums is the sum of
                # pairwise term products.
                r = Expr(self.op, {})
                for t1, c1 in self.data.items():
                    for t2, c2 in other.data.items():
                        _pairs_add(r.data, t1 * t2, c1 * c2)
                return normalize(r)
        # Mixed numeric kinds: promote the narrower operand first.
        if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
            return self * as_complex(other)
        elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
            return as_complex(self) * other
        elif self.op is Op.REAL and other.op is Op.INTEGER:
            return self * as_real(other, kind=self.data[1])
        elif self.op is Op.INTEGER and other.op is Op.REAL:
            return as_real(self, kind=other.data[1]) * other
        if self.op is Op.TERMS:
            return self * as_terms(other)
        elif other.op is Op.TERMS:
            return as_terms(self) * other
        # General case: represent both operands as FACTORS products.
        return as_factors(self) * as_factors(other)
    return NotImplemented
def __rmul__(self, other):
    """Support ``number * Expr`` by promoting the number to an Expr."""
    if not isinstance(other, number_types):
        return NotImplemented
    return as_number(other) * self
def __pow__(self, other):
    """Return ``self ** other``.

    Integer-literal exponents are expanded/normalized symbolically;
    any other exponent yields a symbolic ArithOp.POW application.
    """
    other = as_expr(other)
    if isinstance(other, Expr):
        if other.op is Op.INTEGER:
            exponent = other.data[0]
            # TODO: other kind not used
            if exponent == 0:
                return as_number(1)
            if exponent == 1:
                return self
            if exponent > 0:
                if self.op is Op.FACTORS:
                    # Power of a product: scale every exponent.
                    r = Expr(self.op, {})
                    for k, v in self.data.items():
                        r.data[k] = v * exponent
                    return normalize(r)
                # Expand by repeated multiplication (re-normalizes
                # at each step via __mul__).
                return self * (self ** (exponent - 1))
            elif exponent != -1:
                # Negative exponent: invert the positive power.
                return (self ** (-exponent)) ** -1
            # exponent == -1: keep as a FACTORS expression.
            return Expr(Op.FACTORS, {self: exponent})
        return as_apply(ArithOp.POW, self, other)
    return NotImplemented
def __truediv__(self, other):
    """Return the Fortran division ``self / other``.

    Fortran / is different from Python /: for integer operands `/`
    truncates, so division stays symbolic as an ArithOp.DIV apply.
    """
    divisor = as_expr(other)
    if not isinstance(divisor, Expr):
        return NotImplemented
    return normalize(as_apply(ArithOp.DIV, self, divisor))
def __rtruediv__(self, other):
    """Support ``obj / Expr`` by converting obj to an Expr first."""
    dividend = as_expr(other)
    if not isinstance(dividend, Expr):
        return NotImplemented
    return dividend / self
def __floordiv__(self, other):
    """Return the Fortran concatenation ``self // other``.

    Fortran // is different from Python //: for string operands `//`
    concatenates, so a CONCAT expression is built.
    """
    tail = as_expr(other)
    if not isinstance(tail, Expr):
        return NotImplemented
    return normalize(Expr(Op.CONCAT, (self, tail)))
def __rfloordiv__(self, other):
    """Support ``obj // Expr`` by converting obj to an Expr first."""
    head = as_expr(other)
    if not isinstance(head, Expr):
        return NotImplemented
    return head // self
def __call__(self, *args, **kwargs):
    """Build an APPLY expression calling self with the given arguments.

    In Fortran, parentheses () are used for both function calls and
    indexing operations.

    TODO: implement a method for deciding when __call__ should
    return an INDEXING expression.
    """
    converted = tuple(as_expr(a) for a in args)
    kwconverted = {k: as_expr(v) for k, v in kwargs.items()}
    return as_apply(self, *converted, **kwconverted)
def __getitem__(self, index):
    """Return an INDEXING expression ``self[index]``.

    Provided to support C indexing operations that .pyf files may
    contain.
    """
    indices = as_expr(index)
    if not isinstance(indices, tuple):
        indices = (indices,)
    if len(indices) > 1:
        # Multiple indices are unexpected here; warn but proceed.
        ewarn(f'C-index should be a single expression but got `{indices}`')
    return Expr(Op.INDEXING, (self,) + indices)
def substitute(self, symbols_map):
    """Recursively substitute symbols with values in symbols map.

    Symbols map is a dictionary of symbol-expression pairs.
    Arithmetic nodes are rebuilt via Expr operators so the result is
    re-normalized after substitution.
    """
    if self.op is Op.SYMBOL:
        value = symbols_map.get(self)
        if value is None:
            # Symbol is not being substituted; keep it.
            return self
        m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
        if m:
            # complement to fromstring method: this symbol is a
            # placeholder inserted for a parenthesized subexpression.
            items, paren = m.groups()
            if paren in ['ROUNDDIV', 'SQUARE']:
                # Square/round-div parentheses denote array content.
                return as_array(value)
            assert paren == 'ROUND', (paren, value)
        return value
    if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
        # Literals contain no symbols.
        return self
    if self.op in (Op.ARRAY, Op.COMPLEX):
        return Expr(self.op, tuple(item.substitute(symbols_map)
                                   for item in self.data))
    if self.op is Op.CONCAT:
        return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
                                             for item in self.data)))
    if self.op is Op.TERMS:
        # Rebuild the sum with * and += so it re-normalizes.
        r = None
        for term, coeff in self.data.items():
            if r is None:
                r = term.substitute(symbols_map) * coeff
            else:
                r += term.substitute(symbols_map) * coeff
        if r is None:
            ewarn('substitute: empty TERMS expression interpreted as'
                  ' int-literal 0')
            return as_number(0)
        return r
    if self.op is Op.FACTORS:
        # Rebuild the product with ** and *= so it re-normalizes.
        r = None
        for base, exponent in self.data.items():
            if r is None:
                r = base.substitute(symbols_map) ** exponent
            else:
                r *= base.substitute(symbols_map) ** exponent
        if r is None:
            ewarn('substitute: empty FACTORS expression interpreted'
                  ' as int-literal 1')
            return as_number(1)
        return r
    if self.op is Op.APPLY:
        target, args, kwargs = self.data
        if isinstance(target, Expr):
            # target may also be a non-Expr tag (e.g. ArithOp member).
            target = target.substitute(symbols_map)
        args = tuple(a.substitute(symbols_map) for a in args)
        kwargs = dict((k, v.substitute(symbols_map))
                      for k, v in kwargs.items())
        return normalize(Expr(self.op, (target, args, kwargs)))
    if self.op is Op.INDEXING:
        func = self.data[0]
        if isinstance(func, Expr):
            func = func.substitute(symbols_map)
        args = tuple(a.substitute(symbols_map) for a in self.data[1:])
        return normalize(Expr(self.op, (func,) + args))
    if self.op is Op.TERNARY:
        operands = tuple(a.substitute(symbols_map) for a in self.data)
        return normalize(Expr(self.op, operands))
    if self.op in (Op.REF, Op.DEREF):
        return normalize(Expr(self.op, self.data.substitute(symbols_map)))
    if self.op is Op.RELATIONAL:
        rop, left, right = self.data
        left = left.substitute(symbols_map)
        right = right.substitute(symbols_map)
        return normalize(Expr(self.op, (rop, left, right)))
    raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
    """Traverse expression tree with visit function.

    The visit function is applied to an expression with given args
    and kwargs.

    Traverse call returns an expression returned by visit when not
    None, otherwise return a new normalized expression with
    traverse-visit sub-expressions.
    """
    result = visit(self, *args, **kwargs)
    if result is not None:
        # Visitor replaced this node; do not descend further.
        return result
    if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
        # Leaf nodes: nothing to descend into.
        return self
    elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
        return normalize(Expr(self.op, tuple(
            item.traverse(visit, *args, **kwargs)
            for item in self.data)))
    elif self.op in (Op.TERMS, Op.FACTORS):
        data = {}
        for k, v in self.data.items():
            k = k.traverse(visit, *args, **kwargs)
            v = (v.traverse(visit, *args, **kwargs)
                 if isinstance(v, Expr) else v)
            if k in data:
                # Distinct keys may collapse after visiting; merge
                # their coefficients/exponents.
                v = data[k] + v
            data[k] = v
        return normalize(Expr(self.op, data))
    elif self.op is Op.APPLY:
        obj = self.data[0]
        # The applied object may be a non-Expr tag (e.g. ArithOp).
        func = (obj.traverse(visit, *args, **kwargs)
                if isinstance(obj, Expr) else obj)
        operands = tuple(operand.traverse(visit, *args, **kwargs)
                         for operand in self.data[1])
        kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
                          for k, v in self.data[2].items())
        return normalize(Expr(self.op, (func, operands, kwoperands)))
    elif self.op is Op.INDEXING:
        obj = self.data[0]
        obj = (obj.traverse(visit, *args, **kwargs)
               if isinstance(obj, Expr) else obj)
        indices = tuple(index.traverse(visit, *args, **kwargs)
                        for index in self.data[1:])
        return normalize(Expr(self.op, (obj,) + indices))
    elif self.op in (Op.REF, Op.DEREF):
        return normalize(Expr(self.op,
                              self.data.traverse(visit, *args, **kwargs)))
    elif self.op is Op.RELATIONAL:
        rop, left, right = self.data
        left = left.traverse(visit, *args, **kwargs)
        right = right.traverse(visit, *args, **kwargs)
        return normalize(Expr(self.op, (rop, left, right)))
    raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
    """Check if self contains other."""
    hits = []

    def visit(expr, hits=hits):
        # After the first hit, returning the expression unchanged
        # short-circuits further descent by traverse.
        if hits:
            return expr
        if expr == other:
            hits.append(1)
            return expr

    self.traverse(visit)
    return bool(hits)
def symbols(self):
    """Return a set of symbols contained in self."""
    collected = set()

    def visit(expr, collected=collected):
        # Record every SYMBOL node; returning None lets traverse
        # keep descending into sub-expressions.
        if expr.op is Op.SYMBOL:
            collected.add(expr)

    self.traverse(visit)
    return collected
def polynomial_atoms(self):
    """Return a set of expressions used as atoms in polynomial self."""
    found = set()

    # Visitor protocol: returning a (non-None) expression stops
    # traverse from descending into that node; returning None lets
    # the traversal continue.
    def visit(expr, found=found):
        if expr.op is Op.FACTORS:
            # Only the bases of a product can contribute atoms.
            for b in expr.data:
                b.traverse(visit)
            return expr
        if expr.op in (Op.TERMS, Op.COMPLEX):
            # Descend into sums and complex components.
            return
        if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
            if expr.data[0] is ArithOp.POW:
                # Only the base of a power can contribute atoms.
                expr.data[1][0].traverse(visit)
                return expr
            return
        if expr.op in (Op.INTEGER, Op.REAL):
            # Numeric literals are coefficients, not atoms.
            return expr
        found.add(expr)
        if expr.op in (Op.INDEXING, Op.APPLY):
            # Calls/indexing count as opaque atoms; don't descend.
            return expr

    self.traverse(visit)
    return found
def linear_solve(self, symbol):
    """Return a, b such that a * symbol + b == self.

    If self is not linear with respect to symbol, raise RuntimeError.
    """
    # The constant part is self evaluated at symbol == 0.
    b = self.substitute({symbol: as_number(0)})
    # Removing it leaves the symbol-dependent part; at symbol == 1
    # that part equals the coefficient a.
    dependent = self - b
    a = dependent.substitute({symbol: as_number(1)})
    # Linearity check: a * symbol must reproduce the dependent part.
    residual, _ = as_numer_denom(a * symbol - dependent)
    if residual != as_number(0):
        raise RuntimeError(f'not a {symbol}-linear equation:'
                           f' {a} * {symbol} + {b} == {self}')
    return a, b
The provided code snippet includes necessary dependencies for implementing the `as_deref` function. Write a Python function `def as_deref(expr)` to solve the following problem:
Return object as dereferencing expression.
Here is the function:
def as_deref(expr):
"""Return object as dereferencing expression.
"""
return Expr(Op.DEREF, expr) | Return object as dereferencing expression. |
168,564 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
    """
    Used as Expr op attribute.
    """
    # Leaf/constant nodes:
    INTEGER = 10
    REAL = 12
    COMPLEX = 15
    STRING = 20
    ARRAY = 30
    SYMBOL = 40
    # Composite nodes:
    TERNARY = 100
    APPLY = 200
    INDEXING = 210
    CONCAT = 220
    RELATIONAL = 300
    # Normalized arithmetic containers (dict-valued data):
    TERMS = 1000
    FACTORS = 2000
    # C-style reference/dereference operators:
    REF = 3000
    DEREF = 3001
class RelOp(Enum):
    """
    Used in Op.RELATIONAL expression to specify the function part.
    """
    EQ = 1
    NE = 2
    LT = 3
    LE = 4
    GT = 5
    GE = 6

    # NOTE(review): the `cls` parameter suggests this was written as a
    # classmethod; the decorator may have been lost upstream -- confirm.
    def fromstring(cls, s, language=Language.C):
        """Return the RelOp denoted by the operator token `s`.

        Fortran tokens (`.eq.`, `.le.`, ...) are matched
        case-insensitively; C tokens (`==`, `<=`, ...) exactly.
        Raises KeyError for an unknown token.
        """
        if language is Language.Fortran:
            return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
                    '.lt.': RelOp.LT, '.le.': RelOp.LE,
                    '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
        return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
                '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]

    def tostring(self, language=Language.C):
        """Return the operator token for self in the given language."""
        if language is Language.Fortran:
            return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
                    RelOp.LT: '.lt.', RelOp.LE: '.le.',
                    RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
        return {RelOp.EQ: '==', RelOp.NE: '!=',
                RelOp.LT: '<', RelOp.LE: '<=',
                RelOp.GT: '>', RelOp.GE: '>='}[self]
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
Traverse call returns an expression returned by visit when not
None, otherwise return a new normalized expression with
traverse-visit sub-expressions.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
def as_eq(left, right):
return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) | null |
168,565 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
class RelOp(Enum):
def fromstring(cls, s, language=Language.C):
def tostring(self, language=Language.C):
class Expr:
def parse(s, language=Language.C):
def __init__(self, op, data):
def __eq__(self, other):
def __hash__(self):
def __lt__(self, other):
def __le__(self, other):
def __gt__(self, other):
def __ge__(self, other):
def __repr__(self):
def __str__(self):
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
def __pos__(self):
def __neg__(self):
def __add__(self, other):
def __radd__(self, other):
def __sub__(self, other):
def __rsub__(self, other):
def __mul__(self, other):
def __rmul__(self, other):
def __pow__(self, other):
def __truediv__(self, other):
def __rtruediv__(self, other):
def __floordiv__(self, other):
def __rfloordiv__(self, other):
def __call__(self, *args, **kwargs):
def __getitem__(self, index):
def substitute(self, symbols_map):
def traverse(self, visit, *args, **kwargs):
def contains(self, other):
def visit(expr, found=found):
def symbols(self):
def visit(expr, found=found):
def polynomial_atoms(self):
def visit(expr, found=found):
def linear_solve(self, symbol):
def as_ne(left, right):
return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) | null |
168,566 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class RelOp(Enum):
    """
    Used in Op.RELATIONAL expression to specify the function part.
    """
    EQ = 1
    NE = 2
    LT = 3
    LE = 4
    GT = 5
    GE = 6

    def fromstring(cls, s, language=Language.C):
        """Return the RelOp member for operator text ``s``.

        Fortran spellings (``.eq.`` etc.) are matched
        case-insensitively; C spellings (``==`` etc.) are exact.

        NOTE(review): takes ``cls`` but no ``@classmethod`` decorator
        is visible here -- confirm against the original source.
        """
        if language is Language.Fortran:
            table = {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
                     '.lt.': RelOp.LT, '.le.': RelOp.LE,
                     '.gt.': RelOp.GT, '.ge.': RelOp.GE}
            return table[s.lower()]
        table = {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
                 '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}
        return table[s]

    def tostring(self, language=Language.C):
        """Return this member rendered as operator text in ``language``."""
        if language is Language.Fortran:
            table = {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
                     RelOp.LT: '.lt.', RelOp.LE: '.le.',
                     RelOp.GT: '.gt.', RelOp.GE: '.ge.'}
        else:
            table = {RelOp.EQ: '==', RelOp.NE: '!=',
                     RelOp.LT: '<', RelOp.LE: '<=',
                     RelOp.GT: '>', RelOp.GE: '>='}
        return table[self]
class Expr:
    """Represents a Fortran expression as a op-data pair.

    Expr instances are hashable and sortable.

    ``op`` is an :class:`Op` member and ``data`` is the payload whose
    required shape depends on ``op`` (see the sanity checks in
    ``__init__``).  Arithmetic operators build normalized TERMS/FACTORS
    forms via the module-level ``as_*``/``normalize`` helpers.
    """

    def parse(s, language=Language.C):
        """Parse a Fortran expression to a Expr.
        """
        # NOTE(review): called without an instance (delegates to the
        # module-level fromstring); presumably intended as a
        # @staticmethod -- confirm against the original source.
        return fromstring(s, language=language)

    def __init__(self, op, data):
        # Validate that data has the shape required by op; every
        # branch below documents the payload convention for that op.
        assert isinstance(op, Op)

        # sanity checks
        if op is Op.INTEGER:
            # data is a 2-tuple of numeric object and a kind value
            # (default is 4)
            assert isinstance(data, tuple) and len(data) == 2
            assert isinstance(data[0], int)
            assert isinstance(data[1], (int, str)), data
        elif op is Op.REAL:
            # data is a 2-tuple of numeric object and a kind value
            # (default is 4)
            assert isinstance(data, tuple) and len(data) == 2
            assert isinstance(data[0], float)
            assert isinstance(data[1], (int, str)), data
        elif op is Op.COMPLEX:
            # data is a 2-tuple of constant expressions
            assert isinstance(data, tuple) and len(data) == 2
        elif op is Op.STRING:
            # data is a 2-tuple of quoted string and a kind value
            # (default is 1); the slice picks first+last characters to
            # check the quoting style.
            assert isinstance(data, tuple) and len(data) == 2
            assert (isinstance(data[0], str)
                    and data[0][::len(data[0])-1] in ('""', "''", '@@'))
            assert isinstance(data[1], (int, str)), data
        elif op is Op.SYMBOL:
            # data is any hashable object
            assert hash(data) is not None
        elif op in (Op.ARRAY, Op.CONCAT):
            # data is a tuple of expressions
            assert isinstance(data, tuple)
            assert all(isinstance(item, Expr) for item in data), data
        elif op in (Op.TERMS, Op.FACTORS):
            # data is {<term|base>:<coeff|exponent>} where dict values
            # are nonzero Python integers
            assert isinstance(data, dict)
        elif op is Op.APPLY:
            # data is (<function>, <operands>, <kwoperands>) where
            # operands are Expr instances
            assert isinstance(data, tuple) and len(data) == 3
            # function is any hashable object
            assert hash(data[0]) is not None
            assert isinstance(data[1], tuple)
            assert isinstance(data[2], dict)
        elif op is Op.INDEXING:
            # data is (<object>, <indices>)
            assert isinstance(data, tuple) and len(data) == 2
            # function is any hashable object
            assert hash(data[0]) is not None
        elif op is Op.TERNARY:
            # data is (<cond>, <expr1>, <expr2>)
            assert isinstance(data, tuple) and len(data) == 3
        elif op in (Op.REF, Op.DEREF):
            # data is Expr instance
            assert isinstance(data, Expr)
        elif op is Op.RELATIONAL:
            # data is (<relop>, <left>, <right>)
            assert isinstance(data, tuple) and len(data) == 3
        else:
            raise NotImplementedError(
                f'unknown op or missing sanity check: {op}')

        self.op = op
        self.data = data

    def __eq__(self, other):
        # Structural equality: same op (identity on enum members) and
        # equal payload.
        return (isinstance(other, Expr)
                and self.op is other.op
                and self.data == other.data)

    def __hash__(self):
        # Dict payloads are unhashable; convert them to sorted tuples
        # so hashing is consistent with __eq__.
        if self.op in (Op.TERMS, Op.FACTORS):
            data = tuple(sorted(self.data.items()))
        elif self.op is Op.APPLY:
            data = self.data[:2] + tuple(sorted(self.data[2].items()))
        else:
            data = self.data
        return hash((self.op, data))

    def __lt__(self, other):
        # Order first by op value (see Op), then by payload; dict
        # payloads compare as sorted item tuples, mirroring __hash__.
        if isinstance(other, Expr):
            if self.op is not other.op:
                return self.op.value < other.op.value
            if self.op in (Op.TERMS, Op.FACTORS):
                return (tuple(sorted(self.data.items()))
                        < tuple(sorted(other.data.items())))
            if self.op is Op.APPLY:
                if self.data[:2] != other.data[:2]:
                    return self.data[:2] < other.data[:2]
                return tuple(sorted(self.data[2].items())) < tuple(
                    sorted(other.data[2].items()))
            return self.data < other.data
        return NotImplemented

    # Remaining comparisons are derived from __eq__/__lt__.
    def __le__(self, other): return self == other or self < other

    def __gt__(self, other): return not (self <= other)

    def __ge__(self, other): return not (self < other)

    def __repr__(self):
        return f'{type(self).__name__}({self.op}, {self.data!r})'

    def __str__(self):
        return self.tostring()

    def tostring(self, parent_precedence=Precedence.NONE,
                 language=Language.Fortran):
        """Return a string representation of Expr.

        ``parent_precedence`` controls parenthesization: the rendered
        operand is wrapped in parenthesis when the parent binds tighter
        (see the check at the end of this method).
        """
        if self.op in (Op.INTEGER, Op.REAL):
            # Negative literals render with a leading '-', hence SUM
            # precedence; a non-default kind is appended as `_<kind>`.
            precedence = (Precedence.SUM if self.data[0] < 0
                          else Precedence.ATOM)
            r = str(self.data[0]) + (f'_{self.data[1]}'
                                     if self.data[1] != 4 else '')
        elif self.op is Op.COMPLEX:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '(' + r + ')'
            precedence = Precedence.ATOM
        elif self.op is Op.SYMBOL:
            precedence = Precedence.ATOM
            r = str(self.data)
        elif self.op is Op.STRING:
            r = self.data[0]
            if self.data[1] != 1:
                # Non-default kind is rendered as a `<kind>_` prefix.
                r = self.data[1] + '_' + r
            precedence = Precedence.ATOM
        elif self.op is Op.ARRAY:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '[' + r + ']'
            precedence = Precedence.ATOM
        elif self.op is Op.TERMS:
            # Render a sum; coefficient signs fold into ' + '/' - '
            # separators and a unit coefficient is omitted.
            terms = []
            for term, coeff in sorted(self.data.items()):
                if coeff < 0:
                    op = ' - '
                    coeff = -coeff
                else:
                    op = ' + '
                if coeff == 1:
                    term = term.tostring(Precedence.SUM, language=language)
                else:
                    if term == as_number(1):
                        term = str(coeff)
                    else:
                        term = f'{coeff} * ' + term.tostring(
                            Precedence.PRODUCT, language=language)
                if terms:
                    terms.append(op)
                elif op == ' - ':
                    # Leading negative term gets a unary minus.
                    terms.append('-')
                terms.append(term)
            r = ''.join(terms) or '0'
            precedence = Precedence.SUM if terms else Precedence.ATOM
        elif self.op is Op.FACTORS:
            # Render a product; in C, small positive exponents expand
            # to repeated multiplication, small negative exponents
            # collect into a trailing `/ (...)` divisor, and the rest
            # use pow().
            factors = []
            tail = []
            for base, exp in sorted(self.data.items()):
                op = ' * '
                if exp == 1:
                    factor = base.tostring(Precedence.PRODUCT,
                                           language=language)
                elif language is Language.C:
                    if exp in range(2, 10):
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        factor = ' * '.join([factor] * exp)
                    elif exp in range(-10, 0):
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        tail += [factor] * -exp
                        continue
                    else:
                        factor = base.tostring(Precedence.TUPLE,
                                               language=language)
                        factor = f'pow({factor}, {exp})'
                else:
                    factor = base.tostring(Precedence.POWER,
                                           language=language) + f' ** {exp}'
                if factors:
                    factors.append(op)
                factors.append(factor)
            if tail:
                if not factors:
                    factors += ['1']
                factors += ['/', '(', ' * '.join(tail), ')']
            r = ''.join(factors) or '1'
            precedence = Precedence.PRODUCT if factors else Precedence.ATOM
        elif self.op is Op.APPLY:
            name, args, kwargs = self.data
            if name is ArithOp.DIV and language is Language.C:
                # C has a native division operator; avoid a call form.
                numer, denom = [arg.tostring(Precedence.PRODUCT,
                                             language=language)
                                for arg in args]
                r = f'{numer} / {denom}'
                precedence = Precedence.PRODUCT
            else:
                args = [arg.tostring(Precedence.TUPLE, language=language)
                        for arg in args]
                args += [k + '=' + v.tostring(Precedence.NONE)
                         for k, v in kwargs.items()]
                r = f'{name}({", ".join(args)})'
                precedence = Precedence.ATOM
        elif self.op is Op.INDEXING:
            name = self.data[0]
            args = [arg.tostring(Precedence.TUPLE, language=language)
                    for arg in self.data[1:]]
            r = f'{name}[{", ".join(args)}]'
            precedence = Precedence.ATOM
        elif self.op is Op.CONCAT:
            args = [arg.tostring(Precedence.PRODUCT, language=language)
                    for arg in self.data]
            r = " // ".join(args)
            precedence = Precedence.PRODUCT
        elif self.op is Op.TERNARY:
            cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
                                             language=language)
                                  for a in self.data]
            if language is Language.C:
                r = f'({cond}?{expr1}:{expr2})'
            elif language is Language.Python:
                r = f'({expr1} if {cond} else {expr2})'
            elif language is Language.Fortran:
                r = f'merge({expr1}, {expr2}, {cond})'
            else:
                raise NotImplementedError(
                    f'tostring for {self.op} and {language}')
            precedence = Precedence.ATOM
        elif self.op is Op.REF:
            r = '&' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.DEREF:
            r = '*' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
                          else Precedence.LT)
            left = left.tostring(precedence, language=language)
            right = right.tostring(precedence, language=language)
            rop = rop.tostring(language=language)
            r = f'{left} {rop} {right}'
        else:
            raise NotImplementedError(f'tostring for op {self.op}')
        if parent_precedence.value < precedence.value:
            # If parent precedence is higher than operand precedence,
            # operand will be enclosed in parenthesis.
            return '(' + r + ')'
        return r

    def __pos__(self):
        return self

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        # Fold constants of matching op directly (promoting to the
        # larger kind); otherwise fall back to the TERMS normal form.
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    return as_number(
                        self.data[0] + other.data[0],
                        max(self.data[1], other.data[1]))
                if self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 + r2, i1 + i2)
                if self.op is Op.TERMS:
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
            # Mixed-kind constant promotion: int/real -> complex,
            # int -> real.
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self + as_complex(other)
            elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
                return as_complex(self) + other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self + as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) + other
            return as_terms(self) + as_terms(other)
        return NotImplemented

    def __radd__(self, other):
        if isinstance(other, number_types):
            return as_number(other) + self
        return NotImplemented

    def __sub__(self, other):
        return self + (-other)

    def __rsub__(self, other):
        if isinstance(other, number_types):
            return as_number(other) - self
        return NotImplemented

    def __mul__(self, other):
        # Analogous to __add__, with the FACTORS normal form as the
        # final fallback.
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    return as_number(self.data[0] * other.data[0],
                                     max(self.data[1], other.data[1]))
                elif self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)

                if self.op is Op.FACTORS:
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
                elif self.op is Op.TERMS:
                    # Distribute: product of sums expands term-by-term.
                    r = Expr(self.op, {})
                    for t1, c1 in self.data.items():
                        for t2, c2 in other.data.items():
                            _pairs_add(r.data, t1 * t2, c1 * c2)
                    return normalize(r)

            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self * as_complex(other)
            elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
                return as_complex(self) * other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self * as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) * other

            if self.op is Op.TERMS:
                return self * as_terms(other)
            elif other.op is Op.TERMS:
                return as_terms(self) * other

            return as_factors(self) * as_factors(other)
        return NotImplemented

    def __rmul__(self, other):
        if isinstance(other, number_types):
            return as_number(other) * self
        return NotImplemented

    def __pow__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            if other.op is Op.INTEGER:
                exponent = other.data[0]
                # TODO: other kind not used
                if exponent == 0:
                    return as_number(1)
                if exponent == 1:
                    return self
                if exponent > 0:
                    if self.op is Op.FACTORS:
                        # Scale all exponents rather than multiplying
                        # out repeatedly.
                        r = Expr(self.op, {})
                        for k, v in self.data.items():
                            r.data[k] = v * exponent
                        return normalize(r)
                    return self * (self ** (exponent - 1))
                elif exponent != -1:
                    return (self ** (-exponent)) ** -1
                # exponent == -1: represent as a FACTORS reciprocal.
                return Expr(Op.FACTORS, {self: exponent})
            # Non-integer exponent: keep symbolic as pow() application.
            return as_apply(ArithOp.POW, self, other)
        return NotImplemented

    def __truediv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            # Fortran / is different from Python /:
            # - `/` is a truncate operation for integer operands
            return normalize(as_apply(ArithOp.DIV, self, other))
        return NotImplemented

    def __rtruediv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            return other / self
        return NotImplemented

    def __floordiv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            # Fortran // is different from Python //:
            # - `//` is a concatenate operation for string operands
            return normalize(Expr(Op.CONCAT, (self, other)))
        return NotImplemented

    def __rfloordiv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            return other // self
        return NotImplemented

    def __call__(self, *args, **kwargs):
        # In Fortran, parenthesis () are use for both function call as
        # well as indexing operations.
        #
        # TODO: implement a method for deciding when __call__ should
        # return an INDEXING expression.
        return as_apply(self, *map(as_expr, args),
                        **dict((k, as_expr(v)) for k, v in kwargs.items()))

    def __getitem__(self, index):
        # Provided to support C indexing operations that .pyf files
        # may contain.
        index = as_expr(index)
        if not isinstance(index, tuple):
            index = index,
        if len(index) > 1:
            ewarn(f'C-index should be a single expression but got `{index}`')
        return Expr(Op.INDEXING, (self,) + index)

    def substitute(self, symbols_map):
        """Recursively substitute symbols with values in symbols map.

        Symbols map is a dictionary of symbol-expression pairs.
        """
        if self.op is Op.SYMBOL:
            value = symbols_map.get(self)
            if value is None:
                return self
            m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
            if m:
                # complement to fromstring method
                items, paren = m.groups()
                if paren in ['ROUNDDIV', 'SQUARE']:
                    return as_array(value)
                assert paren == 'ROUND', (paren, value)
            return value
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
            # Leaves with no sub-expressions: nothing to substitute.
            return self
        if self.op in (Op.ARRAY, Op.COMPLEX):
            return Expr(self.op, tuple(item.substitute(symbols_map)
                                       for item in self.data))
        if self.op is Op.CONCAT:
            return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
                                                 for item in self.data)))
        if self.op is Op.TERMS:
            # Rebuild the sum through the arithmetic operators so the
            # result is re-normalized.
            r = None
            for term, coeff in self.data.items():
                if r is None:
                    r = term.substitute(symbols_map) * coeff
                else:
                    r += term.substitute(symbols_map) * coeff
            if r is None:
                ewarn('substitute: empty TERMS expression interpreted as'
                      ' int-literal 0')
                return as_number(0)
            return r
        if self.op is Op.FACTORS:
            r = None
            for base, exponent in self.data.items():
                if r is None:
                    r = base.substitute(symbols_map) ** exponent
                else:
                    r *= base.substitute(symbols_map) ** exponent
            if r is None:
                ewarn('substitute: empty FACTORS expression interpreted'
                      ' as int-literal 1')
                return as_number(1)
            return r
        if self.op is Op.APPLY:
            target, args, kwargs = self.data
            if isinstance(target, Expr):
                target = target.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in args)
            kwargs = dict((k, v.substitute(symbols_map))
                          for k, v in kwargs.items())
            return normalize(Expr(self.op, (target, args, kwargs)))
        if self.op is Op.INDEXING:
            func = self.data[0]
            if isinstance(func, Expr):
                func = func.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in self.data[1:])
            return normalize(Expr(self.op, (func,) + args))
        if self.op is Op.TERNARY:
            operands = tuple(a.substitute(symbols_map) for a in self.data)
            return normalize(Expr(self.op, operands))
        if self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op, self.data.substitute(symbols_map)))
        if self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.substitute(symbols_map)
            right = right.substitute(symbols_map)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'substitute method for {self.op}: {self!r}')

    def traverse(self, visit, *args, **kwargs):
        """Traverse expression tree with visit function.

        The visit function is applied to an expression with given args
        and kwargs.

        Traverse call returns an expression returned by visit when not
        None, otherwise return a new normalized expression with
        traverse-visit sub-expressions.
        """
        result = visit(self, *args, **kwargs)
        if result is not None:
            # visit short-circuits the recursion for this subtree.
            return result

        if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
            return self
        elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
            return normalize(Expr(self.op, tuple(
                item.traverse(visit, *args, **kwargs)
                for item in self.data)))
        elif self.op in (Op.TERMS, Op.FACTORS):
            data = {}
            for k, v in self.data.items():
                k = k.traverse(visit, *args, **kwargs)
                v = (v.traverse(visit, *args, **kwargs)
                     if isinstance(v, Expr) else v)
                if k in data:
                    # Traversal may merge previously-distinct keys.
                    v = data[k] + v
                data[k] = v
            return normalize(Expr(self.op, data))
        elif self.op is Op.APPLY:
            obj = self.data[0]
            func = (obj.traverse(visit, *args, **kwargs)
                    if isinstance(obj, Expr) else obj)
            operands = tuple(operand.traverse(visit, *args, **kwargs)
                             for operand in self.data[1])
            kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
                              for k, v in self.data[2].items())
            return normalize(Expr(self.op, (func, operands, kwoperands)))
        elif self.op is Op.INDEXING:
            obj = self.data[0]
            obj = (obj.traverse(visit, *args, **kwargs)
                   if isinstance(obj, Expr) else obj)
            indices = tuple(index.traverse(visit, *args, **kwargs)
                            for index in self.data[1:])
            return normalize(Expr(self.op, (obj,) + indices))
        elif self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op,
                                  self.data.traverse(visit, *args, **kwargs)))
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.traverse(visit, *args, **kwargs)
            right = right.traverse(visit, *args, **kwargs)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'traverse method for {self.op}')

    def contains(self, other):
        """Check if self contains other.
        """
        found = []

        def visit(expr, found=found):
            # Once found, return expr to stop descending further.
            if found:
                return expr
            elif expr == other:
                found.append(1)
                return expr

        self.traverse(visit)

        return len(found) != 0

    def symbols(self):
        """Return a set of symbols contained in self.
        """
        found = set()

        def visit(expr, found=found):
            if expr.op is Op.SYMBOL:
                found.add(expr)

        self.traverse(visit)

        return found

    def polynomial_atoms(self):
        """Return a set of expressions used as atoms in polynomial self.
        """
        found = set()

        def visit(expr, found=found):
            if expr.op is Op.FACTORS:
                for b in expr.data:
                    b.traverse(visit)
                return expr
            if expr.op in (Op.TERMS, Op.COMPLEX):
                # Descend through sums/complex parts.
                return
            if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
                if expr.data[0] is ArithOp.POW:
                    # Only the base of a power is a polynomial atom.
                    expr.data[1][0].traverse(visit)
                    return expr
                return
            if expr.op in (Op.INTEGER, Op.REAL):
                # Numeric constants are coefficients, not atoms.
                return expr

            found.add(expr)

            if expr.op in (Op.INDEXING, Op.APPLY):
                # Do not descend into indices/arguments of an atom.
                return expr

        self.traverse(visit)

        return found

    def linear_solve(self, symbol):
        """Return a, b such that a * symbol + b == self.

        If self is not linear with respect to symbol, raise RuntimeError.
        """
        # b = self at symbol==0; a = (self - b) at symbol==1.
        b = self.substitute({symbol: as_number(0)})
        ax = self - b
        a = ax.substitute({symbol: as_number(1)})

        # Linearity check: a * symbol must reproduce ax exactly.
        zero, _ = as_numer_denom(a * symbol - ax)

        if zero != as_number(0):
            raise RuntimeError(f'not a {symbol}-linear equation:'
                               f' {a} * {symbol} + {b} == {self}')
        return a, b
def as_lt(left, right):
    """Return ``left < right`` as an Op.RELATIONAL expression."""
    payload = (RelOp.LT, left, right)
    return Expr(Op.RELATIONAL, payload)
168,567 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
    """
    Used as Expr op attribute.

    The numeric values group related kinds (constants < atoms <
    compound expressions) and, because ``Expr.__lt__`` compares
    ``self.op.value`` first, they also define the sort order between
    expressions of different kinds.
    """
    # Constant literals: data is (value, kind) except COMPLEX/STRING
    # (see the sanity checks in Expr.__init__).
    INTEGER = 10
    REAL = 12
    COMPLEX = 15
    STRING = 20
    ARRAY = 30
    # Atomic symbol: data is any hashable object.
    SYMBOL = 40
    # Compound expressions.
    TERNARY = 100
    APPLY = 200
    INDEXING = 210
    CONCAT = 220
    RELATIONAL = 300
    # Normal forms: data maps term->coefficient / base->exponent.
    TERMS = 1000
    FACTORS = 2000
    # C-style reference/dereference: data is a single Expr.
    REF = 3000
    DEREF = 3001
class RelOp(Enum):
    """
    Used in Op.RELATIONAL expression to specify the function part.
    """
    EQ = 1
    NE = 2
    LT = 3
    LE = 4
    GT = 5
    GE = 6

    def fromstring(cls, s, language=Language.C):
        # Map operator text to a RelOp member.  Fortran spellings
        # (`.eq.` etc.) are matched case-insensitively; C spellings
        # (`==` etc.) are exact.
        # NOTE(review): takes ``cls`` but no @classmethod decorator is
        # visible here -- confirm against the original source.
        if language is Language.Fortran:
            return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
                    '.lt.': RelOp.LT, '.le.': RelOp.LE,
                    '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
        return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
                '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]

    def tostring(self, language=Language.C):
        # Inverse of fromstring: render this member as operator text
        # in the requested language.
        if language is Language.Fortran:
            return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
                    RelOp.LT: '.lt.', RelOp.LE: '.le.',
                    RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
        return {RelOp.EQ: '==', RelOp.NE: '!=',
                RelOp.LT: '<', RelOp.LE: '<=',
                RelOp.GT: '>', RelOp.GE: '>='}[self]
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
Traverse call returns an expression returned by visit when not
None, otherwise return a new normalized expression with
traverse-visit sub-expressions.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
def as_le(left, right):
return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) | null |
168,568 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class RelOp(Enum):
"""
Used in Op.RELATIONAL expression to specify the function part.
"""
EQ = 1
NE = 2
LT = 3
LE = 4
GT = 5
GE = 6
def fromstring(cls, s, language=Language.C):
if language is Language.Fortran:
return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
'.lt.': RelOp.LT, '.le.': RelOp.LE,
'.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
'<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
def tostring(self, language=Language.C):
if language is Language.Fortran:
return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
RelOp.LT: '.lt.', RelOp.LE: '.le.',
RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
return {RelOp.EQ: '==', RelOp.NE: '!=',
RelOp.LT: '<', RelOp.LE: '<=',
RelOp.GT: '>', RelOp.GE: '>='}[self]
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
Traverse call returns an expression returned by visit when not
None, otherwise return a new normalized expression with
traverse-visit sub-expressions.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
def as_gt(left, right):
return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) | null |
168,569 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class RelOp(Enum):
"""
Used in Op.RELATIONAL expression to specify the function part.
"""
EQ = 1
NE = 2
LT = 3
LE = 4
GT = 5
GE = 6
def fromstring(cls, s, language=Language.C):
if language is Language.Fortran:
return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
'.lt.': RelOp.LT, '.le.': RelOp.LE,
'.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
'<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
def tostring(self, language=Language.C):
if language is Language.Fortran:
return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
RelOp.LT: '.lt.', RelOp.LE: '.le.',
RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
return {RelOp.EQ: '==', RelOp.NE: '!=',
RelOp.LT: '<', RelOp.LE: '<=',
RelOp.GT: '>', RelOp.GE: '>='}[self]
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
    """Unary plus; a Fortran expression is its own positive."""
    return self
def __neg__(self):
    """Unary minus, implemented as multiplication by -1."""
    return self * -1
def __add__(self, other):
    """Add two Fortran expressions.

    Numeric literals of the same op are folded directly; mixed
    numeric kinds are promoted (INTEGER -> REAL -> COMPLEX) before
    retrying; everything else falls back to TERMS addition.
    """
    other = as_expr(other)
    if isinstance(other, Expr):
        if self.op is other.op:
            if self.op in (Op.INTEGER, Op.REAL):
                # Fold literals; the result keeps the larger kind.
                return as_number(
                    self.data[0] + other.data[0],
                    max(self.data[1], other.data[1]))
            if self.op is Op.COMPLEX:
                r1, i1 = self.data
                r2, i2 = other.data
                return as_complex(r1 + r2, i1 + i2)
            if self.op is Op.TERMS:
                # Merge term->coefficient maps.
                r = Expr(self.op, dict(self.data))
                for k, v in other.data.items():
                    _pairs_add(r.data, k, v)
                return normalize(r)
        if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
            return self + as_complex(other)
        elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
            return as_complex(self) + other
        elif self.op is Op.REAL and other.op is Op.INTEGER:
            return self + as_real(other, kind=self.data[1])
        elif self.op is Op.INTEGER and other.op is Op.REAL:
            return as_real(self, kind=other.data[1]) + other
        return as_terms(self) + as_terms(other)
    return NotImplemented
def __radd__(self, other):
    """Reflected addition: <number> + <Expr>."""
    if not isinstance(other, number_types):
        return NotImplemented
    return as_number(other) + self
def __sub__(self, other):
    """Subtraction, expressed as addition of the negation."""
    return self + (-other)
def __rsub__(self, other):
    """Reflected subtraction: <number> - <Expr>."""
    if not isinstance(other, number_types):
        return NotImplemented
    return as_number(other) - self
def __mul__(self, other):
    """Multiply two Fortran expressions.

    Numeric literals of the same op are folded directly; mixed
    numeric kinds are promoted (INTEGER -> REAL -> COMPLEX) before
    retrying; everything else falls back to FACTORS multiplication.
    """
    other = as_expr(other)
    if isinstance(other, Expr):
        if self.op is other.op:
            if self.op in (Op.INTEGER, Op.REAL):
                # Fold literals; the result keeps the larger kind.
                return as_number(self.data[0] * other.data[0],
                                 max(self.data[1], other.data[1]))
            elif self.op is Op.COMPLEX:
                r1, i1 = self.data
                r2, i2 = other.data
                # (r1 + i1*j) * (r2 + i2*j)
                return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
            if self.op is Op.FACTORS:
                # Merge base->exponent maps by adding exponents.
                r = Expr(self.op, dict(self.data))
                for k, v in other.data.items():
                    _pairs_add(r.data, k, v)
                return normalize(r)
            elif self.op is Op.TERMS:
                # Distribute: multiply every pair of terms.
                r = Expr(self.op, {})
                for t1, c1 in self.data.items():
                    for t2, c2 in other.data.items():
                        _pairs_add(r.data, t1 * t2, c1 * c2)
                return normalize(r)
        if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
            return self * as_complex(other)
        elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
            return as_complex(self) * other
        elif self.op is Op.REAL and other.op is Op.INTEGER:
            return self * as_real(other, kind=self.data[1])
        elif self.op is Op.INTEGER and other.op is Op.REAL:
            return as_real(self, kind=other.data[1]) * other
        if self.op is Op.TERMS:
            return self * as_terms(other)
        elif other.op is Op.TERMS:
            return as_terms(self) * other
        return as_factors(self) * as_factors(other)
    return NotImplemented
def __rmul__(self, other):
    """Reflected multiplication: <number> * <Expr>."""
    if not isinstance(other, number_types):
        return NotImplemented
    return as_number(other) * self
def __pow__(self, other):
    """Raise expression to a power.

    Integer exponents are expanded structurally; non-integer
    exponents become an ArithOp.POW application.
    """
    other = as_expr(other)
    if isinstance(other, Expr):
        if other.op is Op.INTEGER:
            exponent = other.data[0]
            # TODO: other kind not used
            if exponent == 0:
                return as_number(1)
            if exponent == 1:
                return self
            if exponent > 0:
                if self.op is Op.FACTORS:
                    # Scale every exponent in the base->exponent map.
                    r = Expr(self.op, {})
                    for k, v in self.data.items():
                        r.data[k] = v * exponent
                    return normalize(r)
                # Expand by repeated multiplication.
                return self * (self ** (exponent - 1))
            elif exponent != -1:
                # Negative powers other than -1: invert the positive power.
                return (self ** (-exponent)) ** -1
            return Expr(Op.FACTORS, {self: exponent})
        return as_apply(ArithOp.POW, self, other)
    return NotImplemented
def __truediv__(self, other):
    """Division; represented as an ArithOp.DIV application because
    Fortran `/` truncates for integer operands (unlike Python `/`)."""
    divisor = as_expr(other)
    if not isinstance(divisor, Expr):
        return NotImplemented
    return normalize(as_apply(ArithOp.DIV, self, divisor))
def __rtruediv__(self, other):
    """Reflected division: <obj> / <Expr>."""
    dividend = as_expr(other)
    if not isinstance(dividend, Expr):
        return NotImplemented
    return dividend / self
def __floordiv__(self, other):
    """Concatenation: Fortran `//` joins string operands
    (this is not Python floor division)."""
    rhs = as_expr(other)
    if not isinstance(rhs, Expr):
        return NotImplemented
    return normalize(Expr(Op.CONCAT, (self, rhs)))
def __rfloordiv__(self, other):
    """Reflected concatenation: <obj> // <Expr>."""
    lhs = as_expr(other)
    if not isinstance(lhs, Expr):
        return NotImplemented
    return lhs // self
def __call__(self, *args, **kwargs):
    """Apply self as a function, converting all arguments to Expr.

    In Fortran, parentheses () are used both for function calls and
    for indexing operations; currently a call expression is built.

    TODO: implement a method for deciding when __call__ should
    return an INDEXING expression.
    """
    converted_kwargs = {key: as_expr(value)
                        for key, value in kwargs.items()}
    return as_apply(self, *map(as_expr, args), **converted_kwargs)
def __getitem__(self, index):
    """Build a C-style indexing expression (as found in .pyf files)."""
    index = as_expr(index)
    if not isinstance(index, tuple):
        index = (index,)
    if len(index) > 1:
        ewarn(f'C-index should be a single expression but got `{index}`')
    return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
    """Recursively substitute symbols with values in symbols map.

    Symbols map is a dictionary of symbol-expression pairs.
    """
    if self.op is Op.SYMBOL:
        value = symbols_map.get(self)
        if value is None:
            # Symbol not in the map: leave it unchanged.
            return self
        m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
        if m:
            # complement to fromstring method
            items, paren = m.groups()
            if paren in ['ROUNDDIV', 'SQUARE']:
                # Bracketed item lists become arrays.
                return as_array(value)
            assert paren == 'ROUND', (paren, value)
        return value
    if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
        # Constants contain no symbols.
        return self
    if self.op in (Op.ARRAY, Op.COMPLEX):
        return Expr(self.op, tuple(item.substitute(symbols_map)
                                   for item in self.data))
    if self.op is Op.CONCAT:
        return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
                                             for item in self.data)))
    if self.op is Op.TERMS:
        # Rebuild the sum term by term so operator overloads renormalize.
        r = None
        for term, coeff in self.data.items():
            if r is None:
                r = term.substitute(symbols_map) * coeff
            else:
                r += term.substitute(symbols_map) * coeff
        if r is None:
            ewarn('substitute: empty TERMS expression interpreted as'
                  ' int-literal 0')
            return as_number(0)
        return r
    if self.op is Op.FACTORS:
        # Rebuild the product factor by factor.
        r = None
        for base, exponent in self.data.items():
            if r is None:
                r = base.substitute(symbols_map) ** exponent
            else:
                r *= base.substitute(symbols_map) ** exponent
        if r is None:
            ewarn('substitute: empty FACTORS expression interpreted'
                  ' as int-literal 1')
            return as_number(1)
        return r
    if self.op is Op.APPLY:
        target, args, kwargs = self.data
        if isinstance(target, Expr):
            target = target.substitute(symbols_map)
        args = tuple(a.substitute(symbols_map) for a in args)
        kwargs = dict((k, v.substitute(symbols_map))
                      for k, v in kwargs.items())
        return normalize(Expr(self.op, (target, args, kwargs)))
    if self.op is Op.INDEXING:
        func = self.data[0]
        if isinstance(func, Expr):
            func = func.substitute(symbols_map)
        args = tuple(a.substitute(symbols_map) for a in self.data[1:])
        return normalize(Expr(self.op, (func,) + args))
    if self.op is Op.TERNARY:
        operands = tuple(a.substitute(symbols_map) for a in self.data)
        return normalize(Expr(self.op, operands))
    if self.op in (Op.REF, Op.DEREF):
        return normalize(Expr(self.op, self.data.substitute(symbols_map)))
    if self.op is Op.RELATIONAL:
        rop, left, right = self.data
        left = left.substitute(symbols_map)
        right = right.substitute(symbols_map)
        return normalize(Expr(self.op, (rop, left, right)))
    raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
    """Traverse expression tree with visit function.

    The visit function is applied to an expression with given args
    and kwargs.

    Traverse call returns an expression returned by visit when not
    None, otherwise return a new normalized expression with
    traverse-visit sub-expressions.
    """
    result = visit(self, *args, **kwargs)
    if result is not None:
        # visit short-circuits the traversal for this subtree.
        return result
    if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
        # Leaves: nothing to descend into.
        return self
    elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
        return normalize(Expr(self.op, tuple(
            item.traverse(visit, *args, **kwargs)
            for item in self.data)))
    elif self.op in (Op.TERMS, Op.FACTORS):
        data = {}
        for k, v in self.data.items():
            k = k.traverse(visit, *args, **kwargs)
            v = (v.traverse(visit, *args, **kwargs)
                 if isinstance(v, Expr) else v)
            if k in data:
                # Two keys may collapse to one after visiting; merge.
                v = data[k] + v
            data[k] = v
        return normalize(Expr(self.op, data))
    elif self.op is Op.APPLY:
        obj = self.data[0]
        func = (obj.traverse(visit, *args, **kwargs)
                if isinstance(obj, Expr) else obj)
        operands = tuple(operand.traverse(visit, *args, **kwargs)
                         for operand in self.data[1])
        kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
                          for k, v in self.data[2].items())
        return normalize(Expr(self.op, (func, operands, kwoperands)))
    elif self.op is Op.INDEXING:
        obj = self.data[0]
        obj = (obj.traverse(visit, *args, **kwargs)
               if isinstance(obj, Expr) else obj)
        indices = tuple(index.traverse(visit, *args, **kwargs)
                        for index in self.data[1:])
        return normalize(Expr(self.op, (obj,) + indices))
    elif self.op in (Op.REF, Op.DEREF):
        return normalize(Expr(self.op,
                              self.data.traverse(visit, *args, **kwargs)))
    elif self.op is Op.RELATIONAL:
        rop, left, right = self.data
        left = left.traverse(visit, *args, **kwargs)
        right = right.traverse(visit, *args, **kwargs)
        return normalize(Expr(self.op, (rop, left, right)))
    raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
    """Return True if *other* occurs anywhere inside self."""
    matches = []

    def visit(expr, matches=matches):
        # Returning expr stops traverse from descending further.
        if matches or expr == other:
            if not matches:
                matches.append(1)
            return expr

    self.traverse(visit)
    return bool(matches)
def symbols(self):
    """Return the set of SYMBOL expressions contained in self."""
    collected = set()

    def visit(expr, collected=collected):
        if expr.op is Op.SYMBOL:
            collected.add(expr)

    self.traverse(visit)
    return collected
def polynomial_atoms(self):
    """Return a set of expressions used as atoms in polynomial self.
    """
    found = set()

    def visit(expr, found=found):
        if expr.op is Op.FACTORS:
            # Bases of a factor map are atom candidates.
            for b in expr.data:
                b.traverse(visit)
            return expr
        if expr.op in (Op.TERMS, Op.COMPLEX):
            # Keep descending into sums/components.
            return
        if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
            if expr.data[0] is ArithOp.POW:
                # Only the base of a power can be an atom.
                expr.data[1][0].traverse(visit)
                return expr
            return
        if expr.op in (Op.INTEGER, Op.REAL):
            # Numeric literals are coefficients, not atoms.
            return expr
        found.add(expr)
        if expr.op in (Op.INDEXING, Op.APPLY):
            # The call/indexing itself is the atom; skip its arguments.
            return expr

    self.traverse(visit)
    return found
def linear_solve(self, symbol):
    """Return a, b such that a * symbol + b == self.

    If self is not linear with respect to symbol, raise RuntimeError.
    """
    # b is self evaluated at symbol == 0; the coefficient a is then
    # recovered by evaluating the remainder at symbol == 1.
    b = self.substitute({symbol: as_number(0)})
    ax = self - b
    a = ax.substitute({symbol: as_number(1)})
    # Linearity check: a * symbol must reproduce the remainder exactly.
    zero, _ = as_numer_denom(a * symbol - ax)
    if zero != as_number(0):
        raise RuntimeError(f'not a {symbol}-linear equation:'
                           f' {a} * {symbol} + {b} == {self}')
    return a, b
def as_ge(left, right):
    """Return a RELATIONAL expression `left >= right`."""
    # Fix: stripped the dataset-table residue ("| null") that had been
    # fused onto the end of the return statement, which made the line
    # a syntax error.
    return Expr(Op.RELATIONAL, (RelOp.GE, left, right))
import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
    """
    Used as Expr op attribute.
    """
    # Constant leaves.
    INTEGER = 10
    REAL = 12
    COMPLEX = 15
    STRING = 20
    ARRAY = 30
    SYMBOL = 40
    # Composite expressions.
    TERNARY = 100
    APPLY = 200
    INDEXING = 210
    CONCAT = 220
    RELATIONAL = 300
    # Normalized arithmetic containers: {term: coeff} / {base: exponent}.
    TERMS = 1000
    FACTORS = 2000
    # C address-of / dereference operations (for .pyf C expressions).
    REF = 3000
    DEREF = 3001
class OpError(Exception):
    """Exception raised for invalid operations on Expr objects."""
class Expr:
    """Represents a Fortran expression as a op-data pair.

    Expr instances are hashable and sortable.
    """

    def parse(s, language=Language.C):
        """Parse a Fortran expression to a Expr.
        """
        # NOTE(review): no `self`/`cls` parameter -- this reads like a
        # @staticmethod whose decorator was lost in transit; confirm
        # against the original source.
        return fromstring(s, language=language)

    def __init__(self, op, data):
        assert isinstance(op, Op)

        # sanity checks
        if op is Op.INTEGER:
            # data is a 2-tuple of numeric object and a kind value
            # (default is 4)
            assert isinstance(data, tuple) and len(data) == 2
            assert isinstance(data[0], int)
            assert isinstance(data[1], (int, str)), data
        elif op is Op.REAL:
            # data is a 2-tuple of numeric object and a kind value
            # (default is 4)
            assert isinstance(data, tuple) and len(data) == 2
            assert isinstance(data[0], float)
            assert isinstance(data[1], (int, str)), data
        elif op is Op.COMPLEX:
            # data is a 2-tuple of constant expressions
            assert isinstance(data, tuple) and len(data) == 2
        elif op is Op.STRING:
            # data is a 2-tuple of quoted string and a kind value
            # (default is 1)
            assert isinstance(data, tuple) and len(data) == 2
            # data[0][::len(data[0])-1] picks the first and last
            # characters: both quotes must match ('', "", or @@).
            assert (isinstance(data[0], str)
                    and data[0][::len(data[0])-1] in ('""', "''", '@@'))
            assert isinstance(data[1], (int, str)), data
        elif op is Op.SYMBOL:
            # data is any hashable object
            assert hash(data) is not None
        elif op in (Op.ARRAY, Op.CONCAT):
            # data is a tuple of expressions
            assert isinstance(data, tuple)
            assert all(isinstance(item, Expr) for item in data), data
        elif op in (Op.TERMS, Op.FACTORS):
            # data is {<term|base>:<coeff|exponent>} where dict values
            # are nonzero Python integers
            assert isinstance(data, dict)
        elif op is Op.APPLY:
            # data is (<function>, <operands>, <kwoperands>) where
            # operands are Expr instances
            assert isinstance(data, tuple) and len(data) == 3
            # function is any hashable object
            assert hash(data[0]) is not None
            assert isinstance(data[1], tuple)
            assert isinstance(data[2], dict)
        elif op is Op.INDEXING:
            # data is (<object>, <indices>)
            assert isinstance(data, tuple) and len(data) == 2
            # function is any hashable object
            assert hash(data[0]) is not None
        elif op is Op.TERNARY:
            # data is (<cond>, <expr1>, <expr2>)
            assert isinstance(data, tuple) and len(data) == 3
        elif op in (Op.REF, Op.DEREF):
            # data is Expr instance
            assert isinstance(data, Expr)
        elif op is Op.RELATIONAL:
            # data is (<relop>, <left>, <right>)
            assert isinstance(data, tuple) and len(data) == 3
        else:
            raise NotImplementedError(
                f'unknown op or missing sanity check: {op}')

        self.op = op
        self.data = data

    def __eq__(self, other):
        """Structural equality: same op and same data."""
        return (isinstance(other, Expr)
                and self.op is other.op
                and self.data == other.data)

    def __hash__(self):
        # Dict-valued data must be converted to a sorted tuple so the
        # hash is order-independent and consistent with __eq__.
        if self.op in (Op.TERMS, Op.FACTORS):
            data = tuple(sorted(self.data.items()))
        elif self.op is Op.APPLY:
            data = self.data[:2] + tuple(sorted(self.data[2].items()))
        else:
            data = self.data
        return hash((self.op, data))

    def __lt__(self, other):
        """Total order: first by op value, then by (sorted) data."""
        if isinstance(other, Expr):
            if self.op is not other.op:
                return self.op.value < other.op.value
            if self.op in (Op.TERMS, Op.FACTORS):
                return (tuple(sorted(self.data.items()))
                        < tuple(sorted(other.data.items())))
            if self.op is Op.APPLY:
                if self.data[:2] != other.data[:2]:
                    return self.data[:2] < other.data[:2]
                return tuple(sorted(self.data[2].items())) < tuple(
                    sorted(other.data[2].items()))
            return self.data < other.data
        return NotImplemented

    def __le__(self, other): return self == other or self < other

    def __gt__(self, other): return not (self <= other)

    def __ge__(self, other): return not (self < other)

    def __repr__(self):
        return f'{type(self).__name__}({self.op}, {self.data!r})'

    def __str__(self):
        return self.tostring()

    def tostring(self, parent_precedence=Precedence.NONE,
                 language=Language.Fortran):
        """Return a string representation of Expr.
        """
        if self.op in (Op.INTEGER, Op.REAL):
            # Negative literals bind like a sum (leading minus sign).
            precedence = (Precedence.SUM if self.data[0] < 0
                          else Precedence.ATOM)
            # Non-default kinds are rendered with a `_kind` suffix.
            r = str(self.data[0]) + (f'_{self.data[1]}'
                                     if self.data[1] != 4 else '')
        elif self.op is Op.COMPLEX:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '(' + r + ')'
            precedence = Precedence.ATOM
        elif self.op is Op.SYMBOL:
            precedence = Precedence.ATOM
            r = str(self.data)
        elif self.op is Op.STRING:
            r = self.data[0]
            if self.data[1] != 1:
                # NOTE(review): assumes a non-default kind is a
                # string-valued kind parameter here -- confirm.
                r = self.data[1] + '_' + r
            precedence = Precedence.ATOM
        elif self.op is Op.ARRAY:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '[' + r + ']'
            precedence = Precedence.ATOM
        elif self.op is Op.TERMS:
            terms = []
            for term, coeff in sorted(self.data.items()):
                if coeff < 0:
                    op = ' - '
                    coeff = -coeff
                else:
                    op = ' + '
                if coeff == 1:
                    term = term.tostring(Precedence.SUM, language=language)
                else:
                    if term == as_number(1):
                        term = str(coeff)
                    else:
                        term = f'{coeff} * ' + term.tostring(
                            Precedence.PRODUCT, language=language)
                if terms:
                    terms.append(op)
                elif op == ' - ':
                    # Leading term keeps only a unary minus.
                    terms.append('-')
                terms.append(term)
            r = ''.join(terms) or '0'
            precedence = Precedence.SUM if terms else Precedence.ATOM
        elif self.op is Op.FACTORS:
            factors = []
            tail = []  # collects denominator factors for C output
            for base, exp in sorted(self.data.items()):
                op = ' * '
                if exp == 1:
                    factor = base.tostring(Precedence.PRODUCT,
                                           language=language)
                elif language is Language.C:
                    if exp in range(2, 10):
                        # Small positive powers: expand as repeated products.
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        factor = ' * '.join([factor] * exp)
                    elif exp in range(-10, 0):
                        # Small negative powers go to the denominator tail.
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        tail += [factor] * -exp
                        continue
                    else:
                        factor = base.tostring(Precedence.TUPLE,
                                               language=language)
                        factor = f'pow({factor}, {exp})'
                else:
                    factor = base.tostring(Precedence.POWER,
                                           language=language) + f' ** {exp}'
                if factors:
                    factors.append(op)
                factors.append(factor)
            if tail:
                if not factors:
                    factors += ['1']
                factors += ['/', '(', ' * '.join(tail), ')']
            r = ''.join(factors) or '1'
            precedence = Precedence.PRODUCT if factors else Precedence.ATOM
        elif self.op is Op.APPLY:
            name, args, kwargs = self.data
            if name is ArithOp.DIV and language is Language.C:
                numer, denom = [arg.tostring(Precedence.PRODUCT,
                                             language=language)
                                for arg in args]
                r = f'{numer} / {denom}'
                precedence = Precedence.PRODUCT
            else:
                args = [arg.tostring(Precedence.TUPLE, language=language)
                        for arg in args]
                args += [k + '=' + v.tostring(Precedence.NONE)
                         for k, v in kwargs.items()]
                r = f'{name}({", ".join(args)})'
                precedence = Precedence.ATOM
        elif self.op is Op.INDEXING:
            name = self.data[0]
            args = [arg.tostring(Precedence.TUPLE, language=language)
                    for arg in self.data[1:]]
            r = f'{name}[{", ".join(args)}]'
            precedence = Precedence.ATOM
        elif self.op is Op.CONCAT:
            args = [arg.tostring(Precedence.PRODUCT, language=language)
                    for arg in self.data]
            r = " // ".join(args)
            precedence = Precedence.PRODUCT
        elif self.op is Op.TERNARY:
            cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
                                             language=language)
                                  for a in self.data]
            if language is Language.C:
                r = f'({cond}?{expr1}:{expr2})'
            elif language is Language.Python:
                r = f'({expr1} if {cond} else {expr2})'
            elif language is Language.Fortran:
                r = f'merge({expr1}, {expr2}, {cond})'
            else:
                raise NotImplementedError(
                    f'tostring for {self.op} and {language}')
            precedence = Precedence.ATOM
        elif self.op is Op.REF:
            r = '&' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.DEREF:
            r = '*' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
                          else Precedence.LT)
            left = left.tostring(precedence, language=language)
            right = right.tostring(precedence, language=language)
            rop = rop.tostring(language=language)
            r = f'{left} {rop} {right}'
        else:
            raise NotImplementedError(f'tostring for op {self.op}')
        if parent_precedence.value < precedence.value:
            # If parent precedence is higher than operand precedence,
            # operand will be enclosed in parenthesis.
            return '(' + r + ')'
        return r

    def __pos__(self):
        """Unary plus; an expression is its own positive."""
        return self

    def __neg__(self):
        """Unary minus, implemented as multiplication by -1."""
        return self * -1

    def __add__(self, other):
        """Add; literals fold, mixed kinds promote, else TERMS addition."""
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    return as_number(
                        self.data[0] + other.data[0],
                        max(self.data[1], other.data[1]))
                if self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 + r2, i1 + i2)
                if self.op is Op.TERMS:
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self + as_complex(other)
            elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
                return as_complex(self) + other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self + as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) + other
            return as_terms(self) + as_terms(other)
        return NotImplemented

    def __radd__(self, other):
        if isinstance(other, number_types):
            return as_number(other) + self
        return NotImplemented

    def __sub__(self, other):
        return self + (-other)

    def __rsub__(self, other):
        if isinstance(other, number_types):
            return as_number(other) - self
        return NotImplemented

    def __mul__(self, other):
        """Multiply; literals fold, mixed kinds promote, else FACTORS."""
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    return as_number(self.data[0] * other.data[0],
                                     max(self.data[1], other.data[1]))
                elif self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
                if self.op is Op.FACTORS:
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
                elif self.op is Op.TERMS:
                    # Distribute: multiply every pair of terms.
                    r = Expr(self.op, {})
                    for t1, c1 in self.data.items():
                        for t2, c2 in other.data.items():
                            _pairs_add(r.data, t1 * t2, c1 * c2)
                    return normalize(r)
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self * as_complex(other)
            elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
                return as_complex(self) * other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self * as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) * other
            if self.op is Op.TERMS:
                return self * as_terms(other)
            elif other.op is Op.TERMS:
                return as_terms(self) * other
            return as_factors(self) * as_factors(other)
        return NotImplemented

    def __rmul__(self, other):
        if isinstance(other, number_types):
            return as_number(other) * self
        return NotImplemented

    def __pow__(self, other):
        """Power; integer exponents expand structurally, else POW apply."""
        other = as_expr(other)
        if isinstance(other, Expr):
            if other.op is Op.INTEGER:
                exponent = other.data[0]
                # TODO: other kind not used
                if exponent == 0:
                    return as_number(1)
                if exponent == 1:
                    return self
                if exponent > 0:
                    if self.op is Op.FACTORS:
                        r = Expr(self.op, {})
                        for k, v in self.data.items():
                            r.data[k] = v * exponent
                        return normalize(r)
                    return self * (self ** (exponent - 1))
                elif exponent != -1:
                    return (self ** (-exponent)) ** -1
                return Expr(Op.FACTORS, {self: exponent})
            return as_apply(ArithOp.POW, self, other)
        return NotImplemented

    def __truediv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            # Fortran / is different from Python /:
            # - `/` is a truncate operation for integer operands
            return normalize(as_apply(ArithOp.DIV, self, other))
        return NotImplemented

    def __rtruediv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            return other / self
        return NotImplemented

    def __floordiv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            # Fortran // is different from Python //:
            # - `//` is a concatenate operation for string operands
            return normalize(Expr(Op.CONCAT, (self, other)))
        return NotImplemented

    def __rfloordiv__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            return other // self
        return NotImplemented

    def __call__(self, *args, **kwargs):
        # In Fortran, parenthesis () are use for both function call as
        # well as indexing operations.
        #
        # TODO: implement a method for deciding when __call__ should
        # return an INDEXING expression.
        return as_apply(self, *map(as_expr, args),
                        **dict((k, as_expr(v)) for k, v in kwargs.items()))

    def __getitem__(self, index):
        # Provided to support C indexing operations that .pyf files
        # may contain.
        index = as_expr(index)
        if not isinstance(index, tuple):
            index = index,
        if len(index) > 1:
            ewarn(f'C-index should be a single expression but got `{index}`')
        return Expr(Op.INDEXING, (self,) + index)

    def substitute(self, symbols_map):
        """Recursively substitute symbols with values in symbols map.

        Symbols map is a dictionary of symbol-expression pairs.
        """
        if self.op is Op.SYMBOL:
            value = symbols_map.get(self)
            if value is None:
                return self
            m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
            if m:
                # complement to fromstring method
                items, paren = m.groups()
                if paren in ['ROUNDDIV', 'SQUARE']:
                    return as_array(value)
                assert paren == 'ROUND', (paren, value)
            return value
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
            return self
        if self.op in (Op.ARRAY, Op.COMPLEX):
            return Expr(self.op, tuple(item.substitute(symbols_map)
                                       for item in self.data))
        if self.op is Op.CONCAT:
            return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
                                                 for item in self.data)))
        if self.op is Op.TERMS:
            r = None
            for term, coeff in self.data.items():
                if r is None:
                    r = term.substitute(symbols_map) * coeff
                else:
                    r += term.substitute(symbols_map) * coeff
            if r is None:
                ewarn('substitute: empty TERMS expression interpreted as'
                      ' int-literal 0')
                return as_number(0)
            return r
        if self.op is Op.FACTORS:
            r = None
            for base, exponent in self.data.items():
                if r is None:
                    r = base.substitute(symbols_map) ** exponent
                else:
                    r *= base.substitute(symbols_map) ** exponent
            if r is None:
                ewarn('substitute: empty FACTORS expression interpreted'
                      ' as int-literal 1')
                return as_number(1)
            return r
        if self.op is Op.APPLY:
            target, args, kwargs = self.data
            if isinstance(target, Expr):
                target = target.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in args)
            kwargs = dict((k, v.substitute(symbols_map))
                          for k, v in kwargs.items())
            return normalize(Expr(self.op, (target, args, kwargs)))
        if self.op is Op.INDEXING:
            func = self.data[0]
            if isinstance(func, Expr):
                func = func.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in self.data[1:])
            return normalize(Expr(self.op, (func,) + args))
        if self.op is Op.TERNARY:
            operands = tuple(a.substitute(symbols_map) for a in self.data)
            return normalize(Expr(self.op, operands))
        if self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op, self.data.substitute(symbols_map)))
        if self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.substitute(symbols_map)
            right = right.substitute(symbols_map)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'substitute method for {self.op}: {self!r}')

    def traverse(self, visit, *args, **kwargs):
        """Traverse expression tree with visit function.

        The visit function is applied to an expression with given args
        and kwargs.

        Traverse call returns an expression returned by visit when not
        None, otherwise return a new normalized expression with
        traverse-visit sub-expressions.
        """
        result = visit(self, *args, **kwargs)
        if result is not None:
            return result
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
            return self
        elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
            return normalize(Expr(self.op, tuple(
                item.traverse(visit, *args, **kwargs)
                for item in self.data)))
        elif self.op in (Op.TERMS, Op.FACTORS):
            data = {}
            for k, v in self.data.items():
                k = k.traverse(visit, *args, **kwargs)
                v = (v.traverse(visit, *args, **kwargs)
                     if isinstance(v, Expr) else v)
                if k in data:
                    # Keys may collapse after visiting; merge values.
                    v = data[k] + v
                data[k] = v
            return normalize(Expr(self.op, data))
        elif self.op is Op.APPLY:
            obj = self.data[0]
            func = (obj.traverse(visit, *args, **kwargs)
                    if isinstance(obj, Expr) else obj)
            operands = tuple(operand.traverse(visit, *args, **kwargs)
                             for operand in self.data[1])
            kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
                              for k, v in self.data[2].items())
            return normalize(Expr(self.op, (func, operands, kwoperands)))
        elif self.op is Op.INDEXING:
            obj = self.data[0]
            obj = (obj.traverse(visit, *args, **kwargs)
                   if isinstance(obj, Expr) else obj)
            indices = tuple(index.traverse(visit, *args, **kwargs)
                            for index in self.data[1:])
            return normalize(Expr(self.op, (obj,) + indices))
        elif self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op,
                                  self.data.traverse(visit, *args, **kwargs)))
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.traverse(visit, *args, **kwargs)
            right = right.traverse(visit, *args, **kwargs)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'traverse method for {self.op}')

    def contains(self, other):
        """Check if self contains other.
        """
        found = []

        def visit(expr, found=found):
            if found:
                return expr
            elif expr == other:
                found.append(1)
                return expr

        self.traverse(visit)
        return len(found) != 0

    def symbols(self):
        """Return a set of symbols contained in self.
        """
        found = set()

        def visit(expr, found=found):
            if expr.op is Op.SYMBOL:
                found.add(expr)

        self.traverse(visit)
        return found

    def polynomial_atoms(self):
        """Return a set of expressions used as atoms in polynomial self.
        """
        found = set()

        def visit(expr, found=found):
            if expr.op is Op.FACTORS:
                for b in expr.data:
                    b.traverse(visit)
                return expr
            if expr.op in (Op.TERMS, Op.COMPLEX):
                return
            if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
                if expr.data[0] is ArithOp.POW:
                    # Only the base of a power is an atom candidate.
                    expr.data[1][0].traverse(visit)
                    return expr
                return
            if expr.op in (Op.INTEGER, Op.REAL):
                return expr
            found.add(expr)
            if expr.op in (Op.INDEXING, Op.APPLY):
                return expr

        self.traverse(visit)
        return found

    def linear_solve(self, symbol):
        """Return a, b such that a * symbol + b == self.

        If self is not linear with respect to symbol, raise RuntimeError.
        """
        # b is self at symbol == 0; a is the remainder at symbol == 1.
        b = self.substitute({symbol: as_number(0)})
        ax = self - b
        a = ax.substitute({symbol: as_number(1)})
        zero, _ = as_numer_denom(a * symbol - ax)
        if zero != as_number(0):
            raise RuntimeError(f'not a {symbol}-linear equation:'
                               f' {a} * {symbol} + {b} == {self}')
        return a, b
def normalize(obj):
    """Normalize Expr and apply basic evaluation methods.
    """
    if not isinstance(obj, Expr):
        return obj
    if obj.op is Op.TERMS:
        d = {}
        for t, c in obj.data.items():
            if c == 0:
                # Drop zero terms.
                continue
            if t.op is Op.COMPLEX and c != 1:
                # Fold the coefficient into the complex constant.
                t = t * c
                c = 1
            if t.op is Op.TERMS:
                # Flatten nested sums.
                for t1, c1 in t.data.items():
                    _pairs_add(d, t1, c1 * c)
            else:
                _pairs_add(d, t, c)
        if len(d) == 0:
            # TODO: determine correct kind
            return as_number(0)
        elif len(d) == 1:
            (t, c), = d.items()
            if c == 1:
                # A single term with coefficient 1 is just that term.
                return t
        return Expr(Op.TERMS, d)
    if obj.op is Op.FACTORS:
        coeff = 1
        d = {}
        for b, e in obj.data.items():
            if e == 0:
                continue
            if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1:
                # expand integer powers of sums
                b = b * (b ** (e - 1))
                e = 1
            if b.op in (Op.INTEGER, Op.REAL):
                # Fold numeric bases into a single coefficient.
                if e == 1:
                    coeff *= b.data[0]
                elif e > 0:
                    coeff *= b.data[0] ** e
                else:
                    _pairs_add(d, b, e)
            elif b.op is Op.FACTORS:
                if e > 0 and isinstance(e, integer_types):
                    # Flatten nested products.
                    for b1, e1 in b.data.items():
                        _pairs_add(d, b1, e1 * e)
                else:
                    _pairs_add(d, b, e)
            else:
                _pairs_add(d, b, e)
        if len(d) == 0 or coeff == 0:
            # TODO: determine correct kind
            assert isinstance(coeff, number_types)
            return as_number(coeff)
        elif len(d) == 1:
            (b, e), = d.items()
            if e == 1:
                t = b
            else:
                t = Expr(Op.FACTORS, d)
            if coeff == 1:
                return t
            return Expr(Op.TERMS, {t: coeff})
        elif coeff == 1:
            return Expr(Op.FACTORS, d)
        else:
            return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff})
    if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
        dividend, divisor = obj.data[1]
        t1, c1 = as_term_coeff(dividend)
        t2, c2 = as_term_coeff(divisor)
        if isinstance(c1, integer_types) and isinstance(c2, integer_types):
            # Cancel the common integer factor of the coefficients.
            g = gcd(c1, c2)
            c1, c2 = c1//g, c2//g
        else:
            c1, c2 = c1/c2, 1
        if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV:
            # (a/b) / d  ->  a / (b*d)
            numer = t1.data[1][0] * c1
            denom = t1.data[1][1] * t2 * c2
            return as_apply(ArithOp.DIV, numer, denom)
        if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV:
            # a / (c/d)  ->  (a*d) / c
            numer = t2.data[1][1] * t1 * c1
            denom = t2.data[1][0] * c2
            return as_apply(ArithOp.DIV, numer, denom)
        # Cancel common symbolic factors between numerator and denominator.
        d = dict(as_factors(t1).data)
        for b, e in as_factors(t2).data.items():
            _pairs_add(d, b, -e)
        numer, denom = {}, {}
        for b, e in d.items():
            if e > 0:
                numer[b] = e
            else:
                denom[b] = -e
        numer = normalize(Expr(Op.FACTORS, numer)) * c1
        denom = normalize(Expr(Op.FACTORS, denom)) * c2
        if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1:
            # TODO: denom kind not used
            return numer
        return as_apply(ArithOp.DIV, numer, denom)
    if obj.op is Op.CONCAT:
        lst = [obj.data[0]]
        for s in obj.data[1:]:
            last = lst[-1]
            if (
                    last.op is Op.STRING
                    and s.op is Op.STRING
                    and last.data[0][0] in '"\''
                    and s.data[0][0] == last.data[0][-1]
            ):
                # Merge adjacent string literals with matching quotes.
                new_last = as_string(last.data[0][:-1] + s.data[0][1:],
                                     max(last.data[1], s.data[1]))
                lst[-1] = new_last
            else:
                lst.append(s)
        if len(lst) == 1:
            return lst[0]
        return Expr(Op.CONCAT, tuple(lst))
    if obj.op is Op.TERNARY:
        cond, expr1, expr2 = map(normalize, obj.data)
        if cond.op is Op.INTEGER:
            # Constant condition: select the branch at normalization time.
            return expr1 if cond.data[0] else expr2
        return Expr(Op.TERNARY, (cond, expr1, expr2))
    return obj
def as_integer(obj, kind=4):
    """Return object as INTEGER constant.
    """
    if isinstance(obj, int):
        return Expr(Op.INTEGER, (obj, kind))
    if isinstance(obj, Expr) and obj.op is Op.INTEGER:
        return obj
    raise OpError(f'cannot convert {obj} to INTEGER constant')
def as_real(obj, kind=4):
"""Return object as REAL constant.
"""
if isinstance(obj, int):
return Expr(Op.REAL, (float(obj), kind))
if isinstance(obj, float):
return Expr(Op.REAL, (obj, kind))
if isinstance(obj, Expr):
if obj.op is Op.REAL:
return obj
elif obj.op is Op.INTEGER:
return Expr(Op.REAL, (float(obj.data[0]), kind))
raise OpError(f'cannot convert {obj} to REAL constant')
The provided code snippet includes necessary dependencies for implementing the `as_terms` function. Write a Python function `def as_terms(obj)` to solve the following problem:
Return expression as TERMS expression.
Here is the function:
def as_terms(obj):
"""Return expression as TERMS expression.
"""
if isinstance(obj, Expr):
obj = normalize(obj)
if obj.op is Op.TERMS:
return obj
if obj.op is Op.INTEGER:
return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]})
if obj.op is Op.REAL:
return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]})
return Expr(Op.TERMS, {obj: 1})
raise OpError(f'cannot convert {type(obj)} to terms Expr') | Return expression as TERMS expression. |
168,571 | import re
import warnings
from enum import Enum
from math import gcd
class Op(Enum):
"""
Used as Expr op attribute.
"""
INTEGER = 10
REAL = 12
COMPLEX = 15
STRING = 20
ARRAY = 30
SYMBOL = 40
TERNARY = 100
APPLY = 200
INDEXING = 210
CONCAT = 220
RELATIONAL = 300
TERMS = 1000
FACTORS = 2000
REF = 3000
DEREF = 3001
class ArithOp(Enum):
"""
Used in Op.APPLY expression to specify the function part.
"""
POS = 1
NEG = 2
ADD = 3
SUB = 4
MUL = 5
DIV = 6
POW = 7
class OpError(Exception):
pass
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
def tostring(self, parent_precedence=Precedence.NONE,
language=Language.Fortran):
"""Return a string representation of Expr.
"""
if self.op in (Op.INTEGER, Op.REAL):
precedence = (Precedence.SUM if self.data[0] < 0
else Precedence.ATOM)
r = str(self.data[0]) + (f'_{self.data[1]}'
if self.data[1] != 4 else '')
elif self.op is Op.COMPLEX:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '(' + r + ')'
precedence = Precedence.ATOM
elif self.op is Op.SYMBOL:
precedence = Precedence.ATOM
r = str(self.data)
elif self.op is Op.STRING:
r = self.data[0]
if self.data[1] != 1:
r = self.data[1] + '_' + r
precedence = Precedence.ATOM
elif self.op is Op.ARRAY:
r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
for item in self.data)
r = '[' + r + ']'
precedence = Precedence.ATOM
elif self.op is Op.TERMS:
terms = []
for term, coeff in sorted(self.data.items()):
if coeff < 0:
op = ' - '
coeff = -coeff
else:
op = ' + '
if coeff == 1:
term = term.tostring(Precedence.SUM, language=language)
else:
if term == as_number(1):
term = str(coeff)
else:
term = f'{coeff} * ' + term.tostring(
Precedence.PRODUCT, language=language)
if terms:
terms.append(op)
elif op == ' - ':
terms.append('-')
terms.append(term)
r = ''.join(terms) or '0'
precedence = Precedence.SUM if terms else Precedence.ATOM
elif self.op is Op.FACTORS:
factors = []
tail = []
for base, exp in sorted(self.data.items()):
op = ' * '
if exp == 1:
factor = base.tostring(Precedence.PRODUCT,
language=language)
elif language is Language.C:
if exp in range(2, 10):
factor = base.tostring(Precedence.PRODUCT,
language=language)
factor = ' * '.join([factor] * exp)
elif exp in range(-10, 0):
factor = base.tostring(Precedence.PRODUCT,
language=language)
tail += [factor] * -exp
continue
else:
factor = base.tostring(Precedence.TUPLE,
language=language)
factor = f'pow({factor}, {exp})'
else:
factor = base.tostring(Precedence.POWER,
language=language) + f' ** {exp}'
if factors:
factors.append(op)
factors.append(factor)
if tail:
if not factors:
factors += ['1']
factors += ['/', '(', ' * '.join(tail), ')']
r = ''.join(factors) or '1'
precedence = Precedence.PRODUCT if factors else Precedence.ATOM
elif self.op is Op.APPLY:
name, args, kwargs = self.data
if name is ArithOp.DIV and language is Language.C:
numer, denom = [arg.tostring(Precedence.PRODUCT,
language=language)
for arg in args]
r = f'{numer} / {denom}'
precedence = Precedence.PRODUCT
else:
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in args]
args += [k + '=' + v.tostring(Precedence.NONE)
for k, v in kwargs.items()]
r = f'{name}({", ".join(args)})'
precedence = Precedence.ATOM
elif self.op is Op.INDEXING:
name = self.data[0]
args = [arg.tostring(Precedence.TUPLE, language=language)
for arg in self.data[1:]]
r = f'{name}[{", ".join(args)}]'
precedence = Precedence.ATOM
elif self.op is Op.CONCAT:
args = [arg.tostring(Precedence.PRODUCT, language=language)
for arg in self.data]
r = " // ".join(args)
precedence = Precedence.PRODUCT
elif self.op is Op.TERNARY:
cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
language=language)
for a in self.data]
if language is Language.C:
r = f'({cond}?{expr1}:{expr2})'
elif language is Language.Python:
r = f'({expr1} if {cond} else {expr2})'
elif language is Language.Fortran:
r = f'merge({expr1}, {expr2}, {cond})'
else:
raise NotImplementedError(
f'tostring for {self.op} and {language}')
precedence = Precedence.ATOM
elif self.op is Op.REF:
r = '&' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.DEREF:
r = '*' + self.data.tostring(Precedence.UNARY, language=language)
precedence = Precedence.UNARY
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
else Precedence.LT)
left = left.tostring(precedence, language=language)
right = right.tostring(precedence, language=language)
rop = rop.tostring(language=language)
r = f'{left} {rop} {right}'
else:
raise NotImplementedError(f'tostring for op {self.op}')
if parent_precedence.value < precedence.value:
# If parent precedence is higher than operand precedence,
# operand will be enclosed in parenthesis.
return '(' + r + ')'
return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
def __add__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(
self.data[0] + other.data[0],
max(self.data[1], other.data[1]))
if self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 + r2, i1 + i2)
if self.op is Op.TERMS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self + as_complex(other)
elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
return as_complex(self) + other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self + as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) + other
return as_terms(self) + as_terms(other)
return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
def __mul__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if self.op is other.op:
if self.op in (Op.INTEGER, Op.REAL):
return as_number(self.data[0] * other.data[0],
max(self.data[1], other.data[1]))
elif self.op is Op.COMPLEX:
r1, i1 = self.data
r2, i2 = other.data
return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
if self.op is Op.FACTORS:
r = Expr(self.op, dict(self.data))
for k, v in other.data.items():
_pairs_add(r.data, k, v)
return normalize(r)
elif self.op is Op.TERMS:
r = Expr(self.op, {})
for t1, c1 in self.data.items():
for t2, c2 in other.data.items():
_pairs_add(r.data, t1 * t2, c1 * c2)
return normalize(r)
if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
return self * as_complex(other)
elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
return as_complex(self) * other
elif self.op is Op.REAL and other.op is Op.INTEGER:
return self * as_real(other, kind=self.data[1])
elif self.op is Op.INTEGER and other.op is Op.REAL:
return as_real(self, kind=other.data[1]) * other
if self.op is Op.TERMS:
return self * as_terms(other)
elif other.op is Op.TERMS:
return as_terms(self) * other
return as_factors(self) * as_factors(other)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
def __pow__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
if other.op is Op.INTEGER:
exponent = other.data[0]
# TODO: other kind not used
if exponent == 0:
return as_number(1)
if exponent == 1:
return self
if exponent > 0:
if self.op is Op.FACTORS:
r = Expr(self.op, {})
for k, v in self.data.items():
r.data[k] = v * exponent
return normalize(r)
return self * (self ** (exponent - 1))
elif exponent != -1:
return (self ** (-exponent)) ** -1
return Expr(Op.FACTORS, {self: exponent})
return as_apply(ArithOp.POW, self, other)
return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
def substitute(self, symbols_map):
"""Recursively substitute symbols with values in symbols map.
Symbols map is a dictionary of symbol-expression pairs.
"""
if self.op is Op.SYMBOL:
value = symbols_map.get(self)
if value is None:
return self
m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
if m:
# complement to fromstring method
items, paren = m.groups()
if paren in ['ROUNDDIV', 'SQUARE']:
return as_array(value)
assert paren == 'ROUND', (paren, value)
return value
if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
return self
if self.op in (Op.ARRAY, Op.COMPLEX):
return Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data))
if self.op is Op.CONCAT:
return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
for item in self.data)))
if self.op is Op.TERMS:
r = None
for term, coeff in self.data.items():
if r is None:
r = term.substitute(symbols_map) * coeff
else:
r += term.substitute(symbols_map) * coeff
if r is None:
ewarn('substitute: empty TERMS expression interpreted as'
' int-literal 0')
return as_number(0)
return r
if self.op is Op.FACTORS:
r = None
for base, exponent in self.data.items():
if r is None:
r = base.substitute(symbols_map) ** exponent
else:
r *= base.substitute(symbols_map) ** exponent
if r is None:
ewarn('substitute: empty FACTORS expression interpreted'
' as int-literal 1')
return as_number(1)
return r
if self.op is Op.APPLY:
target, args, kwargs = self.data
if isinstance(target, Expr):
target = target.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in args)
kwargs = dict((k, v.substitute(symbols_map))
for k, v in kwargs.items())
return normalize(Expr(self.op, (target, args, kwargs)))
if self.op is Op.INDEXING:
func = self.data[0]
if isinstance(func, Expr):
func = func.substitute(symbols_map)
args = tuple(a.substitute(symbols_map) for a in self.data[1:])
return normalize(Expr(self.op, (func,) + args))
if self.op is Op.TERNARY:
operands = tuple(a.substitute(symbols_map) for a in self.data)
return normalize(Expr(self.op, operands))
if self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op, self.data.substitute(symbols_map)))
if self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.substitute(symbols_map)
right = right.substitute(symbols_map)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
def traverse(self, visit, *args, **kwargs):
"""Traverse expression tree with visit function.
The visit function is applied to an expression with given args
and kwargs.
Traverse call returns an expression returned by visit when not
None, otherwise return a new normalized expression with
traverse-visit sub-expressions.
"""
result = visit(self, *args, **kwargs)
if result is not None:
return result
if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
return self
elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
return normalize(Expr(self.op, tuple(
item.traverse(visit, *args, **kwargs)
for item in self.data)))
elif self.op in (Op.TERMS, Op.FACTORS):
data = {}
for k, v in self.data.items():
k = k.traverse(visit, *args, **kwargs)
v = (v.traverse(visit, *args, **kwargs)
if isinstance(v, Expr) else v)
if k in data:
v = data[k] + v
data[k] = v
return normalize(Expr(self.op, data))
elif self.op is Op.APPLY:
obj = self.data[0]
func = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
operands = tuple(operand.traverse(visit, *args, **kwargs)
for operand in self.data[1])
kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
for k, v in self.data[2].items())
return normalize(Expr(self.op, (func, operands, kwoperands)))
elif self.op is Op.INDEXING:
obj = self.data[0]
obj = (obj.traverse(visit, *args, **kwargs)
if isinstance(obj, Expr) else obj)
indices = tuple(index.traverse(visit, *args, **kwargs)
for index in self.data[1:])
return normalize(Expr(self.op, (obj,) + indices))
elif self.op in (Op.REF, Op.DEREF):
return normalize(Expr(self.op,
self.data.traverse(visit, *args, **kwargs)))
elif self.op is Op.RELATIONAL:
rop, left, right = self.data
left = left.traverse(visit, *args, **kwargs)
right = right.traverse(visit, *args, **kwargs)
return normalize(Expr(self.op, (rop, left, right)))
raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
def polynomial_atoms(self):
"""Return a set of expressions used as atoms in polynomial self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.FACTORS:
for b in expr.data:
b.traverse(visit)
return expr
if expr.op in (Op.TERMS, Op.COMPLEX):
return
if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
if expr.data[0] is ArithOp.POW:
expr.data[1][0].traverse(visit)
return expr
return
if expr.op in (Op.INTEGER, Op.REAL):
return expr
found.add(expr)
if expr.op in (Op.INDEXING, Op.APPLY):
return expr
self.traverse(visit)
return found
def linear_solve(self, symbol):
"""Return a, b such that a * symbol + b == self.
If self is not linear with respect to symbol, raise RuntimeError.
"""
b = self.substitute({symbol: as_number(0)})
ax = self - b
a = ax.substitute({symbol: as_number(1)})
zero, _ = as_numer_denom(a * symbol - ax)
if zero != as_number(0):
raise RuntimeError(f'not a {symbol}-linear equation:'
f' {a} * {symbol} + {b} == {self}')
return a, b
def normalize(obj):
"""Normalize Expr and apply basic evaluation methods.
"""
if not isinstance(obj, Expr):
return obj
if obj.op is Op.TERMS:
d = {}
for t, c in obj.data.items():
if c == 0:
continue
if t.op is Op.COMPLEX and c != 1:
t = t * c
c = 1
if t.op is Op.TERMS:
for t1, c1 in t.data.items():
_pairs_add(d, t1, c1 * c)
else:
_pairs_add(d, t, c)
if len(d) == 0:
# TODO: deterimine correct kind
return as_number(0)
elif len(d) == 1:
(t, c), = d.items()
if c == 1:
return t
return Expr(Op.TERMS, d)
if obj.op is Op.FACTORS:
coeff = 1
d = {}
for b, e in obj.data.items():
if e == 0:
continue
if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1:
# expand integer powers of sums
b = b * (b ** (e - 1))
e = 1
if b.op in (Op.INTEGER, Op.REAL):
if e == 1:
coeff *= b.data[0]
elif e > 0:
coeff *= b.data[0] ** e
else:
_pairs_add(d, b, e)
elif b.op is Op.FACTORS:
if e > 0 and isinstance(e, integer_types):
for b1, e1 in b.data.items():
_pairs_add(d, b1, e1 * e)
else:
_pairs_add(d, b, e)
else:
_pairs_add(d, b, e)
if len(d) == 0 or coeff == 0:
# TODO: deterimine correct kind
assert isinstance(coeff, number_types)
return as_number(coeff)
elif len(d) == 1:
(b, e), = d.items()
if e == 1:
t = b
else:
t = Expr(Op.FACTORS, d)
if coeff == 1:
return t
return Expr(Op.TERMS, {t: coeff})
elif coeff == 1:
return Expr(Op.FACTORS, d)
else:
return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff})
if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
dividend, divisor = obj.data[1]
t1, c1 = as_term_coeff(dividend)
t2, c2 = as_term_coeff(divisor)
if isinstance(c1, integer_types) and isinstance(c2, integer_types):
g = gcd(c1, c2)
c1, c2 = c1//g, c2//g
else:
c1, c2 = c1/c2, 1
if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV:
numer = t1.data[1][0] * c1
denom = t1.data[1][1] * t2 * c2
return as_apply(ArithOp.DIV, numer, denom)
if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV:
numer = t2.data[1][1] * t1 * c1
denom = t2.data[1][0] * c2
return as_apply(ArithOp.DIV, numer, denom)
d = dict(as_factors(t1).data)
for b, e in as_factors(t2).data.items():
_pairs_add(d, b, -e)
numer, denom = {}, {}
for b, e in d.items():
if e > 0:
numer[b] = e
else:
denom[b] = -e
numer = normalize(Expr(Op.FACTORS, numer)) * c1
denom = normalize(Expr(Op.FACTORS, denom)) * c2
if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1:
# TODO: denom kind not used
return numer
return as_apply(ArithOp.DIV, numer, denom)
if obj.op is Op.CONCAT:
lst = [obj.data[0]]
for s in obj.data[1:]:
last = lst[-1]
if (
last.op is Op.STRING
and s.op is Op.STRING
and last.data[0][0] in '"\''
and s.data[0][0] == last.data[0][-1]
):
new_last = as_string(last.data[0][:-1] + s.data[0][1:],
max(last.data[1], s.data[1]))
lst[-1] = new_last
else:
lst.append(s)
if len(lst) == 1:
return lst[0]
return Expr(Op.CONCAT, tuple(lst))
if obj.op is Op.TERNARY:
cond, expr1, expr2 = map(normalize, obj.data)
if cond.op is Op.INTEGER:
return expr1 if cond.data[0] else expr2
return Expr(Op.TERNARY, (cond, expr1, expr2))
return obj
def as_number(obj, kind=4):
"""Return object as INTEGER or REAL constant.
"""
if isinstance(obj, int):
return Expr(Op.INTEGER, (obj, kind))
if isinstance(obj, float):
return Expr(Op.REAL, (obj, kind))
if isinstance(obj, Expr):
if obj.op in (Op.INTEGER, Op.REAL):
return obj
raise OpError(f'cannot convert {obj} to INTEGER or REAL constant')
The provided code snippet includes necessary dependencies for implementing the `as_numer_denom` function. Write a Python function `def as_numer_denom(obj)` to solve the following problem:
Return expression as numer-denom pair.
Here is the function:
def as_numer_denom(obj):
"""Return expression as numer-denom pair.
"""
if isinstance(obj, Expr):
obj = normalize(obj)
if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL,
Op.INDEXING, Op.TERNARY):
return obj, as_number(1)
elif obj.op is Op.APPLY:
if obj.data[0] is ArithOp.DIV and not obj.data[2]:
numers, denoms = map(as_numer_denom, obj.data[1])
return numers[0] * denoms[1], numers[1] * denoms[0]
return obj, as_number(1)
elif obj.op is Op.TERMS:
numers, denoms = [], []
for term, coeff in obj.data.items():
n, d = as_numer_denom(term)
n = n * coeff
numers.append(n)
denoms.append(d)
numer, denom = as_number(0), as_number(1)
for i in range(len(numers)):
n = numers[i]
for j in range(len(numers)):
if i != j:
n *= denoms[j]
numer += n
denom *= denoms[i]
if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0:
numer, denom = -numer, -denom
return numer, denom
elif obj.op is Op.FACTORS:
numer, denom = as_number(1), as_number(1)
for b, e in obj.data.items():
bnumer, bdenom = as_numer_denom(b)
if e > 0:
numer *= bnumer ** e
denom *= bdenom ** e
elif e < 0:
numer *= bdenom ** (-e)
denom *= bnumer ** (-e)
return numer, denom
raise OpError(f'cannot convert {type(obj)} to numer and denom') | Return expression as numer-denom pair. |
168,572 | import re
import warnings
from enum import Enum
from math import gcd
def _counter():
# Used internally to generate unique dummy symbols
counter = 0
while True:
counter += 1
yield counter | null |
168,573 | import re
import warnings
from enum import Enum
from math import gcd
COUNTER = _counter()
The provided code snippet includes necessary dependencies for implementing the `eliminate_quotes` function. Write a Python function `def eliminate_quotes(s)` to solve the following problem:
Replace quoted substrings of input string. Return a new string and a mapping of replacements.
Here is the function:
def eliminate_quotes(s):
"""Replace quoted substrings of input string.
Return a new string and a mapping of replacements.
"""
d = {}
def repl(m):
kind, value = m.groups()[:2]
if kind:
# remove trailing underscore
kind = kind[:-1]
p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]]
k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@'
d[k] = value
return k
new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format(
kind=r'\w[\w\d_]*',
single_quoted=r"('([^'\\]|(\\.))*')",
double_quoted=r'("([^"\\]|(\\.))*")'),
repl, s)
assert '"' not in new_s
assert "'" not in new_s
return new_s, d | Replace quoted substrings of input string. Return a new string and a mapping of replacements. |
168,574 | import re
import warnings
from enum import Enum
from math import gcd
The provided code snippet includes necessary dependencies for implementing the `insert_quotes` function. Write a Python function `def insert_quotes(s, d)` to solve the following problem:
Inverse of eliminate_quotes.
Here is the function:
def insert_quotes(s, d):
"""Inverse of eliminate_quotes.
"""
for k, v in d.items():
kind = k[:k.find('@')]
if kind:
kind += '_'
s = s.replace(k, kind + v)
return s | Inverse of eliminate_quotes. |
168,575 | import re
import warnings
from enum import Enum
from math import gcd
COUNTER = _counter()
The provided code snippet includes necessary dependencies for implementing the `replace_parenthesis` function. Write a Python function `def replace_parenthesis(s)` to solve the following problem:
Replace substrings of input that are enclosed in parenthesis. Return a new string and a mapping of replacements.
Here is the function:
def replace_parenthesis(s):
"""Replace substrings of input that are enclosed in parenthesis.
Return a new string and a mapping of replacements.
"""
# Find a parenthesis pair that appears first.
# Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`.
# We don't handle `/` deliminator because it is not a part of an
# expression.
left, right = None, None
mn_i = len(s)
for left_, right_ in (('(/', '/)'),
'()',
'{}', # to support C literal structs
'[]'):
i = s.find(left_)
if i == -1:
continue
if i < mn_i:
mn_i = i
left, right = left_, right_
if left is None:
return s, {}
i = mn_i
j = s.find(right, i)
while s.count(left, i + 1, j) != s.count(right, i + 1, j):
j = s.find(right, j + 1)
if j == -1:
raise ValueError(f'Mismatch of {left+right} parenthesis in {s!r}')
p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left]
k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@'
v = s[i+len(left):j]
r, d = replace_parenthesis(s[j+len(right):])
d[k] = v
return s[:i] + k + r, d | Replace substrings of input that are enclosed in parenthesis. Return a new string and a mapping of replacements. |
168,576 | import re
import warnings
from enum import Enum
from math import gcd
def _get_parenthesis_kind(s):
assert s.startswith('@__f2py_PARENTHESIS_'), s
return s.split('_')[4]
The provided code snippet includes necessary dependencies for implementing the `unreplace_parenthesis` function. Write a Python function `def unreplace_parenthesis(s, d)` to solve the following problem:
Inverse of replace_parenthesis.
Here is the function:
def unreplace_parenthesis(s, d):
"""Inverse of replace_parenthesis.
"""
for k, v in d.items():
p = _get_parenthesis_kind(k)
left = dict(ROUND='(', SQUARE='[', CURLY='{', ROUNDDIV='(/')[p]
right = dict(ROUND=')', SQUARE=']', CURLY='}', ROUNDDIV='/)')[p]
s = s.replace(k, left + v + right)
return s | Inverse of replace_parenthesis. |
168,577 | import re
import warnings
from enum import Enum
from math import gcd
class Language(Enum):
"""
Used as Expr.tostring language argument.
"""
Python = 0
Fortran = 1
C = 2
class Expr:
"""Represents a Fortran expression as a op-data pair.
Expr instances are hashable and sortable.
"""
def parse(s, language=Language.C):
"""Parse a Fortran expression to a Expr.
"""
return fromstring(s, language=language)
def __init__(self, op, data):
assert isinstance(op, Op)
# sanity checks
if op is Op.INTEGER:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], int)
assert isinstance(data[1], (int, str)), data
elif op is Op.REAL:
# data is a 2-tuple of numeric object and a kind value
# (default is 4)
assert isinstance(data, tuple) and len(data) == 2
assert isinstance(data[0], float)
assert isinstance(data[1], (int, str)), data
elif op is Op.COMPLEX:
# data is a 2-tuple of constant expressions
assert isinstance(data, tuple) and len(data) == 2
elif op is Op.STRING:
# data is a 2-tuple of quoted string and a kind value
# (default is 1)
assert isinstance(data, tuple) and len(data) == 2
assert (isinstance(data[0], str)
and data[0][::len(data[0])-1] in ('""', "''", '@@'))
assert isinstance(data[1], (int, str)), data
elif op is Op.SYMBOL:
# data is any hashable object
assert hash(data) is not None
elif op in (Op.ARRAY, Op.CONCAT):
# data is a tuple of expressions
assert isinstance(data, tuple)
assert all(isinstance(item, Expr) for item in data), data
elif op in (Op.TERMS, Op.FACTORS):
# data is {<term|base>:<coeff|exponent>} where dict values
# are nonzero Python integers
assert isinstance(data, dict)
elif op is Op.APPLY:
# data is (<function>, <operands>, <kwoperands>) where
# operands are Expr instances
assert isinstance(data, tuple) and len(data) == 3
# function is any hashable object
assert hash(data[0]) is not None
assert isinstance(data[1], tuple)
assert isinstance(data[2], dict)
elif op is Op.INDEXING:
# data is (<object>, <indices>)
assert isinstance(data, tuple) and len(data) == 2
# function is any hashable object
assert hash(data[0]) is not None
elif op is Op.TERNARY:
# data is (<cond>, <expr1>, <expr2>)
assert isinstance(data, tuple) and len(data) == 3
elif op in (Op.REF, Op.DEREF):
# data is Expr instance
assert isinstance(data, Expr)
elif op is Op.RELATIONAL:
# data is (<relop>, <left>, <right>)
assert isinstance(data, tuple) and len(data) == 3
else:
raise NotImplementedError(
f'unknown op or missing sanity check: {op}')
self.op = op
self.data = data
def __eq__(self, other):
return (isinstance(other, Expr)
and self.op is other.op
and self.data == other.data)
def __hash__(self):
if self.op in (Op.TERMS, Op.FACTORS):
data = tuple(sorted(self.data.items()))
elif self.op is Op.APPLY:
data = self.data[:2] + tuple(sorted(self.data[2].items()))
else:
data = self.data
return hash((self.op, data))
def __lt__(self, other):
if isinstance(other, Expr):
if self.op is not other.op:
return self.op.value < other.op.value
if self.op in (Op.TERMS, Op.FACTORS):
return (tuple(sorted(self.data.items()))
< tuple(sorted(other.data.items())))
if self.op is Op.APPLY:
if self.data[:2] != other.data[:2]:
return self.data[:2] < other.data[:2]
return tuple(sorted(self.data[2].items())) < tuple(
sorted(other.data[2].items()))
return self.data < other.data
return NotImplemented
def __le__(self, other): return self == other or self < other
def __gt__(self, other): return not (self <= other)
def __ge__(self, other): return not (self < other)
def __repr__(self):
return f'{type(self).__name__}({self.op}, {self.data!r})'
def __str__(self):
return self.tostring()
    def tostring(self, parent_precedence=Precedence.NONE,
                 language=Language.Fortran):
        """Return a string representation of Expr.

        The result is parenthesized whenever this node's precedence is
        lower than `parent_precedence` (see the final check below).
        """
        if self.op in (Op.INTEGER, Op.REAL):
            # A negative literal carries a leading minus, so it binds
            # like a sum and may need parenthesis from the parent.
            precedence = (Precedence.SUM if self.data[0] < 0
                          else Precedence.ATOM)
            # Emit a `_<kind>` suffix only for non-default kind (!= 4).
            r = str(self.data[0]) + (f'_{self.data[1]}'
                                     if self.data[1] != 4 else '')
        elif self.op is Op.COMPLEX:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '(' + r + ')'
            precedence = Precedence.ATOM
        elif self.op is Op.SYMBOL:
            precedence = Precedence.ATOM
            r = str(self.data)
        elif self.op is Op.STRING:
            r = self.data[0]
            if self.data[1] != 1:
                # Non-default kind prefixes the literal: `<kind>_"text"`.
                r = self.data[1] + '_' + r
            precedence = Precedence.ATOM
        elif self.op is Op.ARRAY:
            r = ', '.join(item.tostring(Precedence.TUPLE, language=language)
                          for item in self.data)
            r = '[' + r + ']'
            precedence = Precedence.ATOM
        elif self.op is Op.TERMS:
            # data maps term -> nonzero integer coefficient.
            terms = []
            for term, coeff in sorted(self.data.items()):
                if coeff < 0:
                    op = ' - '
                    coeff = -coeff
                else:
                    op = ' + '
                if coeff == 1:
                    term = term.tostring(Precedence.SUM, language=language)
                else:
                    if term == as_number(1):
                        term = str(coeff)
                    else:
                        term = f'{coeff} * ' + term.tostring(
                            Precedence.PRODUCT, language=language)
                if terms:
                    terms.append(op)
                elif op == ' - ':
                    # First term is negative: emit a bare unary minus.
                    terms.append('-')
                terms.append(term)
            r = ''.join(terms) or '0'
            precedence = Precedence.SUM if terms else Precedence.ATOM
        elif self.op is Op.FACTORS:
            # data maps base -> nonzero integer exponent.
            factors = []
            tail = []
            for base, exp in sorted(self.data.items()):
                op = ' * '
                if exp == 1:
                    factor = base.tostring(Precedence.PRODUCT,
                                           language=language)
                elif language is Language.C:
                    # C has no `**`: expand small powers into repeated
                    # products, push small negative powers into a
                    # trailing divisor, otherwise call pow().
                    if exp in range(2, 10):
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        factor = ' * '.join([factor] * exp)
                    elif exp in range(-10, 0):
                        factor = base.tostring(Precedence.PRODUCT,
                                               language=language)
                        tail += [factor] * -exp
                        continue
                    else:
                        factor = base.tostring(Precedence.TUPLE,
                                               language=language)
                        factor = f'pow({factor}, {exp})'
                else:
                    factor = base.tostring(Precedence.POWER,
                                           language=language) + f' ** {exp}'
                if factors:
                    factors.append(op)
                factors.append(factor)
            if tail:
                if not factors:
                    factors += ['1']
                factors += ['/', '(', ' * '.join(tail), ')']
            r = ''.join(factors) or '1'
            precedence = Precedence.PRODUCT if factors else Precedence.ATOM
        elif self.op is Op.APPLY:
            name, args, kwargs = self.data
            if name is ArithOp.DIV and language is Language.C:
                # Symbolic division renders as plain C `/`.
                numer, denom = [arg.tostring(Precedence.PRODUCT,
                                             language=language)
                                for arg in args]
                r = f'{numer} / {denom}'
                precedence = Precedence.PRODUCT
            else:
                args = [arg.tostring(Precedence.TUPLE, language=language)
                        for arg in args]
                args += [k + '=' + v.tostring(Precedence.NONE)
                         for k, v in kwargs.items()]
                r = f'{name}({", ".join(args)})'
                precedence = Precedence.ATOM
        elif self.op is Op.INDEXING:
            name = self.data[0]
            args = [arg.tostring(Precedence.TUPLE, language=language)
                    for arg in self.data[1:]]
            r = f'{name}[{", ".join(args)}]'
            precedence = Precedence.ATOM
        elif self.op is Op.CONCAT:
            args = [arg.tostring(Precedence.PRODUCT, language=language)
                    for arg in self.data]
            r = " // ".join(args)
            precedence = Precedence.PRODUCT
        elif self.op is Op.TERNARY:
            cond, expr1, expr2 = [a.tostring(Precedence.TUPLE,
                                             language=language)
                                  for a in self.data]
            if language is Language.C:
                r = f'({cond}?{expr1}:{expr2})'
            elif language is Language.Python:
                r = f'({expr1} if {cond} else {expr2})'
            elif language is Language.Fortran:
                r = f'merge({expr1}, {expr2}, {cond})'
            else:
                raise NotImplementedError(
                    f'tostring for {self.op} and {language}')
            precedence = Precedence.ATOM
        elif self.op is Op.REF:
            r = '&' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.DEREF:
            r = '*' + self.data.tostring(Precedence.UNARY, language=language)
            precedence = Precedence.UNARY
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE)
                          else Precedence.LT)
            left = left.tostring(precedence, language=language)
            right = right.tostring(precedence, language=language)
            rop = rop.tostring(language=language)
            r = f'{left} {rop} {right}'
        else:
            raise NotImplementedError(f'tostring for op {self.op}')
        if parent_precedence.value < precedence.value:
            # If parent precedence is higher than operand precedence,
            # operand will be enclosed in parenthesis.
            return '(' + r + ')'
        return r
def __pos__(self):
return self
def __neg__(self):
return self * -1
    def __add__(self, other):
        # Coerce Python numbers etc. to Expr; anything else defers.
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    # Fold numeric literals; result kind is the max kind.
                    return as_number(
                        self.data[0] + other.data[0],
                        max(self.data[1], other.data[1]))
                if self.op is Op.COMPLEX:
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 + r2, i1 + i2)
                if self.op is Op.TERMS:
                    # Merge term->coefficient maps of both operands.
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
            # Mixed literal kinds: promote to the wider representation,
            # then retry the addition.
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self + as_complex(other)
            elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX:
                return as_complex(self) + other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self + as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) + other
            # General case: represent both sides as TERMS and merge.
            return as_terms(self) + as_terms(other)
        return NotImplemented
def __radd__(self, other):
if isinstance(other, number_types):
return as_number(other) + self
return NotImplemented
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
if isinstance(other, number_types):
return as_number(other) - self
return NotImplemented
    def __mul__(self, other):
        # Coerce Python numbers etc. to Expr; anything else defers.
        other = as_expr(other)
        if isinstance(other, Expr):
            if self.op is other.op:
                if self.op in (Op.INTEGER, Op.REAL):
                    # Fold numeric literals; result kind is the max kind.
                    return as_number(self.data[0] * other.data[0],
                                     max(self.data[1], other.data[1]))
                elif self.op is Op.COMPLEX:
                    # (r1 + i1*j) * (r2 + i2*j)
                    r1, i1 = self.data
                    r2, i2 = other.data
                    return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1)
                if self.op is Op.FACTORS:
                    # Merge base->exponent maps of both operands.
                    r = Expr(self.op, dict(self.data))
                    for k, v in other.data.items():
                        _pairs_add(r.data, k, v)
                    return normalize(r)
                elif self.op is Op.TERMS:
                    # Distribute: cross-multiply every pair of terms.
                    r = Expr(self.op, {})
                    for t1, c1 in self.data.items():
                        for t2, c2 in other.data.items():
                            _pairs_add(r.data, t1 * t2, c1 * c2)
                    return normalize(r)
            # Mixed literal kinds: promote to the wider representation.
            if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL):
                return self * as_complex(other)
            elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL):
                return as_complex(self) * other
            elif self.op is Op.REAL and other.op is Op.INTEGER:
                return self * as_real(other, kind=self.data[1])
            elif self.op is Op.INTEGER and other.op is Op.REAL:
                return as_real(self, kind=other.data[1]) * other
            if self.op is Op.TERMS:
                return self * as_terms(other)
            elif other.op is Op.TERMS:
                return as_terms(self) * other
            # General case: represent both sides as FACTORS and merge.
            return as_factors(self) * as_factors(other)
        return NotImplemented
def __rmul__(self, other):
if isinstance(other, number_types):
return as_number(other) * self
return NotImplemented
    def __pow__(self, other):
        other = as_expr(other)
        if isinstance(other, Expr):
            if other.op is Op.INTEGER:
                exponent = other.data[0]
                # TODO: other kind not used
                if exponent == 0:
                    return as_number(1)
                if exponent == 1:
                    return self
                if exponent > 0:
                    if self.op is Op.FACTORS:
                        # Raise a product by multiplying every exponent.
                        r = Expr(self.op, {})
                        for k, v in self.data.items():
                            r.data[k] = v * exponent
                        return normalize(r)
                    # Expand recursively: x**n == x * x**(n-1).
                    return self * (self ** (exponent - 1))
                elif exponent != -1:
                    # Negative power: invert the positive power.
                    return (self ** (-exponent)) ** -1
                return Expr(Op.FACTORS, {self: exponent})
            # Non-integer exponent stays symbolic.
            return as_apply(ArithOp.POW, self, other)
        return NotImplemented
def __truediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran / is different from Python /:
# - `/` is a truncate operation for integer operands
return normalize(as_apply(ArithOp.DIV, self, other))
return NotImplemented
def __rtruediv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other / self
return NotImplemented
def __floordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
# Fortran // is different from Python //:
# - `//` is a concatenate operation for string operands
return normalize(Expr(Op.CONCAT, (self, other)))
return NotImplemented
def __rfloordiv__(self, other):
other = as_expr(other)
if isinstance(other, Expr):
return other // self
return NotImplemented
def __call__(self, *args, **kwargs):
# In Fortran, parenthesis () are use for both function call as
# well as indexing operations.
#
# TODO: implement a method for deciding when __call__ should
# return an INDEXING expression.
return as_apply(self, *map(as_expr, args),
**dict((k, as_expr(v)) for k, v in kwargs.items()))
def __getitem__(self, index):
# Provided to support C indexing operations that .pyf files
# may contain.
index = as_expr(index)
if not isinstance(index, tuple):
index = index,
if len(index) > 1:
ewarn(f'C-index should be a single expression but got `{index}`')
return Expr(Op.INDEXING, (self,) + index)
    def substitute(self, symbols_map):
        """Recursively substitute symbols with values in symbols map.

        Symbols map is a dictionary of symbol-expression pairs.
        """
        if self.op is Op.SYMBOL:
            value = symbols_map.get(self)
            if value is None:
                return self
            # Internal @-name symbols stand for parenthesized
            # subexpressions produced while parsing (see fromstring).
            m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data)
            if m:
                # complement to fromstring method
                items, paren = m.groups()
                if paren in ['ROUNDDIV', 'SQUARE']:
                    return as_array(value)
                assert paren == 'ROUND', (paren, value)
            return value
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING):
            # Literals contain no symbols.
            return self
        if self.op in (Op.ARRAY, Op.COMPLEX):
            return Expr(self.op, tuple(item.substitute(symbols_map)
                                       for item in self.data))
        if self.op is Op.CONCAT:
            return normalize(Expr(self.op, tuple(item.substitute(symbols_map)
                                                 for item in self.data)))
        if self.op is Op.TERMS:
            # Rebuild the sum from substituted terms via Expr arithmetic.
            r = None
            for term, coeff in self.data.items():
                if r is None:
                    r = term.substitute(symbols_map) * coeff
                else:
                    r += term.substitute(symbols_map) * coeff
            if r is None:
                ewarn('substitute: empty TERMS expression interpreted as'
                      ' int-literal 0')
                return as_number(0)
            return r
        if self.op is Op.FACTORS:
            # Rebuild the product from substituted factors.
            r = None
            for base, exponent in self.data.items():
                if r is None:
                    r = base.substitute(symbols_map) ** exponent
                else:
                    r *= base.substitute(symbols_map) ** exponent
            if r is None:
                ewarn('substitute: empty FACTORS expression interpreted'
                      ' as int-literal 1')
                return as_number(1)
            return r
        if self.op is Op.APPLY:
            target, args, kwargs = self.data
            if isinstance(target, Expr):
                target = target.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in args)
            kwargs = dict((k, v.substitute(symbols_map))
                          for k, v in kwargs.items())
            return normalize(Expr(self.op, (target, args, kwargs)))
        if self.op is Op.INDEXING:
            func = self.data[0]
            if isinstance(func, Expr):
                func = func.substitute(symbols_map)
            args = tuple(a.substitute(symbols_map) for a in self.data[1:])
            return normalize(Expr(self.op, (func,) + args))
        if self.op is Op.TERNARY:
            operands = tuple(a.substitute(symbols_map) for a in self.data)
            return normalize(Expr(self.op, operands))
        if self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op, self.data.substitute(symbols_map)))
        if self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.substitute(symbols_map)
            right = right.substitute(symbols_map)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'substitute method for {self.op}: {self!r}')
    def traverse(self, visit, *args, **kwargs):
        """Traverse expression tree with visit function.

        The visit function is applied to an expression with given args
        and kwargs.

        Traverse call returns an expression returned by visit when not
        None, otherwise return a new normalized expression with
        traverse-visit sub-expressions.
        """
        result = visit(self, *args, **kwargs)
        if result is not None:
            # visit short-circuits: its return value replaces this node.
            return result
        if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL):
            # Leaf nodes: nothing to recurse into.
            return self
        elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY):
            return normalize(Expr(self.op, tuple(
                item.traverse(visit, *args, **kwargs)
                for item in self.data)))
        elif self.op in (Op.TERMS, Op.FACTORS):
            data = {}
            for k, v in self.data.items():
                k = k.traverse(visit, *args, **kwargs)
                v = (v.traverse(visit, *args, **kwargs)
                     if isinstance(v, Expr) else v)
                if k in data:
                    # Visiting may map distinct keys to the same key:
                    # accumulate their coefficients/exponents.
                    v = data[k] + v
                data[k] = v
            return normalize(Expr(self.op, data))
        elif self.op is Op.APPLY:
            obj = self.data[0]
            # The applied object may be a non-Expr (e.g. an ArithOp).
            func = (obj.traverse(visit, *args, **kwargs)
                    if isinstance(obj, Expr) else obj)
            operands = tuple(operand.traverse(visit, *args, **kwargs)
                             for operand in self.data[1])
            kwoperands = dict((k, v.traverse(visit, *args, **kwargs))
                              for k, v in self.data[2].items())
            return normalize(Expr(self.op, (func, operands, kwoperands)))
        elif self.op is Op.INDEXING:
            obj = self.data[0]
            obj = (obj.traverse(visit, *args, **kwargs)
                   if isinstance(obj, Expr) else obj)
            indices = tuple(index.traverse(visit, *args, **kwargs)
                            for index in self.data[1:])
            return normalize(Expr(self.op, (obj,) + indices))
        elif self.op in (Op.REF, Op.DEREF):
            return normalize(Expr(self.op,
                                  self.data.traverse(visit, *args, **kwargs)))
        elif self.op is Op.RELATIONAL:
            rop, left, right = self.data
            left = left.traverse(visit, *args, **kwargs)
            right = right.traverse(visit, *args, **kwargs)
            return normalize(Expr(self.op, (rop, left, right)))
        raise NotImplementedError(f'traverse method for {self.op}')
def contains(self, other):
"""Check if self contains other.
"""
found = []
def visit(expr, found=found):
if found:
return expr
elif expr == other:
found.append(1)
return expr
self.traverse(visit)
return len(found) != 0
def symbols(self):
"""Return a set of symbols contained in self.
"""
found = set()
def visit(expr, found=found):
if expr.op is Op.SYMBOL:
found.add(expr)
self.traverse(visit)
return found
    def polynomial_atoms(self):
        """Return a set of expressions used as atoms in polynomial self.
        """
        found = set()

        def visit(expr, found=found):
            if expr.op is Op.FACTORS:
                # Atoms of a product are the atoms of its bases.
                for b in expr.data:
                    b.traverse(visit)
                return expr
            if expr.op in (Op.TERMS, Op.COMPLEX):
                # Descend into sums/complex parts via traverse.
                return
            if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp):
                if expr.data[0] is ArithOp.POW:
                    # Only the base of a power contributes atoms.
                    expr.data[1][0].traverse(visit)
                    return expr
                return
            if expr.op in (Op.INTEGER, Op.REAL):
                # Numeric literals are coefficients, not atoms.
                return expr
            found.add(expr)
            if expr.op in (Op.INDEXING, Op.APPLY):
                # The call/indexing expression itself is the atom; do
                # not descend into its arguments.
                return expr

        self.traverse(visit)
        return found
    def linear_solve(self, symbol):
        """Return a, b such that a * symbol + b == self.

        If self is not linear with respect to symbol, raise RuntimeError.
        """
        # b is self evaluated at symbol=0; a follows from symbol=1.
        b = self.substitute({symbol: as_number(0)})
        ax = self - b
        a = ax.substitute({symbol: as_number(1)})
        # Linearity check: a*symbol must reproduce ax exactly.
        zero, _ = as_numer_denom(a * symbol - ax)
        if zero != as_number(0):
            raise RuntimeError(f'not a {symbol}-linear equation:'
                               f' {a} * {symbol} + {b} == {self}')
        return a, b
class _FromStringWorker:
    """Stateful helper implementing the lazy string -> Expr parser."""

    def __init__(self, language=Language.C):
        # Last input handed to parse(), kept for diagnostics.
        self.original = None
        # Maps @-names back to the quoted substrings they replaced.
        self.quotes_map = None
        self.language = language

    def finalize_string(self, s):
        # Put the original quoted substrings back in place.
        return insert_quotes(s, self.quotes_map)

    def parse(self, inp):
        self.original = inp
        # Mask string literals first so operator scanning below is not
        # confused by characters inside quotes.
        unquoted, self.quotes_map = eliminate_quotes(inp)
        return self.process(unquoted)

    def process(self, s, context='expr'):
        """Parse string within the given context.

        The context may define the result in case of ambiguous
        expressions. For instance, consider expressions `f(x, y)` and
        `(x, y) + (a, b)` where `f` is a function and pair `(x, y)`
        denotes complex number. Specifying context as "args" or
        "expr", the subexpression `(x, y)` will be parse to an
        argument list or to a complex number, respectively.
        """
        if isinstance(s, (list, tuple)):
            return type(s)(self.process(s_, context) for s_ in s)

        assert isinstance(s, str), (type(s), s)

        # replace subexpressions in parenthesis with f2py @-names
        r, raw_symbols_map = replace_parenthesis(s)
        r = r.strip()

        def restore(r):
            # restores subexpressions marked with f2py @-names
            if isinstance(r, (list, tuple)):
                return type(r)(map(restore, r))
            return unreplace_parenthesis(r, raw_symbols_map)

        # comma-separated tuple
        if ',' in r:
            operands = restore(r.split(','))
            if context == 'args':
                return tuple(self.process(operands))
            if context == 'expr':
                if len(operands) == 2:
                    # complex number literal
                    return as_complex(*self.process(operands))
            raise NotImplementedError(
                f'parsing comma-separated list (context={context}): {r}')

        # ternary operation
        m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
        if m:
            assert context == 'expr', context
            oper, expr1, expr2 = restore(m.groups())
            oper = self.process(oper)
            expr1 = self.process(expr1)
            expr2 = self.process(expr2)
            return as_ternary(oper, expr1, expr2)

        # relational expression
        if self.language is Language.Fortran:
            m = re.match(
                r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
        else:
            m = re.match(
                r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
        if m:
            left, rop, right = m.groups()
            if self.language is Language.Fortran:
                rop = '.' + rop + '.'
            left, right = self.process(restore((left, right)))
            rop = RelOp.fromstring(rop, language=self.language)
            return Expr(Op.RELATIONAL, (rop, left, right))

        # keyword argument
        m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
        if m:
            keyname, value = m.groups()
            value = restore(value)
            return _Pair(keyname, self.process(value))

        # addition/subtraction operations
        # (the lookbehind keeps exponent signs like `1e-3` intact)
        operands = re.split(r'((?<!\d[edED])[+-])', r)
        if len(operands) > 1:
            result = self.process(restore(operands[0] or '0'))
            for op, operand in zip(operands[1::2], operands[2::2]):
                operand = self.process(restore(operand))
                op = op.strip()
                if op == '+':
                    result += operand
                else:
                    assert op == '-'
                    result -= operand
            return result

        # string concatenate operation
        if self.language is Language.Fortran and '//' in r:
            operands = restore(r.split('//'))
            return Expr(Op.CONCAT,
                        tuple(self.process(operands)))

        # multiplication/division operations
        # (`**` is masked so it is not mistaken for two `*` operators)
        operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
                            (r if self.language is Language.C
                             else r.replace('**', '@__f2py_DOUBLE_STAR@')))
        if len(operands) > 1:
            operands = restore(operands)
            if self.language is not Language.C:
                operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
                            for operand in operands]
            # Expression is an arithmetic product
            result = self.process(operands[0])
            for op, operand in zip(operands[1::2], operands[2::2]):
                operand = self.process(operand)
                op = op.strip()
                if op == '*':
                    result *= operand
                else:
                    assert op == '/'
                    result /= operand
            return result

        # referencing/dereferencing
        if r.startswith('*') or r.startswith('&'):
            op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
            operand = self.process(restore(r[1:]))
            return Expr(op, operand)

        # exponentiation operations
        # (`**` is right-associative, hence the reversed fold)
        if self.language is not Language.C and '**' in r:
            operands = list(reversed(restore(r.split('**'))))
            result = self.process(operands[0])
            for operand in operands[1:]:
                operand = self.process(operand)
                result = operand ** result
            return result

        # int-literal-constant
        m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
            digit_string=r'\d+',
            kind=r'_(\d+|\w[\w\d_]*)'), r)
        if m:
            value, _, kind = m.groups()
            if kind and kind.isdigit():
                kind = int(kind)
            return as_integer(int(value), kind or 4)

        # real-literal-constant
        m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
                     .format(
                         significant=r'[.]\d+|\d+[.]\d*',
                         exponent=r'[edED][+-]?\d+',
                         kind=r'_(\d+|\w[\w\d_]*)'), r)
        if m:
            value, _, _, kind = m.groups()
            if kind and kind.isdigit():
                kind = int(kind)
            value = value.lower()
            if 'd' in value:
                # A `d` exponent implies double precision (kind 8).
                return as_real(float(value.replace('d', 'e')), kind or 8)
            return as_real(float(value), kind or 4)

        # string-literal-constant with kind parameter specification
        if r in self.quotes_map:
            kind = r[:r.find('@')]
            return as_string(self.quotes_map[r], kind or 1)

        # array constructor or literal complex constant or
        # parenthesized expression
        if r in raw_symbols_map:
            paren = _get_parenthesis_kind(r)
            items = self.process(restore(raw_symbols_map[r]),
                                 'expr' if paren == 'ROUND' else 'args')
            if paren == 'ROUND':
                if isinstance(items, Expr):
                    return items
            if paren in ['ROUNDDIV', 'SQUARE']:
                # Expression is a array constructor
                if isinstance(items, Expr):
                    items = (items,)
                return as_array(items)

        # function call/indexing
        m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
                     r)
        if m:
            target, args, paren = m.groups()
            target = self.process(restore(target))
            args = self.process(restore(args)[1:-1], 'args')
            if not isinstance(args, tuple):
                args = args,
            if paren == 'ROUND':
                kwargs = dict((a.left, a.right) for a in args
                              if isinstance(a, _Pair))
                args = tuple(a for a in args if not isinstance(a, _Pair))
                # Warning: this could also be Fortran indexing operation..
                return as_apply(target, *args, **kwargs)
            else:
                # Expression is a C/Python indexing operation
                # (e.g. used in .pyf files)
                assert paren == 'SQUARE'
                return target[args]

        # Fortran standard conforming identifier
        m = re.match(r'\A\w[\w\d_]*\Z', r)
        if m:
            return as_symbol(r)

        # fall-back to symbol
        r = self.finalize_string(restore(r))
        ewarn(
            f'fromstring: treating {r!r} as symbol (original={self.original})')
        return as_symbol(r)
The provided code snippet includes necessary dependencies for implementing the `fromstring` function. Write a Python function `def fromstring(s, language=Language.C)` to solve the following problem:
Create an expression from a string. This is a "lazy" parser: only arithmetic operations are resolved, while non-arithmetic operations are treated as opaque symbols.
Here is the function:
def fromstring(s, language=Language.C):
    """Create an expression from a string.

    This is a "lazy" parser, that is, only arithmetic operations are
    resolved, non-arithmetic operations are treated as symbols.
    """
    result = _FromStringWorker(language=language).parse(s)
    if not isinstance(result, Expr):
        raise ValueError(f'failed to parse `{s}` to Expr instance: got `{result}`')
    return result
168,578 | import sys
import string
import fileinput
import re
import os
import copy
import platform
import codecs
from . import __version__
from .auxfuncs import *
from . import symbolic
def getextension(name):
    """Return the extension of `name` (text after the last dot).

    Returns '' when there is no dot, or when a path separator occurs
    after the last dot (the dot then belongs to a directory component).
    """
    dot = name.rfind('.')
    if dot < 0:
        return ''
    tail = name[dot:]
    if '\\' in tail or '/' in tail:
        return ''
    return tail[1:]
168,579 | import sys
import string
import fileinput
import re
import os
import copy
import platform
import codecs
from . import __version__
from .auxfuncs import *
from . import symbolic
# Matches attribute specs like `intent(callback)` (case-insensitive).
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)


def _is_intent_callback(vdecl):
    """Return 1 when the variable declaration carries intent(callback)."""
    attrs = vdecl.get('attrspec', [])
    if any(_intentcallbackpattern.match(a) for a in attrs):
        return 1
    return 0
168,580 | import sys
import string
import fileinput
import re
import os
import copy
import platform
import codecs
from . import __version__
from .auxfuncs import *
from . import symbolic
def getblockname(block, unknown='unknown'):
    """Return the block's 'name' entry, or `unknown` when absent."""
    if 'name' not in block:
        return unknown
    return block['name']
168,581 | import sys
import string
import fileinput
import re
import os
import copy
import platform
import codecs
from . import __version__
from .auxfuncs import *
from . import symbolic
# Bound the compiled-regex cache; this module builds many patterns.
# NOTE(review): 50 appears to be an arbitrary limit — confirm.
re._MAXCACHE = 50
# Fortran implicit typing rules: names starting with i-n default to
# INTEGER, everything else to REAL.
for c in "abcdefghopqrstuvwxyz$_":
    defaultimplicitrules[c] = {'typespec': 'real'}
for c in "ijklmn":
    defaultimplicitrules[c] = {'typespec': 'integer'}
def myeval(e, g=None, l=None):
    """Like `eval` but returns only plain ints and floats.

    Any other result type (including bool) raises ValueError.
    """
    # NOTE(review): eval on arbitrary strings is unsafe; presumably `e`
    # comes from trusted Fortran sources — confirm at call sites.
    result = eval(e, g, l)
    if type(result) not in (int, float):
        raise ValueError('r=%r' % (result))
    return result
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
The provided code snippet includes necessary dependencies for implementing the `getlincoef` function. Write a Python function `def getlincoef(e, xset)` to solve the following problem:
Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in xset. >>> getlincoef('2*x + 1', {'x'}) (2, 1, 'x') >>> getlincoef('3*x + x*2 + 2 + 1', {'x'}) (5, 3, 'x') >>> getlincoef('0', {'x'}) (0, 0, None) >>> getlincoef('0*x', {'x'}) (0, 0, 'x') >>> getlincoef('x*x', {'x'}) (None, None, None) This can be tricked by sufficiently complex expressions >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'}) (2.0, 3.0, 'x')
Here is the function:
def getlincoef(e, xset):  # e = a*x+b ; x in xset
    """
    Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in
    xset.

    >>> getlincoef('2*x + 1', {'x'})
    (2, 1, 'x')
    >>> getlincoef('3*x + x*2 + 2 + 1', {'x'})
    (5, 3, 'x')
    >>> getlincoef('0', {'x'})
    (0, 0, None)
    >>> getlincoef('0*x', {'x'})
    (0, 0, 'x')
    >>> getlincoef('x*x', {'x'})
    (None, None, None)

    This can be tricked by sufficiently complex expressions

    >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'})
    (2.0, 3.0, 'x')
    """
    # Constant expression: no symbol involved.
    try:
        c = int(myeval(e, {}, {}))
        return 0, c, None
    except Exception:
        pass
    # Expression is a single bare symbol: a=1, b=0.
    if getlincoef_re_1.match(e):
        return 1, 0, e
    len_e = len(e)
    for x in xset:
        if len(x) > len_e:
            continue
        if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
            # skip function calls having x as an argument, e.g max(1, x)
            continue
        re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
        m = re_1.match(e)
        if m:
            try:
                # Sample the expression with x replaced by 0, 1, 0.5 and
                # 1.5; the repeated while-loops substitute *every*
                # occurrence of x, one match at a time.
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 0, m1.group('after'))
                    m1 = re_1.match(ee)
                b = myeval(ee, {}, {})
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 1, m1.group('after'))
                    m1 = re_1.match(ee)
                a = myeval(ee, {}, {}) - b
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 0.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c = myeval(ee, {}, {})
                # computing another point to be sure that expression is linear
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (
                        m1.group('before'), 1.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c2 = myeval(ee, {}, {})
                if (a * 0.5 + b == c and a * 1.5 + b == c2):
                    return a, b, x
            except Exception:
                pass
            break
    return None, None, None
168,582 | import sys
import string
import fileinput
import re
import os
import copy
import platform
import codecs
from . import __version__
from .auxfuncs import *
from . import symbolic
def outmess(line, flag=1):
    """Write `line` to stdout unless quiet; prefix the current file
    position text when `flag` is truthy."""
    global filepositiontext
    if not verbose:
        return
    if quiet:
        return
    if flag:
        sys.stdout.write(filepositiontext)
    sys.stdout.write(line)
re._MAXCACHE = 50
def outmess(t):
    """Write `t` to stdout unless options disable verbosity."""
    if not options.get('verbose', 1):
        return
    sys.stdout.write(t)
def ischaracter(var):
    # Scalar Fortran character, i.e. character-typed but not an array.
    return ischaracter_or_characterarray(var) and not isarray(var)
The provided code snippet includes necessary dependencies for implementing the `character_backward_compatibility_hook` function. Write a Python function `def character_backward_compatibility_hook(item, parents, result, *args, **kwargs)` to solve the following problem:
Previously, Fortran character was incorrectly treated as character*1. This hook fixes the usage of the corresponding variables in `check`, `dimension`, `=`, and `callstatement` expressions. The usage of `char*` in `callprotoargument` expression can be left unchanged because C `character` is C typedef of `char`, although, new implementations should use `character*` in the corresponding expressions. See https://github.com/numpy/numpy/pull/19388 for more information.
Here is the function:
def character_backward_compatibility_hook(item, parents, result,
                                          *args, **kwargs):
    """Previously, Fortran character was incorrectly treated as
    character*1. This hook fixes the usage of the corresponding
    variables in `check`, `dimension`, `=`, and `callstatement`
    expressions.

    The usage of `char*` in `callprotoargument` expression can be left
    unchanged because C `character` is C typedef of `char`, although,
    new implementations should use `character*` in the corresponding
    expressions.

    See https://github.com/numpy/numpy/pull/19388 for more information.
    """
    parent_key, parent_value = parents[-1]
    key, value = item

    def fix_usage(varname, value):
        # Drop `*varname` dereferences and `varname[0]` indexing that
        # assumed the old character*1 representation.
        value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value)
        value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]',
                       varname, value)
        return value

    # Locate the variables dictionary relative to the visited item.
    if parent_key in ['dimension', 'check']:
        assert parents[-3][0] == 'vars'
        vars_dict = parents[-3][1]
    elif key == '=':
        assert parents[-2][0] == 'vars'
        vars_dict = parents[-2][1]
    else:
        vars_dict = None

    new_value = None
    if vars_dict is not None:
        new_value = value
        for varname, vd in vars_dict.items():
            if ischaracter(vd):
                new_value = fix_usage(varname, new_value)
    elif key == 'callstatement':
        vars_dict = parents[-2][1]['vars']
        new_value = value
        for varname, vd in vars_dict.items():
            if ischaracter(vd):
                # replace all occurrences of `<varname>` with
                # `&<varname>` in argument passing
                new_value = re.sub(
                    r'(?<![&])\b' + varname + r'\b', '&' + varname, new_value)

    if new_value is not None:
        if new_value != value:
            # We report the replacements here so that downstream
            # software could update their source codes
            # accordingly. However, such updates are recommended only
            # when BC with numpy 1.21 or older is not required.
            outmess(f'character_bc_hook[{parent_key}.{key}]:'
                    f' replaced `{value}` -> `{new_value}`\n', 1)
        return (key, new_value)
168,583 | import os
import sys
import tempfile
def run_command(cmd):
    """Echo `cmd`, execute it via the shell, and print a separator."""
    # NOTE(review): os.system runs through the shell — cmd must be trusted.
    banner = 'Running %r:' % (cmd)
    print(banner)
    os.system(cmd)
    print('------')
168,584 | import os
import sys
import tempfile
def run():
    """Print a diagnostic report about the Python / numpy / f2py setup.

    All output goes to stdout; each probe catches its own failures so
    later checks still run. Works from a temporary directory and
    restores the original working directory at the end.
    """
    _path = os.getcwd()
    os.chdir(tempfile.gettempdir())
    # Basic interpreter / platform information.
    print('------')
    print('os.name=%r' % (os.name))
    print('------')
    print('sys.platform=%r' % (sys.platform))
    print('------')
    print('sys.version:')
    print(sys.version)
    print('------')
    print('sys.prefix:')
    print(sys.prefix)
    print('------')
    print('sys.path=%r' % (':'.join(sys.path)))
    print('------')
    # Probe which numpy / f2py / distutils flavours are importable.
    try:
        import numpy
        has_newnumpy = 1
    except ImportError:
        print('Failed to import new numpy:', sys.exc_info()[1])
        has_newnumpy = 0
    try:
        from numpy.f2py import f2py2e
        has_f2py2e = 1
    except ImportError:
        print('Failed to import f2py2e:', sys.exc_info()[1])
        has_f2py2e = 0
    try:
        import numpy.distutils
        has_numpy_distutils = 2
    except ImportError:
        try:
            import numpy_distutils
            has_numpy_distutils = 1
        except ImportError:
            print('Failed to import numpy_distutils:', sys.exc_info()[1])
            has_numpy_distutils = 0
    # Report versions and file locations of whatever was found.
    if has_newnumpy:
        try:
            print('Found new numpy version %r in %s' %
                  (numpy.__version__, numpy.__file__))
        except Exception as msg:
            print('error:', msg)
            print('------')
    if has_f2py2e:
        try:
            print('Found f2py2e version %r in %s' %
                  (f2py2e.__version__.version, f2py2e.__file__))
        except Exception as msg:
            print('error:', msg)
            print('------')
    if has_numpy_distutils:
        try:
            if has_numpy_distutils == 2:
                print('Found numpy.distutils version %r in %r' % (
                    numpy.distutils.__version__,
                    numpy.distutils.__file__))
            else:
                print('Found numpy_distutils version %r in %r' % (
                    numpy_distutils.numpy_distutils_version.numpy_distutils_version,
                    numpy_distutils.__file__))
            print('------')
        except Exception as msg:
            print('error:', msg)
            print('------')
        # Legacy numpy_distutils build_flib compiler check.
        try:
            if has_numpy_distutils == 1:
                print(
                    'Importing numpy_distutils.command.build_flib ...', end=' ')
                import numpy_distutils.command.build_flib as build_flib
                print('ok')
                print('------')
                try:
                    print(
                        'Checking availability of supported Fortran compilers:')
                    for compiler_class in build_flib.all_compilers:
                        compiler_class(verbose=1).is_available()
                    print('------')
                except Exception as msg:
                    print('error:', msg)
                    print('------')
        except Exception as msg:
            print(
                'error:', msg, '(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)')
            print('------')
        # Fortran compiler discovery via fcompiler.
        try:
            if has_numpy_distutils == 2:
                print('Importing numpy.distutils.fcompiler ...', end=' ')
                import numpy.distutils.fcompiler as fcompiler
            else:
                print('Importing numpy_distutils.fcompiler ...', end=' ')
                import numpy_distutils.fcompiler as fcompiler
            print('ok')
            print('------')
            try:
                print('Checking availability of supported Fortran compilers:')
                fcompiler.show_fcompilers()
                print('------')
            except Exception as msg:
                print('error:', msg)
                print('------')
        except Exception as msg:
            print('error:', msg)
            print('------')
        # CPU feature report via cpuinfo.
        try:
            if has_numpy_distutils == 2:
                print('Importing numpy.distutils.cpuinfo ...', end=' ')
                from numpy.distutils.cpuinfo import cpuinfo
                print('ok')
                print('------')
            else:
                try:
                    print(
                        'Importing numpy_distutils.command.cpuinfo ...', end=' ')
                    from numpy_distutils.command.cpuinfo import cpuinfo
                    print('ok')
                    print('------')
                except Exception as msg:
                    print('error:', msg, '(ignore it)')
                    print('Importing numpy_distutils.cpuinfo ...', end=' ')
                    from numpy_distutils.cpuinfo import cpuinfo
                    print('ok')
                    print('------')
            cpu = cpuinfo()
            print('CPU information:', end=' ')
            # Feature predicates are exposed as `_name` methods on cpuinfo.
            for name in dir(cpuinfo):
                if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
                    print(name[1:], end=' ')
            print('------')
        except Exception as msg:
            print('error:', msg)
            print('------')
    os.chdir(_path)
import os
import sys
import sysconfig
class Configuration:
    """Container describing how to build and install one (sub)package.

    Accumulates packages, extension modules, libraries, headers, scripts
    and data files, and can be converted into ``distutils.setup`` keyword
    arguments via ``todict()``.
    """
    # Attribute names whose values are merged by list extension.
    _list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
                  'libraries', 'headers', 'scripts', 'py_modules',
                  'installed_libraries', 'define_macros']
    # Attribute names whose values are merged by dict update.
    _dict_keys = ['package_dir', 'installed_pkg_config']
    # Scalar attributes copied verbatim into the setup() keywords.
    _extra_keys = ['name', 'version']
    # Class-level (shared) list of NumPy include directories; presumably
    # populated elsewhere in this module — TODO confirm.
    numpy_include_dirs = []
    def __init__(self,
                 package_name=None,
                 parent_name=None,
                 top_path=None,
                 package_path=None,
                 caller_level=1,
                 setup_name='setup.py',
                 **attrs):
        """Construct configuration instance of a package.

        package_name -- name of the package
                        Ex.: 'distutils'
        parent_name -- name of the parent package
                       Ex.: 'numpy'
        top_path -- directory of the toplevel package
                    Ex.: the directory where the numpy package source sits
        package_path -- directory of package. Will be computed by magic from the
                        directory of the caller module if not specified
                        Ex.: the directory where numpy.distutils is
        caller_level -- frame level to caller namespace, internal parameter.
        setup_name -- file name of a subpackage's setup script
                      (default 'setup.py').
        attrs -- extra setup() keyword arguments; list values extend the
                 list keys, dict values update the dict keys, anything else
                 becomes an extra attribute.
        """
        self.name = dot_join(parent_name, package_name)
        self.version = None
        # Infer the caller's directory by inspecting the interpreter stack;
        # this is what lets setup.py scripts omit their own location.
        caller_frame = get_frame(caller_level)
        self.local_path = get_path_from_frame(caller_frame, top_path)
        # local_path -- directory of a file (usually setup.py) that
        # defines a configuration() function.
        if top_path is None:
            top_path = self.local_path
            self.local_path = ''
        if package_path is None:
            package_path = self.local_path
        elif os.path.isdir(njoin(self.local_path, package_path)):
            package_path = njoin(self.local_path, package_path)
        if not os.path.isdir(package_path or '.'):
            raise ValueError("%r is not a directory" % (package_path,))
        self.top_path = top_path
        self.package_path = package_path
        # this is the relative path in the installed package
        self.path_in_package = os.path.join(*self.name.split('.'))
        # Per-instance copies so that merging never mutates the class-level
        # defaults.
        self.list_keys = self._list_keys[:]
        self.dict_keys = self._dict_keys[:]
        for n in self.list_keys:
            v = copy.copy(attrs.get(n, []))
            setattr(self, n, as_list(v))
        for n in self.dict_keys:
            v = copy.copy(attrs.get(n, {}))
            setattr(self, n, v)
        known_keys = self.list_keys + self.dict_keys
        self.extra_keys = self._extra_keys[:]
        # Unknown keyword arguments become new attributes, classified by
        # type so that later dict_append() calls know how to merge them.
        for n in attrs.keys():
            if n in known_keys:
                continue
            a = attrs[n]
            setattr(self, n, a)
            if isinstance(a, list):
                self.list_keys.append(n)
            elif isinstance(a, dict):
                self.dict_keys.append(n)
            else:
                self.extra_keys.append(n)
        # A directory with __init__.py is itself an importable package.
        if os.path.exists(njoin(package_path, '__init__.py')):
            self.packages.append(self.name)
            self.package_dir[self.name] = package_path
        self.options = dict(
            ignore_setup_xxx_py = False,
            assume_default_configuration = False,
            delegate_options_to_subpackages = False,
            quiet = False,
            )
        # If a parent Configuration is constructing us (detected by probing
        # the caller frames for a bound 'self'), optionally inherit its
        # options.
        caller_instance = None
        for i in range(1, 3):
            try:
                f = get_frame(i)
            except ValueError:
                break
            try:
                caller_instance = eval('self', f.f_globals, f.f_locals)
                break
            except NameError:
                pass
        if isinstance(caller_instance, self.__class__):
            if caller_instance.options['delegate_options_to_subpackages']:
                self.set_options(**caller_instance.options)
        self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self, n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = [_m for _m in sorted_glob(subpackage_path) if os.path.isdir(_m)]
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d, '__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
    def _get_configuration_from_setup_py(self, setup_py,
                                         subpackage_name,
                                         subpackage_path,
                                         parent_name,
                                         caller_level = 1):
        """Execute a subpackage's setup script and return the Configuration
        produced by its ``configuration()`` function (or a default one if
        the script defines none)."""
        # In case setup_py imports local modules:
        sys.path.insert(0, os.path.dirname(setup_py))
        try:
            setup_name = os.path.splitext(os.path.basename(setup_py))[0]
            n = dot_join(self.name, subpackage_name, setup_name)
            setup_module = exec_mod_from_location(
                                '_'.join(n.split('.')), setup_py)
            if not hasattr(setup_module, 'configuration'):
                # Script has no configuration(): synthesize a default one.
                if not self.options['assume_default_configuration']:
                    self.warn('Assuming default configuration '\
                              '(%s does not define configuration())'\
                              % (setup_module))
                config = Configuration(subpackage_name, parent_name,
                                       self.top_path, subpackage_path,
                                       caller_level = caller_level + 1)
            else:
                # configuration() takes the parent package name and,
                # optionally (if it accepts a second argument), top_path.
                pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
                args = (pn,)
                if setup_module.configuration.__code__.co_argcount > 1:
                    args = args + (self.top_path,)
                config = setup_module.configuration(*args)
            if config.name!=dot_join(parent_name, subpackage_name):
                self.warn('Subpackage %r configuration returned as %r' % \
                          (dot_join(parent_name, subpackage_name), config.name))
        finally:
            # Always undo the sys.path manipulation above.
            del sys.path[0]
        return config
    def get_subpackage(self,subpackage_name,
                       subpackage_path=None,
                       parent_name=None,
                       caller_level = 1):
        """Return list of subpackage configurations.

        Parameters
        ----------
        subpackage_name : str or None
            Name of the subpackage to get the configuration. '*' in
            subpackage_name is handled as a wildcard.
        subpackage_path : str
            If None, then the path is assumed to be the local path plus the
            subpackage_name. If a setup.py file is not found in the
            subpackage_path, then a default configuration is used.
        parent_name : str
            Parent name.
        caller_level : int
            Frame level to caller namespace, internal parameter.
        """
        if subpackage_name is None:
            if subpackage_path is None:
                raise ValueError(
                    "either subpackage_name or subpackage_path must be specified")
            subpackage_name = os.path.basename(subpackage_path)
        # handle wildcards
        l = subpackage_name.split('.')
        if subpackage_path is None and '*' in subpackage_name:
            return self._wildcard_get_subpackage(subpackage_name,
                                                 parent_name,
                                                 caller_level = caller_level+1)
        assert '*' not in subpackage_name, repr((subpackage_name, subpackage_path, parent_name))
        if subpackage_path is None:
            subpackage_path = njoin([self.local_path] + l)
        else:
            subpackage_path = njoin([subpackage_path] + l[:-1])
            subpackage_path = self.paths([subpackage_path])[0]
        # Look for setup.py, then for setup_<name>.py (unless disabled).
        setup_py = njoin(subpackage_path, self.setup_name)
        if not self.options['ignore_setup_xxx_py']:
            if not os.path.isfile(setup_py):
                setup_py = njoin(subpackage_path,
                                 'setup_%s.py' % (subpackage_name))
        if not os.path.isfile(setup_py):
            # No setup script at all: fall back to a default Configuration
            # built straight from the directory contents.
            if not self.options['assume_default_configuration']:
                self.warn('Assuming default configuration '\
                          '(%s/{setup_%s,setup}.py was not found)' \
                          % (os.path.dirname(setup_py), subpackage_name))
            config = Configuration(subpackage_name, parent_name,
                                   self.top_path, subpackage_path,
                                   caller_level = caller_level+1)
        else:
            config = self._get_configuration_from_setup_py(
                setup_py,
                subpackage_name,
                subpackage_path,
                parent_name,
                caller_level = caller_level + 1)
        if config:
            return [config]
        else:
            return []
    def add_subpackage(self,subpackage_name,
                       subpackage_path=None,
                       standalone = False):
        """Add a sub-package to the current Configuration instance.

        This is useful in a setup.py script for adding sub-packages to a
        package.

        Parameters
        ----------
        subpackage_name : str
            name of the subpackage
        subpackage_path : str
            if given, the subpackage path such as the subpackage is in
            subpackage_path / subpackage_name. If None,the subpackage is
            assumed to be located in the local path / subpackage_name.
        standalone : bool
            if True the subpackage is configured without a parent name
            (parent_name=None).
        """
        if standalone:
            parent_name = None
        else:
            parent_name = self.name
        config_list = self.get_subpackage(subpackage_name, subpackage_path,
                                          parent_name = parent_name,
                                          caller_level = 2)
        if not config_list:
            self.warn('No configuration returned, assuming unavailable.')
        # Merge every returned configuration into this one.
        for config in config_list:
            d = config
            if isinstance(config, Configuration):
                d = config.todict()
            assert isinstance(d, dict), repr(type(d))
            self.info('Appending %s configuration to %s' \
                      % (d.get('name'), self.name))
            self.dict_append(**d)
        dist = self.get_distribution()
        if dist is not None:
            self.warn('distutils distribution has been initialized,'\
                      ' it may be too late to add a subpackage '+ subpackage_name)
    def add_data_dir(self, data_path):
        """Recursively add files under data_path to data_files list.

        Recursively add files under data_path to the list of data_files to be
        installed (and distributed). The data_path can be either a relative
        path-name, or an absolute path-name, or a 2-tuple where the first
        argument shows where in the install directory the data directory
        should be installed to.

        Parameters
        ----------
        data_path : seq or str
            Argument can be either

                * 2-sequence (<datadir suffix>, <path to data directory>)
                * path to data directory where python datadir suffix defaults
                  to package dir.

        Notes
        -----
        Rules for installation paths::

            foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
            (gun, foo/bar) -> parent/gun
            foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
            (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
            (gun/*, foo/*) -> parent/gun/a, parent/gun/b
            /foo/bar -> (bar, /foo/bar) -> parent/bar
            (gun, /foo/bar) -> parent/gun
            (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar

        Examples
        --------
        For example suppose the source directory contains fun/foo.dat and
        fun/bar/car.dat:

        >>> self.add_data_dir('fun')                       #doctest: +SKIP
        >>> self.add_data_dir(('sun', 'fun'))              #doctest: +SKIP
        >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP

        Will install data-files to the locations::

            <package install directory>/
              fun/
                foo.dat
                bar/
                  car.dat
              sun/
                foo.dat
                bar/
                  car.dat
              gun/
                foo.dat
                car.dat
        """
        # Normalize a 2-sequence into (install suffix d, source data_path).
        if is_sequence(data_path):
            d, data_path = data_path
        else:
            d = None
        if is_sequence(data_path):
            [self.add_data_dir((d, p)) for p in data_path]
            return
        if not is_string(data_path):
            raise TypeError("not a string: %r" % (data_path,))
        if d is None:
            # No explicit target: absolute sources install under their
            # basename, relative sources under their own relative path.
            if os.path.isabs(data_path):
                return self.add_data_dir((os.path.basename(data_path), data_path))
            return self.add_data_dir((data_path, data_path))
        paths = self.paths(data_path, include_non_existing=False)
        if is_glob_pattern(data_path):
            if is_glob_pattern(d):
                # Both target and source contain wildcards: fill the
                # wildcard components of the target from the matched
                # source path, component by component (from the right).
                pattern_list = allpath(d).split(os.sep)
                pattern_list.reverse()
                # /a/*//b/ -> /a/*/b
                rl = list(range(len(pattern_list)-1)); rl.reverse()
                for i in rl:
                    if not pattern_list[i]:
                        del pattern_list[i]
                #
                for path in paths:
                    if not os.path.isdir(path):
                        print('Not a directory, skipping', path)
                        continue
                    rpath = rel_path(path, self.local_path)
                    path_list = rpath.split(os.sep)
                    path_list.reverse()
                    target_list = []
                    i = 0
                    for s in pattern_list:
                        if is_glob_pattern(s):
                            if i>=len(path_list):
                                raise ValueError('cannot fill pattern %r with %r' \
                                      % (d, path))
                            target_list.append(path_list[i])
                        else:
                            assert s==path_list[i], repr((s, path_list[i], data_path, d, path, rpath))
                            target_list.append(s)
                        i += 1
                    if path_list[i:]:
                        self.warn('mismatch of pattern_list=%s and path_list=%s'\
                                  % (pattern_list, path_list))
                    target_list.reverse()
                    self.add_data_dir((os.sep.join(target_list), path))
            else:
                for path in paths:
                    self.add_data_dir((d, path))
            return
        assert not is_glob_pattern(d), repr(d)
        # Prefer the live distribution's data_files once setup() has begun.
        dist = self.get_distribution()
        if dist is not None and dist.data_files is not None:
            data_files = dist.data_files
        else:
            data_files = self.data_files
        for path in paths:
            for d1, f in list(general_source_directories_files(path)):
                target_path = os.path.join(self.path_in_package, d, d1)
                data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p, files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p, list(files)) for p, files in data_dict.items()]
    def add_data_files(self,*files):
        """Add data files to configuration data_files.

        Parameters
        ----------
        files : sequence
            Argument(s) can be either

                * 2-sequence (<datadir prefix>,<path to data file(s)>)
                * paths to data files where python datadir prefix defaults
                  to package dir.

        Notes
        -----
        The form of each element of the files sequence is very flexible
        allowing many combinations of where to get the files from the package
        and where they should ultimately be installed on the system. The most
        basic usage is for an element of the files argument sequence to be a
        simple filename. This will cause that file from the local path to be
        installed to the installation path of the self.name package (package
        path). The file argument can also be a relative path in which case the
        entire relative path will be installed into the package directory.
        Finally, the file can be an absolute path name in which case the file
        will be found at the absolute path name but installed to the package
        path.

        This basic behavior can be augmented by passing a 2-tuple in as the
        file argument. The first element of the tuple should specify the
        relative path (under the package install directory) where the
        remaining sequence of files should be installed to (it has nothing to
        do with the file-names in the source distribution). The second element
        of the tuple is the sequence of files that should be installed. The
        files in this sequence can be filenames, relative paths, or absolute
        paths. For absolute paths the file will be installed in the top-level
        package installation directory (regardless of the first argument).
        Filenames and relative path names will be installed in the package
        install directory under the path name given as the first element of
        the tuple.

        Rules for installation paths:

          #. file.txt -> (., file.txt)-> parent/file.txt
          #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
          #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
          #. ``*``.txt -> parent/a.txt, parent/b.txt
          #. foo/``*``.txt`` -> parent/foo/a.txt, parent/foo/b.txt
          #. ``*/*.txt`` -> (``*``, ``*``/``*``.txt) -> parent/c/a.txt, parent/d/b.txt
          #. (sun, file.txt) -> parent/sun/file.txt
          #. (sun, bar/file.txt) -> parent/sun/file.txt
          #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
          #. (sun, ``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
          #. (sun, bar/``*``.txt) -> parent/sun/a.txt, parent/sun/b.txt
          #. (sun/``*``, ``*``/``*``.txt) -> parent/sun/c/a.txt, parent/d/b.txt

        An additional feature is that the path to a data-file can actually be
        a function that takes no arguments and returns the actual path(s) to
        the data-files. This is useful when the data files are generated while
        building the package.

        Examples
        --------
        Add files to the list of data_files to be included with the package.

        >>> self.add_data_files('foo.dat',
        ...     ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
        ...     'bar/cat.dat',
        ...     '/full/path/to/can.dat') #doctest: +SKIP

        will install these data files to::

            <package install directory>/
             foo.dat
             fun/
              gun.dat
              nun/
               pun.dat
             sun.dat
             bar/
              car.dat
             can.dat

        where <package install directory> is the package (or sub-package)
        directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
        \\Python2.4 \\Lib \\site-packages \\mypackage') or
        '/usr/lib/python2.4/site- packages/mypackage/mysubpackage' ('C:
        \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
        """
        # Multiple arguments: recurse on each one individually.
        if len(files)>1:
            for f in files:
                self.add_data_files(f)
            return
        assert len(files)==1
        # Normalize a 2-sequence into (install prefix d, file spec).
        if is_sequence(files[0]):
            d, files = files[0]
        else:
            d = None
        if is_string(files):
            filepat = files
        elif is_sequence(files):
            if len(files)==1:
                filepat = files[0]
            else:
                for f in files:
                    self.add_data_files((d, f))
                return
        else:
            raise TypeError(repr(type(files)))
        if d is None:
            # Derive the install prefix: callables and absolute paths go to
            # the package root, relative paths keep their directory part.
            if hasattr(filepat, '__call__'):
                d = ''
            elif os.path.isabs(filepat):
                d = ''
            else:
                d = os.path.dirname(filepat)
            self.add_data_files((d, files))
            return
        paths = self.paths(filepat, include_non_existing=False)
        if is_glob_pattern(filepat):
            if is_glob_pattern(d):
                # Fill the wildcard components of the target prefix from
                # the matched source path, component by component.
                pattern_list = d.split(os.sep)
                pattern_list.reverse()
                for path in paths:
                    path_list = path.split(os.sep)
                    path_list.reverse()
                    path_list.pop() # filename
                    target_list = []
                    i = 0
                    for s in pattern_list:
                        if is_glob_pattern(s):
                            target_list.append(path_list[i])
                            i += 1
                        else:
                            target_list.append(s)
                    target_list.reverse()
                    self.add_data_files((os.sep.join(target_list), path))
            else:
                self.add_data_files((d, paths))
            return
        assert not is_glob_pattern(d), repr((d, filepat))
        # Prefer the live distribution's data_files once setup() has begun.
        dist = self.get_distribution()
        if dist is not None and dist.data_files is not None:
            data_files = dist.data_files
        else:
            data_files = self.data_files
        data_files.append((os.path.join(self.path_in_package, d), paths))
### XXX Implement add_py_modules
def add_define_macros(self, macros):
"""Add define macros to configuration
Add the given sequence of macro name and value duples to the beginning
of the define_macros list This list will be visible to all extension
modules of the current package.
"""
dist = self.get_distribution()
if dist is not None:
if not hasattr(dist, 'define_macros'):
dist.define_macros = []
dist.define_macros.extend(macros)
else:
self.define_macros.extend(macros)
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under <python-
include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files : str or seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name, p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0], p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
pre-pends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules and libraries and scripts and allows
path-names be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing', True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self, kw):
for k in kw.keys():
v = kw[k]
if k in ['sources', 'depends', 'include_dirs', 'library_dirs',
'module_dirs', 'extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
    def add_extension(self,name,sources,**kw):
        """Add extension to configuration.

        Create and add an Extension instance to the ext_modules list. This
        method also takes the following optional keyword arguments that are
        passed on to the Extension constructor.

        Parameters
        ----------
        name : str
            name of the extension
        sources : seq
            list of the sources. The list of sources may contain functions
            (called source generators) which must take an extension instance
            and a build directory as inputs and return a source file or list of
            source files or None. If None is returned then no sources are
            generated. If the Extension instance has no sources after
            processing all source generators, then no extension module is
            built.
        include_dirs :
        define_macros :
        undef_macros :
        library_dirs :
        libraries :
        runtime_library_dirs :
        extra_objects :
        extra_compile_args :
        extra_link_args :
        extra_f77_compile_args :
        extra_f90_compile_args :
        export_symbols :
        swig_opts :
        depends :
            The depends list contains paths to files or directories that the
            sources of the extension module depend on. If any path in the
            depends list is newer than the extension module, then the module
            will be rebuilt.
        language :
        f2py_options :
        module_dirs :
        extra_info : dict or list
            dict or list of dict of keywords to be appended to keywords.

        Returns
        -------
        Extension
            The newly created (and already registered) Extension instance.

        Notes
        -----
        The self.paths(...) method is applied to all lists that may contain
        paths.
        """
        ext_args = copy.copy(kw)
        ext_args['name'] = dot_join(self.name, name)
        ext_args['sources'] = sources
        # Fold any 'extra_info' dict(s) into the regular keyword arguments.
        if 'extra_info' in ext_args:
            extra_info = ext_args['extra_info']
            del ext_args['extra_info']
            if isinstance(extra_info, dict):
                extra_info = [extra_info]
            for info in extra_info:
                assert isinstance(info, dict), repr(info)
                dict_append(ext_args,**info)
        self._fix_paths_dict(ext_args)
        # Resolve out-of-tree dependencies
        libraries = ext_args.get('libraries', [])
        libnames = []
        ext_args['libraries'] = []
        for libname in libraries:
            if isinstance(libname, tuple):
                self._fix_paths_dict(libname[1])
            # Handle library names of the form libname@relative/path/to/library
            if '@' in libname:
                lname, lpath = libname.split('@', 1)
                lpath = os.path.abspath(njoin(self.local_path, lpath))
                if os.path.isdir(lpath):
                    c = self.get_subpackage(None, lpath,
                                            caller_level = 2)
                    if isinstance(c, Configuration):
                        c = c.todict()
                    # Merge in the subpackage that actually provides lname.
                    for l in [l[0] for l in c.get('libraries', [])]:
                        llname = l.split('__OF__', 1)[0]
                        if llname == lname:
                            c.pop('name', None)
                            dict_append(ext_args,**c)
                            break
                    continue
            libnames.append(libname)
        ext_args['libraries'] = libnames + ext_args['libraries']
        # Package-wide macros come before any extension-specific ones.
        ext_args['define_macros'] = \
            self.define_macros + ext_args.get('define_macros', [])
        from numpy.distutils.core import Extension
        ext = Extension(**ext_args)
        self.ext_modules.append(ext)
        dist = self.get_distribution()
        if dist is not None:
            self.warn('distutils distribution has been initialized,'\
                      ' it may be too late to add an extension '+name)
        return ext
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* extra_f77_compile_args
* extra_f90_compile_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if not 'depends' in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
    def add_installed_library(self, name, sources, install_dir, build_info=None):
        """
        Similar to add_library, but the specified library is installed.

        Most C libraries used with `distutils` are only used to build python
        extensions, but libraries built through this method will be installed
        so that they can be reused by third-party packages.

        Parameters
        ----------
        name : str
            Name of the installed library.
        sources : sequence
            List of the library's source files. See `add_library` for details.
        install_dir : str
            Path to install the library, relative to the current sub-package.
        build_info : dict, optional
            The following keys are allowed:

                * depends
                * macros
                * include_dirs
                * extra_compiler_args
                * extra_f77_compile_args
                * extra_f90_compile_args
                * f2py_options
                * language

        Returns
        -------
        None

        See Also
        --------
        add_library, add_npy_pkg_config, get_info

        Notes
        -----
        The best way to encode the options required to link against the specified
        C libraries is to use a "libname.ini" file, and use `get_info` to
        retrieve the required options (see `add_npy_pkg_config` for more
        information).
        """
        if not build_info:
            build_info = {}
        # Install location is resolved relative to this sub-package.
        install_dir = os.path.join(self.package_path, install_dir)
        self._add_library(name, sources, install_dir, build_info)
        self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relatively to the current package path.
install_dir : str
Where to install the npy-pkg config file, relatively to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refer to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
When cross-compiling with numpy distutils, it might be necessary to
use modified npy-pkg-config files. Using the default/generated files
will link with the host libraries (i.e. libnpymath.a). For
cross-compilation you of-course need to link with target libraries,
while using the host Python installation.
You can copy out the numpy/core/lib/npy-pkg-config directory, add a
pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
variable to point to the directory with the modified npy-pkg-config
files.
Example npymath.ini modified for cross-compilation::
[meta]
Name=npymath
Description=Portable, core math library implementing C99 standard
Version=0.1
[variables]
pkgname=numpy.core
pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
prefix=${pkgdir}
libdir=${prefix}/lib
includedir=${prefix}/include
[default]
Libs=-L${libdir} -lnpymath
Cflags=-I${includedir}
Requires=mlib
[msvc]
Libs=/LIBPATH:${libdir} npymath.lib
Cflags=/INCLUDE:${includedir}
Requires=mlib
"""
if subst_dict is None:
subst_dict = {}
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self, key)
a.extend(dict.get(key, []))
for key in self.dict_keys:
a = getattr(self, key)
a.update(dict.get(key, {}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key, dict[key], dict.get('name', '?')))
setattr(self, key, dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self, key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self, k, None)
if a:
s += '%s = %s\n' % (k, pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.', old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 77 compiler is available (because a simple Fortran 77
code was able to be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
Notes
-----
True if a Fortran 90 compiler is available (because a simple Fortran
90 code was able to be compiled successfully)
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine, lang='f90')
return flag
def append_to(self, extlib):
    """Append libraries, include_dirs to extension or library item.
    """
    if is_sequence(extlib):
        # (lib_name, build_info) tuple form used for add_library items.
        lib_name, build_info = extlib
        dict_append(build_info,
                    libraries=self.libraries,
                    include_dirs=self.include_dirs)
    else:
        # Otherwise it must be an Extension instance; extend it in place.
        from numpy.distutils.core import Extension
        assert isinstance(extlib, Extension), repr(extlib)
        extlib.libraries.extend(self.libraries)
        extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self, path):
    """Return path's SVN revision number.
    """
    # Preferred route: ask the svnversion tool directly.
    try:
        output = subprocess.check_output(['svnversion'], cwd=path)
    except (subprocess.CalledProcessError, OSError):
        pass
    else:
        m = re.match(rb'(?P<revision>\d+)', output)
        if m:
            return int(m.group('revision'))

    # Fallback: parse the working-copy 'entries' file directly.
    # ASP.NET-hosted checkouts on Windows use '_svn' instead of '.svn'.
    if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
        entries = njoin(path, '_svn', 'entries')
    else:
        entries = njoin(path, '.svn', 'entries')
    if os.path.isfile(entries):
        with open(entries) as f:
            fstr = f.read()
        if fstr[:5] == '<?xml':  # pre 1.4
            m = re.search(r'revision="(?P<revision>\d+)"', fstr)
            if m:
                return int(m.group('revision'))
        else:  # non-xml entries file --- check to be sure that
            m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
            if m:
                return int(m.group('revision'))
    return None
def _get_hg_revision(self, path):
    """Return path's Mercurial revision number (an int), or None.

    First asks ``hg identify --num``; if the executable is missing or
    the call fails, falls back to mapping the current branch name
    (``.hg/branch``) to a revision via the ``.hg/branch.cache`` file.
    """
    try:
        output = subprocess.check_output(
            ['hg', 'identify', '--num'], cwd=path)
    except (subprocess.CalledProcessError, OSError):
        pass
    else:
        m = re.match(rb'(?P<revision>\d+)', output)
        if m:
            return int(m.group('revision'))

    branch_fn = njoin(path, '.hg', 'branch')
    branch_cache_fn = njoin(path, '.hg', 'branch.cache')

    # Both files are required for the fallback.  Newer Mercurial versions
    # do not always write branch.cache; previously opening it unguarded
    # raised FileNotFoundError instead of returning None.
    if os.path.isfile(branch_fn) and os.path.isfile(branch_cache_fn):
        branch0 = None
        with open(branch_fn) as f:
            # Despite the name, this is the *current branch name*; it is
            # matched against the first column of branch.cache below.
            revision0 = f.read().strip()

        branch_map = {}
        with open(branch_cache_fn, 'r') as f:
            for line in f:
                branch1, revision1 = line.split()[:2]
                if revision1==revision0:
                    branch0 = branch1
                try:
                    revision1 = int(revision1)
                except ValueError:
                    continue
                branch_map[branch1] = revision1

        return branch_map.get(branch0)

    return None
def get_version(self, version_file=None, version_variable=None):
    """Try to get version string of a package.

    Return a version string of the current package or None if the version
    information could not be detected.

    Parameters
    ----------
    version_file : str, optional
        Specific file to read the version from; overrides the default
        candidate list.
    version_variable : str, optional
        Specific variable name to look up; overrides the default list.

    Notes
    -----
    This method scans files named
    __version__.py, <packagename>_version.py, version.py, and
    __svn_version__.py for string variables version, __version__, and
    <packagename>_version, until a version number is found.
    """
    version = getattr(self, 'version', None)
    if version is not None:
        return version

    # Get version from version file.
    if version_file is None:
        files = ['__version__.py',
                 self.name.split('.')[-1]+'_version.py',
                 'version.py',
                 '__svn_version__.py',
                 '__hg_version__.py']
    else:
        files = [version_file]
    if version_variable is None:
        version_vars = ['version',
                        '__version__',
                        self.name.split('.')[-1]+'_version']
    else:
        version_vars = [version_variable]
    for f in files:
        fn = njoin(self.local_path, f)
        if os.path.isfile(fn):
            name = os.path.splitext(os.path.basename(fn))[0]
            n = dot_join(self.name, name)
            # Import the candidate file under a flattened module name so
            # it cannot clash with an already-imported package module.
            try:
                version_module = exec_mod_from_location(
                    '_'.join(n.split('.')), fn)
            except ImportError as e:
                self.warn(str(e))
                version_module = None
            if version_module is None:
                continue

            for a in version_vars:
                version = getattr(version_module, a, None)
                if version is not None:
                    break

            # Try if versioneer module
            try:
                version = version_module.get_versions()['version']
            except AttributeError:
                pass

            if version is not None:
                break

    if version is not None:
        self.version = version  # cache for subsequent calls
        return version

    # Get version as SVN or Mercurial revision number
    revision = self._get_svn_revision(self.local_path)
    if revision is None:
        revision = self._get_hg_revision(self.local_path)

    if revision is not None:
        version = str(revision)
        self.version = version
        return version
def make_svn_version_py(self, delete=True):
    """Appends a data function to the data_files list that will generate
    __svn_version__.py file to the current package directory.

    Generate package __svn_version__.py file from SVN revision number,
    it will be removed after python exits but will be available
    when sdist, etc commands are executed.

    Notes
    -----
    If __svn_version__.py existed before, nothing is done.

    This is
    intended for working with source directories that are in an SVN
    repository.
    """
    target = njoin(self.local_path, '__svn_version__.py')
    revision = self._get_svn_revision(self.local_path)
    if os.path.isfile(target) or revision is None:
        # Nothing to do: file already exists or no SVN checkout detected.
        return
    else:
        def generate_svn_version_py():
            # Write the version file lazily, the first time it is needed.
            if not os.path.isfile(target):
                version = str(revision)
                self.info('Creating %s (version=%r)' % (target, version))
                with open(target, 'w') as f:
                    f.write('version = %r\n' % (version))

            def rm_file(f=target, p=self.info):
                # Clean up the generated file (and its stale .pyc twin)
                # at interpreter exit.
                if delete:
                    try: os.remove(f); p('removed '+f)
                    except OSError: pass
                    try: os.remove(f+'c'); p('removed '+f+'c')
                    except OSError: pass

            atexit.register(rm_file)

            return target

        self.add_data_files(('', generate_svn_version_py()))
def make_hg_version_py(self, delete=True):
    """Appends a data function to the data_files list that will generate
    __hg_version__.py file to the current package directory.

    Generate package __hg_version__.py file from Mercurial revision,
    it will be removed after python exits but will be available
    when sdist, etc commands are executed.

    Notes
    -----
    If __hg_version__.py existed before, nothing is done.

    This is intended for working with source directories that are
    in an Mercurial repository.
    """
    target = njoin(self.local_path, '__hg_version__.py')
    revision = self._get_hg_revision(self.local_path)
    if os.path.isfile(target) or revision is None:
        # Nothing to do: file already exists or no hg checkout detected.
        return
    else:
        def generate_hg_version_py():
            # Write the version file lazily, the first time it is needed.
            if not os.path.isfile(target):
                version = str(revision)
                self.info('Creating %s (version=%r)' % (target, version))
                with open(target, 'w') as f:
                    f.write('version = %r\n' % (version))

            def rm_file(f=target, p=self.info):
                # Clean up the generated file (and its stale .pyc twin)
                # at interpreter exit.
                if delete:
                    try: os.remove(f); p('removed '+f)
                    except OSError: pass
                    try: os.remove(f+'c'); p('removed '+f+'c')
                    except OSError: pass

            atexit.register(rm_file)

            return target

        self.add_data_files(('', generate_hg_version_py()))
def make_config_py(self, name='__config__'):
    """Generate package __config__.py file containing system_info
    information used during building the package.

    This file is installed to the
    package installation directory.
    """
    # Deferred: generate_config_py runs at build time, not now.
    self.py_modules.append((self.name, name, generate_config_py))
def get_info(self, *names):
    """Get resources information.

    Return information (from system_info.get_info) for all of the names in
    the argument list in a single dictionary.
    """
    from .system_info import get_info, dict_append
    info_dict = {}
    # Merge each component's info dict; dict_append concatenates
    # list-valued entries rather than overwriting them.
    for a in names:
        dict_append(info_dict, **get_info(a))
    return info_dict
# Extra compiler flags applied to numpy's C++ sources.
NPY_CXX_FLAGS = [
    '-std=c++11',            # Minimal standard version
    '-D__STDC_VERSION__=0',  # for compatibility with C headers
    '-fno-exceptions',       # no exception support
    '-fno-rtti']             # no run-time type information
def get_info(name, notfound_action=0):
    """Return build information for the resource called *name*.

    Looks *name* up (case-insensitively) in the registry of system_info
    subclasses below and returns that class's ``get_info()`` result.
    Unknown names fall back to the base ``system_info`` class.

    notfound_action:
      0 - do nothing
      1 - display warning message
      2 - raise error
    """
    cl = {'armpl': armpl_info,
          'blas_armpl': blas_armpl_info,
          'lapack_armpl': lapack_armpl_info,
          'fftw3_armpl': fftw3_armpl_info,
          'atlas': atlas_info,  # use lapack_opt or blas_opt instead
          'atlas_threads': atlas_threads_info,                # ditto
          'atlas_blas': atlas_blas_info,
          'atlas_blas_threads': atlas_blas_threads_info,
          'lapack_atlas': lapack_atlas_info,  # use lapack_opt instead
          'lapack_atlas_threads': lapack_atlas_threads_info,  # ditto
          'atlas_3_10': atlas_3_10_info,  # use lapack_opt or blas_opt instead
          'atlas_3_10_threads': atlas_3_10_threads_info,                # ditto
          'atlas_3_10_blas': atlas_3_10_blas_info,
          'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
          'lapack_atlas_3_10': lapack_atlas_3_10_info,  # use lapack_opt instead
          'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info,  # ditto
          'flame': flame_info,  # use lapack_opt instead
          'mkl': mkl_info,
          # openblas which may or may not have embedded lapack
          'openblas': openblas_info,  # use blas_opt instead
          # openblas with embedded lapack
          'openblas_lapack': openblas_lapack_info,  # use blas_opt instead
          'openblas_clapack': openblas_clapack_info,  # use blas_opt instead
          'blis': blis_info,  # use blas_opt instead
          'lapack_mkl': lapack_mkl_info,  # use lapack_opt instead
          'blas_mkl': blas_mkl_info,  # use blas_opt instead
          'accelerate': accelerate_info,  # use blas_opt instead
          'openblas64_': openblas64__info,
          'openblas64__lapack': openblas64__lapack_info,
          'openblas_ilp64': openblas_ilp64_info,
          'openblas_ilp64_lapack': openblas_ilp64_lapack_info,
          'x11': x11_info,
          'fft_opt': fft_opt_info,
          'fftw': fftw_info,
          'fftw2': fftw2_info,
          'fftw3': fftw3_info,
          'dfftw': dfftw_info,
          'sfftw': sfftw_info,
          'fftw_threads': fftw_threads_info,
          'dfftw_threads': dfftw_threads_info,
          'sfftw_threads': sfftw_threads_info,
          'djbfft': djbfft_info,
          'blas': blas_info,  # use blas_opt instead
          'lapack': lapack_info,  # use lapack_opt instead
          'lapack_src': lapack_src_info,
          'blas_src': blas_src_info,
          'numpy': numpy_info,
          'f2py': f2py_info,
          'Numeric': Numeric_info,
          'numeric': Numeric_info,
          'numarray': numarray_info,
          'numerix': numerix_info,
          'lapack_opt': lapack_opt_info,
          'lapack_ilp64_opt': lapack_ilp64_opt_info,
          'lapack_ilp64_plain_opt': lapack_ilp64_plain_opt_info,
          'lapack64__opt': lapack64__opt_info,
          'blas_opt': blas_opt_info,
          'blas_ilp64_opt': blas_ilp64_opt_info,
          'blas_ilp64_plain_opt': blas_ilp64_plain_opt_info,
          'blas64__opt': blas64__opt_info,
          'boost_python': boost_python_info,
          'agg2': agg2_info,
          'wx': wx_info,
          'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
          'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
          'gdk_pixbuf_2': gdk_pixbuf_2_info,
          'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
          'gdk': gdk_info,
          'gdk_2': gdk_2_info,
          'gdk-2.0': gdk_2_info,
          'gdk_x11_2': gdk_x11_2_info,
          'gdk-x11-2.0': gdk_x11_2_info,
          'gtkp_x11_2': gtkp_x11_2_info,
          'gtk+-x11-2.0': gtkp_x11_2_info,
          'gtkp_2': gtkp_2_info,
          'gtk+-2.0': gtkp_2_info,
          'xft': xft_info,
          'freetype2': freetype2_info,
          'umfpack': umfpack_info,
          'amd': amd_info,
          }.get(name.lower(), system_info)
    return cl().get_info(notfound_action)
class system_info:

    """ get_info() is the only public method. Don't use others.
    """
    # Environment variable (or sequence of them) naming the install prefix
    # for the resource; subclasses override.
    dir_env_var = None
    # XXX: search_static_first is disabled by default, may disappear in
    # future unless it is proved to be useful.
    search_static_first = 0
    # The base-class section name is a random word "ALL" and is not really
    # intended for general use. It cannot be None nor can it be DEFAULT as
    # these break the ConfigParser. See gh-15338
    section = 'ALL'
    # Class-level cache of computed info dicts, keyed by subclass name.
    saved_results = {}

    notfounderror = NotFoundError

    def __init__(self,
                 default_lib_dirs=default_lib_dirs,
                 default_include_dirs=default_include_dirs,
                 ):
        self.__class__.info = {}
        self.local_prefixes = []
        defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
                    'include_dirs': os.pathsep.join(default_include_dirs),
                    'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
                    'rpath': '',
                    'src_dirs': os.pathsep.join(default_src_dirs),
                    'search_static_first': str(self.search_static_first),
                    'extra_compile_args': '', 'extra_link_args': ''}
        self.cp = ConfigParser(defaults)
        self.files = []
        self.files.extend(get_standard_file('.numpy-site.cfg'))
        self.files.extend(get_standard_file('site.cfg'))
        self.parse_config_files()

        if self.section is not None:
            self.search_static_first = self.cp.getboolean(
                self.section, 'search_static_first')
        assert isinstance(self.search_static_first, int)

    def parse_config_files(self):
        # Read all site.cfg files; make sure our section exists so later
        # cp.get calls fall back to the defaults instead of raising.
        self.cp.read(self.files)
        if not self.cp.has_section(self.section):
            if self.section is not None:
                self.cp.add_section(self.section)

    def calc_libraries_info(self):
        """Locate configured libraries on disk and return their info dict."""
        libs = self.get_libraries()
        dirs = self.get_lib_dirs()
        # The extensions use runtime_library_dirs
        r_dirs = self.get_runtime_lib_dirs()
        # Intrinsic distutils use rpath, we simply append both entries
        # as though they were one entry
        r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))

        info = {}
        for lib in libs:
            i = self.check_libs(dirs, [lib])
            if i is not None:
                dict_append(info, **i)
            else:
                log.info('Library %s was not found. Ignoring' % (lib))

            if r_dirs:
                i = self.check_libs(r_dirs, [lib])
                if i is not None:
                    # Swap library keywords found to runtime_library_dirs
                    # the libraries are insisting on the user having defined
                    # them using the library_dirs, and not necessarily by
                    # runtime_library_dirs
                    del i['libraries']
                    i['runtime_library_dirs'] = i.pop('library_dirs')
                    dict_append(info, **i)
                else:
                    log.info('Runtime library %s was not found. Ignoring' % (lib))

        return info

    def set_info(self, **info):
        # Enrich and cache the result; an *empty* dict is cached too and
        # means "looked for it, not available".
        if info:
            lib_info = self.calc_libraries_info()
            dict_append(info, **lib_info)
            # Update extra information
            extra_info = self.calc_extra_info()
            dict_append(info, **extra_info)
        self.saved_results[self.__class__.__name__] = info

    def get_option_single(self, *options):
        """ Ensure that only one of `options` are found in the section

        Parameters
        ----------
        *options : list of str
           a list of options to be found in the section (``self.section``)

        Returns
        -------
        str :
            the option that is uniquely found in the section

        Raises
        ------
        AliasedOptionError :
            in case more than one of the options are found
        """
        found = [self.cp.has_option(self.section, opt) for opt in options]
        if sum(found) == 1:
            return options[found.index(True)]
        elif sum(found) == 0:
            # nothing is found anyways
            return options[0]

        # Else we have more than 1 key found
        if AliasedOptionError.__doc__ is None:
            # Docstrings stripped (python -OO): raise without a message.
            raise AliasedOptionError()
        raise AliasedOptionError(AliasedOptionError.__doc__.format(
            section=self.section, options='[{}]'.format(', '.join(options))))

    def has_info(self):
        # True once get_info()/set_info() has run for this subclass.
        return self.__class__.__name__ in self.saved_results

    def calc_extra_info(self):
        """ Updates the information in the current information with
        respect to these flags:
          extra_compile_args
          extra_link_args
        """
        info = {}
        for key in ['extra_compile_args', 'extra_link_args']:
            # Get values
            opt = self.cp.get(self.section, key)
            # Split using native shell quoting rules.
            opt = _shell_utils.NativeParser.split(opt)
            if opt:
                tmp = {key: opt}
                dict_append(info, **tmp)
        return info

    def get_info(self, notfound_action=0):
        """ Return a dictionary with items that are compatible
        with numpy.distutils.setup keyword arguments.
        """
        flag = 0
        if not self.has_info():
            # First call for this subclass: run the actual detection.
            flag = 1
            log.info(self.__class__.__name__ + ':')
            if hasattr(self, 'calc_info'):
                self.calc_info()
            if notfound_action:
                if not self.has_info():
                    if notfound_action == 1:
                        warnings.warn(self.notfounderror.__doc__, stacklevel=2)
                    elif notfound_action == 2:
                        raise self.notfounderror(self.notfounderror.__doc__)
                    else:
                        raise ValueError(repr(notfound_action))
            if not self.has_info():
                log.info(' NOT AVAILABLE')
                self.set_info()
            else:
                log.info(' FOUND:')

        res = self.saved_results.get(self.__class__.__name__)
        if log.get_threshold() <= log.INFO and flag:
            # Log the freshly computed result, truncating huge lists.
            for k, v in res.items():
                v = str(v)
                if k in ['sources', 'libraries'] and len(v) > 270:
                    v = v[:120] + '...\n...\n...' + v[-120:]
                log.info(' %s = %s', k, v)
            log.info('')

        # Deep copy so callers cannot mutate the cached result.
        return copy.deepcopy(res)

    def get_paths(self, section, key):
        # Start from the configured value, then let the environment
        # variable (self.dir_env_var) prepend or override directories.
        dirs = self.cp.get(section, key).split(os.pathsep)
        env_var = self.dir_env_var
        if env_var:
            if is_sequence(env_var):
                # Several candidate env vars: use the first one that is set,
                # defaulting to the last entry.
                e0 = env_var[-1]
                for e in env_var:
                    if e in os.environ:
                        e0 = e
                        break
                if not env_var[0] == e0:
                    log.info('Setting %s=%s' % (env_var[0], e0))
                env_var = e0
        if env_var and env_var in os.environ:
            d = os.environ[env_var]
            if d == 'None':
                # Explicit opt-out: the literal string 'None' disables
                # this resource entirely.
                log.info('Disabled %s: %s',
                         self.__class__.__name__, '(%s is None)'
                         % (env_var,))
                return []
            if os.path.isfile(d):
                # Env var points at a library *file*: use its directory and
                # derive the library name from the file name.
                dirs = [os.path.dirname(d)] + dirs
                l = getattr(self, '_lib_names', [])
                if len(l) == 1:
                    b = os.path.basename(d)
                    b = os.path.splitext(b)[0]
                    if b[:3] == 'lib':
                        log.info('Replacing _lib_names[0]==%r with %r' \
                                 % (self._lib_names[0], b[3:]))
                        self._lib_names[0] = b[3:]
            else:
                # Env var is a prefix (or path list): add each existing
                # directory plus its include/ and lib/ subdirectories.
                ds = d.split(os.pathsep)
                ds2 = []
                for d in ds:
                    if os.path.isdir(d):
                        ds2.append(d)
                        for dd in ['include', 'lib']:
                            d1 = os.path.join(d, dd)
                            if os.path.isdir(d1):
                                ds2.append(d1)
                dirs = ds2 + dirs
        default_dirs = self.cp.get(self.section, key).split(os.pathsep)
        dirs.extend(default_dirs)
        ret = []
        for d in dirs:
            if len(d) > 0 and not os.path.isdir(d):
                warnings.warn('Specified path %s is invalid.' % d, stacklevel=2)
                continue
            if d not in ret:  # de-duplicate, preserving order
                ret.append(d)
        log.debug('( %s = %s )', key, ':'.join(ret))
        return ret

    def get_lib_dirs(self, key='library_dirs'):
        return self.get_paths(self.section, key)

    def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
        path = self.get_paths(self.section, key)
        if path == ['']:
            path = []
        return path

    def get_include_dirs(self, key='include_dirs'):
        return self.get_paths(self.section, key)

    def get_src_dirs(self, key='src_dirs'):
        return self.get_paths(self.section, key)

    def get_libs(self, key, default):
        # Comma-separated option from site.cfg, falling back to *default*
        # (string or sequence) when the option is absent.
        try:
            libs = self.cp.get(self.section, key)
        except NoOptionError:
            if not default:
                return []
            if is_string(default):
                return [default]
            return default
        return [b for b in [a.strip() for a in libs.split(',')] if b]

    def get_libraries(self, key='libraries'):
        if hasattr(self, '_lib_names'):
            return self.get_libs(key, default=self._lib_names)
        else:
            return self.get_libs(key, '')

    def library_extensions(self):
        # Candidate file extensions, ordered by search preference.
        c = customized_ccompiler()
        static_exts = []
        if c.compiler_type != 'msvc':
            # MSVC doesn't understand binutils
            static_exts.append('.a')
        if sys.platform == 'win32':
            static_exts.append('.lib')  # .lib is used by MSVC and others
        if self.search_static_first:
            exts = static_exts + [so_ext]
        else:
            exts = [so_ext] + static_exts
        if sys.platform == 'cygwin':
            exts.append('.dll.a')
        if sys.platform == 'darwin':
            exts.append('.dylib')
        return exts

    def check_libs(self, lib_dirs, libs, opt_libs=[]):
        """If static or shared libraries are available then return
        their info dictionary.

        Checks for all libraries as shared libraries first, then
        static (or vice versa if self.search_static_first is True).
        """
        exts = self.library_extensions()
        info = None
        for ext in exts:
            info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
            if info is not None:
                break
        if not info:
            log.info(' libraries %s not found in %s', ','.join(libs),
                     lib_dirs)
        return info

    def check_libs2(self, lib_dirs, libs, opt_libs=[]):
        """If static or shared libraries are available then return
        their info dictionary.

        Checks each library for shared or static.
        """
        exts = self.library_extensions()
        info = self._check_libs(lib_dirs, libs, opt_libs, exts)
        if not info:
            log.info(' libraries %s not found in %s', ','.join(libs),
                     lib_dirs)

        return info

    def _find_lib(self, lib_dir, lib, exts):
        assert is_string(lib_dir)
        # under windows first try without 'lib' prefix
        if sys.platform == 'win32':
            lib_prefixes = ['', 'lib']
        else:
            lib_prefixes = ['lib']
        # for each library name, see if we can find a file for it.
        for ext in exts:
            for prefix in lib_prefixes:
                p = self.combine_paths(lib_dir, prefix + lib + ext)
                if p:
                    break
            if p:
                assert len(p) == 1
                # ??? splitext on p[0] would do this for cygwin
                # doesn't seem correct
                if ext == '.dll.a':
                    lib += '.dll'
                if ext == '.lib':
                    lib = prefix + lib
                return lib

        return False

    def _find_libs(self, lib_dirs, libs, exts):
        # make sure we preserve the order of libs, as it can be important
        found_dirs, found_libs = [], []
        for lib in libs:
            for lib_dir in lib_dirs:
                found_lib = self._find_lib(lib_dir, lib, exts)
                if found_lib:
                    found_libs.append(found_lib)
                    if lib_dir not in found_dirs:
                        found_dirs.append(lib_dir)
                    break
        return found_dirs, found_libs

    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
        """Find mandatory and optional libs in expected paths.

        Missing optional libraries are silently forgotten.
        """
        if not is_sequence(lib_dirs):
            lib_dirs = [lib_dirs]
        # First, try to find the mandatory libraries
        found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)
        if len(found_libs) > 0 and len(found_libs) == len(libs):
            # Now, check for optional libraries
            opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)
            found_libs.extend(opt_found_libs)
            for lib_dir in opt_found_dirs:
                if lib_dir not in found_dirs:
                    found_dirs.append(lib_dir)
            info = {'libraries': found_libs, 'library_dirs': found_dirs}
            return info
        else:
            return None

    def combine_paths(self, *args):
        """Return a list of existing paths composed by all combinations
        of items from the arguments.
        """
        # Delegates to the module-level combine_paths helper.
        return combine_paths(*args)
def configuration(parent_package='', top_path=None):
    """Build configuration for numpy.linalg.

    Picks an optimized LAPACK via system_info when available, otherwise
    falls back to the bundled f2c-translated lapack_lite sources.
    """
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
    from numpy.distutils.system_info import get_info, system_info
    config = Configuration('linalg', parent_package, top_path)

    config.add_subpackage('tests')

    # Configure lapack_lite

    src_dir = 'lapack_lite'
    lapack_lite_src = [
        os.path.join(src_dir, 'python_xerbla.c'),
        os.path.join(src_dir, 'f2c_z_lapack.c'),
        os.path.join(src_dir, 'f2c_c_lapack.c'),
        os.path.join(src_dir, 'f2c_d_lapack.c'),
        os.path.join(src_dir, 'f2c_s_lapack.c'),
        os.path.join(src_dir, 'f2c_lapack.c'),
        os.path.join(src_dir, 'f2c_blas.c'),
        os.path.join(src_dir, 'f2c_config.c'),
        os.path.join(src_dir, 'f2c.c'),
    ]
    all_sources = config.paths(lapack_lite_src)

    if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":
        # ILP64 requested: detection failure is a hard error (action 2).
        lapack_info = get_info('lapack_ilp64_opt', 2)
    else:
        lapack_info = get_info('lapack_opt', 0)  # and {}

    use_lapack_lite = not lapack_info

    if use_lapack_lite:
        # This makes numpy.distutils write the fact that lapack_lite
        # is being used to numpy.__config__
        class numpy_linalg_lapack_lite(system_info):
            def calc_info(self):
                info = {'language': 'c'}
                size_t_size = sysconfig.get_config_var("SIZEOF_SIZE_T")
                if size_t_size:
                    maxsize = 2**(size_t_size - 1) - 1
                else:
                    # We prefer using sysconfig as it allows cross-compilation
                    # but the information may be missing (e.g. on windows).
                    maxsize = sys.maxsize
                if maxsize > 2**32:
                    # Build lapack-lite in 64-bit integer mode.
                    # The suffix is arbitrary (lapack_lite symbols follow it),
                    # but use the "64_" convention here.
                    info['define_macros'] = [
                        ('HAVE_BLAS_ILP64', None),
                        ('BLAS_SYMBOL_SUFFIX', '64_')
                    ]
                self.set_info(**info)

        lapack_info = numpy_linalg_lapack_lite().get_info(2)

    def get_lapack_lite_sources(ext, build_dir):
        # Source-list callback evaluated at build time.
        if use_lapack_lite:
            print("### Warning: Using unoptimized lapack ###")
            return all_sources
        else:
            if sys.platform == 'win32':
                print("### Warning: python_xerbla.c is disabled ###")
                return []
            # With an external LAPACK only python_xerbla.c is needed.
            return [all_sources[0]]

    config.add_extension(
        'lapack_lite',
        sources=['lapack_litemodule.c', get_lapack_lite_sources],
        depends=['lapack_lite/f2c.h'],
        extra_info=lapack_info,
    )

    # umath_linalg module
    config.add_extension(
        '_umath_linalg',
        sources=['umath_linalg.cpp', get_lapack_lite_sources],
        depends=['lapack_lite/f2c.h'],
        extra_info=lapack_info,
        extra_cxx_compile_args=NPY_CXX_FLAGS,
        libraries=['npymath'],
    )
    config.add_data_files('*.pyi')
    return config
168,586 | import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
reciprocal
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import _umath_linalg
def _determine_error_states():
    """Build the extobj template used to raise LinAlgError from ufuncs.

    Returns ``[bufsize, errmask, None]`` where the error mask has
    invalid='call' set; the ``None`` slot is filled in later with the
    actual callback.
    """
    errobj = geterrobj()
    bufsize = errobj[0]

    # Capture the integer error mask corresponding to these errstate
    # settings by reading geterrobj() inside the context manager.
    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]

    return [bufsize, invalid_call_errmask, None]
168,587 | import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
reciprocal
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import _umath_linalg
def _tensorsolve_dispatcher(a, b, axes=None):
    """Relevant arguments for ``__array_function__`` dispatch."""
    return a, b
168,588 | import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort,
reciprocal
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import _umath_linalg
class LinAlgError(Exception):
    """
    Generic Python-exception-derived object raised by linalg functions.

    General purpose exception class, derived from Python's exception.Exception
    class, programmatically raised in linalg functions when a Linear
    Algebra-related condition would prevent further correct execution of the
    function.

    Parameters
    ----------
    None

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "...linalg.py", line 350,
        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
      File "...linalg.py", line 249,
        in solve
        raise LinAlgError('Singular matrix')
    numpy.linalg.LinAlgError: Singular matrix

    """
def _makearray(a):
    """Coerce *a* to an ndarray and return ``(array, wrap)``.

    ``wrap`` restores the caller's array flavor on results: it is
    ``a.__array_prepare__`` when *a* defines one, otherwise the coerced
    array's own ``__array_wrap__``.
    """
    arr = asarray(a)
    wrap = getattr(a, "__array_prepare__", arr.__array_wrap__)
    return arr, wrap
def transpose(a):
    """
    Swap the last two axes of *a*.

    Unlike ``np.transpose``, only the final two axes are exchanged, so a
    stack of matrices ``(..., M, N)`` becomes a stack of their transposes
    ``(..., N, M)``.

    Parameters
    ----------
    a : (..., M, N) array_like

    Returns
    -------
    aT : (..., N, M) ndarray
    """
    return swapaxes(a, -2, -1)
def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation `ax = b`.

    Parameters
    ----------
    a : (..., M, M) array_like
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, array_like
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system a x = b.  Returned shape is identical to `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    See Also
    --------
    scipy.linalg.solve : Similar function in SciPy.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The solutions are computed using LAPACK routine ``_gesv``.

    `a` must be square and of full-rank, i.e., all rows (or, equivalently,
    columns) must be linearly independent; if either is not true, use
    `lstsq` for the least-squares best "solution" of the
    system/equation.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 22.

    Examples
    --------
    Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``:

    >>> a = np.array([[1, 2], [3, 5]])
    >>> b = np.array([1, 2])
    >>> x = np.linalg.solve(a, b)
    >>> x
    array([-1.,  1.])

    Check that the solution is correct:

    >>> np.allclose(np.dot(a, x), b)
    True

    """
    a, _ = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    b, wrap = _makearray(b)
    t, result_t = _commonType(a, b)

    # We use the b = (..., M,) logic, only if the number of extra dimensions
    # match exactly
    if b.ndim == a.ndim - 1:
        gufunc = _umath_linalg.solve1
    else:
        gufunc = _umath_linalg.solve

    # 'D' = complex double, 'd' = real double gufunc signatures; the extobj
    # makes LAPACK failures raise LinAlgError('Singular matrix').
    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    r = gufunc(a, b, signature=signature, extobj=extobj)

    return wrap(r.astype(result_t, copy=False))
The provided code snippet includes necessary dependencies for implementing the `tensorsolve` function. Write a Python function `def tensorsolve(a, b, axes=None)` to solve the following problem:
Solve the tensor equation ``a x = b`` for x. It is assumed that all indices of `x` are summed over in the product, together with the rightmost indices of `a`, as is done in, for example, ``tensordot(a, x, axes=x.ndim)``. Parameters ---------- a : array_like Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals the shape of that sub-tensor of `a` consisting of the appropriate number of its rightmost indices, and must be such that ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be 'square'). b : array_like Right-hand tensor, which can be of any shape. axes : tuple of ints, optional Axes in `a` to reorder to the right, before inversion. If None (default), no reordering is done. Returns ------- x : ndarray, shape Q Raises ------ LinAlgError If `a` is singular or not 'square' (in the above sense). See Also -------- numpy.tensordot, tensorinv, numpy.einsum Examples -------- >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) >>> b = np.random.randn(2*3, 4) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) >>> np.allclose(np.tensordot(a, x, axes=3), b) True
Here is the function:
def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.

    It is assumed that all indices of `x` are summed over in the product,
    together with the rightmost indices of `a`, as is done in, for example,
    ``tensordot(a, x, axes=x.ndim)``.

    Parameters
    ----------
    a : array_like
        Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
        the shape of that sub-tensor of `a` consisting of the appropriate
        number of its rightmost indices, and must be such that
        ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
        'square').
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorinv, numpy.einsum

    Examples
    --------
    >>> a = np.eye(2*3*4)
    >>> a.shape = (2*3, 4, 2, 3, 4)
    >>> b = np.random.randn(2*3, 4)
    >>> x = np.linalg.tensorsolve(a, b)
    >>> x.shape
    (2, 3, 4)
    >>> np.allclose(np.tensordot(a, x, axes=3), b)
    True

    """
    a, wrap = _makearray(a)
    b = asarray(b)
    an = a.ndim

    if axes is not None:
        # Move the requested axes to the end, preserving their order.
        allaxes = list(range(0, an))
        for k in axes:
            allaxes.remove(k)
            allaxes.insert(an, k)
        a = a.transpose(allaxes)

    # The trailing (an - b.ndim) axes of `a` form the shape Q of the result.
    oldshape = a.shape[-(an-b.ndim):]
    prod = functools.reduce(operator.mul, oldshape, 1)

    if a.size != prod ** 2:
        raise LinAlgError(
            "Input arrays must satisfy the requirement \
            prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])"
        )

    # Flatten to a 2-D system, solve, and restore the tensor shape.
    a = a.reshape(prod, prod)
    b = b.ravel()
    res = wrap(solve(a, b))
    res.shape = oldshape
    return res
def _solve_dispatcher(a, b):
    """Relevant arguments for ``__array_function__`` dispatch."""
    return a, b
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.