diff --git a/.gitattributes b/.gitattributes
index bf032c28a6b06bcf7e4837fffbb8adae3b7fd312..dbfead2c5755bc73154422357f558eab886e1ad2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -100,3 +100,5 @@ parrot/lib/libncursesw.a filter=lfs diff=lfs merge=lfs -text
parrot/lib/libncurses.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/libncursesw.so.6.4 filter=lfs diff=lfs merge=lfs -text
parrot/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
+parrot/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text
+parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/parrot/lib/libtinfow.so.6 b/parrot/lib/libtinfow.so.6
new file mode 100644
index 0000000000000000000000000000000000000000..cc8cf878bbd7930738828148050b77fb02c2de4e
--- /dev/null
+++ b/parrot/lib/libtinfow.so.6
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ff9b333bc4b796b31c188c2dadd7840788cb963dbf4f34567deb3f326326b02
+size 287080
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__init__.py b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f3bf14c3851de5ceaa84f1c4c1cad113a846587
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__init__.py
@@ -0,0 +1,13 @@
+__version__ = "2.4.2"
+
+from .impl import start_connection
+from .types import AddrInfoType
+from .utils import addr_to_addr_infos, pop_addr_infos_interleave, remove_addr_infos
+
+__all__ = (
+ "start_connection",
+ "AddrInfoType",
+ "remove_addr_infos",
+ "pop_addr_infos_interleave",
+ "addr_to_addr_infos",
+)
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ee1bdf9de4c316ec917b94115f666ca783c1b09
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/impl.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/impl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2267a2c47aa408e87259b72c7e3dc408ceee4087
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/impl.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/staggered.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/staggered.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..535299609b749038097dc0fadeea1a2b85646c78
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/staggered.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/types.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34cf2bb557d6d7d46052fa654d40e9ec16a1bbdb
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/types.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f7b00cdd1cb647bd4ef6fd0bb78d228864a777c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/__pycache__/utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/_staggered.py b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/_staggered.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5c6798cf31455f5d6c9d8520cf2871c9ec8c112
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/_staggered.py
@@ -0,0 +1,101 @@
+import asyncio
+import contextlib
+from typing import Awaitable, Callable, Iterable, List, Optional, Tuple, TypeVar
+
+
+class _Done(Exception):
+ pass
+
+
+_T = TypeVar("_T")
+
+
+async def staggered_race(
+ coro_fns: Iterable[Callable[[], Awaitable[_T]]], delay: Optional[float]
+) -> Tuple[Optional[_T], Optional[int], List[Optional[BaseException]]]:
+ """
+ Run coroutines with staggered start times and take the first to finish.
+
+ This method takes an iterable of coroutine functions. The first one is
+ started immediately. From then on, whenever the immediately preceding one
+ fails (raises an exception), or when *delay* seconds has passed, the next
+ coroutine is started. This continues until one of the coroutines complete
+ successfully, in which case all others are cancelled, or until all
+ coroutines fail.
+
+ The coroutines provided should be well-behaved in the following way:
+
+ * They should only ``return`` if completed successfully.
+
+ * They should always raise an exception if they did not complete
+ successfully. In particular, if they handle cancellation, they should
+ probably reraise, like this::
+
+ try:
+ # do work
+ except asyncio.CancelledError:
+ # undo partially completed work
+ raise
+
+ Args:
+ coro_fns: an iterable of coroutine functions, i.e. callables that
+ return a coroutine object when called. Use ``functools.partial`` or
+ lambdas to pass arguments.
+
+ delay: amount of time, in seconds, between starting coroutines. If
+ ``None``, the coroutines will run sequentially.
+
+ Returns:
+ tuple *(winner_result, winner_index, exceptions)* where
+
+ - *winner_result*: the result of the winning coroutine, or ``None``
+ if no coroutines won.
+
+ - *winner_index*: the index of the winning coroutine in
+ ``coro_fns``, or ``None`` if no coroutines won. If the winning
+ coroutine may return None on success, *winner_index* can be used
+ to definitively determine whether any coroutine won.
+
+ - *exceptions*: list of exceptions returned by the coroutines.
+ ``len(exceptions)`` is equal to the number of coroutines actually
+ started, and the order is the same as in ``coro_fns``. The winning
+ coroutine's entry is ``None``.
+
+ """
+ # TODO: when we have aiter() and anext(), allow async iterables in coro_fns.
+ winner_result = None
+ winner_index = None
+ exceptions: List[Optional[BaseException]] = []
+
+ async def run_one_coro(
+ this_index: int,
+ coro_fn: Callable[[], Awaitable[_T]],
+ this_failed: asyncio.Event,
+ ) -> None:
+ try:
+ result = await coro_fn()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except BaseException as e:
+ exceptions[this_index] = e
+ this_failed.set() # Kickstart the next coroutine
+ else:
+ # Store winner's results
+ nonlocal winner_index, winner_result
+ assert winner_index is None # noqa: S101
+ winner_index = this_index
+ winner_result = result
+ raise _Done
+
+ try:
+ async with asyncio.TaskGroup() as tg:
+ for this_index, coro_fn in enumerate(coro_fns):
+ this_failed = asyncio.Event()
+ exceptions.append(None)
+ tg.create_task(run_one_coro(this_index, coro_fn, this_failed))
+ with contextlib.suppress(TimeoutError):
+ await asyncio.wait_for(this_failed.wait(), delay)
+ except* _Done:
+ pass
+
+ return winner_result, winner_index, exceptions
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/impl.py b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..1017e822118115b7ca8ee37746ace07f3ed4674e
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/impl.py
@@ -0,0 +1,204 @@
+"""Base implementation."""
+
+import asyncio
+import collections
+import functools
+import itertools
+import socket
+import sys
+from typing import List, Optional, Sequence
+
+from . import staggered
+from .types import AddrInfoType
+
+if sys.version_info < (3, 8, 2): # noqa: UP036
+ # asyncio.staggered is broken in Python 3.8.0 and 3.8.1
+ # so it must be patched:
+ # https://github.com/aio-libs/aiohttp/issues/8556
+ # https://bugs.python.org/issue39129
+ # https://github.com/python/cpython/pull/17693
+ import asyncio.futures
+
+ asyncio.futures.TimeoutError = asyncio.TimeoutError # type: ignore[attr-defined]
+
+
+async def start_connection(
+ addr_infos: Sequence[AddrInfoType],
+ *,
+ local_addr_infos: Optional[Sequence[AddrInfoType]] = None,
+ happy_eyeballs_delay: Optional[float] = None,
+ interleave: Optional[int] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+) -> socket.socket:
+ """
+ Connect to a TCP server.
+
+ Create a socket connection to a specified destination. The
+ destination is specified as a list of AddrInfoType tuples as
+ returned from getaddrinfo().
+
+ The arguments are, in order:
+
+ * ``family``: the address family, e.g. ``socket.AF_INET`` or
+ ``socket.AF_INET6``.
+ * ``type``: the socket type, e.g. ``socket.SOCK_STREAM`` or
+ ``socket.SOCK_DGRAM``.
+ * ``proto``: the protocol, e.g. ``socket.IPPROTO_TCP`` or
+ ``socket.IPPROTO_UDP``.
+ * ``canonname``: the canonical name of the address, e.g.
+ ``"www.python.org"``.
+ * ``sockaddr``: the socket address
+
+ This method is a coroutine which will try to establish the connection
+ in the background. When successful, the coroutine returns a
+ socket.
+
+ The expected use case is to use this method in conjunction with
+ loop.create_connection() to establish a connection to a server::
+
+ socket = await start_connection(addr_infos)
+ transport, protocol = await loop.create_connection(
+ MyProtocol, sock=socket, ...)
+ """
+ if not (current_loop := loop):
+ current_loop = asyncio.get_running_loop()
+
+ single_addr_info = len(addr_infos) == 1
+
+ if happy_eyeballs_delay is not None and interleave is None:
+ # If using happy eyeballs, default to interleave addresses by family
+ interleave = 1
+
+ if interleave and not single_addr_info:
+ addr_infos = _interleave_addrinfos(addr_infos, interleave)
+
+ sock: Optional[socket.socket] = None
+ exceptions: List[List[OSError]] = []
+ if happy_eyeballs_delay is None or single_addr_info:
+ # not using happy eyeballs
+ for addrinfo in addr_infos:
+ try:
+ sock = await _connect_sock(
+ current_loop, exceptions, addrinfo, local_addr_infos
+ )
+ break
+ except OSError:
+ continue
+ else: # using happy eyeballs
+ sock, _, _ = await staggered.staggered_race(
+ (
+ functools.partial(
+ _connect_sock, current_loop, exceptions, addrinfo, local_addr_infos
+ )
+ for addrinfo in addr_infos
+ ),
+ happy_eyeballs_delay,
+ )
+
+ if sock is None:
+ all_exceptions = [exc for sub in exceptions for exc in sub]
+ try:
+ first_exception = all_exceptions[0]
+ if len(all_exceptions) == 1:
+ raise first_exception
+ else:
+ # If they all have the same str(), raise one.
+ model = str(first_exception)
+ if all(str(exc) == model for exc in all_exceptions):
+ raise first_exception
+ # Raise a combined exception so the user can see all
+ # the various error messages.
+ msg = "Multiple exceptions: {}".format(
+ ", ".join(str(exc) for exc in all_exceptions)
+ )
+ # If the errno is the same for all exceptions, raise
+ # an OSError with that errno.
+ first_errno = first_exception.errno
+ if all(
+ isinstance(exc, OSError) and exc.errno == first_errno
+ for exc in all_exceptions
+ ):
+ raise OSError(first_errno, msg)
+ raise OSError(msg)
+ finally:
+ all_exceptions = None # type: ignore[assignment]
+ exceptions = None # type: ignore[assignment]
+
+ return sock
+
+
+async def _connect_sock(
+ loop: asyncio.AbstractEventLoop,
+ exceptions: List[List[OSError]],
+ addr_info: AddrInfoType,
+ local_addr_infos: Optional[Sequence[AddrInfoType]] = None,
+) -> socket.socket:
+ """Create, bind and connect one socket."""
+ my_exceptions: list[OSError] = []
+ exceptions.append(my_exceptions)
+ family, type_, proto, _, address = addr_info
+ sock = None
+ try:
+ sock = socket.socket(family=family, type=type_, proto=proto)
+ sock.setblocking(False)
+ if local_addr_infos is not None:
+ for lfamily, _, _, _, laddr in local_addr_infos:
+ # skip local addresses of different family
+ if lfamily != family:
+ continue
+ try:
+ sock.bind(laddr)
+ break
+ except OSError as exc:
+ msg = (
+ f"error while attempting to bind on "
+ f"address {laddr!r}: "
+ f"{exc.strerror.lower()}"
+ )
+ exc = OSError(exc.errno, msg)
+ my_exceptions.append(exc)
+ else: # all bind attempts failed
+ if my_exceptions:
+ raise my_exceptions.pop()
+ else:
+ raise OSError(f"no matching local address with {family=} found")
+ await loop.sock_connect(sock, address)
+ return sock
+ except OSError as exc:
+ my_exceptions.append(exc)
+ if sock is not None:
+ sock.close()
+ raise
+ except:
+ if sock is not None:
+ sock.close()
+ raise
+ finally:
+ exceptions = my_exceptions = None # type: ignore[assignment]
+
+
+def _interleave_addrinfos(
+ addrinfos: Sequence[AddrInfoType], first_address_family_count: int = 1
+) -> List[AddrInfoType]:
+ """Interleave list of addrinfo tuples by family."""
+ # Group addresses by family
+ addrinfos_by_family: collections.OrderedDict[int, List[AddrInfoType]] = (
+ collections.OrderedDict()
+ )
+ for addr in addrinfos:
+ family = addr[0]
+ if family not in addrinfos_by_family:
+ addrinfos_by_family[family] = []
+ addrinfos_by_family[family].append(addr)
+ addrinfos_lists = list(addrinfos_by_family.values())
+
+ reordered: List[AddrInfoType] = []
+ if first_address_family_count > 1:
+ reordered.extend(addrinfos_lists[0][: first_address_family_count - 1])
+ del addrinfos_lists[0][: first_address_family_count - 1]
+ reordered.extend(
+ a
+ for a in itertools.chain.from_iterable(itertools.zip_longest(*addrinfos_lists))
+ if a is not None
+ )
+ return reordered
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/py.typed b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/staggered.py b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/staggered.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a8b3918b12741a5eb4da55f10f6a63ba9d21d87
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/staggered.py
@@ -0,0 +1,9 @@
+import sys
+
+if sys.version_info > (3, 11):
+ # https://github.com/python/cpython/issues/124639#issuecomment-2378129834
+ from ._staggered import staggered_race
+else:
+ from asyncio.staggered import staggered_race
+
+__all__ = ["staggered_race"]
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/types.py b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..01d79a28eb0bcb4c6daa2b2f656b0014aecb258c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/types.py
@@ -0,0 +1,12 @@
+"""Types for aiohappyeyeballs."""
+
+import socket
+from typing import Tuple, Union
+
+AddrInfoType = Tuple[
+ Union[int, socket.AddressFamily],
+ Union[int, socket.SocketKind],
+ int,
+ str,
+ Tuple, # type: ignore[type-arg]
+]
diff --git a/parrot/lib/python3.10/site-packages/aiohappyeyeballs/utils.py b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5745ae7f2b4bab73ae9d5491e43d5005b2ac318
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/aiohappyeyeballs/utils.py
@@ -0,0 +1,97 @@
+"""Utility functions for aiohappyeyeballs."""
+
+import ipaddress
+import socket
+from typing import Dict, List, Optional, Tuple, Union
+
+from .types import AddrInfoType
+
+
+def addr_to_addr_infos(
+ addr: Optional[
+ Union[Tuple[str, int, int, int], Tuple[str, int, int], Tuple[str, int]]
+ ]
+) -> Optional[List[AddrInfoType]]:
+ """Convert an address tuple to a list of addr_info tuples."""
+ if addr is None:
+ return None
+ host = addr[0]
+ port = addr[1]
+ is_ipv6 = ":" in host
+ if is_ipv6:
+ flowinfo = 0
+ scopeid = 0
+ addr_len = len(addr)
+ if addr_len >= 4:
+ scopeid = addr[3] # type: ignore[misc]
+ if addr_len >= 3:
+ flowinfo = addr[2] # type: ignore[misc]
+ addr = (host, port, flowinfo, scopeid)
+ family = socket.AF_INET6
+ else:
+ addr = (host, port)
+ family = socket.AF_INET
+ return [(family, socket.SOCK_STREAM, socket.IPPROTO_TCP, "", addr)]
+
+
+def pop_addr_infos_interleave(
+ addr_infos: List[AddrInfoType], interleave: Optional[int] = None
+) -> None:
+ """
+ Pop addr_info from the list of addr_infos by family up to interleave times.
+
+ The interleave parameter is used to know how many addr_infos for
+ each family should be popped of the top of the list.
+ """
+ seen: Dict[int, int] = {}
+ if interleave is None:
+ interleave = 1
+ to_remove: List[AddrInfoType] = []
+ for addr_info in addr_infos:
+ family = addr_info[0]
+ if family not in seen:
+ seen[family] = 0
+ if seen[family] < interleave:
+ to_remove.append(addr_info)
+ seen[family] += 1
+ for addr_info in to_remove:
+ addr_infos.remove(addr_info)
+
+
+def _addr_tuple_to_ip_address(
+ addr: Union[Tuple[str, int], Tuple[str, int, int, int]]
+) -> Union[
+ Tuple[ipaddress.IPv4Address, int], Tuple[ipaddress.IPv6Address, int, int, int]
+]:
+ """Convert an address tuple to an IPv4Address."""
+ return (ipaddress.ip_address(addr[0]), *addr[1:])
+
+
+def remove_addr_infos(
+ addr_infos: List[AddrInfoType],
+ addr: Union[Tuple[str, int], Tuple[str, int, int, int]],
+) -> None:
+ """
+ Remove an address from the list of addr_infos.
+
+ The addr value is typically the return value of
+ sock.getpeername().
+ """
+ bad_addrs_infos: List[AddrInfoType] = []
+ for addr_info in addr_infos:
+ if addr_info[-1] == addr:
+ bad_addrs_infos.append(addr_info)
+ if bad_addrs_infos:
+ for bad_addr_info in bad_addrs_infos:
+ addr_infos.remove(bad_addr_info)
+ return
+ # Slow path in case addr is formatted differently
+ match_addr = _addr_tuple_to_ip_address(addr)
+ for addr_info in addr_infos:
+ if match_addr == _addr_tuple_to_ip_address(addr_info[-1]):
+ bad_addrs_infos.append(addr_info)
+ if bad_addrs_infos:
+ for bad_addr_info in bad_addrs_infos:
+ addr_infos.remove(bad_addr_info)
+ return
+ raise ValueError(f"Address {addr} not found in addr_infos")
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3d67755e9eab4a2ba1b90753072b4a1032763c8
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/base_protocol.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/base_protocol.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f993d1e5a349a53b730f4ddc9023d8f26f8a24ca
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/base_protocol.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/http_parser.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/http_parser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7995893373c8e478f4f73d68975a1a95cbe89db
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/http_parser.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/http_websocket.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/http_websocket.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ccb0f3ebb6836232df62826ad067bdfab15452e0
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/http_websocket.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/locks.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/locks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f43403b2f0497db7f631494d6f89ab448f96e688
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/locks.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/log.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/log.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f77b0f9fcb76a114fa265c6354439b22404b4f08
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/log.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/multipart.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/multipart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7125e8f2c73197e400fe039398e6636500306ab9
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/multipart.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/tcp_helpers.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/tcp_helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..264aa3598426dd9fc3395c7466be8b303deb9b3d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/tcp_helpers.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/tracing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/tracing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99155d2a8777318344386fdf3b21cad154a17c84
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/tracing.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/typedefs.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/typedefs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11bcbb5231f7a1e39707b0477c9425498a1cd1ac
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/typedefs.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/web_app.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/web_app.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9840aa7a41948559f61b6a7ca1fe37a711c10205
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/web_app.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/web_runner.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/web_runner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb11179ea8c114b0d6445e32ef1bed2630067322
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/web_runner.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/worker.cpython-310.pyc b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/worker.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..611687d9bd2ef1a8e1e86efc55575ee3571ef312
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/aiohttp/__pycache__/worker.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc b/parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e248c1e9a9df1edb110e635de3bd46f94c088c37
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f7411f196ab7af30cb03dea4c904d1b55601b6c1dce9a01c94ca0b7fb77aa2d
+size 101191
diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/myst_blocks/__pycache__/index.cpython-310.pyc b/parrot/lib/python3.10/site-packages/mdit_py_plugins/myst_blocks/__pycache__/index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce9147dfaedc98d6d9d7430e427c638aea657b63
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/mdit_py_plugins/myst_blocks/__pycache__/index.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/myst_blocks/index.py b/parrot/lib/python3.10/site-packages/mdit_py_plugins/myst_blocks/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e4cf6aac70e952472169ae366681ed84dd0f52
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/myst_blocks/index.py
@@ -0,0 +1,153 @@
+import itertools
+
+from markdown_it import MarkdownIt
+from markdown_it.common.utils import escapeHtml, isSpace
+from markdown_it.rules_block import StateBlock
+
+
+def myst_block_plugin(md: MarkdownIt):
+ """Parse MyST targets (``(name)=``), blockquotes (``% comment``) and block breaks (``+++``)."""
+ md.block.ruler.before(
+ "blockquote",
+ "myst_line_comment",
+ line_comment,
+ {"alt": ["paragraph", "reference", "blockquote", "list", "footnote_def"]},
+ )
+ md.block.ruler.before(
+ "hr",
+ "myst_block_break",
+ block_break,
+ {"alt": ["paragraph", "reference", "blockquote", "list", "footnote_def"]},
+ )
+ md.block.ruler.before(
+ "hr",
+ "myst_target",
+ target,
+ {"alt": ["paragraph", "reference", "blockquote", "list", "footnote_def"]},
+ )
+ md.add_render_rule("myst_target", render_myst_target)
+ md.add_render_rule("myst_line_comment", render_myst_line_comment)
+
+
+def line_comment(state: StateBlock, startLine: int, endLine: int, silent: bool):
+
+ pos = state.bMarks[startLine] + state.tShift[startLine]
+ maximum = state.eMarks[startLine]
+
+ # if it's indented more than 3 spaces, it should be a code block
+ if state.sCount[startLine] - state.blkIndent >= 4:
+ return False
+
+ if state.src[pos] != "%":
+ return False
+
+ if silent:
+ return True
+
+ token = state.push("myst_line_comment", "", 0)
+ token.attrSet("class", "myst-line-comment")
+ token.content = state.src[pos + 1 : maximum].rstrip()
+ token.markup = "%"
+
+ # search end of block while appending lines to `token.content`
+ for nextLine in itertools.count(startLine + 1):
+ if nextLine >= endLine:
+ break
+ pos = state.bMarks[nextLine] + state.tShift[nextLine]
+ maximum = state.eMarks[nextLine]
+
+ if state.src[pos] != "%":
+ break
+ token.content += "\n" + state.src[pos + 1 : maximum].rstrip()
+
+ state.line = nextLine
+ token.map = [startLine, nextLine]
+
+ return True
+
+
+def block_break(state: StateBlock, startLine: int, endLine: int, silent: bool):
+
+ pos = state.bMarks[startLine] + state.tShift[startLine]
+ maximum = state.eMarks[startLine]
+
+ # if it's indented more than 3 spaces, it should be a code block
+ if state.sCount[startLine] - state.blkIndent >= 4:
+ return False
+
+ marker = state.srcCharCode[pos]
+ pos += 1
+
+ # Check block marker /* + */
+ if marker != 0x2B:
+ return False
+
+ # markers can be mixed with spaces, but there should be at least 3 of them
+
+ cnt = 1
+ while pos < maximum:
+ ch = state.srcCharCode[pos]
+ if ch != marker and not isSpace(ch):
+ break
+ if ch == marker:
+ cnt += 1
+ pos += 1
+
+ if cnt < 3:
+ return False
+
+ if silent:
+ return True
+
+ state.line = startLine + 1
+
+ token = state.push("myst_block_break", "hr", 0)
+ token.attrSet("class", "myst-block")
+ token.content = state.src[pos:maximum].strip()
+ token.map = [startLine, state.line]
+ token.markup = chr(marker) * cnt
+
+ return True
+
+
+def target(state: StateBlock, startLine: int, endLine: int, silent: bool):
+
+ pos = state.bMarks[startLine] + state.tShift[startLine]
+ maximum = state.eMarks[startLine]
+
+ # if it's indented more than 3 spaces, it should be a code block
+ if state.sCount[startLine] - state.blkIndent >= 4:
+ return False
+
+ text = state.src[pos:maximum].strip()
+ if not text.startswith("("):
+ return False
+ if not text.endswith(")="):
+ return False
+ if not text[1:-2]:
+ return False
+
+ if silent:
+ return True
+
+ state.line = startLine + 1
+
+ token = state.push("myst_target", "", 0)
+ token.attrSet("class", "myst-target")
+ token.content = text[1:-2]
+ token.map = [startLine, state.line]
+
+ return True
+
+
+def render_myst_target(self, tokens, idx, options, env):
+ label = tokens[idx].content
+ class_name = "myst-target"
+ target = f'<a href="#{label}">({label})=</a>'
+ return f'<div class="{class_name}">{target}</div>'
+
+
+def render_myst_line_comment(self, tokens, idx, options, env):
+ # Strip leading whitespace from all lines
+ content = "\n".join(line.lstrip() for line in tokens[idx].content.split("\n"))
+ return f"<!-- {escapeHtml(content)} -->"
diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/texmath/README.md b/parrot/lib/python3.10/site-packages/mdit_py_plugins/texmath/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f79f33563eb88e84849fa29f0e8986ccdf50a72c
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/texmath/README.md
@@ -0,0 +1,137 @@
+[](https://github.com/goessner/markdown-it-texmath/blob/master/licence.txt)
+[](https://www.npmjs.com/package/markdown-it-texmath)
+[](https://www.npmjs.com/package/markdown-it-texmath)
+
+# markdown-it-texmath
+
+Add TeX math equations to your Markdown documents rendered by [markdown-it](https://github.com/markdown-it/markdown-it) parser. [KaTeX](https://github.com/Khan/KaTeX) is used as a fast math renderer.
+
+## Features
+Simplify the process of authoring markdown documents containing math formulas.
+This extension is a comfortable tool for scientists, engineers and students with markdown as their first choice document format.
+
+* Macro support
+* Simple formula numbering
+* Inline math with tables, lists and blockquote.
+* User setting delimiters:
+ * `'dollars'` (default)
+ * inline: `$...$`
+ * display: `$$...$$`
+ * display + equation number: `$$...$$ (1)`
+ * `'brackets'`
+ * inline: `\(...\)`
+ * display: `\[...\]`
+ * display + equation number: `\[...\] (1)`
+ * `'gitlab'`
+ * inline: ``$`...`$``
+ * display: `` ```math ... ``` ``
+ * display + equation number: `` ```math ... ``` (1)``
+ * `'julia'`
+ * inline: `$...$` or ``` ``...`` ```
+ * display: `` ```math ... ``` ``
+ * display + equation number: `` ```math ... ``` (1)``
+ * `'kramdown'`
+ * inline: ``$$...$$``
+ * display: `$$...$$`
+ * display + equation number: `$$...$$ (1)`
+
+## Show me
+
+View a [test table](https://goessner.github.io/markdown-it-texmath/index.html).
+
+[try it out ...](https://goessner.github.io/markdown-it-texmath/markdown-it-texmath-demo.html)
+
+## Use with `node.js`
+
+Install the extension. Verify having `markdown-it` and `katex` already installed .
+```
+npm install markdown-it-texmath
+```
+Use it with JavaScript.
+```js
+let kt = require('katex'),
+ tm = require('markdown-it-texmath').use(kt),
+ md = require('markdown-it')().use(tm,{delimiters:'dollars',macros:{"\\RR": "\\mathbb{R}"}});
+
+md.render('Euler\'s identity \(e^{i\pi}+1=0\) is a beautiful formula in $\\RR 2$.')
+```
+
+## Use in Browser
+```html
+<!doctype html>
+<html>
+<head>
+<meta charset='utf-8'>
+<link rel="stylesheet" href="katex.min.css">
+<link rel="stylesheet" href="texmath.css">
+<script src="markdown-it.min.js"></script>
+<script src="texmath.js"></script>
+<script src="katex.min.js"></script>
+</head>
+<body>
+<div id="out"></div>
+<script>out.innerHTML = markdownit().use(texmath.use(katex)).render('$e^{i\\pi}+1=0$');</script>
+</body>
+</html>
+## CDN
+
+Use following links for `texmath.js` and `texmath.css`
+* `https://gitcdn.xyz/cdn/goessner/markdown-it-texmath/master/texmath.js`
+* `https://gitcdn.xyz/cdn/goessner/markdown-it-texmath/master/texmath.css`
+
+## Dependencies
+
+* [`markdown-it`](https://github.com/markdown-it/markdown-it): Markdown parser done right. Fast and easy to extend.
+* [`katex`](https://github.com/Khan/KaTeX): This is where credits for fast rendering TeX math in HTML go to.
+
+## ToDo
+
+ nothing yet
+
+## FAQ
+
+* __`markdown-it-texmath` with React Native does not work, why ?__
+ * `markdown-it-texmath` is using regular expressions with `y` [(sticky) property](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp/sticky) and cannot avoid this. The use of the `y` flag in regular expressions means the plugin is not compatible with React Native (which as of now doesn't support it and throws an error `Invalid flags supplied to RegExp constructor`).
+
+## CHANGELOG
+
+### [0.6.0] on October 04, 2019
+* Add support for [Julia Markdown](https://docs.julialang.org/en/v1/stdlib/Markdown/) on [request](https://github.com/goessner/markdown-it-texmath/issues/15).
+
+### [0.5.5] on February 07, 2019
+* Remove [rendering bug with brackets delimiters](https://github.com/goessner/markdown-it-texmath/issues/9).
+
+### [0.5.4] on January 20, 2019
+* Remove pathological [bug within blockquotes](https://github.com/goessner/mdmath/issues/50).
+
+### [0.5.3] on November 11, 2018
+* Add support for Tex macros (https://katex.org/docs/supported.html#macros) .
+* Bug with [brackets delimiters](https://github.com/goessner/markdown-it-texmath/issues/9) .
+
+### [0.5.2] on September 07, 2018
+* Add support for [Kramdown](https://kramdown.gettalong.org/) .
+
+### [0.5.0] on August 15, 2018
+* Fatal blockquote bug investigated. Implemented workaround to vscode bug, which has finally gone with vscode 1.26.0 .
+
+### [0.4.6] on January 05, 2018
+* Escaped underscore bug removed.
+
+### [0.4.5] on November 06, 2017
+* Backslash bug removed.
+
+### [0.4.4] on September 27, 2017
+* Modifying the `block` mode regular expression with `gitlab` delimiters, so removing the `newline` bug.
+
+## License
+
+`markdown-it-texmath` is licensed under the [MIT License](./license.txt)
+
+ © [Stefan Gössner](https://github.com/goessner)
diff --git a/parrot/lib/python3.10/site-packages/mdit_py_plugins/texmath/__init__.py b/parrot/lib/python3.10/site-packages/mdit_py_plugins/texmath/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0c258875f102750ef36836e8802098f65916b00
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/mdit_py_plugins/texmath/__init__.py
@@ -0,0 +1 @@
+from .index import texmath_plugin # noqa F401
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__init__.py b/parrot/lib/python3.10/site-packages/simple_parsing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..59fbbf3fd62c751ea6758433e0c2905719db26a1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/__init__.py
@@ -0,0 +1,60 @@
+"""Simple, Elegant Argument parsing.
+
+@author: Fabrice Normandin
+"""
+from . import helpers, utils, wrappers
+from .conflicts import ConflictResolution
+from .decorators import main
+from .help_formatter import SimpleHelpFormatter
+from .helpers import (
+ Partial,
+ Serializable,
+ choice,
+ config_for,
+ field,
+ flag,
+ list_field,
+ mutable_field,
+ subgroups,
+ subparsers,
+)
+from .parsing import (
+ ArgumentGenerationMode,
+ ArgumentParser,
+ DashVariant,
+ NestedMode,
+ ParsingError,
+ parse,
+ parse_known_args,
+)
+from .replace import replace, replace_subgroups
+from .utils import InconsistentArgumentError
+
+__all__ = [
+ "ArgumentGenerationMode",
+ "ArgumentParser",
+ "choice",
+ "config_for",
+ "ConflictResolution",
+ "DashVariant",
+ "field",
+ "flag",
+ "helpers",
+ "InconsistentArgumentError",
+ "list_field",
+ "main",
+ "mutable_field",
+ "NestedMode",
+ "parse_known_args",
+ "parse",
+ "ParsingError",
+ "Partial",
+ "replace",
+ "replace_subgroups",
+ "Serializable",
+ "SimpleHelpFormatter",
+ "subgroups",
+ "subparsers",
+ "utils",
+ "wrappers",
+]
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ee64eab1e676c840dfe575897a3d583abb222e4
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/conflicts.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/conflicts.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5637bb10e0124f8bbe1ef4739d5f8c31d3410f0f
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/conflicts.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/decorators.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/decorators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b50b0b1ba0d087a718c184121daae9f4e1bcb75
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/decorators.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/docstring.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/docstring.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42e75255317b35a8fc6285b00dc9edadbcb44c3d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/docstring.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/help_formatter.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/help_formatter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24e398e5f9252e91ef560c0ce57294f5d857fdbd
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/help_formatter.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/parsing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/parsing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..851f867d666875b3af466c5d8896407aa646dd32
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/parsing.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/replace.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/replace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aed39205bfd5114e8989b5e740432c0e0034a7b3
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/replace.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09df84e39eb29993f39ab5d0915f6488aad914a0
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/__pycache__/utils.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__init__.py b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17f505d75693bc01a195de7f97c0d82968961b1c
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__pycache__/get_field_annotations.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__pycache__/get_field_annotations.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..239a7ba59d0853eb926bcfabaa2418c2b253f023
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/__pycache__/get_field_annotations.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/get_field_annotations.py b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/get_field_annotations.py
new file mode 100644
index 0000000000000000000000000000000000000000..a45f2d79422cbc8189acea57a206d997d58085c1
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/annotation_utils/get_field_annotations.py
@@ -0,0 +1,251 @@
+import collections
+import inspect
+import sys
+import types
+import typing
+from contextlib import contextmanager
+from dataclasses import InitVar
+from itertools import dropwhile
+from logging import getLogger as get_logger
+from typing import Any, Dict, Iterator, Optional, get_type_hints
+
+logger = get_logger(__name__)
+
# NOTE: This dict is used to enable forward compatibility with things such as `tuple[int, str]`,
# `list[float]`, etc. when using `from __future__ import annotations`.
# It maps the builtin-generic names (as they appear in string annotations) to the
# equivalent subscriptable `typing` aliases, which also work on older Python versions.
forward_refs_to_types = {
    "tuple": typing.Tuple,
    "set": typing.Set,
    "dict": typing.Dict,
    "list": typing.List,
    "type": typing.Type,
}
+
+
@contextmanager
def _initvar_patcher() -> Iterator[None]:
    """Temporarily patch `InitVar` so that `get_type_hints` doesn't fail on it.

    `TypeVar('Forward references must evaluate to types. Got dataclasses.InitVar[tp].')` is raised
    when postponed annotations are enabled and `get_type_hints` is called.
    Bug is mentioned here https://github.com/python/cpython/issues/88962
    In python 3.11 this is fixed, but backport fix is not planned for old releases

    Workaround is mentioned here https://stackoverflow.com/q/70400639

    NOTE: The original implementation removed the patch outside of a ``finally`` block,
    which leaked the patched ``__call__`` onto `dataclasses.InitVar` whenever the wrapped
    code raised. The patch is now always undone, even on error.
    """
    if sys.version_info[:2] < (3, 11):
        # Make `InitVar[...]` "callable" so `get_type_hints` can evaluate it without raising.
        InitVar.__call__ = lambda *args: None
        try:
            yield
        finally:
            # Always restore `InitVar` to its pristine state, even if the body raised.
            del InitVar.__call__
    else:
        yield
+
+
def evaluate_string_annotation(annotation: str, containing_class: Optional[type] = None) -> type:
    """Evaluate a string annotation and return the corresponding 'live' type.

    Any exceptions raised during evaluation propagate unchanged.

    NOTE: This is probably not 100% safe, since the annotation string is `eval`-ed:
    a malicious annotation string could execute arbitrary code. The assumption is
    that type annotations in user code are trusted.
    """
    # Annotations may be plain strings when `from __future__ import annotations` is used.
    # Make `typing` itself, everything inside it, and the builtin-generic aliases
    # (tuple/list/dict/...) visible to the evaluation.
    eval_locals: Dict[str, Any] = {"typing": typing, **vars(typing)}
    eval_locals.update(forward_refs_to_types)

    # Names from the module where the containing class was defined, if one was given.
    eval_globals = sys.modules[containing_class.__module__].__dict__ if containing_class else {}

    if "|" in annotation:
        # Rewrite new-style unions (`A | B`) into `Union[A, B]` before evaluating.
        annotation = _get_old_style_annotation(annotation)
    evaluated: type = eval(annotation, eval_locals, eval_globals)
    return evaluated
+
+
def _replace_UnionType_with_typing_Union(annotation):
    """Recursively convert new-style unions (`types.UnionType`, e.g. ``int | str``) into
    the equivalent `typing.Union`, descending into list/tuple/dict subscripts.

    Raises NotImplementedError for annotation shapes that aren't handled here.
    """
    from simple_parsing.utils import builtin_types, is_dict, is_list, is_tuple

    if sys.version_info[:2] < (3, 10):
        # This is only useful for python 3.10+ (where UnionTypes exist).
        # Therefore just return the annotation as-is.
        return annotation

    if isinstance(annotation, types.UnionType):  # type: ignore
        union_args = typing.get_args(annotation)
        # Convert each member first, so nested new-style unions are also rewritten.
        new_union_args = tuple(_replace_UnionType_with_typing_Union(arg) for arg in union_args)
        return typing.Union[new_union_args]  # type: ignore
    if is_list(annotation):
        item_annotation = typing.get_args(annotation)[0]
        new_item_annotation = _replace_UnionType_with_typing_Union(item_annotation)
        return typing.List[new_item_annotation]
    if is_tuple(annotation):
        item_annotations = typing.get_args(annotation)
        new_item_annotations = tuple(
            _replace_UnionType_with_typing_Union(arg) for arg in item_annotations
        )
        return typing.Tuple[new_item_annotations]  # type: ignore
    if is_dict(annotation):
        annotations = typing.get_args(annotation)
        if not annotations:
            # Bare `dict` with no type arguments.
            return typing.Dict
        assert len(annotations) == 2
        key_annotation = annotations[0]
        value_annotation = annotations[1]
        new_key_annotation = _replace_UnionType_with_typing_Union(key_annotation)
        new_value_annotation = _replace_UnionType_with_typing_Union(value_annotation)
        return typing.Dict[new_key_annotation, new_value_annotation]
    if annotation in builtin_types:
        # Plain builtin type (int, str, ...): nothing to rewrite.
        return annotation
    if inspect.isclass(annotation):
        return annotation
    raise NotImplementedError(annotation)
+
+
+# # return forward_refs_to_types.get(ann, local_ns.get(ann, global_ns.get(ann, getattr(builtins, ann, ann))))
+
+
def _not_supported(annotation) -> typing.NoReturn:
    """Fail loudly for annotation shapes that aren't handled yet."""
    message = f"Don't yet support annotations like this: {annotation}"
    raise NotImplementedError(message)
+
+
def _get_old_style_annotation(annotation: str) -> str:
    """Replaces ``A | B`` with ``Union[A, B]`` in the annotation string.

    Works recursively on subscripted annotations such as ``list[int | None]`` and
    multi-argument subscripts such as ``dict[str, int | None]``.

    NOTE: The original assertion message contained garbled placeholder text
    ("can't have text at HERE in []!"); it has been replaced with a readable message.
    The behavior of the function is otherwise unchanged.
    """
    # TODO: Add proper support for things like `list[int | float]`, which isn't currently
    # working, even without the new-style union.
    if "|" not in annotation:
        # Nothing to rewrite.
        return annotation

    annotation = annotation.strip()
    if "[" not in annotation:
        assert "]" not in annotation
        # Flat union, e.g. "int | str" -> "Union[int, str]".
        return "Union[" + ", ".join(v.strip() for v in annotation.split("|")) + "]"

    before, lsep, rest = annotation.partition("[")
    middle, rsep, after = rest.rpartition("]")
    # BUG: Need to handle things like bob[int] | None
    assert (
        not after.strip()
    ), f"Expected no text after the closing ']' in the annotation: {annotation!r}"

    if "|" in before or "|" in after:
        _not_supported(annotation)
    assert "|" in middle

    if "," in middle:
        # Rewrite each comma-separated subscript argument individually.
        parts = [v.strip() for v in middle.split(",")]
        parts = [_get_old_style_annotation(part) for part in parts]
        middle = ", ".join(parts)

    new_middle = _get_old_style_annotation(annotation=middle)
    new_annotation = before + lsep + new_middle + rsep + after
    return new_annotation
+
+
def _replace_new_union_syntax_with_old_union_syntax(
    annotations_dict: Dict[str, str], context: collections.ChainMap
) -> Dict[str, Any]:
    """Return a copy of `annotations_dict` where every ``A | B`` union uses ``Union[A, B]``.

    NOTE(review): `context` is not used here; presumably kept for interface
    compatibility with callers — confirm before removing.
    """
    converted = annotations_dict.copy()
    for name in annotations_dict:
        converted[name] = _get_old_style_annotation(annotations_dict[name])

    return converted
+
+
def get_field_type_from_annotations(some_class: type, field_name: str) -> type:
    """Get the annotation for the given field, in the 'old-style' format with types from
    typing.List, typing.Union, etc.

    If the script uses `from __future__ import annotations`, and we are in python<3.9,
    Then we need to actually first make this forward-compatibility 'patch' so that we
    don't run into a "`type` object is not subscriptable" error.

    Raises ValueError if `field_name` cannot be found in the class' annotations.

    NOTE: If you get errors of this kind from the function below, then you might want to add an
    entry to the `forward_refs_to_types` dict above.
    """

    # Pretty hacky: Modify the type annotations of the class (preferably a copy of the class
    # if possible, to avoid modifying things in-place), and replace the `a | b`-type
    # expressions with `Union[a, b]`, so that `get_type_hints` doesn't raise an error.
    # The type of the field might be a string when using `from __future__ import annotations`.

    # The type of the field might be a string when using `from __future__ import annotations`.
    # Get the local and global namespaces to pass to the `get_type_hints` function.
    local_ns: Dict[str, Any] = {"typing": typing, **vars(typing)}
    local_ns.update(forward_refs_to_types)

    # NOTE: Get the local namespace of the calling function / module where this class is defined,
    # and use it to get the correct type of the field, if it is a forward reference.
    # Walk up the call stack until a frame whose locals contain `some_class` is found.
    # NOTE(review): assumes `inspect.currentframe()` returns a frame (CPython); on
    # implementations without frame support this would raise AttributeError — confirm.
    frame = inspect.currentframe()
    # stack = []
    while frame.f_back is not None and frame.f_locals.get(some_class.__name__) is not some_class:
        # stack.append(frame)
        frame = frame.f_back
    # Found the frame with the dataclass definition. Update the locals. This makes it possible to
    # use dataclasses defined in local scopes!
    if frame is not None:
        local_ns.update(frame.f_locals)

    # Get the global_ns in the module starting from the deepest base until the module with the field_name last definition.
    # `dropwhile` skips MRO entries that don't declare `field_name` at all.
    global_ns = {}
    classes_to_iterate = list(
        dropwhile(
            lambda cls: field_name not in getattr(cls, "__annotations__", {}), some_class.mro()
        )
    )
    for base_cls in reversed(classes_to_iterate):
        global_ns.update(sys.modules[base_cls.__module__].__dict__)

    try:
        with _initvar_patcher():
            annotations_dict = get_type_hints(some_class, localns=local_ns, globalns=global_ns)
    except TypeError:
        # `get_type_hints` failed (e.g. unresolvable forward reference): fall back to the
        # raw, unevaluated `__annotations__` of the whole MRO, closest class first.
        annotations_dict = collections.ChainMap(
            *[getattr(cls, "__annotations__", {}) for cls in some_class.mro()]
        )

    if field_name not in annotations_dict:
        raise ValueError(f"Field {field_name} not found in annotations of class {some_class}")

    field_type = annotations_dict[field_name]

    if sys.version_info[:2] >= (3, 7) and isinstance(field_type, typing.ForwardRef):
        # Weird bug happens when mixing postponed evaluation of type annotations + forward
        # references: The ForwardRefs are left as-is, and not evaluated!
        # Fall back to the raw annotation string; it is re-evaluated below.
        forward_arg = field_type.__forward_arg__
        field_type = forward_arg

    if sys.version_info >= (3, 10) and isinstance(field_type, types.UnionType):
        # In python >= 3.10, int | float is allowed. Therefore, just to be consistent, we want
        # to convert those into the corresponding typing.Union type.
        # This is necessary for the rest of the code to work, since it's all based on typing.Union.
        field_type = _replace_UnionType_with_typing_Union(field_type)

    if isinstance(field_type, str) and "|" in field_type:
        # String annotation containing a new-style union: rewrite to `Union[...]` syntax.
        field_type = _get_old_style_annotation(field_type)

    # Pretty hacky:
    # In order to use `get_type_hints`, we need to pass it a class. We can't just ask it to
    # evaluate a single annotation. Therefore, we create a temporary class and set it's
    # __annotation__ attribute, which is introspected by `get_type_hints`.

    try:

        class Temp_:
            pass

        Temp_.__annotations__ = {field_name: field_type}
        with _initvar_patcher():
            annotations_dict = get_type_hints(Temp_, globalns=global_ns, localns=local_ns)
        field_type = annotations_dict[field_name]
    except Exception:
        # Best-effort: if the forward reference can't be evaluated, keep the raw annotation.
        logger.warning(
            f"Unable to evaluate forward reference {field_type} for field '{field_name}'.\n"
            f"Leaving it as-is."
        )
        # (no-op assignment kept from the original: the unevaluated annotation is returned as-is)
        field_type = field_type

    return field_type
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/conflicts.py b/parrot/lib/python3.10/site-packages/simple_parsing/conflicts.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d522fcb78f97b15bfe5b1ba21c836357f4a59a8
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/conflicts.py
@@ -0,0 +1,389 @@
+from __future__ import annotations
+
+import enum
+from collections import defaultdict
+from logging import getLogger
+from typing import NamedTuple
+
+from .wrappers import DataclassWrapper, FieldWrapper
+
+logger = getLogger(__name__)
+
+
class ConflictResolution(enum.Enum):
    """Determines prefixing when adding the same dataclass more than once.

    - NONE:
        Disallow using the same dataclass in two different destinations without
        explicitly setting a distinct prefix for at least one of them.

    - EXPLICIT:
        When adding arguments for a dataclass that is already present, the
        argparse arguments for each class will use their full absolute path as a
        prefix.

    - ALWAYS_MERGE:
        When adding arguments for a dataclass that has previously been added,
        the arguments for both the old and new destinations will be set using
        the same option_string, and the passed values for the old and new
        destinations will correspond to the first and second values,
        respectively.
        NOTE: This changes the argparse type for that argument into a list of
        the original item type.

    - AUTO (default):
        Prefixes for each destination are created automatically, using the first
        discriminative prefix that can differentiate between all the conflicting
        arguments.
    """

    # Numeric values are arbitrary identifiers; only identity/equality of the members matters.
    NONE = -1
    EXPLICIT = 0
    ALWAYS_MERGE = 1
    AUTO = 2
+
+
class ConflictResolutionError(Exception):
    """Raised when conflicting option strings cannot be resolved.

    NOTE: The original class defined an `__init__` that merely forwarded all arguments to
    `Exception.__init__`, which is exactly the inherited behavior; the redundant override
    was removed. The constructor signature and behavior are unchanged.
    """
+
+
class Conflict(NamedTuple):
    """An option string together with all the field wrappers that want to generate it."""

    # The conflicting option string (shared by two or more fields).
    option_string: str
    # The FieldWrappers that all produce `option_string`.
    wrappers: list[FieldWrapper]
+
+
def unflatten(possibly_related_wrappers: "list[DataclassWrapper]") -> "list[DataclassWrapper]":
    """Keep only the top-level wrappers (those without a parent) from a flattened list."""
    roots = []
    for candidate in possibly_related_wrappers:
        if candidate.parent is None:
            roots.append(candidate)
    return roots
+
+
class ConflictResolver:
    """Detects conflicts between the option strings of field wrappers and resolves them
    according to the configured `ConflictResolution` strategy."""

    def __init__(self, conflict_resolution=ConflictResolution.AUTO):
        # Strategy used when two fields want the same option string.
        self.conflict_resolution = conflict_resolution
        # Safety limit on resolution iterations before giving up with an error.
        self.max_attempts = 50

    def resolve_and_flatten(self, wrappers: list[DataclassWrapper]) -> list[DataclassWrapper]:
        """Given the list of all dataclass wrappers, find and resolve any conflicts between fields.

        Returns the new list of (possibly mutated in-place) dataclass wrappers. This returned list
        is flattened, i.e. it contains all the dataclass wrappers and their children.

        Raises ConflictResolutionError if the conflict can't be resolved under the current
        strategy, or if `max_attempts` iterations were not enough to resolve all conflicts.
        """
        from simple_parsing.parsing import _assert_no_duplicates, _flatten_wrappers

        wrappers = wrappers.copy()

        _assert_no_duplicates(wrappers)
        wrappers_flat = _flatten_wrappers(wrappers)

        dests = [w.dest for w in wrappers_flat]
        assert len(dests) == len(set(dests)), f"shouldn't be any duplicates: {wrappers_flat}"

        conflict = self.get_conflict(wrappers_flat)

        # current and maximum number of attempts. When reached, raises an error.
        cur_attempts = 0
        while conflict:
            # NOTE(review): the joined wrapper list isn't separated from the surrounding text
            # by newlines, so this message reads somewhat run-together — cosmetic only.
            message: str = (
                "The following wrappers are in conflict, as they share the "
                + f"'{conflict.option_string}' option string:"
                + ("\n".join(str(w) for w in conflict.wrappers))
                + f"(Conflict Resolution mode is {self.conflict_resolution})"
            )
            logger.debug(message)

            if self.conflict_resolution == ConflictResolution.NONE:
                raise ConflictResolutionError(message)

            elif self.conflict_resolution == ConflictResolution.EXPLICIT:
                self._fix_conflict_explicit(conflict)

            elif self.conflict_resolution == ConflictResolution.ALWAYS_MERGE:
                wrappers_flat = self._fix_conflict_merge(conflict, wrappers_flat)

            elif self.conflict_resolution == ConflictResolution.AUTO:
                self._fix_conflict_auto(conflict)

            conflict = self.get_conflict(wrappers_flat)
            cur_attempts += 1
            if cur_attempts == self.max_attempts:
                raise ConflictResolutionError(
                    f"Reached maximum number of attempts ({self.max_attempts}) "
                    "while trying to solve the conflicting argument names. "
                    "This is either a bug, or there is something weird going "
                    "on with your class hierarchy/argument names... \n"
                    "In any case, Please help us by submitting an issue on "
                    "the Github repo at "
                    "https://github.com/lebrice/SimpleParsing/issues, "
                    "or by using the following link: "
                    "https://github.com/lebrice/SimpleParsing/issues/new?"
                    "assignees=lebrice&"
                    "labels=bug"
                    "&template=bug_report.md"
                    "&title=BUG: ConflictResolutionError"
                )

        assert not self._conflict_exists(wrappers_flat)
        return wrappers_flat

    def resolve(self, wrappers: list[DataclassWrapper]) -> list[DataclassWrapper]:
        """Resolve all conflicts and return only the top-level (root) wrappers."""
        return unflatten(self.resolve_and_flatten(wrappers))

    def get_conflict(
        self, wrappers: list[DataclassWrapper] | list[FieldWrapper]
    ) -> Conflict | None:
        """Return the first Conflict found among the given wrappers' fields, or None."""
        field_wrappers: list[FieldWrapper] = []
        for w in wrappers:
            if isinstance(w, DataclassWrapper):
                field_wrappers.extend(w.fields)
                # logger.debug(f"Wrapper {w.dest} has fields {w.fields}")
            else:
                field_wrappers.append(w)

        assert len(field_wrappers) == len(set(field_wrappers)), "duplicates?"

        # TODO: #49: Also consider the conflicts with regular argparse arguments.
        # Group the field wrappers by each option string they generate.
        conflicts: dict[str, list[FieldWrapper]] = defaultdict(list)
        for field_wrapper in field_wrappers:
            for option_string in field_wrapper.option_strings:
                conflicts[option_string].append(field_wrapper)
                # logger.debug(f"conflicts[{option_string}].append({repr(field_wrapper)})")

        # NOTE(review): `field_wrappers` is intentionally rebound by this loop variable.
        for option_string, field_wrappers in conflicts.items():
            if len(field_wrappers) > 1:
                return Conflict(option_string, field_wrappers)
        return None

    def _add(
        self,
        wrapper: DataclassWrapper | FieldWrapper,
        wrappers: list[DataclassWrapper],
    ) -> list[DataclassWrapper]:
        """Add the given wrapper and all its descendants to the list of wrappers."""
        if isinstance(wrapper, FieldWrapper):
            # A FieldWrapper is added through its containing DataclassWrapper.
            wrapper = wrapper.parent
        assert isinstance(wrapper, DataclassWrapper)
        logger.debug(f"Adding new DataclassWrapper: {wrapper}")
        wrappers.append(wrapper)
        wrappers.extend(wrapper.descendants)

        return wrappers

    def _remove(
        self,
        wrapper: DataclassWrapper | FieldWrapper,
        wrappers: list[DataclassWrapper],
    ):
        """Remove the given wrapper and all its descendants from the list of wrappers."""
        if isinstance(wrapper, FieldWrapper):
            # A FieldWrapper is removed through its containing DataclassWrapper.
            wrapper = wrapper.parent
        assert isinstance(wrapper, DataclassWrapper)
        logger.debug(f"Removing DataclassWrapper {wrapper}")
        wrappers.remove(wrapper)
        for child in wrapper.descendants:
            logger.debug(f"\tAlso removing Child DataclassWrapper {child}")
            wrappers.remove(child)
        # TODO: Should we also remove the reference to this wrapper from its parent?
        for other_wrapper in wrappers:
            if wrapper in other_wrapper._children:
                other_wrapper._children.remove(wrapper)
        return wrappers

    def _fix_conflict_explicit(self, conflict: Conflict):
        """Fixes conflicts between arguments following the "Explicit" approach.

        The Explicit approach gives a prefix to each argument which points to
        exactly where the argument is stored in the resulting Namespace. There
        can therefore not be any confusion between arguments, at the cost of
        having lengthy option strings.

        Parameters
        ----------
        - conflict : Conflict

            The conflict to handle/fix.

        Raises
        ------
        ConflictResolutionError
            If it's impossible to fix the conflict.
        """
        logger.debug(f"fixing explicit conflict: {conflict}")

        if any(w.prefix for w in conflict.wrappers):
            raise ConflictResolutionError(
                "When in 'Explicit' mode, there shouldn't be a need for any user-set prefixes."
                "Just let the ArgumentParser set the explicit prefixes for all fields, and there won't be a conflict."
            )
        # TODO: Only set an explicit prefix on the fields that are in conflict

        # Check that there is no conflict between the fields after setting the explicit prefix.
        # If there is, that means that this conflict can't be fixed automatically, and a manual prefix should be set by the user.
        for field_wrapper in conflict.wrappers:
            explicit_prefix = field_wrapper.parent.dest + "."
            field_wrapper.prefix = explicit_prefix

        another_conflict = self.get_conflict(conflict.wrappers)
        if another_conflict and another_conflict.option_string == conflict.option_string:
            raise ConflictResolutionError(
                f"There is a conflict over the '{conflict.option_string}' "
                "option string, even after adding an explicit prefix to all "
                "the conflicting fields! \n"
                "To solve this, You can either use a different argument name, "
                "a different destination, or pass a differentiating prefix to "
                "`parser.add_arguments(, dest=destination, "
                "prefix=prefix)`"
            )

    def _fix_conflict_auto(self, conflict: Conflict):
        """Fixes a conflict using the AUTO method.

        Tries to find a discriminating prefix of minimal length for all the conflicting fields, using roughly the following pseudocode:

        1. Sort the field wrappers by ascending nesting level.
           ("parent/root" wrappers first, children "leaf" wrappers last)
        2. If the first wrapper is less nested than the others, remove it from the list (don't change its prefix)
        3. For all the remaining wrappers, add one more "word" from their lineage (dest attribute) to their prefix,
           starting from the end and moving towards the parent.
        4. If there is no conflict left, exit, else, return to step 1 with the new conflict.
           (This is performed implicitly by the method that calls this function, since it loops while there is a conflict).

        Parameters
        ----------
        - conflict : Conflict

            The Conflict NamedTuple containing the conflicting option_string, as well as the conflicting `FieldWrapper`s.

        Raises
        ------
        ConflictResolutionError
            If it's impossible to fix the conflict.
        """
        field_wrappers = sorted(conflict.wrappers, key=lambda w: w.nesting_level)
        logger.debug(f"Conflict with options string '{conflict.option_string}':")
        for i, field in enumerate(field_wrappers):
            logger.debug(f"Field wrapper #{i+1}: {field} nesting level: {field.nesting_level}.")

        assert (
            len(set(field_wrappers)) >= 2
        ), "Need at least 2 (distinct) FieldWrappers to have a conflict..."

        first_wrapper = field_wrappers[0]
        second_wrapper = field_wrappers[1]
        if first_wrapper.nesting_level < second_wrapper.nesting_level:
            # IF the first field_wrapper is a 'parent' of the following field_wrappers, then it maybe doesn't need an additional prefix.
            logger.debug(
                f"The first FieldWrapper is less nested than the others, removing it. ({first_wrapper})"
            )
            field_wrappers.remove(first_wrapper)

        # add one more word to each of the remaining field_wrappers.
        for field_wrapper in field_wrappers:
            # Get the current and explicit (maximum) prefix:
            current_prefix = field_wrapper.prefix
            explicit_prefix = field_wrapper.parent.dest + "."

            logger.debug(f"current prefix: {current_prefix}, explicit prefix: {explicit_prefix}")
            if current_prefix == explicit_prefix:
                # We can't add any more words to the prefix of this FieldWrapper,
                # as it has already a prefix equivalent to its full destination...
                raise ConflictResolutionError(
                    " ".join(
                        [
                            f"Cannot fix the conflict for the Options string {conflict.option_string},",
                            f"as the field {field_wrapper} already has the most explicit",
                            "prefix possible, and thus we can't add an additional",
                            "discriminating word to its prefix.",
                            "\n Consider modifying either the destination or the prefix",
                            "passed to `parser.add_arguments(, dest=destination, prefix=prefix)",
                        ]
                    )
                )

            # find the next 'word' to add to the prefix.
            available_words = list(filter(bool, explicit_prefix.split(".")))
            used_words = list(filter(bool, current_prefix.split(".")))
            assert len(available_words) > len(
                used_words
            ), "There should at least one word we haven't used yet!"
            logger.debug(f"Available words: {available_words}, used_words: {used_words}")

            # Walk the lineage from the leaf end towards the root: the next word to
            # prepend is the one just before the words already used.
            n_available_words = len(available_words)
            n_used_words = len(used_words)
            word_to_add = available_words[(n_available_words - 1) - n_used_words]
            logger.debug(f"Word to be added: {word_to_add}")
            field_wrapper.prefix = word_to_add + "." + current_prefix
            logger.debug(f"New prefix: {field_wrapper.prefix}")

    def _fix_conflict_merge(self, conflict: Conflict, wrappers_flat: list[DataclassWrapper]):
        """Fix conflicts using the merging approach.

        The first wrapper is kept, and the rest of the wrappers are absorbed
        into the first wrapper.

        # TODO: check that the ordering of arguments is still preserved!

        Parameters
        ----------
        conflict : Conflict
            The conflict NamedTuple.
        """
        fields = sorted(conflict.wrappers, key=lambda w: w.nesting_level)
        logger.debug(f"Conflict with options string '{conflict.option_string}':")
        for field in fields:
            logger.debug(f"Field wrapper: {field} nesting level: {field.nesting_level}.")

        assert len(conflict.wrappers) > 1

        # Merge all the fields into the first one.
        first_wrapper: FieldWrapper = fields[0]
        wrappers = wrappers_flat.copy()

        first_containing_dataclass: DataclassWrapper = first_wrapper.parent
        original_parent = first_containing_dataclass.parent
        wrappers = self._remove(first_containing_dataclass, wrappers)

        for wrapper in conflict.wrappers[1:]:
            containing_dataclass = wrapper.parent
            wrappers = self._remove(containing_dataclass, wrappers)
            first_containing_dataclass.merge(containing_dataclass)

        assert first_containing_dataclass.multiple
        # Re-add the (now merged) wrapper, and restore its link to its parent.
        wrappers = self._add(first_containing_dataclass, wrappers)
        if original_parent:
            original_parent._children.append(first_containing_dataclass)
        return wrappers

    def _get_conflicting_group(self, all_wrappers: list[DataclassWrapper]) -> Conflict | None:
        """Return the conflicting DataclassWrappers which share argument names.

        TODO: maybe return the list of fields, rather than the dataclasses?
        """
        conflicts: dict[str, list[FieldWrapper]] = defaultdict(list)
        for wrapper in all_wrappers:
            for field in wrapper.fields:
                for option in field.option_strings:
                    conflicts[option].append(field)

        for option_string, fields in conflicts.items():
            if len(fields) > 1:
                # the dataclasses of the fields that share the same name.
                # wrappers: List[DataclassWrapper] = [f.parent for f in fields]
                # dataclasses = [wrapper.dataclass for wrapper in wrappers]
                # prefixes = [wrapper.prefix for wrapper in wrappers]
                # return Conflict(dataclasses[0], prefixes[0], wrappers)
                return Conflict(option_string, fields)
        return None

    def _conflict_exists(self, all_wrappers: list[DataclassWrapper]) -> bool:
        """Return True whenever a conflict exists.

        (option strings overlap).
        """
        arg_names: set[str] = set()
        for wrapper in all_wrappers:
            for field in wrapper.fields:
                for option in field.option_strings:
                    if option in arg_names:
                        return True
                    arg_names.add(option)
        return False
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/decorators.py b/parrot/lib/python3.10/site-packages/simple_parsing/decorators.py
new file mode 100644
index 0000000000000000000000000000000000000000..16162c716be5739b9cd5d99cce83b7fd9b175a48
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/decorators.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+import collections
+import dataclasses
+import functools
+import inspect
+import typing
+from typing import Any, Callable, NamedTuple
+
+import docstring_parser as dp
+
+from simple_parsing.docstring import dp_parse, inspect_getdoc
+
+from . import helpers, parsing
+
+
+class _Field(NamedTuple):
+ name: str
+ annotation: type
+ field: dataclasses.Field
+
+
+def _description_from_docstring(docstring: dp.Docstring) -> str:
+ """Construct a description from the short and long description of a docstring."""
+ description = ""
+ if docstring.short_description:
+ description += f"{docstring.short_description}\n"
+ if docstring.blank_after_short_description:
+ description += "\n"
+ if docstring.long_description:
+ description += f"{docstring.long_description}\n"
+ if docstring.blank_after_long_description:
+ description += "\n"
+ return description
+
+
+@typing.overload
+def main(original_function: None = None, **sp_kwargs) -> Callable[..., Callable[..., Any]]:
+ ...
+
+
+@typing.overload
+def main(original_function: Callable[..., Any], **sp_kwargs) -> Callable[..., Any]:
+ ...
+
+
+def main(original_function=None, **sp_kwargs):
+ """Parse a function's arguments using simple-parsing from type annotations."""
+
+ def _decorate_with_cli_args(function: Callable[..., Any]) -> Callable[..., Any]:
+ """Decorate `function` by binding its arguments obtained from simple-parsing."""
+
+ @functools.wraps(function)
+ def _wrapper(*other_args, **other_kwargs) -> Any:
+ # Parse signature and parameters
+ signature = inspect.signature(function, follow_wrapped=True)
+ parameters = signature.parameters
+
+ # Parse docstring to use as help strings
+ docstring = dp_parse(inspect_getdoc(function) or "")
+ docstring_param_description = {
+ param.arg_name: param.description for param in docstring.params
+ }
+
+ # Parse all arguments from the function
+ fields = []
+ for name, parameter in parameters.items():
+ # Replace empty annotation with Any
+ if parameter.annotation == inspect.Parameter.empty:
+ parameter = parameter.replace(annotation=Any)
+
+ # Parse default or default_factory if the default is callable.
+ default, default_factory = dataclasses.MISSING, dataclasses.MISSING
+ if parameter.default != inspect.Parameter.empty:
+ if inspect.isfunction(parameter.default):
+ default_factory = parameter.default
+ else:
+ default = parameter.default
+
+ field = _Field(
+ name,
+ parameter.annotation,
+ helpers.field(
+ name=name,
+ default=default,
+ default_factory=default_factory,
+ help=docstring_param_description.get(name, ""),
+ positional=parameter.kind == inspect.Parameter.POSITIONAL_ONLY,
+ ),
+ )
+ fields.append(field)
+
+ # We can have positional arguments with no defaults that come out of order
+ # when parsing the function signature. Therefore, before we construct
+ # the dataclass we have to sort fields according to their default value.
+ # We query fields by name so there's no need to worry about the order.
+ def _field_has_default(field: _Field) -> bool:
+ return (
+ field.field.default is not dataclasses.MISSING
+ or field.field.default_factory is not dataclasses.MISSING
+ )
+
+ fields = sorted(fields, key=_field_has_default)
+
+ # Create the dataclass using the fields derived from the function's signature
+ FunctionArgs = dataclasses.make_dataclass(function.__qualname__, fields)
+ FunctionArgs.__doc__ = _description_from_docstring(docstring) or None
+ function_args = parsing.parse(
+ FunctionArgs,
+ dest="args",
+ add_config_path_arg=False,
+ **sp_kwargs,
+ )
+
+ # Construct both positional and keyword arguments.
+ args, kwargs = [], {}
+ for field in dataclasses.fields(function_args):
+ value = getattr(function_args, field.name)
+ if field.metadata.get("positional", False):
+ args.append(value)
+ else:
+ # TODO: py39: use union operator (|=)
+ kwargs.update({field.name: value})
+
+ # Construct positional arguments with CLI and runtime args
+ positionals = (*args, *other_args)
+ # Construct keyword arguments so it can override arguments
+ # so we don't receive multiple value errors.
+ keywords = collections.ChainMap(kwargs, other_kwargs)
+
+ # Call the function
+ return function(*positionals, **keywords)
+
+ return _wrapper
+
+ if original_function:
+ return _decorate_with_cli_args(original_function)
+
+ return _decorate_with_cli_args
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/docstring.py b/parrot/lib/python3.10/site-packages/simple_parsing/docstring.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cc9e129441db312387d6983613fdd3574b1bd5d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/docstring.py
@@ -0,0 +1,385 @@
+"""Utility for retrieving the docstring of a dataclass's attributes.
+
+@author: Fabrice Normandin
+"""
+from __future__ import annotations
+
+import functools
+import inspect
+
+# from inspect import
+from dataclasses import dataclass
+from logging import getLogger
+
+import docstring_parser as dp
+from docstring_parser.common import Docstring
+
+dp_parse = functools.lru_cache(2048)(dp.parse)
+inspect_getsource = functools.lru_cache(2048)(inspect.getsource)
+inspect_getdoc = functools.lru_cache(2048)(inspect.getdoc)
+logger = getLogger(__name__)
+
+
+@dataclass
+class AttributeDocString:
+ """Simple dataclass for holding the comments of a given field."""
+
+ comment_above: str = ""
+ comment_inline: str = ""
+ docstring_below: str = ""
+
+ desc_from_cls_docstring: str = ""
+ """The description of this field from the class docstring."""
+
+ @property
+ def help_string(self) -> str:
+ """Returns the value that will be used for the "--help" string, using the contents of
+ self."""
+ return (
+ self.docstring_below
+ or self.comment_above
+ or self.comment_inline
+ or self.desc_from_cls_docstring
+ )
+
+
+def get_attribute_docstring(
+ dataclass: type, field_name: str, accumulate_from_bases: bool = True
+) -> AttributeDocString:
+ """Returns the docstrings of a dataclass field.
+ NOTE: a docstring can either be:
+ - An inline comment, starting with <#>
+ - A Comment on the preceding line, starting with <#>
+ - A docstring on the following line, starting with either <\"\"\"> or <'''>
+ - The description of a field in the class's docstring.
+
+ Arguments:
+ some_dataclass: a dataclass
+ field_name: the name of the field.
+ accumulate_from_bases: Whether to accumulate the docstring components by looking through the
+ base classes. When set to `False`, whenever one of the classes has a definition for the
+ field, it is directly returned. Otherwise, the parts of the docstring are accumulated across the base classes.
+ Returns:
+ AttributeDocString -- an object holding the string descriptions of the field.
+ """
+ created_docstring: AttributeDocString | None = None
+
+ mro = inspect.getmro(dataclass)
+ assert mro[0] is dataclass
+ assert mro[-1] is object
+ mro = mro[:-1]
+ for base_class in mro:
+ attribute_docstring = _get_attribute_docstring(base_class, field_name)
+ if not attribute_docstring:
+ continue
+ if not created_docstring:
+ created_docstring = attribute_docstring
+ if not accumulate_from_bases:
+ # We found a definition for that field in that class, so return it directly.
+ return created_docstring
+ else:
+ # Update the fields.
+ created_docstring.comment_above = (
+ created_docstring.comment_above or attribute_docstring.comment_above
+ )
+ created_docstring.comment_inline = (
+ created_docstring.comment_inline or attribute_docstring.comment_inline
+ )
+ created_docstring.docstring_below = (
+ created_docstring.docstring_below or attribute_docstring.docstring_below
+ )
+ created_docstring.desc_from_cls_docstring = (
+ created_docstring.desc_from_cls_docstring
+ or attribute_docstring.desc_from_cls_docstring
+ )
+ if not created_docstring:
+ logger.debug(
+ RuntimeWarning(
+ f"Couldn't find the definition for field '{field_name}' within the dataclass "
+ f"{dataclass} or any of its base classes {','.join(t.__name__ for t in mro[1:])}."
+ )
+ )
+ return AttributeDocString()
+ return created_docstring
+
+
+@functools.lru_cache(2048)
+def _get_attribute_docstring(dataclass: type, field_name: str) -> AttributeDocString | None:
+ """Gets the AttributeDocString of the given field in the given dataclass.
+
+ Doesn't inspect base classes.
+ """
+ try:
+ source = inspect_getsource(dataclass)
+ except (TypeError, OSError) as e:
+ logger.debug(
+ UserWarning(
+ f"Couldn't retrieve the source code of class {dataclass} "
+ f"(in order to retrieve the docstring of field {field_name}): {e}"
+ )
+ )
+ return None
+
+ # Parse docstring to use as help strings
+ desc_from_cls_docstring = ""
+ cls_docstring = inspect_getdoc(dataclass)
+ if cls_docstring:
+ docstring: Docstring = dp_parse(cls_docstring)
+ for param in docstring.params:
+ if param.arg_name == field_name:
+ desc_from_cls_docstring = param.description or ""
+
+ # NOTE: We want to skip the docstring lines.
+ # NOTE: Currently, we just remove the __doc__ from the source. It's perhaps a bit crude,
+ # but it works.
+ if dataclass.__doc__ and dataclass.__doc__ in source:
+ source = source.replace(dataclass.__doc__, "\n", 1)
+ # note: does this remove the whitespace though?
+
+ code_lines: list[str] = source.splitlines()
+ # the first line is the class definition (OR the decorator!), we skip it.
+ start_line_index = 1
+ # starting at the second line, there might be the docstring for the class.
+ # We want to skip over that until we reach an attribute definition.
+ while start_line_index < len(code_lines):
+ if _contains_field_definition(code_lines[start_line_index]):
+ break
+ start_line_index += 1
+
+ lines_with_field_defs = [
+ (index, line) for index, line in enumerate(code_lines) if _contains_field_definition(line)
+ ]
+ for i, line in lines_with_field_defs:
+ if _line_contains_definition_for(line, field_name):
+ # we found the line with the definition of this field.
+ comment_above = _get_comment_ending_at_line(code_lines, i - 1)
+ comment_inline = _get_inline_comment_at_line(code_lines, i)
+ docstring_below = _get_docstring_starting_at_line(code_lines, i + 1)
+ return AttributeDocString(
+ comment_above,
+ comment_inline,
+ docstring_below,
+ desc_from_cls_docstring=desc_from_cls_docstring,
+ )
+ return None
+
+
+def _contains_field_definition(line: str) -> bool:
+ """Returns whether or not a line contains a dataclass field definition.
+
+ Arguments:
+ line_str {str} -- the line content
+
+ Returns:
+ bool -- True if there is an attribute definition in the line.
+
+ >>> _contains_field_definition("a: int = 0")
+ True
+ >>> _contains_field_definition("a: int")
+ True
+ >>> _contains_field_definition("a: int # comment")
+ True
+ >>> _contains_field_definition("a: int = 0 # comment")
+ True
+ >>> _contains_field_definition("class FooBaz(Foo, Baz):")
+ False
+ >>> _contains_field_definition("a = 4")
+ False
+ >>> _contains_field_definition("fooooooooobar.append(123)")
+ False
+ >>> _contains_field_definition("{a: int}")
+ False
+ >>> _contains_field_definition(" foobaz: int = 123 #: The foobaz property")
+ True
+ >>> _contains_field_definition("a #:= 3")
+ False
+ """
+ # Get rid of any comments first.
+ line, _, _ = line.partition("#")
+
+ if ":" not in line:
+ return False
+
+ if "=" in line:
+ attribute_and_type, _, _ = line.partition("=")
+ else:
+ attribute_and_type = line
+
+ field_name, _, type = attribute_and_type.partition(":")
+ field_name = field_name.strip()
+ if ":" in type:
+ # weird annotation or dictionary?
+ return False
+ if not field_name:
+ # Empty attribute name?
+ return False
+ return field_name.isidentifier()
+
+
+def _line_contains_definition_for(line: str, field_name: str) -> bool:
+ line = line.strip()
+ if not _contains_field_definition(line):
+ return False
+ attribute, _, type_and_value_assignment = line.partition(":")
+ attribute = attribute.strip() # remove any whitespace after the attribute name.
+ return attribute.isidentifier() and attribute == field_name
+
+
+def _is_empty(line_str: str) -> bool:
+ return line_str.strip() == ""
+
+
+def _is_comment(line_str: str) -> bool:
+ return line_str.strip().startswith("#")
+
+
+def _get_comment_at_line(code_lines: list[str], line: int) -> str:
+ """Gets the comment at line `line` in `code_lines`.
+
+ Arguments:
+ line {int} -- the index of the line in code_lines
+
+ Returns:
+ str -- the comment at the given line. empty string if not present.
+ """
+ line_str = code_lines[line]
+ assert not _contains_field_definition(line_str)
+ if "#" not in line_str:
+ return ""
+ parts = line_str.split("#", maxsplit=1)
+ comment = parts[1].strip()
+ return comment
+
+
+def _get_inline_comment_at_line(code_lines: list[str], line: int) -> str:
+ """Gets the inline comment at line `line`.
+
+ Arguments:
+ line {int} -- the index of the line in code_lines
+
+ Returns:
+ str -- the inline comment at the given line, else an empty string.
+ """
+ assert 0 <= line < len(code_lines)
+ assert _contains_field_definition(code_lines[line])
+ line_str = code_lines[line]
+ parts = line_str.split("#", maxsplit=1)
+ if len(parts) != 2:
+ return ""
+ comment = parts[1].strip()
+ return comment
+
+
+def _get_comment_ending_at_line(code_lines: list[str], line: int) -> str:
+ start_line = line
+ end_line = line
+ # move up the code, one line at a time, while we don't hit the start,
+ # an attribute definition, or the end of a docstring.
+ while start_line > 0:
+ line_str = code_lines[start_line]
+ if _contains_field_definition(line_str):
+ break # previous line is an assignment
+ if '"""' in line_str or "'''" in line_str:
+ break # previous line has a docstring
+ start_line -= 1
+ start_line += 1
+
+ lines = []
+ for i in range(start_line, end_line + 1):
+ # print(f"line {i}: {code_lines[i]}")
+ if _is_empty(code_lines[i]):
+ continue
+ assert not _contains_field_definition(code_lines[i])
+ comment = _get_comment_at_line(code_lines, i)
+ lines.append(comment)
+ return "\n".join(lines).strip()
+
+
+def _get_docstring_starting_at_line(code_lines: list[str], line: int) -> str:
+ i = line
+ token: str | None = None
+ triple_single = "'''"
+ triple_double = '"""'
+ # print("finding docstring starting from line", line)
+
+ # if we are looking further down than the end of the code, there is no
+ # docstring.
+ if line >= len(code_lines):
+ return ""
+ # the list of lines making up the docstring.
+ docstring_contents: list[str] = []
+
+ while i < len(code_lines):
+ line_str = code_lines[i]
+ # print(f"(docstring) line {line}: {line_str}")
+
+ # we haven't identified the starting line yet.
+ if token is None:
+ if _is_empty(line_str):
+ i += 1
+ continue
+
+ elif _contains_field_definition(line_str) or _is_comment(line_str):
+ # we haven't reached the start of a docstring yet (since token
+ # is None), and we reached a line with an attribute definition,
+ # or a comment, hence the docstring is empty.
+ return ""
+
+ elif triple_single in line_str and triple_double in line_str:
+ # This handles something stupid like:
+ # @dataclass
+ # class Bob:
+ # a: int
+ # """ hello '''
+ # bob
+ # ''' bye
+ # """
+ triple_single_index = line_str.index(triple_single)
+ triple_double_index = line_str.index(triple_double)
+ if triple_single_index < triple_double_index:
+ token = triple_single
+ else:
+ token = triple_double
+ elif triple_double in line_str:
+ token = triple_double
+ elif triple_single in line_str:
+ token = triple_single
+ else:
+ # for i, line in enumerate(code_lines):
+ # print(f"line {i}: <{line}>")
+ # print(f"token: <{token}>")
+ # print(line_str)
+ logger.debug(f"Warning: Unable to parse attribute docstring: {line_str}")
+ return ""
+
+ # get the string portion of the line (after a token or possibly
+ # between two tokens).
+ parts = line_str.split(token, maxsplit=2)
+ if len(parts) == 3:
+ # This takes care of cases like:
+ # @dataclass
+ # class Bob:
+ # a: int
+ # """ hello """
+ between_tokens = parts[1].strip()
+ # print("Between tokens:", between_tokens)
+ docstring_contents.append(between_tokens)
+ break
+
+ elif len(parts) == 2:
+ after_token = parts[1].strip()
+ # print("After token:", after_token)
+ docstring_contents.append(after_token)
+ else:
+ # print(f"token is <{token}>")
+ if token in line_str:
+ # print(f"Line {line} End of a docstring:", line_str)
+ before = line_str.split(token, maxsplit=1)[0]
+ docstring_contents.append(before.strip())
+ break
+ else:
+ # intermediate line without the token.
+ docstring_contents.append(line_str.strip())
+ i += 1
+ # print("Docstring contents:", docstring_contents)
+ return "\n".join(docstring_contents)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/help_formatter.py b/parrot/lib/python3.10/site-packages/simple_parsing/help_formatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..11d717e706c917a0db1781a4a384f45aec7389e9
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/help_formatter.py
@@ -0,0 +1,88 @@
+import argparse
+from argparse import ONE_OR_MORE, OPTIONAL, PARSER, REMAINDER, ZERO_OR_MORE, Action
+from logging import getLogger
+from typing import Optional, Type
+
+from .wrappers.field_metavar import get_metavar
+
+TEMPORARY_TOKEN = "<__TEMP__>"
+logger = getLogger(__name__)
+
+
+class SimpleHelpFormatter(
+ argparse.ArgumentDefaultsHelpFormatter,
+ argparse.MetavarTypeHelpFormatter,
+ argparse.RawDescriptionHelpFormatter,
+):
+ """Little shorthand for using some useful HelpFormatters from argparse.
+
+ This class inherits from argparse's `ArgumentDefaultsHelpFormatter`,
+ `MetavarTypeHelpFormatter` and `RawDescriptionHelpFormatter` classes.
+
+ This produces the following resulting actions:
+ - adds a "(default: xyz)" for each argument with a default
+ - uses the name of the argument type as the metavar. For example, gives
+ "-n int" instead of "-n N" in the usage and description of the arguments.
+ - Conserves the formatting of the class and argument docstrings, if given.
+ """
+
+ def _format_args(self, action: Action, default_metavar: str):
+ _get_metavar = self._metavar_formatter(action, default_metavar)
+ action_type = action.type
+
+ metavar = action.metavar or get_metavar(action_type)
+ if metavar and not action.choices:
+ result = metavar
+ elif action.nargs is None:
+ result = "%s" % _get_metavar(1)
+ elif action.nargs == OPTIONAL:
+ result = "[%s]" % _get_metavar(1)
+ elif action.nargs == ZERO_OR_MORE:
+ result = "[%s [%s ...]]" % _get_metavar(2) # noqa: UP031
+ elif action.nargs == ONE_OR_MORE:
+ result = "%s [%s ...]" % _get_metavar(2) # noqa: UP031
+ elif action.nargs == REMAINDER:
+ result = "..."
+ elif action.nargs == PARSER:
+ result = "%s ..." % _get_metavar(1)
+ else:
+ formats = ["%s" for _ in range(action.nargs)]
+ result = " ".join(formats) % _get_metavar(action.nargs)
+
+ # logger.debug(
+ # f"action type: {action_type}, Result: {result}, nargs: {action.nargs}, default metavar: {default_metavar}"
+ # )
+ return result
+
+ def _get_default_metavar_for_optional(self, action: argparse.Action):
+ try:
+ return super()._get_default_metavar_for_optional(action)
+ except BaseException:
+ logger.debug(f"Getting metavar for action with dest {action.dest}.")
+ metavar = self._get_metavar_for_action(action)
+ logger.debug(f"Result metavar: {metavar}")
+ return metavar
+
+ def _get_default_metavar_for_positional(self, action: argparse.Action):
+ try:
+ return super()._get_default_metavar_for_positional(action)
+ except BaseException:
+ logger.debug(f"Getting metavar for action with dest {action.dest}.")
+ metavar = self._get_metavar_for_action(action)
+ logger.debug(f"Result metavar: {metavar}")
+ return metavar
+
+ def _get_metavar_for_action(self, action: argparse.Action) -> str:
+ return self._get_metavar_for_type(action.type)
+
+ def _get_metavar_for_type(self, t: Type) -> str:
+ return get_metavar(t) or str(t)
+
+ def _get_help_string(self, action: Action) -> Optional[str]:
+ help = super()._get_help_string(action=action)
+ if help is not None:
+ help = help.replace(TEMPORARY_TOKEN, "")
+ return help
+
+
+Formatter = SimpleHelpFormatter
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__init__.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e635bd24220769a0466fb5d610dabfeed6d490
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/__init__.py
@@ -0,0 +1,15 @@
+"""Collection of helper classes and functions to reduce boilerplate code."""
+from .fields import *
+from .flatten import FlattenedAccess
+from .hparams import HyperParameters
+from .partial import Partial, config_for
+from .serialization import FrozenSerializable, Serializable, SimpleJsonEncoder, encode
+
+try:
+ from .serialization import YamlSerializable
+except ImportError:
+ pass
+
+# For backward compatibility purposes
+JsonSerializable = Serializable
+SimpleEncoder = SimpleJsonEncoder
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/custom_actions.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/custom_actions.py
new file mode 100644
index 0000000000000000000000000000000000000000..fad8789d07afa2801891d97e2beda89cd549046e
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/custom_actions.py
@@ -0,0 +1,174 @@
+from __future__ import annotations
+
+import argparse
+from typing import Any, Callable, Iterable, Sequence
+
+from typing_extensions import Literal
+
+from .. import utils
+
+DEFAULT_NEGATIVE_PREFIX = "--no"
+
+
+class BooleanOptionalAction(argparse.Action):
+ """Similar to `argparse.BooleanOptionalAction`.
+
+ * Support using a custom negative prefix (makes this compatible with `absl.flags`)
+ * Accept `--flag=true` value
+ * Support Python 3.8
+ """
+
+ def __init__(
+ self,
+ option_strings: Sequence[str],
+ dest: str,
+ default: bool | None = None,
+ type: Callable[[str], bool] = utils.str2bool,
+ choices: Iterable[Any] | None = None,
+ required: bool = False,
+ help: str | None = None,
+ metavar: str | tuple[str, ...] | None = "bool",
+ nargs: Literal["?"] | None = "?",
+ negative_prefix: str = DEFAULT_NEGATIVE_PREFIX,
+ negative_option: str | None = None,
+ _conflict_prefix: str | None = "",
+ ):
+ option_strings = list(option_strings)
+ if nargs is None:
+ nargs = "?"
+
+ if nargs != "?":
+ more_info = ""
+ if nargs in {0, 1}:
+ more_info = (
+ "In argparse, nargs=0 parses an empty list, and nargs=1 is list of bools "
+ "with one item, not a required single boolean."
+ )
+ elif nargs in {"+", "*"} or isinstance(nargs, int):
+ more_info = (
+ "To parse a field with a list of booleans, use a sequence of booleans as a "
+ "field annotation (e.g. list[bool] or tuple[bool, ...])."
+ )
+
+ field = {option.lstrip("-") for option in option_strings}
+ raise ValueError(
+ f"Invalid nargs for bool field '{'/'.join(field)}': {nargs!r}\n"
+ f"Fields with a `bool` annotation only accepts nargs of `'?'` or `None`, since it "
+ "parses single-boolean fields. " + "\n" + more_info
+ )
+
+ self.negative_prefix = negative_prefix
+ self.negative_option = negative_option
+
+ self.negative_option_strings: list[str] = []
+ if negative_option is not None:
+ # Use the negative option.
+ # _conflict_prefix is passed down from the FieldWrapper, and is used to also add a
+ # prefix to the generated negative options. This is used to avoid conflicts between
+ # the negative options of different fields!
+ # For example if both a `train: Config` and `valid: Config` have a `--verbose` flag,
+ # with a `--silent` negative option, then we have to add a prefix to the negative flags
+ # also!
+ after_dashes = ""
+ if _conflict_prefix:
+ assert _conflict_prefix.endswith(".")
+ after_dashes = _conflict_prefix
+
+ if negative_option.startswith("-"):
+ negative_option_without_leading_dashes = negative_option.lstrip("-")
+ num_leading_dashes = len(negative_option) - len(
+ negative_option_without_leading_dashes
+ )
+
+ else:
+ negative_option_without_leading_dashes = negative_option
+ # NOTE: Pre-emptively changing this here so we don't use a single leading dash when
+ # there's prefix.
+ # Use a single leading dash only when there isn't a prefix and if the negative
+ # option is a single character.
+ num_leading_dashes = 2 if len(after_dashes + negative_option) > 1 else 1
+ negative_option = (
+ "-" * num_leading_dashes + after_dashes + negative_option_without_leading_dashes
+ )
+ self.negative_option_strings = [negative_option]
+ else:
+ self.negative_option_strings = []
+ for option_string in option_strings:
+ if "." in option_string:
+ parts = option_string.split(".")
+ # NOTE: Need to be careful here.
+ first, *middle, last = parts
+
+ negative_prefix_without_leading_dashes = negative_prefix.lstrip("-")
+ num_leading_dashes = len(negative_prefix) - len(
+ negative_prefix_without_leading_dashes
+ )
+ first_without_leading_dashes = first.lstrip("-")
+
+ first = "-" * num_leading_dashes + first_without_leading_dashes
+ last = negative_prefix_without_leading_dashes + last
+
+ negative_option_string = ".".join([first] + middle + [last])
+ self.negative_option_strings.append(negative_option_string)
+
+ elif option_string.startswith("-"):
+ without_leading_dashes = option_string.lstrip("-")
+ negative_option_string = self.negative_prefix + without_leading_dashes
+ if negative_option_string not in self.negative_option_strings:
+ # NOTE: don't want -a and --a to both add a --noa negative option.
+ self.negative_option_strings.append(negative_option_string)
+ else:
+ raise NotImplementedError(
+ f"Invalid option string {option_string!r} for boolean field. "
+ f"This action doesn't support positional arguments. "
+ f"Option strings should start with one or more dashes ('-'). "
+ )
+ if help is not None and default is not None and default is not argparse.SUPPRESS:
+ help += " (default: %(default)s)"
+
+ super().__init__(
+ option_strings=option_strings + self.negative_option_strings,
+ dest=dest,
+ nargs=nargs,
+ default=default,
+ type=type,
+ choices=choices,
+ required=required,
+ help=help,
+ metavar=metavar,
+ )
+ self.type: Callable[[str], bool]
+ assert self.type is not None
+
+ def __call__(
+ self,
+ parser: argparse.ArgumentParser,
+ namespace: argparse.Namespace,
+ values: Any,
+ option_string: str | None = None,
+ ):
+ # NOTE: `option_string` is only None when using a positional argument.
+ if option_string is None:
+ raise NotImplementedError("This action doesn't support positional arguments yet.")
+ assert option_string in self.option_strings
+
+ used_negative_flag = option_string in self.negative_option_strings
+
+ bool_value: bool
+ if values is None: # --my_flag / --nomy_flag
+ bool_value = not used_negative_flag
+ elif used_negative_flag: # Cannot set `--nomy_flag=True/False`
+ parser.exit(
+ message=f"Negative flags cannot be passed a value (Got: {option_string}={values})"
+ )
+ elif isinstance(values, bool):
+ bool_value = values
+ elif isinstance(values, str): # --my_flag true
+ bool_value = self.type(values)
+ else:
+ raise ValueError(f"Unsupported value for {option_string}: {values!r}")
+
+ setattr(namespace, self.dest, bool_value)
+
+ def format_usage(self):
+ return " | ".join(self.option_strings)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/fields.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/fields.py
new file mode 100644
index 0000000000000000000000000000000000000000..a200808416efe001b0ea26b51cb5fd09f31d696e
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/fields.py
@@ -0,0 +1,448 @@
+"""Utility functions that simplify defining field of dataclasses."""
+from __future__ import annotations
+
+import dataclasses
+import functools
+import inspect
+import warnings
+from collections import OrderedDict
+from dataclasses import _MISSING_TYPE, MISSING
+from enum import Enum
+from logging import getLogger
+from typing import Any, Callable, Hashable, Iterable, TypeVar, overload
+
+from typing_extensions import Literal, ParamSpec
+
+from simple_parsing.helpers.custom_actions import (
+ DEFAULT_NEGATIVE_PREFIX,
+ BooleanOptionalAction,
+)
+from simple_parsing.utils import DataclassT, str2bool
+
+# NOTE: backward-compatibility import because it was moved to a different file.
+from .subgroups import subgroups # noqa: F401
+
+logger = getLogger(__name__)
+
+E = TypeVar("E", bound=Enum)
+K = TypeVar("K", bound=Hashable)
+V = TypeVar("V")
+T = TypeVar("T")
+
+
+def field(
+ default: T | _MISSING_TYPE = MISSING,
+ alias: str | list[str] | None = None,
+ cmd: bool = True,
+ positional: bool = False,
+ *,
+ to_dict: bool = True,
+ encoding_fn: Callable[[T], Any] | None = None,
+ decoding_fn: Callable[[Any], T] | None = None,
+ # dataclasses.field arguments
+ default_factory: Callable[[], T] | _MISSING_TYPE = MISSING,
+ init: bool = True,
+ repr: bool = True,
+ hash: bool | None = None,
+ compare: bool = True,
+ metadata: dict[str, Any] | None = None,
+ **custom_argparse_args: Any,
+) -> T:
+ """Extension of the `dataclasses.field` function.
+
+ Adds the ability to customize how this field's command-line options are
+ created, as well as how it is serialized / deseralized (if the containing
+ dataclass inherits from `simple_parsing.Serializable`.
+
+ Leftover arguments are fed directly to the
+ `ArgumentParser.add_argument(*option_strings, **kwargs)` method.
+
+ Parameters
+ ----------
+ default : Union[T, _MISSING_TYPE], optional
+ The default field value (same as in `dataclasses.field`), by default MISSING
+ alias : Union[str, List[str]], optional
+ Additional option_strings to pass to the `add_argument` method, by
+ default None. When passing strings which do not start by "-" or "--",
+ will be prefixed with "-" if the string is one character and by "--"
+ otherwise.
+ cmd: bool, optional
+ Whether to add command-line arguments for this field or not. Defaults to
+ True.
+
+ ## Serialization-related Keyword Arguments:
+
+ to_dict : bool
+ Whether to include this field in the dictionary when calling `to_dict()`.
+ Defaults to True.
+ Only has an effect when the dataclass containing this field is
+ `Serializable`.
+ encoding_fn : Callable[[T], Any], optional
+ Function to apply to this field's value when encoding the dataclass to a
+ dict. Only has an effect when the dataclass containing this field is
+ `Serializable`.
+ decoding_fn : Callable[[Any], T]. optional
+ Function to use in order to recover a the value of this field from a
+ serialized entry in a dictionary (inside `cls.from_dict`).
+ Only has an effect when the dataclass containing this field is
+ `Serializable`.
+
+ ## Keyword Arguments of `dataclasses.field`
+
+ default_factory : Union[Callable[[], T], _MISSING_TYPE], optional
+ (same as in `dataclasses.field`), by default None
+ init : bool, optional
+ (same as in `dataclasses.field`), by default True
+ repr : bool, optional
+ (same as in `dataclasses.field`), by default True
+ hash : bool, optional
+ (same as in `dataclasses.field`), by default None
+ compare : bool, optional
+ (same as in `dataclasses.field`), by default True
+ metadata : Dict[str, Any], optional
+ (same as in `dataclasses.field`), by default None
+
+ Returns
+ -------
+ T
+ The value returned by the `dataclasses.field` function.
+ """
+ _metadata: dict[str, Any] = metadata if metadata is not None else {}
+ if alias:
+ _metadata["alias"] = alias if isinstance(alias, list) else [alias]
+ _metadata.update(dict(to_dict=to_dict))
+ if encoding_fn is not None:
+ _metadata.update(dict(encoding_fn=encoding_fn))
+ if decoding_fn is not None:
+ _metadata.update(dict(decoding_fn=decoding_fn))
+ _metadata["cmd"] = cmd
+ _metadata["positional"] = positional
+
+ if custom_argparse_args:
+ _metadata.update({"custom_args": custom_argparse_args})
+
+ action = custom_argparse_args.get("action")
+ if action == "store_false":
+ if default not in {MISSING, True}:
+ raise RuntimeError(
+ "default should either not be passed or set "
+ "to True when using the store_false action."
+ )
+ default = True # type: ignore
+ elif action == "store_true":
+ if default not in {MISSING, False}:
+ raise RuntimeError(
+ "default should either not be passed or set "
+ "to False when using the store_true action."
+ )
+ default = False # type: ignore
+ if default is not MISSING:
+ return dataclasses.field( # type: ignore
+ default=default,
+ init=init,
+ repr=repr,
+ hash=hash,
+ compare=compare,
+ metadata=_metadata,
+ )
+ elif not isinstance(default_factory, dataclasses._MISSING_TYPE):
+ return dataclasses.field(
+ default_factory=default_factory,
+ init=init,
+ repr=repr,
+ hash=hash,
+ compare=compare,
+ metadata=_metadata,
+ )
+ else:
+ return dataclasses.field(
+ init=init, repr=repr, hash=hash, compare=compare, metadata=_metadata
+ )
+
+
+@overload
+def choice(
+ choices: type[E],
+ *,
+ default: E,
+ default_factory: Callable[[], E] | _MISSING_TYPE = MISSING,
+ **kwargs,
+) -> E:
+ ...
+
+
+@overload
+def choice(choices: dict[K, V], *, default: K, **kwargs) -> V:
+ ...
+
+
+@overload
+def choice(
+ *choices: T,
+ default: T | _MISSING_TYPE = MISSING,
+ default_factory: Callable[[], T] | _MISSING_TYPE = MISSING,
+ **kwargs,
+) -> T:
+ ...
+
+
+def choice(*choices, default=MISSING, **kwargs):
+ """Makes a field which can be chosen from the set of choices from the command-line.
+
+ Returns a regular `dataclasses.field()`, but with metadata which indicates
+ the allowed values.
+
+ (New:) If `choices` is a dictionary, then passing the 'key' will result in
+ the corresponding value being used. The values may be objects, for example.
+ Similarly for Enum types, passing a type of enum will
+
+ Args:
+ default (T, optional): The default value of the field. Defaults to dataclasses.MISSING,
+ in which case the command-line argument is required.
+
+ Raises:
+ ValueError: If the default value isn't part of the given choices.
+
+ Returns:
+ T: the result of the usual `dataclasses.field()` function (a dataclass field/attribute).
+ """
+ assert len(choices) > 0, "Choice requires at least one positional argument!"
+
+ if len(choices) == 1:
+ choices = choices[0]
+ if inspect.isclass(choices) and issubclass(choices, Enum):
+ # If given an enum, construct a mapping from names to values.
+ choice_enum: type[Enum] = choices
+ choices = OrderedDict((e.name, e) for e in choice_enum)
+ if default is not MISSING and not isinstance(default, choice_enum):
+ if default in choices:
+ warnings.warn(
+ UserWarning(
+ f"Setting default={default} could perhaps be ambiguous "
+ f"(enum names vs enum values). Consider using the enum "
+ f"value {choices[default]} instead."
+ )
+ )
+ default = choices[default]
+ else:
+ raise ValueError(
+ f"'default' arg should be of type {choice_enum}, but got {default}"
+ )
+
+ if isinstance(choices, dict):
+ # if the choices is a dict, the options are the keys
+ # save the info about the choice_dict in the field metadata.
+ metadata = kwargs.setdefault("metadata", {})
+ choice_dict = choices
+ # save the choice_dict in metadata so that we can recover the values in postprocessing.
+ metadata["choice_dict"] = choice_dict
+ choices = list(choice_dict.keys())
+
+ # TODO: If the choice dict is given, then add encoding/decoding functions that just
+ # get/set the right key.
+ def _encoding_fn(value: Any) -> str:
+ """Custom encoding function that will simply represent the value as the the key in
+ the dict rather than the value itself."""
+ if value in choice_dict.keys():
+ return value
+ elif value in choice_dict.values():
+ return [k for k, v in choice_dict.items() if v == value][0]
+ return value
+
+ kwargs.setdefault("encoding_fn", _encoding_fn)
+
+ def _decoding_fn(value: Any) -> Any:
+ """Custom decoding function that will retrieve the value from the stored key in the
+ dictionary."""
+ return choice_dict.get(value, value)
+
+ kwargs.setdefault("decoding_fn", _decoding_fn)
+
+ return field(default=default, choices=choices, **kwargs)
+
+
+def list_field(*default_items: T, **kwargs) -> list[T]:
+ """shorthand function for setting a `list` attribute on a dataclass, so that every instance of
+ the dataclass doesn't share the same list.
+
+ Accepts any of the arguments of the `dataclasses.field` function.
+
+ Returns:
+ List[T]: a `dataclasses.field` of type `list`, containing the `default_items`.
+ """
+ if "default" in kwargs and isinstance(kwargs["default"], list):
+ assert not default_items
+ # can't have that. field wants a default_factory.
+ # we just give back a copy of the list as a default factory,
+ # but this should be discouraged.
+ from copy import deepcopy
+
+ default_factory = functools.partial(deepcopy, kwargs.pop("default"))
+ else:
+ default_factory = functools.partial(list, default_items)
+
+ return field(default_factory=default_factory, **kwargs)
+
+
+def dict_field(default_items: dict[K, V] | Iterable[tuple[K, V]] = (), **kwargs) -> dict[K, V]:
+ """shorthand function for setting a `dict` attribute on a dataclass, so that every instance of
+ the dataclass doesn't share the same `dict`.
+
+ NOTE: Do not use keyword arguments as you usually would with a dictionary
+ (as in something like `dict_field(a=1, b=2, c=3)`). Instead pass in a
+ dictionary instance with the items: `dict_field(dict(a=1, b=2, c=3))`.
+ The reason for this is that the keyword arguments are interpreted as custom
+ argparse arguments, rather than arguments of the `dict` function!)
+
+ Also accepts any of the arguments of the `dataclasses.field` function.
+
+ Returns:
+ Dict[K, V]: a `dataclasses.Field` of type `Dict[K, V]`, containing the `default_items`.
+ """
+ return field(default_factory=functools.partial(dict, default_items), **kwargs)
+
+
+def set_field(*default_items: T, **kwargs) -> set[T]:
+ return field(default_factory=functools.partial(set, default_items), **kwargs)
+
+
+P = ParamSpec("P")
+
+
+def mutable_field(
+ fn: Callable[P, T],
+ init: bool = True,
+ repr: bool = True,
+ hash: bool | None = None,
+ compare: bool = True,
+ metadata: dict[str, Any] | None = None,
+ *fn_args: P.args,
+ **fn_kwargs: P.kwargs,
+) -> T:
+ """Shorthand for `dataclasses.field(default_factory=functools.partial(fn, *fn_args,
+
+ **fn_kwargs))`.
+
+ NOTE: The *fn_args and **fn_kwargs here are passed to `fn`, and are never used by the argparse
+ Action!
+ """
+ # TODO: Use this 'smart' partial to make it easier to define nested fields.
+ # from simple_parsing.helpers.nested_partial import npartial
+ default_factory = functools.partial(fn, *fn_args, **fn_kwargs)
+ return dataclasses.field(
+ default_factory=default_factory,
+ init=init,
+ repr=repr,
+ hash=hash,
+ compare=compare,
+ metadata=metadata,
+ )
+
+
+def subparsers(
+ subcommands: dict[str, type[DataclassT]],
+ default: DataclassT | _MISSING_TYPE = MISSING,
+ **kwargs,
+) -> Any:
+ return field(
+ metadata={
+ "subparsers": subcommands,
+ },
+ default=default,
+ **kwargs,
+ )
+
+
+@overload
+def flag(
+ default: _MISSING_TYPE = MISSING,
+ *,
+ default_factory: _MISSING_TYPE = MISSING,
+ negative_prefix: str | None = DEFAULT_NEGATIVE_PREFIX,
+ negative_option: str | None = None,
+ nargs: Literal["?"] | None = None,
+ type: Callable[[str], bool] = str2bool,
+ action: type[BooleanOptionalAction] = BooleanOptionalAction,
+ **kwargs,
+) -> bool:
+ ...
+
+
+@overload
+def flag(
+ default: bool,
+ *,
+ default_factory: _MISSING_TYPE = MISSING,
+ negative_prefix: str | None = DEFAULT_NEGATIVE_PREFIX,
+ negative_option: str | None = None,
+ nargs: Literal["?"] | None = None,
+ type: Callable[[str], bool] = str2bool,
+ action: type[BooleanOptionalAction] = BooleanOptionalAction,
+ **kwargs,
+) -> bool:
+ ...
+
+
+@overload
+def flag(
+ default: _MISSING_TYPE = MISSING,
+ *,
+ default_factory: Callable[[], bool] = ...,
+ negative_prefix: str | None = DEFAULT_NEGATIVE_PREFIX,
+ negative_option: str | None = None,
+ nargs: Literal["?"] | None = None,
+ type: Callable[[str], bool] = str2bool,
+ action: type[BooleanOptionalAction] = BooleanOptionalAction,
+ **kwargs,
+) -> bool:
+ ...
+
+
+def flag(
+ default: bool | _MISSING_TYPE = MISSING,
+ *,
+ default_factory: Callable[[], bool] | _MISSING_TYPE = MISSING,
+ negative_prefix: str | None = DEFAULT_NEGATIVE_PREFIX,
+ negative_option: str | None = None,
+ nargs: Literal["?"] | None = None,
+ type: Callable[[str], bool] = str2bool,
+ action: type[BooleanOptionalAction] = BooleanOptionalAction,
+ **kwargs,
+) -> bool:
+ """A boolean field with a positive and negative command-line argument.
+
+ If either `default` or `default_factory` are set, then both the field and the generated
+ command-line arguments are optional. Otherwise, both are required.
+
+ Negative flags are generated using `negative_prefix` and `negative_option`:
+ - When `negative_option` is passed, it is used to create the negative flag.
+ - Otherwise, `negative_prefix` is prepended to the field name to create the negative flag.
+
+ NOTE: The negative flags don't accept a value. (i.e. `--noverbose` works, but
+ `--noverbose=True` does not.)
+ The positive flags can be used either with or without a value.
+ """
+ return field(
+ default=default,
+ default_factory=default_factory,
+ negative_prefix=negative_prefix,
+ negative_option=negative_option,
+ nargs=nargs,
+ type=type,
+ action=action,
+ **kwargs,
+ )
+
+
+def flags(
+ default_factory: Callable[[], list[bool]] | _MISSING_TYPE = MISSING,
+ nargs: Literal["*", "+"] | int = "*",
+ type: Callable[[str], bool] = str2bool,
+ **kwargs,
+) -> list[bool]:
+ return field(
+ default_factory=default_factory,
+ nargs=nargs,
+ type=type,
+ **kwargs,
+ )
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/flatten.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eacea617e3627df0c27f6d282bea3cffee5e719
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/flatten.py
@@ -0,0 +1,162 @@
+import dataclasses
+import warnings
+from logging import getLogger
+from typing import Any, Dict, Iterable, List, Tuple
+
+logger = getLogger(__name__)
+
+
+class FlattenedAccess:
+ """Allows flattened access to the attributes of all children dataclasses.
+
+ This is meant to simplify the adoption of dataclasses for argument
+ hierarchies, rather than a single-level dictionary.
+ Dataclasses allow for easy, neatly separated arguments, but suffer from 2
+ potential drawbacks:
+ - When using a highly nested structure, having long accesses is annoying
+ - The dictionary access syntax is often more natural than using getattr()
+ when reading an attribute whose name is a variable.
+ """
+
+ def attributes(self, recursive: bool = True, prefix: str = "") -> Iterable[Tuple[str, Any]]:
+ """Returns an Iterator over the attributes of the dataclass.
+
+ [extended_summary]
+
+ Parameters
+ ----------
+ - dataclass : Dataclass
+
+ A dataclass type or instance.
+ - recursive : bool, optional, by default True
+
+ Whether or not to recurse and yield all the elements of the children
+ dataclass attributes.
+ - prefix : str, optional, by default ""
+
+ A prefix to prepend to all the attribute names before yielding them.
+
+ Returns
+ -------
+ Iterable[Tuple[str, Any]]
+ An iterable of attribute names and values.
+
+ Yields
+ -------
+ Iterable[Tuple[str, Any]]
+ A Tuple of the form .
+ """
+ for field in dataclasses.fields(self):
+ if field.name not in self.__dict__:
+ # the dataclass isn't yet instantiated, or the attr was deleted.
+ continue
+ # get the field value (without needless recursion)
+ field_value = self.__dict__[field.name]
+
+ yield prefix + field.name, field_value
+ if recursive and dataclasses.is_dataclass(field_value):
+ yield from FlattenedAccess.attributes(
+ field_value, recursive=True, prefix=prefix + field.name + "."
+ )
+
+ def __getattr__(self, name: str):
+ """Retrieves the attribute on self, or recursively on the children.
+
+ NOTE: `__getattribute__` is always called before `__getattr__`, hence we
+ always get here because `self` does not have an attribute of `name`.
+ """
+ # potential parents and corresponding values.
+ parents: List[str] = []
+ values: List[Any] = []
+
+ for attr_name, attr_value in FlattenedAccess.attributes(self):
+ # if the attribute name's last part ends with `name`, we add it to
+ # some list of potential parent attributes.
+ name_parts = name.split(".")
+ dest_parts = attr_name.split(".")
+ if dest_parts[-len(name_parts) :] == name_parts:
+ parents.append(attr_name)
+ values.append(attr_value)
+
+ if not parents:
+ raise AttributeError(
+ f"{type(self)} object has no attribute '{name}', "
+ "and neither does any of its children attributes."
+ )
+ elif len(parents) > 1:
+ raise AttributeError(
+ f"Ambiguous Attribute access: name '{name}' may refer to:\n"
+ + "\n".join(
+ f"- '{parent}' (with a value of: '{value}')"
+ for parent, value in zip(parents, values)
+ )
+ )
+ else:
+ return values[0]
+
+ def __setattr__(self, name: str, value: Any):
+ """Write the attribute in self or in the children that has it.
+
+ If more than one child has attributes that match the given one, an `AttributeError` is
+ raised.
+ """
+ # potential parents and corresponding values.
+ parents: List[str] = []
+ values: List[Any] = []
+
+ field_names = {field.name for field in dataclasses.fields(self)}
+ if name in field_names:
+ object.__setattr__(self, name, value)
+ return
+
+ for attr_name, attr_value in self.attributes():
+ # if the attribute name of the attribute ends with `name`, we add it
+ # to some list of potential parent attributes.
+ name_parts = name.split(".")
+ dest_parts = attr_name.split(".")
+ if dest_parts[-len(name_parts) :] == name_parts:
+ parents.append(attr_name)
+ values.append(attr_value)
+
+ if not parents:
+ # We set the value on the dataclass directly, since it wasn't found.
+ warnings.warn(
+ UserWarning(
+ f"Setting a new attribute '{name}' on the"
+ f" dataclass, but it does not have a field of the same name. \n"
+ f"(Consider adding a field '{name}' of type {type(value)} to "
+ f"{type(self)})"
+ )
+ )
+ object.__setattr__(self, name, value)
+
+ elif len(parents) > 1:
+ # more than one parent (ambiguous).
+ raise AttributeError(
+ f"Ambiguous Attribute access: name '{name}' may refer to:\n"
+ + "\n".join(
+ f"- '{parent}' (with a value of: '{value}')"
+ for parent, value in zip(parents, values)
+ )
+ )
+ else:
+ # We recursively set the attribute.
+ attr_name = parents[0]
+ lineage = attr_name.split(".")[:-1]
+ parent: object = self
+ for parent_name in lineage:
+ # NOTE: we can't use getattr, otherwise we would recurse.
+ parent = object.__getattribute__(parent, parent_name)
+ # destination attribute name
+ dest_name = name.split(".")[-1]
+ # Set the attribute on the parent.
+ object.__setattr__(parent, dest_name, value)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ def asdict(self) -> Dict:
+ return dataclasses.asdict(self)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/nested_partial.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/nested_partial.py
new file mode 100644
index 0000000000000000000000000000000000000000..75a273e1cd6a8e9f43efa85d0ba6f5fcd2583a97
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/nested_partial.py
@@ -0,0 +1,48 @@
+import functools
+from typing import Any, Generic, TypeVar
+
+_T = TypeVar("_T")
+
+
+class npartial(functools.partial, Generic[_T]):
+ """Partial that also invokes partials in args and kwargs before feeding them to the function.
+
+ Useful for creating nested partials, e.g.:
+
+
+ >>> from dataclasses import dataclass, field
+ >>> @dataclass
+ ... class Value:
+ ... v: int = 0
+ >>> @dataclass
+ ... class ValueWrapper:
+ ... value: Value
+ ...
+ >>> from functools import partial
+ >>> @dataclass
+ ... class WithRegularPartial:
+ ... wrapped: ValueWrapper = field(
+ ... default_factory=partial(ValueWrapper, value=Value(v=123)),
+ ... )
+
+ Here's the problem: This here is BAD! They both share the same instance of Value!
+
+ >>> WithRegularPartial().wrapped.value is WithRegularPartial().wrapped.value
+ True
+ >>> @dataclass
+ ... class WithNPartial:
+ ... wrapped: ValueWrapper = field(
+ ... default_factory=npartial(ValueWrapper, value=npartial(Value, v=123)),
+ ... )
+ >>> WithNPartial().wrapped.value is WithNPartial().wrapped.value
+ False
+
+ This is fine now!
+ """
+
+ def __call__(self, *args: Any, **keywords: Any) -> _T:
+ keywords = {**self.keywords, **keywords}
+ args = self.args + args
+ args = tuple(arg() if isinstance(arg, npartial) else arg for arg in args)
+ keywords = {k: v() if isinstance(v, npartial) else v for k, v in keywords.items()}
+ return self.func(*args, **keywords)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/partial.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/partial.py
new file mode 100644
index 0000000000000000000000000000000000000000..64b9f2c92a7de3e8b53349bb7570e4b0cd5c2e68
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/partial.py
@@ -0,0 +1,314 @@
+"""A Partial helper that can be used to add arguments for an arbitrary class or callable."""
+from __future__ import annotations
+
+import dataclasses
+import functools
+import inspect
+import typing
+from dataclasses import make_dataclass
+from functools import lru_cache, singledispatch, wraps
+from logging import getLogger as get_logger
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generic,
+ Hashable,
+ Sequence,
+ _ProtocolMeta,
+ cast,
+ get_type_hints,
+)
+
+from typing_extensions import ParamSpec, TypeVar
+
+import simple_parsing
+
+__all__ = ["Partial", "adjust_default", "config_for", "infer_type_annotation_from_default"]
+
+C = TypeVar("C", bound=Callable)
+_P = ParamSpec("_P")
+_T = TypeVar("_T", bound=Any)
+_C = TypeVar("_C", bound=Callable[..., Any])
+
+logger = get_logger(__name__)
+
+
+@singledispatch
+def adjust_default(default: Any) -> Any:
+ """Used the adjust the default value of a parameter that we extract from the signature.
+
+ IF in some libraries, the signature has a special default value, that we shouldn't use as the
+ default, e.g. "MyLibrary.REQUIRED" or something, then a handler can be registered here to
+ convert it to something else.
+
+ For example, here's a fix for the `lr` param of the `torch.optim.SGD` optimizer, which has a
+ weird annotation of `_RequiredParameter`:
+
+ ```python
+ from torch.optim.optimizer import _RequiredParameter
+
+ @adjust_default.register(_RequiredParameter)
+ def _(default: Any) -> Any:
+ return dataclasses.MISSING
+ ```
+ """
+ return default
+
+
+_P = ParamSpec("_P")
+_OutT = TypeVar("_OutT")
+
+
+def _cache_when_possible(fn: Callable[_P, _OutT]) -> Callable[_P, _OutT]:
+ """Makes `fn` behave like `functools.cache(fn)` when args are all hashable, else no change."""
+ cached_fn = lru_cache(maxsize=None)(fn)
+
+ def _all_hashable(args: tuple, kwargs: dict) -> bool:
+ return all(isinstance(arg, Hashable) for arg in args) and all(
+ isinstance(arg, Hashable) for arg in kwargs.values()
+ )
+
+ @wraps(fn)
+ def _switch(*args: _P.args, **kwargs: _P.kwargs) -> _OutT:
+ if _all_hashable(args, kwargs):
+ hashable_kwargs = typing.cast(Dict[str, Hashable], kwargs)
+ return cached_fn(*args, **hashable_kwargs)
+ return fn(*args, **kwargs)
+
+ return _switch
+
+
+@_cache_when_possible
+def config_for(
+ cls: type[_T] | Callable[_P, _T],
+ ignore_args: str | Sequence[str] = (),
+ frozen: bool = True,
+ **defaults,
+) -> type[Partial[_T]]:
+ """Create a dataclass that contains the arguments for the constructor of `cls`.
+
+ Example:
+
+ >>> import dataclasses
+ >>> import simple_parsing as sp
+ >>> class Adam: # i.e. `torch.optim.Adam`, which we don't have installed in this example.
+ ... def __init__(self, params, lr=1e-3, betas=(0.9, 0.999)):
+ ... self.params = params
+ ... self.lr = lr
+ ... self.betas = betas
+ ... def __repr__(self) -> str:
+ ... return f"Adam(params={self.params}, lr={self.lr}, betas={self.betas})"
+ ...
+ >>> AdamConfig = sp.config_for(Adam, ignore_args="params")
+ >>> parser = sp.ArgumentParser()
+ >>> _ = parser.add_arguments(AdamConfig, dest="optimizer")
+
+
+ >>> args = parser.parse_args(["--lr", "0.1", "--betas", "0.1", "0.2"])
+ >>> args.optimizer
+ AdamConfig(lr=0.1, betas=(0.1, 0.2))
+
+ The return dataclass is a subclass of `functools.partial` that returns the `Adam` object:
+
+ >>> isinstance(args.optimizer, functools.partial)
+ True
+ >>> dataclasses.is_dataclass(args.optimizer)
+ True
+ >>> args.optimizer(params=[1, 2, 3])
+ Adam(params=[1, 2, 3], lr=0.1, betas=(0.1, 0.2))
+
+ >>> parser.print_help() # doctest: +SKIP
+ usage: pytest [-h] [--lr float] [--betas float float]
+
+ options:
+ -h, --help show this help message and exit
+
+ AdamConfig ['optimizer']:
+ Auto-Generated configuration dataclass for simple_parsing.helpers.partial.Adam
+
+ --lr float
+ --betas float float
+ """
+ if isinstance(ignore_args, str):
+ ignore_args = (ignore_args,)
+ else:
+ ignore_args = tuple(ignore_args)
+
+ assert isinstance(defaults, dict)
+
+ signature = inspect.signature(cls)
+
+ fields: list[tuple[str, type, dataclasses.Field]] = []
+
+ class_annotations = get_type_hints(cls)
+
+ class_docstring_help = _parse_args_from_docstring(cls.__doc__ or "")
+ if inspect.isclass(cls):
+ class_constructor_help = _parse_args_from_docstring(cls.__init__.__doc__ or "")
+ else:
+ class_constructor_help = {}
+
+ for name, parameter in signature.parameters.items():
+ default = defaults.get(name, parameter.default)
+ if default is parameter.empty:
+ default = dataclasses.MISSING
+ default = adjust_default(default)
+
+ if name in ignore_args:
+ logger.debug(f"Ignoring argument {name}")
+ continue
+
+ if parameter.annotation is not inspect.Parameter.empty:
+ field_type = parameter.annotation
+ elif name in class_annotations:
+ field_type = class_annotations[name]
+ elif default is not dataclasses.MISSING:
+ # Infer the type from the default value.
+ field_type = infer_type_annotation_from_default(default)
+ else:
+ logger.warning(
+ f"Don't know what the type of field '{name}' of class {cls} is! "
+ f"Ignoring this argument."
+ )
+ continue
+
+ class_help_entries = {v for k, v in class_docstring_help.items() if k.startswith(name)}
+ init_help_entries = {v for k, v in class_constructor_help.items() if k.startswith(name)}
+ help_entries = init_help_entries or class_help_entries
+ if help_entries:
+ help_str = help_entries.pop()
+ else:
+ help_str = ""
+
+ if default is dataclasses.MISSING:
+ field = simple_parsing.field(help=help_str, required=True)
+ # insert since fields without defaults need to go first.
+ fields.insert(0, (name, field_type, field))
+ logger.debug(f"Adding required field: {fields[0]}")
+ else:
+ field = simple_parsing.field(default=default, help=help_str)
+ fields.append((name, field_type, field))
+ logger.debug(f"Adding optional field: {fields[-1]}")
+
+ cls_name = _get_generated_config_class_name(cls)
+ config_class = make_dataclass(
+ cls_name=cls_name, bases=(Partial,), fields=fields, frozen=frozen
+ )
+ config_class._target_ = cls
+ config_class.__doc__ = (
+ f"Auto-Generated configuration dataclass for {cls.__module__}.{cls.__qualname__}\n"
+ + (cls.__doc__ or "")
+ )
+
+ return config_class
+
+
+@singledispatch
+def infer_type_annotation_from_default(default: Any) -> Any | type:
+ """Used when there is a default value, but no type annotation, to infer the type of field to
+ create on the config dataclass."""
+ if isinstance(default, (int, str, float, bool)):
+ return type(default)
+ if isinstance(default, tuple):
+ return typing.Tuple[tuple(infer_type_annotation_from_default(d) for d in default)]
+ if isinstance(default, list):
+ if not default:
+ return list
+ # Assuming that all items have the same type.
+ return typing.List[infer_type_annotation_from_default(default[0])]
+ if isinstance(default, dict):
+ if not default:
+ return dict
+ raise NotImplementedError(
+ f"Don't know how to infer type annotation to use for default of {default}"
+ )
+
+
+def _parse_args_from_docstring(docstring: str) -> dict[str, str]:
+ """Taken from `pytorch_lightning.utilities.argparse`."""
+ arg_block_indent = None
+ current_arg = ""
+ parsed = {}
+ for line in docstring.split("\n"):
+ stripped = line.lstrip()
+ if not stripped:
+ continue
+ line_indent = len(line) - len(stripped)
+ if stripped.startswith(("Args:", "Arguments:", "Parameters:")):
+ arg_block_indent = line_indent + 4
+ elif arg_block_indent is None:
+ continue
+ elif line_indent < arg_block_indent:
+ break
+ elif line_indent == arg_block_indent:
+ current_arg, arg_description = stripped.split(":", maxsplit=1)
+ parsed[current_arg] = arg_description.lstrip()
+ elif line_indent > arg_block_indent:
+ parsed[current_arg] += f" {stripped}"
+ return parsed
+
+
+def _get_generated_config_class_name(target: type | Callable) -> str:
+ if inspect.isclass(target):
+ return target.__name__ + "Config"
+ elif inspect.isfunction(target):
+ return target.__name__ + "_config"
+ raise NotImplementedError(target)
+
+
+class _Partial(_ProtocolMeta):
+ _target_: _C
+
+ def __getitem__(cls, target: Callable[_P, _T]) -> type[Callable[_P, _T]]:
+ # full_path = target.__module__ + "." + target.__qualname__
+ # if full_path in _autogenerated_config_classes:
+ # return _autogenerated_config_classes[full_path]
+
+ # TODO: Maybe we should make a distinction here between Partial[_T] and Partial[SomeClass?]
+ # Create the config class.
+ config_class = config_for(target)
+ # Set it's module to be the one calling this, and set that class name in the globals of
+ # the calling module? --> No, too hacky.
+
+ # OR: Set the module to be simple_parsing.helpers.partial ?
+ # TODO: What if we had the name of the class directly encode how to recreate the class?
+ config_class.__module__ = __name__
+ _autogenerated_config_classes[config_class.__qualname__] = config_class
+ return config_class
+
+
+_autogenerated_config_classes: dict[str, type] = {}
+
+
+def __getattr__(name: str):
+ """Getting an attribute on this module here will check for the autogenerated config class with
+ that name."""
+ if name in globals():
+ return globals()[name]
+
+ if name in _autogenerated_config_classes:
+ return _autogenerated_config_classes[name]
+
+ raise AttributeError(f"Module {__name__} has no attribute {name}")
+
+
+class Partial(functools.partial, Generic[_T], metaclass=_Partial):
+ def __new__(cls, __func: Callable[_P, _T] | None = None, *args: _P.args, **kwargs: _P.kwargs):
+ _func = __func or cls._target_
+ assert _func is not None
+ return super().__new__(cls, _func, *args, **kwargs)
+
+ def __call__(self: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs) -> _T:
+ constructor_kwargs = {
+ field.name: getattr(self, field.name) for field in dataclasses.fields(self)
+ }
+ constructor_kwargs.update(**kwargs)
+ # TODO: Use `nested_partial` as a base class? (to instantiate all the partials inside as
+ # well?)
+ self = cast(Partial, self)
+ return type(self)._target_(*args, **constructor_kwargs)
+
+ def __getattr__(self, name: str):
+ if name in self.keywords:
+ return self.keywords[name]
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/encoding.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/encoding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72c5f86bdfefca2dd4fef9f6ac395de4b59f5fd4
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/encoding.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/serializable.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/serializable.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2bfabf92b16ec50e9c5a123d32481eed3710741d
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/serializable.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/yaml_serialization.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/yaml_serialization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91e5f5b3dcd1c4f71f4ed18575b9ce4cad86c1ff
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/__pycache__/yaml_serialization.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/encoding.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc7b511101cf0c9f40e348d2bc807966fea7f938
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/encoding.py
@@ -0,0 +1,141 @@
+"""Simple, extendable mechanism for encoding practically anything to string.
+
+Just register a new encoder for a given type like so:
+
+from simple_parsing.helpers.serialization import encode
+import numpy as np
+@encode.register
+def encode_ndarray(obj: np.ndarray) -> str:
+ return obj.tostring()
+"""
+import copy
+import json
+from argparse import Namespace
+from collections.abc import Mapping
+from dataclasses import fields, is_dataclass
+from enum import Enum
+from functools import singledispatch
+from logging import getLogger
+from os import PathLike
+from typing import Any, Dict, Hashable, List, Set, Tuple, Union
+
+logger = getLogger(__name__)
+
+
class SimpleJsonEncoder(json.JSONEncoder):
    """JSON encoder that falls back to the module-level `encode` function for
    objects the standard `json` module doesn't know how to serialize."""

    def default(self, o: Any) -> Any:
        # Delegate to the `encode` singledispatch function (handles
        # dataclasses and any custom types registered with @encode.register).
        return encode(o)
+
+
+"""
+# NOTE: This code is commented because of static typing check error.
+# The problem is incompatibility of mypy and singledispatch.
+# See mypy issues for more info:
+# https://github.com/python/mypy/issues/8356
+# https://github.com/python/mypy/issues/2904
+# https://github.com/python/mypy/issues/9112#issuecomment-725316936
+
+class Dataclass(Protocol):
+ # see dataclasses.is_dataclass implementation with _FIELDS
+ __dataclass_fields__: Dict[str, Field[Any]]
+
+
+T = TypeVar("T", bool, int, None, str)
+
+
+@overload
+def encode(obj: Dataclass) -> Dict[str, Any]: ...
+
+@overload
+def encode(obj: Union[List[Any], Set[Any], Tuple[Any, ...]]) -> List[Any]:
+ ...
+
+@overload
+def encode(obj: Mapping[Any, Any]) -> Dict[Any, Any]: ...
+
+@overload
+def encode(obj: T) -> T: ...
+"""
+
+
@singledispatch
def encode(obj: Any) -> Any:
    """Encode an object into a json/yaml-compatible primitive type.

    This is called to convert field attributes when calling `to_dict()` on a
    `DictSerializable` instance (including JsonSerializable and YamlSerializable).

    This is used as the 'default' keyword argument to `json.dumps` and
    `json.dump`, and is called when an object is encountered that `json` doesn't
    know how to serialize.

    To register a type as JsonSerializable, you can just register a custom
    serialization function. (There should be no need to do it for dataclasses,
    since that is supported by this function), use @encode.register
    (see the docs for singledispatch).
    """
    try:
        if not is_dataclass(obj):
            # Not a dataclass: return a deep copy of the object as-is and let
            # the serializer deal with it.
            return copy.deepcopy(obj)
        # Dataclass instance: recursively encode each field into a dict.
        encoded: Dict[str, Any] = {}
        for f in fields(obj):
            field_value = getattr(obj, f.name)
            try:
                encoded[f.name] = encode(field_value)
            except TypeError as err:
                logger.error(f"Unable to encode field {f.name}: {err}")
                raise
        return encoded
    except Exception as err:
        logger.debug(f"Cannot encode object {obj}: {err}")
        raise
+
+
@encode.register(list)
@encode.register(tuple)
# @encode.register(Sequence) # Would also encompass `str!`
@encode.register(set)
def encode_list(obj: Union[List[Any], Set[Any], Tuple[Any, ...]]) -> List[Any]:
    """Encode a list/tuple/set by recursively encoding each element into a list.

    TODO: Here we basically say "Encode all these types as lists before serializing".
    That's ok for JSON, but YAML can serialize stuff directly though.
    TODO: Also, with this, we also need to convert back to the right type when
    deserializing, which is totally doable for the fields of dataclasses,
    but maybe not for other stuff.
    """
    return [encode(item) for item in obj]
+
+
@encode.register(Mapping)
def encode_dict(obj: Mapping) -> Dict[Any, Any]:
    """Encode a Mapping by recursively encoding its keys and values.

    Returns a mapping of the same type as `obj` when every encoded key is
    hashable. If an unhashable encoded key is encountered, the result is
    downgraded to a list of (key, value) tuples, since such keys cannot be
    stored in a dict.

    Fixes vs. the previous version:
    - removed an unreachable `return type(obj)(...)` after the first return;
    - once the result was downgraded to a list, a subsequent *hashable* key
      would attempt `result[k_] = v_` on a list and raise TypeError; all
      remaining items are now appended as tuples instead.
    """
    constructor = type(obj)
    result: Any = constructor()
    for k, v in obj.items():
        k_ = encode(k)
        v_ = encode(v)
        if isinstance(result, dict) and isinstance(k_, Hashable):
            result[k_] = v_
        else:
            # If the encoded key isn't "Hashable", then we store everything
            # (already-seen items included) as a list of tuples.
            if isinstance(result, dict):
                result = list(result.items())
            result.append((k_, v_))
    return result
+
+
@encode.register(PathLike)
def encode_path(obj: PathLike) -> str:
    """Encode a path-like object as its filesystem path string."""
    return obj.__fspath__()
+
+
@encode.register(Namespace)
def encode_namespace(obj: Namespace) -> Any:
    """Encode an argparse Namespace by encoding its attribute dict."""
    return encode(vars(obj))
+
+
@encode.register(Enum)
def encode_enum(obj: Enum) -> str:
    """Encode an enum member as its *name* (not its value)."""
    return obj.name
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/serializable.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/serializable.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab37a9c169079855df561b2ee9d2702a353463cd
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/serializable.py
@@ -0,0 +1,984 @@
+from __future__ import annotations
+
+import json
+import pickle
+import warnings
+from collections import OrderedDict
+from dataclasses import MISSING, Field, dataclass, fields, is_dataclass
+from functools import partial
+from importlib import import_module
+from itertools import chain
+from logging import getLogger
+from pathlib import Path
+from types import ModuleType
+from typing import IO, Any, Callable, ClassVar, TypeVar, Union
+
+from typing_extensions import Protocol
+
+from simple_parsing.utils import (
+ DataclassT,
+ all_subclasses,
+ get_args,
+ get_forward_arg,
+ is_optional,
+)
+
+from .decoding import decode_field, register_decoding_fn
+from .encoding import SimpleJsonEncoder, encode
+
+DumpFn = Callable[[Any, IO], None]
+DumpsFn = Callable[[Any], str]
+LoadFn = Callable[[IO], dict]
+LoadsFn = Callable[[str], dict]
+
+logger = getLogger(__name__)
+
+D = TypeVar("D", bound="SerializableMixin")
+
try:
    import yaml

    # Teach PyYAML how to round-trip OrderedDict instances: they are
    # represented as a sequence of (key, value) pairs rather than a plain map,
    # so insertion order survives serialization.
    def ordered_dict_constructor(loader: yaml.Loader, node: yaml.Node):
        # NOTE(ycho): `deep` has to be true for `construct_yaml_seq`.
        value = loader.construct_sequence(node, deep=True)
        return OrderedDict(*value)

    def ordered_dict_representer(dumper: yaml.Dumper, instance: OrderedDict) -> yaml.Node:
        # NOTE(ycho): nested list for compatibility with PyYAML's representer
        node = dumper.represent_sequence("OrderedDict", [list(instance.items())])
        return node

    yaml.add_representer(OrderedDict, ordered_dict_representer)
    yaml.add_constructor("OrderedDict", ordered_dict_constructor)
    # Also accept the tag PyYAML itself emits for OrderedDict objects.
    yaml.add_constructor(
        "tag:yaml.org,2002:python/object/apply:collections.OrderedDict",
        ordered_dict_constructor,
    )

except ImportError:
    # PyYAML is an optional dependency; YAML support is simply unavailable.
    pass
+
+
class FormatExtension(Protocol):
    """Protocol for the serialization backend associated with a file extension.

    Implementations provide `load`/`dump` callables and declare whether the
    underlying file must be opened in binary mode.
    """

    # Whether files of this format must be opened in binary ("rb"/"wb") mode.
    binary: ClassVar[bool] = False

    @staticmethod
    def load(fp: IO) -> Any:
        """Read and deserialize one object from the open file `fp`."""
        ...

    @staticmethod
    def dump(obj: Any, io: IO) -> None:
        """Serialize `obj` and write it to the open file `io`."""
        ...
+
+
class JSONExtension(FormatExtension):
    """FormatExtension for ".json" files (text mode, stdlib `json` module)."""

    load = staticmethod(json.load)
    dump = staticmethod(json.dump)
+
+
class PickleExtension(FormatExtension):
    """FormatExtension for ".pkl" files (binary mode, stdlib `pickle` module).

    NOTE(review): unpickling executes arbitrary code — only load trusted files.
    """

    binary: ClassVar[bool] = True
    load: ClassVar[Callable[[IO], Any]] = staticmethod(pickle.load)
    dump: ClassVar[Callable[[Any, IO[bytes]], None]] = staticmethod(pickle.dump)
+
+
class YamlExtension(FormatExtension):
    """FormatExtension for ".yaml"/".yml" files (text mode, requires PyYAML).

    NOTE: `load`/`dump` are instance methods here (unlike the staticmethods on
    JSONExtension); this works because the registry stores instances.
    """

    def load(self, io: IO) -> Any:
        import yaml

        # safe_load never constructs arbitrary Python objects from the file.
        return yaml.safe_load(io)

    def dump(self, obj: Any, io: IO, **kwargs) -> None:
        import yaml

        return yaml.dump(obj, io, **kwargs)
+
+
class NumpyExtension(FormatExtension):
    """FormatExtension for ".npy" files (binary mode, requires numpy).

    NOTE(review): `allow_pickle=True` means loading can execute arbitrary
    code — only load trusted files.
    """

    binary: bool = True

    def load(self, io: IO) -> Any:
        import numpy

        obj = numpy.load(io, allow_pickle=True)
        # A 0-d object array is the wrapper numpy.save puts around a plain
        # Python object (e.g. a dict): unwrap it back to that object.
        if isinstance(obj, numpy.ndarray) and obj.dtype == object:
            obj = obj.item()
        return obj

    def dump(self, obj: Any, io: IO[bytes], **kwargs) -> None:
        import numpy

        return numpy.save(io, obj, **kwargs)
+
+
class TorchExtension(FormatExtension):
    """FormatExtension for ".pth" files (binary mode, requires torch)."""

    binary: bool = True

    # Annotation fixed: `load` returns the deserialized object, not None.
    def load(self, io: IO) -> Any:
        import torch  # type: ignore

        return torch.load(io)

    def dump(self, obj: Any, io: IO, **kwargs) -> None:
        import torch  # type: ignore

        return torch.save(obj, io, **kwargs)
+
+
class TOMLExtension(FormatExtension):
    """FormatExtension for ".toml" files.

    Reading uses the stdlib `tomllib` (Python 3.11+) with a fallback to the
    `tomli` backport; writing requires `tomli_w`. Both work on binary files,
    hence `binary = True`.
    """

    binary: bool = True

    def load(self, io: IO) -> Any:
        try:
            import tomllib
        except ImportError:
            import tomli as tomllib

        return tomllib.load(io)

    def dump(self, obj: Any, io: IO, **kwargs) -> None:
        import tomli_w

        return tomli_w.dump(obj, io, **kwargs)
+
+
# Shared default instances used by the save_json/save_yaml convenience helpers.
json_extension = JSONExtension()
yaml_extension = YamlExtension()


# Maps a file suffix to the FormatExtension used to read/write that format.
# Extend this dict to register support for additional file extensions.
extensions: dict[str, FormatExtension] = {
    ".json": JSONExtension(),
    ".pkl": PickleExtension(),
    ".yaml": YamlExtension(),
    ".yml": YamlExtension(),
    ".npy": NumpyExtension(),
    ".pth": TorchExtension(),
    ".toml": TOMLExtension(),
}
+
+
def get_extension(path: str | Path) -> FormatExtension:
    """Return the FormatExtension registered for the suffix of `path`.

    Raises:
        RuntimeError: if no handler is registered for that suffix.
    """
    suffix = Path(path).suffix
    if suffix not in extensions:
        raise RuntimeError(
            f"Cannot load to/save from a {suffix} file because "
            "this extension is not registered in the extensions dictionary."
        )
    return extensions[suffix]
+
+
class SerializableMixin:
    """Makes a dataclass serializable to and from dictionaries.

    Supports JSON and YAML files for now.

    >>> from dataclasses import dataclass
    >>> from simple_parsing.helpers import Serializable
    >>> @dataclass
    ... class Config(Serializable):
    ...     a: int = 123
    ...     b: str = "456"
    ...
    >>> config = Config()
    >>> config
    Config(a=123, b='456')
    >>> config.to_dict()
    {'a': 123, 'b': '456'}
    >>> config_ = Config.from_dict({"a": 123, "b": 456})
    >>> config_
    Config(a=123, b='456')
    >>> assert config == config_
    """

    # Registry of every Serializable subclass, in definition order. Used by
    # `from_dict` to find a subclass with matching fields when decoding.
    subclasses: ClassVar[list[type[D]]] = []
    # When True, `from_dict` may decode into a subclass of this class when the
    # dict contains keys that aren't fields of this class.
    decode_into_subclasses: ClassVar[bool] = False

    def __init_subclass__(
        cls, decode_into_subclasses: bool | None = None, add_variants: bool = True
    ):
        # NOTE(review): the `add_variants` keyword is accepted but unused here.
        logger.debug(f"Registering a new Serializable subclass: {cls}")
        super().__init_subclass__()
        if decode_into_subclasses is None:
            # if decode_into_subclasses is None, we will use the value of the
            # parent class, if it is also a subclass of Serializable.
            # Skip the class itself as well as object.
            parents = cls.mro()[1:-1]
            logger.debug(f"parents: {parents}")

            for parent in parents:
                if parent in SerializableMixin.subclasses and parent is not SerializableMixin:
                    decode_into_subclasses = parent.decode_into_subclasses
                    logger.debug(
                        f"Parent class {parent} has decode_into_subclasses = {decode_into_subclasses}"
                    )
                    break

        cls.decode_into_subclasses = decode_into_subclasses or False
        if cls not in SerializableMixin.subclasses:
            SerializableMixin.subclasses.append(cls)

        # Make instances of this subclass encodable/decodable by the
        # serialization helpers in this package.
        encode.register(cls, cls.to_dict)
        register_decoding_fn(cls, cls.from_dict)

    def to_dict(
        self, dict_factory: type[dict] = dict, recurse: bool = True, save_dc_types: bool = False
    ) -> dict:
        """Serializes this dataclass to a dict.

        NOTE: This 'extends' the `asdict()` function from
        the `dataclasses` package, allowing us to not include some fields in the
        dict, or to perform some kind of custom encoding (for instance,
        detaching `Tensor` objects before serializing the dataclass to a dict).
        """
        return to_dict(
            self, dict_factory=dict_factory, recurse=recurse, save_dc_types=save_dc_types
        )

    @classmethod
    def from_dict(cls: type[D], obj: dict, drop_extra_fields: bool | None = None) -> D:
        """Parses an instance of `cls` from the given dict.

        NOTE: If the `decode_into_subclasses` class attribute is set to True (or
        if `decode_into_subclasses=True` was passed in the class definition),
        then if there are keys in the dict that aren't fields of the dataclass,
        this will decode the dict into an instance the first subclass of `cls`
        which has all required field names present in the dictionary.

        Passing `drop_extra_fields=None` (default) will use the class attribute
        described above.
        Passing `drop_extra_fields=True` will decode the dict into an instance
        of `cls` and drop the extra keys in the dict.
        Passing `drop_extra_fields=False` forces the above-mentioned behaviour.
        """
        return from_dict(cls, obj, drop_extra_fields=drop_extra_fields)

    def dump(self, fp: IO[str], dump_fn: DumpFn = json.dump) -> None:
        """Serialize this instance to the open file `fp` using `dump_fn`."""
        dump(self, fp=fp, dump_fn=dump_fn)

    def dump_json(self, fp: IO[str], dump_fn: DumpFn = json.dump, **kwargs) -> None:
        """Serialize this instance to the open file `fp` as JSON."""
        return dump_json(self, fp, dump_fn=dump_fn, **kwargs)

    def dump_yaml(self, fp: IO[str], dump_fn: DumpFn | None = None, **kwargs) -> None:
        """Serialize this instance to the open file `fp` as YAML."""
        return dump_yaml(self, fp, dump_fn=dump_fn, **kwargs)

    def dumps(self, dump_fn: DumpsFn = json.dumps, **kwargs) -> str:
        """Serialize this instance to a string using `dump_fn`."""
        # NOTE(review): the module-level `dumps` takes no **kwargs; passing
        # extra keyword arguments here would raise TypeError — confirm intent.
        return dumps(self, dump_fn=dump_fn, **kwargs)

    def dumps_json(self, dump_fn: DumpsFn = json.dumps, **kwargs) -> str:
        """Serialize this instance to a JSON string."""
        return dumps_json(self, dump_fn=dump_fn, **kwargs)

    def dumps_yaml(self, dump_fn: DumpsFn | None = None, **kwargs) -> str:
        """Serialize this instance to a YAML string."""
        return dumps_yaml(self, dump_fn=dump_fn, **kwargs)

    @classmethod
    def load(
        cls: type[D],
        path: Path | str | IO[str],
        drop_extra_fields: bool | None = None,
        load_fn: LoadFn | None = None,
        **kwargs,
    ) -> D:
        """Loads an instance of `cls` from the given file.

        Args:
            cls (Type[D]): A dataclass type to load.
            path (Union[Path, str, IO[str]]): Path or Path string or open file.
            drop_extra_fields (bool, optional): Whether to drop extra fields or
                to decode the dictionary into the first subclass with matching
                fields. Defaults to None, in which case we use the value of
                `cls.decode_into_subclasses`.
                For more info, see `cls.from_dict`.
            load_fn (Callable, optional): Which loading function to use. Defaults
                to None, in which case we try to use the appropriate loading
                function depending on `path.suffix`:
                {
                    ".yml": yaml.safe_load,
                    ".yaml": yaml.safe_load,
                    ".json": json.load,
                    ".pth": torch.load,
                    ".pkl": pickle.load,
                }

        Raises:
            RuntimeError: If the extension of `path` is unsupported.

        Returns:
            D: An instance of `cls`.
        """
        return load(cls, path=path, drop_extra_fields=drop_extra_fields, load_fn=load_fn, **kwargs)

    @classmethod
    def _load(
        cls: type[D],
        fp: IO[str],
        drop_extra_fields: bool | None = None,
        load_fn: LoadFn = json.load,
        **kwargs,
    ) -> D:
        """Loads an instance of `cls` from an already-open file object."""
        return load(cls, path=fp, drop_extra_fields=drop_extra_fields, load_fn=load_fn, **kwargs)

    @classmethod
    def load_json(
        cls: type[D],
        path: str | Path,
        drop_extra_fields: bool | None = None,
        load_fn: LoadFn = json.load,
        **kwargs,
    ) -> D:
        """Loads an instance from the corresponding json-formatted file.

        Args:
            cls (Type[D]): A dataclass type to load.
            path (Union[str, Path]): Path to a json-formatted file.
            load_fn ([type], optional): Loading function to use. Defaults to json.load.

        Returns:
            D: an instance of the dataclass.
        """
        return load_json(cls, path, drop_extra_fields=drop_extra_fields, load_fn=load_fn, **kwargs)

    @classmethod
    def load_yaml(
        cls: type[D],
        path: str | Path,
        drop_extra_fields: bool | None = None,
        load_fn=None,
        **kwargs,
    ) -> D:
        """Loads an instance from the corresponding yaml-formatted file.

        Args:
            cls (Type[D]): A dataclass type to load.
            path (Union[str, Path]): Path to a yaml-formatted file.
            load_fn ([type], optional): Loading function to use. Defaults to
                None, in which case `yaml.safe_load` is used.

        Returns:
            D: an instance of the dataclass.
        """
        return load_yaml(cls, path, load_fn=load_fn, drop_extra_fields=drop_extra_fields, **kwargs)

    def save(self, path: str | Path, format: FormatExtension | None = None) -> None:
        """Save this instance to `path`; format inferred from the suffix unless given."""
        save(self, path=path, format=format)

    def _save(self, path: str | Path, format: FormatExtension = json_extension, **kwargs) -> None:
        """Save this instance to `path`, defaulting to the JSON format."""
        save(self, path=path, format=format, **kwargs)

    def save_yaml(self, path: str | Path, dump_fn: DumpFn | None = None, **kwargs) -> None:
        """Save this instance to `path` as YAML."""
        # NOTE(review): the `dump_fn` argument is accepted but not forwarded.
        save_yaml(self, path, **kwargs)

    def save_json(self, path: str | Path, **kwargs) -> None:
        """Save this instance to `path` as JSON."""
        save_json(self, path, **kwargs)

    @classmethod
    def loads(
        cls: type[D],
        s: str,
        drop_extra_fields: bool | None = None,
        load_fn: LoadsFn = json.loads,
    ) -> D:
        """Parse an instance of `cls` from the string `s` using `load_fn`."""
        return loads(cls, s, drop_extra_fields=drop_extra_fields, load_fn=load_fn)

    @classmethod
    def loads_json(
        cls: type[D],
        s: str,
        drop_extra_fields: bool | None = None,
        load_fn=json.loads,
        **kwargs,
    ) -> D:
        """Parse an instance of `cls` from the JSON string `s`."""
        return loads_json(
            cls, s, drop_extra_fields=drop_extra_fields, load_fn=partial(load_fn, **kwargs)
        )

    @classmethod
    def loads_yaml(
        cls: type[D],
        s: str,
        drop_extra_fields: bool | None = None,
        load_fn: LoadsFn | None = None,
        **kwargs,
    ) -> D:
        """Parse an instance of `cls` from the YAML string `s`."""
        return loads_yaml(cls, s, drop_extra_fields=drop_extra_fields, load_fn=load_fn, **kwargs)
+
+
@dataclass
class Serializable(SerializableMixin):
    """Makes a dataclass serializable to and from dictionaries.

    Supports JSON and YAML files for now.

    >>> from dataclasses import dataclass
    >>> from simple_parsing.helpers import Serializable
    >>> @dataclass
    ... class Config(Serializable):
    ...     a: int = 123
    ...     b: str = "456"
    ...
    >>> config = Config()
    >>> config
    Config(a=123, b='456')
    >>> config.to_dict()
    {'a': 123, 'b': '456'}
    >>> config_ = Config.from_dict({"a": 123, "b": 456})
    >>> config_
    Config(a=123, b='456')
    >>> assert config == config_
    """

    # No body: all behaviour comes from SerializableMixin; this class only
    # provides a convenient @dataclass base to inherit from.
+
+
@dataclass(frozen=True)
class FrozenSerializable(SerializableMixin):
    """Makes a (frozen) dataclass serializable to and from dictionaries.

    Supports JSON and YAML files for now.

    >>> from dataclasses import dataclass
    >>> from simple_parsing.helpers import Serializable
    >>> @dataclass
    ... class Config(Serializable):
    ...     a: int = 123
    ...     b: str = "456"
    ...
    >>> config = Config()
    >>> config
    Config(a=123, b='456')
    >>> config.to_dict()
    {'a': 123, 'b': '456'}
    >>> config_ = Config.from_dict({"a": 123, "b": 456})
    >>> config_
    Config(a=123, b='456')
    >>> assert config == config_
    """

    # NOTE(review): the doctest above mirrors Serializable's example; a frozen
    # example would subclass FrozenSerializable instead.
+
+
@dataclass
class SimpleSerializable(SerializableMixin, decode_into_subclasses=True):
    """Serializable dataclass base that decodes into subclasses by default."""

    pass
+
+
+S = TypeVar("S", bound=SerializableMixin)
+
+
def get_serializable_dataclass_types_from_forward_ref(
    forward_ref: type, serializable_base_class: type[S] = SerializableMixin
) -> list[type[S]]:
    """Gets all the subclasses of `serializable_base_class` that have the same name as the argument
    of this forward reference annotation."""
    target_name = get_forward_arg(forward_ref)
    return [
        candidate
        for candidate in serializable_base_class.subclasses
        if candidate.__name__ == target_name
    ]
+
+
+T = TypeVar("T")
+
+
def load(
    cls: type[DataclassT],
    path: Path | str | IO,
    drop_extra_fields: bool | None = None,
    load_fn: LoadFn | None = None,
) -> DataclassT:
    """Loads an instance of `cls` from the given file.

    First, `load_fn` is used to get a potentially nested dictionary of python primitives from a
    file. Then, a decoding function is applied to each value, based on the type annotation of the
    corresponding field. Finally, the resulting dictionary is used to instantiate an instance of
    the dataclass `cls`.

    - string -> `load_fn` (json/yaml/etc) -> dict with "raw" python values -> decode -> \
        dict with constructor arguments -> `cls`(**dict) -> instance of `cls`

    NOTE: This does not save the types of the dataclass fields. This is usually not an issue, since
    we can recover the right type to use by looking at subclasses of the annotated type. However,
    in some cases (e.g. subgroups), it might be useful to save all the types of all the
    fields, in which case you should probably use something like `yaml.dump`, directly passing it
    the dataclass, instead of this.

    Args:
        cls (Type[D]): A dataclass type to load.
        path (Path | str): Path or Path string or open file.
        drop_extra_fields (bool, optional): Whether to drop extra fields or
            to decode the dictionary into the first subclass with matching
            fields. Defaults to None, in which case we use the value of
            `cls.decode_into_subclasses`.
            For more info, see `cls.from_dict`.
        load_fn ([type], optional): Which loading function to use. Defaults
            to None, in which case we try to use the appropriate loading
            function depending on `path.suffix`:
            {
                ".yml": yaml.safe_load,
                ".yaml": yaml.safe_load,
                ".json": json.load,
                ".pth": torch.load,
                ".pkl": pickle.load,
            }

    Raises:
        RuntimeError: If the extension of `path` is unsupported.

    Returns:
        D: An instance of `cls`.
    """
    if isinstance(path, str):
        path = Path(path)
    if load_fn is None and isinstance(path, Path):
        # No explicit loader and we have a real path: pick the loader from the
        # registered file-extension handlers.
        # Load a dict from the file.
        d = read_file(path)
    elif load_fn:
        # Explicit loader: open the path ourselves, or use the stream as-is.
        with path.open() if isinstance(path, Path) else path as f:
            d = load_fn(f)
    else:
        # A raw stream with no loader: we can't guess the format.
        raise ValueError(
            "A loading function must be passed, since we got an io stream, and the "
            "extension can't be retrieved."
        )
    # Convert the dict into an instance of the class.
    if drop_extra_fields is None and getattr(cls, "decode_into_subclasses", None) is not None:
        drop_extra_fields = not getattr(cls, "decode_into_subclasses")
    return from_dict(cls, d, drop_extra_fields=drop_extra_fields)
+
+
def load_json(
    cls: type[DataclassT],
    path: str | Path,
    drop_extra_fields: bool | None = None,
    load_fn: LoadFn = json.load,
    **kwargs,
) -> DataclassT:
    """Loads an instance from the corresponding json-formatted file.

    Args:
        cls (Type[D]): A dataclass type to load.
        path (Union[str, Path]): Path to a json-formatted file.
        load_fn ([type], optional): Loading function to use. Defaults to json.load.

    Returns:
        D: an instance of the dataclass.
    """
    reader = partial(load_fn, **kwargs)
    return load(cls, path, drop_extra_fields=drop_extra_fields, load_fn=reader)
+
+
def loads(
    cls: type[DataclassT],
    s: str,
    drop_extra_fields: bool | None = None,
    load_fn: LoadsFn = json.loads,
) -> DataclassT:
    """Deserialize the string `s` with `load_fn`, then build a `cls` from the result."""
    raw_dict = load_fn(s)
    return from_dict(cls, raw_dict, drop_extra_fields=drop_extra_fields)
+
+
def loads_json(
    cls: type[DataclassT],
    s: str,
    drop_extra_fields: bool | None = None,
    load_fn: LoadsFn = json.loads,
    **kwargs,
) -> DataclassT:
    """Parse an instance of `cls` from the JSON string `s` (kwargs go to `load_fn`)."""
    reader = partial(load_fn, **kwargs)
    return loads(cls, s, drop_extra_fields=drop_extra_fields, load_fn=reader)
+
+
def loads_yaml(
    cls: type[DataclassT],
    s: str,
    drop_extra_fields: bool | None = None,
    load_fn: LoadsFn | None = None,
    **kwargs,
) -> DataclassT:
    """Parse an instance of `cls` from the YAML string `s` (default loader: yaml.safe_load)."""
    import yaml

    reader = yaml.safe_load if load_fn is None else load_fn
    return loads(cls, s, drop_extra_fields=drop_extra_fields, load_fn=partial(reader, **kwargs))
+
+
def read_file(path: str | Path) -> dict:
    """Returns the contents of the given file as a dictionary.
    Uses the right function depending on `path.suffix`:
    {
        ".yml": yaml.safe_load,
        ".yaml": yaml.safe_load,
        ".json": json.load,
        ".pth": torch.load,
        ".pkl": pickle.load,
    }
    """
    fmt = get_extension(path)
    mode = "rb" if fmt.binary else "r"
    with open(path, mode=mode) as handle:
        return fmt.load(handle)
+
+
def save(
    obj: Any,
    path: str | Path,
    format: FormatExtension | None = None,
    save_dc_types: bool = False,
    **kwargs,
) -> None:
    """Save the given dataclass or dictionary to the given file."""
    data = obj if isinstance(obj, dict) else to_dict(obj, save_dc_types=save_dc_types)
    fmt = get_extension(path) if format is None else format
    mode = "wb" if fmt.binary else "w"
    with open(path, mode=mode) as handle:
        return fmt.dump(data, handle, **kwargs)
+
+
def save_yaml(obj, path: str | Path, **kwargs) -> None:
    """Save `obj` (a dataclass or dict) to `path` in YAML format."""
    save(obj, path, format=yaml_extension, **kwargs)
+
+
def save_json(obj, path: str | Path, **kwargs) -> None:
    """Save `obj` (a dataclass or dict) to `path` in JSON format."""
    save(obj, path, format=json_extension, **kwargs)
+
+
def load_yaml(
    cls: type[T],
    path: str | Path,
    drop_extra_fields: bool | None = None,
    load_fn: LoadFn | None = None,
    **kwargs,
) -> T:
    """Loads an instance from the corresponding yaml-formatted file.

    Args:
        cls (Type[T]): A dataclass type to load.
        path (Union[str, Path]): Path to a yaml-formatted file.
        load_fn ([type], optional): Loading function to use. Defaults to
            None, in which case `yaml.safe_load` is used.

    Returns:
        T: an instance of the dataclass.
    """
    import yaml

    reader = yaml.safe_load if load_fn is None else load_fn
    return load(cls, path, drop_extra_fields=drop_extra_fields, load_fn=partial(reader, **kwargs))
+
+
def dump(dc, fp: IO[str], dump_fn: DumpFn = json.dump) -> None:
    """Serialize `dc` (a dataclass or a dict) to the open file `fp` using `dump_fn`."""
    payload = dc if isinstance(dc, dict) else to_dict(dc)
    dump_fn(payload, fp)
+
+
def dump_json(dc, fp: IO[str], dump_fn: DumpFn = json.dump, **kwargs) -> None:
    """Serialize `dc` to the open file `fp` as JSON, forwarding `kwargs` to `dump_fn`."""
    writer = partial(dump_fn, **kwargs)
    return dump(dc, fp, dump_fn=writer)
+
+
def dump_yaml(dc, fp: IO[str], dump_fn: DumpFn | None = None, **kwargs) -> None:
    """Serialize `dc` to the open file `fp` as YAML (default writer: yaml.dump)."""
    import yaml

    writer = yaml.dump if dump_fn is None else dump_fn
    return dump(dc, fp, dump_fn=partial(writer, **kwargs))
+
+
def dumps(dc, dump_fn: DumpsFn = json.dumps) -> str:
    """Serialize `dc` (a dataclass or a dict) to a string using `dump_fn`."""
    payload = dc if isinstance(dc, dict) else to_dict(dc)
    return dump_fn(payload)
+
+
def dumps_json(dc, dump_fn: DumpsFn = json.dumps, **kwargs) -> str:
    """Serialize `dc` to a JSON string; uses SimpleJsonEncoder unless a `cls` kwarg is given."""
    options = dict(kwargs)
    options.setdefault("cls", SimpleJsonEncoder)
    return dumps(dc, dump_fn=partial(dump_fn, **options))
+
+
def dumps_yaml(dc, dump_fn: DumpsFn | None = None, **kwargs) -> str:
    """Serialize `dc` to a YAML string (default writer: yaml.dump)."""
    import yaml

    writer = yaml.dump if dump_fn is None else dump_fn
    return dumps(dc, dump_fn=partial(writer, **kwargs))
+
+
+DC_TYPE_KEY = "_type_"
+
+
def to_dict(
    dc: DataclassT,
    dict_factory: type[dict] = dict,
    recurse: bool = True,
    save_dc_types: bool = False,
) -> dict:
    """Serializes this dataclass to a dict.

    NOTE: This 'extends' the `asdict()` function from
    the `dataclasses` package, allowing us to not include some fields in the
    dict, or to perform some kind of custom encoding (for instance,
    detaching `Tensor` objects before serializing the dataclass to a dict).

    When `save_dc_types` is True, the type of each dataclass field is saved in the dict of that
    field at a `DC_TYPE_KEY` entry.
    """
    if not is_dataclass(dc):
        raise ValueError("to_dict should only be called on a dataclass instance.")

    d: dict[str, Any] = dict_factory()

    if save_dc_types:
        class_name = dc.__class__.__qualname__
        module = type(dc).__module__
        # BUG FIX: this check was `if "" in class_name:`, which is always true
        # and therefore prevented the type from ever being saved. Dataclasses
        # defined inside a function carry the "<locals>" marker in their
        # qualified name and cannot be located again when deserializing, so
        # only those are skipped.
        if "<locals>" in class_name:
            # Don't save the type of function-scoped dataclasses.
            warnings.warn(
                RuntimeWarning(
                    f"Dataclass type {type(dc)} is defined in a function scope, which might cause "
                    f"issues when deserializing the containing dataclass. Refusing to save the "
                    f"type of this dataclass in the serialized dictionary."
                )
            )
        else:
            d[DC_TYPE_KEY] = module + "." + class_name

    for f in fields(dc):
        name = f.name
        value = getattr(dc, name)

        # Do not include in dict if some corresponding flag was set in metadata.
        include_in_dict = f.metadata.get("to_dict", True)
        if not include_in_dict:
            continue

        custom_encoding_fn = f.metadata.get("encoding_fn")
        if custom_encoding_fn:
            # Use a custom encoding function if there is one.
            d[name] = custom_encoding_fn(value)
            continue

        encoding_fn = encode
        # TODO: Make a variant of the serialization tests that use the static functions everywhere.
        if is_dataclass(value) and recurse:
            # Recurse explicitly so dict_factory / save_dc_types propagate to
            # nested dataclasses.
            encoded = to_dict(
                value, dict_factory=dict_factory, recurse=recurse, save_dc_types=save_dc_types
            )
            logger.debug(f"Encoded dataclass field {name}: {encoded}")
        else:
            try:
                encoded = encoding_fn(value)
            except Exception as e:
                logger.error(
                    f"Unable to encode value {value} of type {type(value)}! Leaving it as-is. (exception: {e})"
                )
                encoded = value
        d[name] = encoded
    return d
+
+
def from_dict(
    cls: type[DataclassT], d: dict[str, Any], drop_extra_fields: bool | None = None
) -> DataclassT:
    """Parses an instance of the dataclass `cls` from the dict `d`.

    Args:
        cls (Type[Dataclass]): A `dataclass` type.
        d (Dict[str, Any]): A dictionary of `raw` values, obtained for example
            when deserializing a json file into an instance of class `cls`.
        drop_extra_fields (bool, optional): Whether or not to drop extra
            dictionary keys (dataclass fields) when encountered. There are three
            options:
            - True:
                The extra keys are dropped, and this function returns an
                instance of `cls`.
            - False:
                The extra keys (if any) are kept, and we search through the
                subclasses of `cls` for the first dataclass which has all the
                required fields.
            - None (default):
                `drop_extra_fields = not cls.decode_into_subclasses`.

    Raises:
        RuntimeError: If an error is encountered while instantiating the class.

    Returns:
        Dataclass: An instance of the dataclass `cls`.
    """
    if d is None:
        # Nothing to decode (e.g. an Optional[SomeDataclass] field that was null).
        return None

    obj_dict: dict[str, Any] = d.copy()

    init_args: dict[str, Any] = {}
    non_init_args: dict[str, Any] = {}

    if DC_TYPE_KEY in obj_dict:
        # The serialized dict recorded the concrete dataclass type: import it
        # with `_locate` and decode into that type instead of `cls`.
        target = obj_dict.pop(DC_TYPE_KEY)
        # module, dc_type = target.rsplit(".", 1)
        live_dc_type = _locate(target)
        # live_module = importlib.import_module(module)
        # live_dc_type = getattr(live_module, dc_type)
        return from_dict(live_dc_type, obj_dict, drop_extra_fields=drop_extra_fields)

    if drop_extra_fields is None:
        drop_extra_fields = not getattr(cls, "decode_into_subclasses", False)
        logger.debug("drop_extra_fields is None. Using cls attribute.")

    if cls in {Serializable, FrozenSerializable, SerializableMixin}:
        # Passing `Serializable` means that we want to find the right
        # subclass depending on the keys.
        # We set the value to False when `Serializable` is passed, since
        # we use this mechanism when we don't know which dataclass to use.
        logger.debug("cls is `SerializableMixin`, drop_extra_fields = False.")
        drop_extra_fields = False

    logger.debug(f"from_dict for {cls}, drop extra fields: {drop_extra_fields}")
    for field in fields(cls) if is_dataclass(cls) else []:
        name = field.name
        if name not in obj_dict:
            # Only warn when the missing field is serialized (to_dict metadata)
            # and has no default to fall back on.
            if (
                field.metadata.get("to_dict", True)
                and field.default is MISSING
                and field.default_factory is MISSING
            ):
                logger.warning(
                    f"Couldn't find the field '{name}' in the dict with keys " f"{list(d.keys())}"
                )
            continue

        raw_value = obj_dict.pop(name)
        field_value = decode_field(
            field, raw_value, containing_dataclass=cls, drop_extra_fields=drop_extra_fields
        )

        if field.init:
            init_args[name] = field_value
        else:
            # Fields with init=False are set on the instance after construction.
            non_init_args[name] = field_value

    extra_args = obj_dict

    # If there are arguments left over in the dict after taking all fields.
    if extra_args:
        if drop_extra_fields:
            logger.warning(f"Dropping extra args {extra_args}")
            extra_args.clear()

        else:
            # Use the first Serializable derived class that has all the required
            # fields.
            logger.debug(f"Missing field names: {extra_args.keys()}")

            # Find all the "registered" subclasses of `cls`. (from Serializable)
            derived_classes: list[type[DataclassT]] = []

            for subclass in all_subclasses(cls):
                if subclass is not cls:
                    derived_classes.append(subclass)
            logger.debug(f"All derived classes of {cls} available: {derived_classes}")

            # All the arguments that the dataclass should be able to accept in
            # its 'init'.
            req_init_field_names = set(chain(extra_args, init_args))

            # Sort the derived classes by their number of init fields, so that
            # we choose the first one with all the required fields.
            derived_classes.sort(key=lambda dc: len(get_init_fields(dc)))

            for child_class in derived_classes:
                logger.debug(f"child class: {child_class.__name__}, mro: {child_class.mro()}")
                child_init_fields: dict[str, Field] = get_init_fields(child_class)
                child_init_field_names = set(child_init_fields.keys())

                if child_init_field_names >= req_init_field_names:
                    # `child_class` is the first class with all required fields.
                    logger.debug(f"Using class {child_class} instead of {cls}")
                    return from_dict(child_class, d, drop_extra_fields=False)

    init_args.update(extra_args)
    try:
        instance = cls(**init_args)  # type: ignore
    except TypeError as e:
        # raise RuntimeError(f"Couldn't instantiate class {cls} using init args {init_args}.")
        # NOTE(review): consider `raise ... from e` to preserve the exception chain.
        raise RuntimeError(
            f"Couldn't instantiate class {cls} using init args {init_args.keys()}: {e}"
        )

    for name, value in non_init_args.items():
        logger.debug(f"Setting non-init field '{name}' on the instance.")
        setattr(instance, name, value)
    return instance
+
+
def get_init_fields(dataclass: type) -> dict[str, Field]:
    """Return a mapping from field name to Field for the fields with `init=True`."""
    return {f.name: f for f in fields(dataclass) if f.init}
+
+
def get_first_non_None_type(optional_type: type | tuple[type, ...]) -> type | None:
    """Return the first type argument of `optional_type` that isn't `Union`/`NoneType`.

    Accepts either a typing construct (e.g. Optional[int]) or an already
    extracted tuple of type arguments; returns None if all arguments are NoneType.
    """
    args = optional_type if isinstance(optional_type, tuple) else get_args(optional_type)
    for candidate in args:
        if candidate is Union or candidate is type(None):  # noqa: E721
            continue
        logger.debug(f"arg: {candidate} is not union? {candidate is not Union}")
        logger.debug(f"arg is not type(None)? {candidate is not type(None)}")
        return candidate
    return None
+
+
def is_dataclass_or_optional_dataclass_type(t: type) -> bool:
    """Returns whether `t` is a dataclass type or an Optional[]."""
    if is_dataclass(t):
        return True
    return is_optional(t) and is_dataclass(get_args(t)[0])
+
+
def _locate(path: str) -> Any:
    """COPIED FROM Hydra: https://github.com/facebookresearch/hydra/blob/f8940600d0ab5c695961ad83ab
    d042ffe9458caf/hydra/_internal/utils.py#L614.

    Locate an object by name or dotted path, importing as necessary. This is similar to the pydoc
    function `locate`, except that it checks for the module from the given path from back to front.
    """
    if path == "":
        raise ImportError("Empty path")

    # Validate every dotted component up front so we fail with a clear message
    # rather than a confusing import error later.
    parts = [part for part in path.split(".")]
    for part in parts:
        if not len(part):
            raise ValueError(
                f"Error loading '{path}': invalid dotstring."
                + "\nRelative imports are not supported."
            )
    assert len(parts) > 0
    # Import the top-level package first, then walk down one component at a time.
    part0 = parts[0]
    try:
        obj = import_module(part0)
    except Exception as exc_import:
        raise ImportError(
            f"Error loading '{path}':\n{repr(exc_import)}"
            + f"\nAre you sure that module '{part0}' is installed?"
        ) from exc_import
    for m in range(1, len(parts)):
        part = parts[m]
        try:
            # Prefer attribute access (classes, functions, nested attributes).
            obj = getattr(obj, part)
        except AttributeError as exc_attr:
            parent_dotpath = ".".join(parts[:m])
            if isinstance(obj, ModuleType):
                # The attribute may actually be a submodule that hasn't been
                # imported yet; try importing it before giving up.
                mod = ".".join(parts[: m + 1])
                try:
                    obj = import_module(mod)
                    continue
                except ModuleNotFoundError as exc_import:
                    raise ImportError(
                        f"Error loading '{path}':\n{repr(exc_import)}"
                        + f"\nAre you sure that '{part}' is importable from module '{parent_dotpath}'?"
                    ) from exc_import
                except Exception as exc_import:
                    raise ImportError(
                        f"Error loading '{path}':\n{repr(exc_import)}"
                    ) from exc_import
            raise ImportError(
                f"Error loading '{path}':\n{repr(exc_attr)}"
                + f"\nAre you sure that '{part}' is an attribute of '{parent_dotpath}'?"
            ) from exc_attr
    return obj
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/yaml_serialization.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/yaml_serialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..13aa034c59fbc9f04719a85325a36c3f468ab71f
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/serialization/yaml_serialization.py
@@ -0,0 +1,71 @@
+from __future__ import annotations
+
+from logging import getLogger
+from pathlib import Path
+from typing import IO
+
+try:
+ import yaml
+except ImportError:
+ pass
+
+from .serializable import D, Serializable
+
+logger = getLogger(__name__)
+
+
class YamlSerializable(Serializable):
    """A `Serializable` whose (de)serialization methods default to PyYAML functions.

    Overrides `dump`, `dumps`, `load`, `loads` and `_load` from `Serializable` so that,
    when no explicit function is passed, dumping uses `yaml.dump` and loading uses
    `yaml.safe_load`.

    Requires the pyyaml package.
    """

    def dump(self, fp: IO[str], dump_fn=None, **kwargs) -> None:
        # Fall back to yaml.dump when no dump function was given.
        writer = dump_fn or yaml.dump
        writer(self.to_dict(), fp, **kwargs)

    def dumps(self, dump_fn=None, **kwargs) -> str:
        writer = dump_fn or yaml.dump
        return writer(self.to_dict(), **kwargs)

    @classmethod
    def load(
        cls: type[D],
        path: Path | str | IO[str],
        drop_extra_fields: bool | None = None,
        load_fn=None,
        **kwargs,
    ) -> D:
        reader = load_fn or yaml.safe_load
        return super().load(path, drop_extra_fields=drop_extra_fields, load_fn=reader, **kwargs)

    @classmethod
    def loads(
        cls: type[D],
        s: str,
        drop_extra_fields: bool | None = None,
        load_fn=None,
        **kwargs,
    ) -> D:
        reader = load_fn or yaml.safe_load
        return super().loads(s, drop_extra_fields=drop_extra_fields, load_fn=reader, **kwargs)

    @classmethod
    def _load(
        cls: type[D],
        fp: IO[str],
        drop_extra_fields: bool | None = None,
        load_fn=None,
        **kwargs,
    ) -> D:
        reader = load_fn or yaml.safe_load
        return super()._load(fp, drop_extra_fields=drop_extra_fields, load_fn=reader, **kwargs)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/helpers/subgroups.py b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/subgroups.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b16794c71b90073bfc175052eda69dd4bf5df95
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/helpers/subgroups.py
@@ -0,0 +1,277 @@
+from __future__ import annotations
+
+import functools
+import inspect
+import typing
+from dataclasses import _MISSING_TYPE, MISSING
+from enum import Enum
+from logging import getLogger as get_logger
+from typing import Any, Callable, TypeVar, Union, overload
+
+from typing_extensions import TypeAlias
+
+from simple_parsing.utils import DataclassT, is_dataclass_instance, is_dataclass_type
+
# Module-level logger for this file.
logger = get_logger(__name__)

# Alias for the types accepted as keys of a `subgroups` dict.
SubgroupKey: TypeAlias = Union[str, int, bool, Enum]

# Constrained TypeVar counterpart of `SubgroupKey`, so signatures preserve the concrete key type.
Key = TypeVar("Key", str, int, bool, Enum)
+
+
@overload
def subgroups(
    subgroups: dict[Key, DataclassT | type[DataclassT] | functools.partial[DataclassT]],
    *args,
    default: Key | DataclassT,
    default_factory: _MISSING_TYPE = MISSING,
    **kwargs,
) -> DataclassT:
    ...


@overload
def subgroups(
    subgroups: dict[Key, DataclassT | type[DataclassT] | functools.partial[DataclassT]],
    *args,
    default: _MISSING_TYPE = MISSING,
    default_factory: type[DataclassT] | functools.partial[DataclassT],
    **kwargs,
) -> DataclassT:
    ...


@overload
def subgroups(
    subgroups: dict[Key, DataclassT | type[DataclassT] | functools.partial[DataclassT]],
    *args,
    default: _MISSING_TYPE = MISSING,
    default_factory: _MISSING_TYPE = MISSING,
    **kwargs,
) -> DataclassT:
    ...


def subgroups(
    subgroups: dict[Key, DataclassT | type[DataclassT] | functools.partial[DataclassT]],
    *args,
    default: Key | DataclassT | _MISSING_TYPE = MISSING,
    default_factory: type[DataclassT] | functools.partial[DataclassT] | _MISSING_TYPE = MISSING,
    **kwargs,
) -> DataclassT:
    """Creates a field that will be a choice between different subgroups of arguments.

    This is different than adding a subparser action. There can only be one subparser action,
    while there can be arbitrarily many subgroups. Subgroups can also be nested!

    Parameters
    ----------
    subgroups :
        Dictionary mapping from the subgroup name to the subgroup type.
    default :
        The default subgroup to use, by default MISSING, in which case a subgroup has to be
        selected. Needs to be a key in the subgroups dictionary (or a hashable dataclass
        instance that is one of its values).
    default_factory :
        The default_factory to use to create the subgroup. Needs to be a value of the
        `subgroups` dictionary.

    Returns
    -------
    A field whose type is the Union of the different possible subgroups.

    Raises
    ------
    ValueError
        If both `default` and `default_factory` are passed, or if either does not refer to an
        entry of the `subgroups` dict.
    NotImplementedError
        If a subgroup value is a lambda expression, or a callable whose produced dataclass
        type cannot be determined without calling it.
    """
    if default_factory is not MISSING and default is not MISSING:
        raise ValueError("Can't pass both default and default_factory!")
    from collections.abc import Hashable

    # --- Validate `default` / `default_factory` against the subgroups dict. ---
    if is_dataclass_instance(default):
        if not isinstance(default, Hashable):
            raise ValueError(
                "'default' can either be a key of the subgroups dict or a hashable (frozen) "
                "dataclass."
            )
        if default not in subgroups.values():
            # TODO: (@lebrice): Do we really need to enforce this? What is the reasoning behind this
            # restriction again?
            raise ValueError(f"Default value {default} needs to be a value in the subgroups dict.")
    elif default is not MISSING and default not in subgroups.keys():
        raise ValueError("default must be a key in the subgroups dict!")

    if default_factory is not MISSING and default_factory not in list(subgroups.values()):
        # NOTE: This is because we need to have a "default key" to associate with the
        # default_factory (and we set that as the default value for the argument of this field).
        raise ValueError("`default_factory` must be a value in the subgroups dict.")
    # IDEA: We could add a `default` key for this `default_factory` value into the `subgroups`
    # dict? However if it's a lambda expression, then we wouldn't then be able to inspect the
    # return type of that default factory (see above). Therefore there doesn't seem to be any
    # good way to allow lambda expressions as default factories yet. Perhaps I'm
    # overcomplicating things and it's actually very simple to do. I'll have to think about it.

    metadata = kwargs.pop("metadata", {})
    metadata["subgroups"] = subgroups
    metadata["subgroup_default"] = default

    # --- Resolve the concrete dataclass type produced by each subgroup value. ---
    subgroup_dataclass_types: dict[Key, type[DataclassT]] = {}
    choices = subgroups.keys()

    # NOTE: Perhaps we could raise a warning if the default_factory is a Lambda, since we have to
    # instantiate that value in order to inspect the attributes and its values..

    # NOTE: This needs to be the right frame where the subgroups are set.
    _current_frame = inspect.currentframe()
    caller_frame = _current_frame.f_back if _current_frame else None
    for subgroup_key, subgroup_value in subgroups.items():
        if is_lambda(subgroup_value):
            raise NotImplementedError(
                f"Lambda expressions like {subgroup_value!r} can't currently be used as subgroup "
                "values, since we're unable to inspect which dataclass they return without "
                "invoking them.\n"
                "If you want to choose between different versions of a dataclass where arguments "
                "change between subgroups, consider using a `functools.partial` instead. "
            )

        if is_dataclass_instance(subgroup_value):
            dataclass_type = type(subgroup_value)
        elif is_dataclass_type(subgroup_value):
            # all good! Just use that dataclass.
            dataclass_type = subgroup_value
        else:
            try:
                dataclass_type = _get_dataclass_type_from_callable(
                    subgroup_value, caller_frame=caller_frame
                )
            except Exception as exc:
                raise NotImplementedError(
                    f"We are unable to figure out the dataclass to use for the selected subgroup "
                    f"{subgroup_key!r}, because the subgroup value is "
                    f"{subgroup_value!r}, and we don't know what type of "
                    f"dataclass it produces without invoking it!\n"
                    "🙏 Please make an issue on GitHub! 🙏\n"
                    f"Exception raised:\n" + str(exc)
                ) from exc

        subgroup_dataclass_types[subgroup_key] = dataclass_type
    # Hoisted out of the loop: the loop previously re-assigned this metadata key to the same
    # dict object on every iteration.
    metadata["subgroup_dataclass_types"] = subgroup_dataclass_types

    # TODO: Show the default value from the default factory in the help text.

    # --- Normalize `default`/`default_factory` into a default key + factory pair. ---
    if default is not MISSING:
        if is_dataclass_instance(default):
            assert default in subgroups.values()
            subgroup_key = [k for k, v in subgroups.items() if v is default][0]
            metadata["subgroup_default"] = subgroup_key
            default = subgroup_key
        else:
            assert default in subgroups.keys()
            default_factory = subgroups[default]
            metadata["subgroup_default"] = default
            default = MISSING

    elif default_factory is not MISSING:
        # default_factory passed, which is in the subgroups dict. Find the matching key.
        matching_keys = [k for k, v in subgroups.items() if v is default_factory]
        if not matching_keys:
            # Use == instead of `is` this time.
            matching_keys = [k for k, v in subgroups.items() if v == default_factory]

        # We wouldn't get here if default_factory wasn't in the subgroups dict values.
        assert matching_keys
        if len(matching_keys) > 1:
            # BUG FIX: this message previously interpolated `default`, which is always MISSING
            # on this branch; show the ambiguous `default_factory` instead.
            raise ValueError(
                f"default_factory {default_factory} is found more than once in the subgroups "
                f"dict?"
            )
        subgroup_default = matching_keys[0]
        metadata["subgroup_default"] = subgroup_default
    else:
        # Store `MISSING` as the subgroup default.
        metadata["subgroup_default"] = MISSING

    from .fields import choice

    return choice(
        choices,
        *args,
        default=default,
        default_factory=default_factory,
        metadata=metadata,
        **kwargs,
    )  # type: ignore
+
+
def _get_dataclass_type_from_callable(
    dataclass_fn: Callable[..., DataclassT], caller_frame: inspect.FrameType | None = None
) -> type[DataclassT]:
    """Inspects and returns the type of dataclass that the given callable will return.

    `dataclass_fn` may be a dataclass type itself, a `functools.partial` (possibly nested)
    around one, or a callable whose return-type annotation is a dataclass type. String
    (forward-reference) return annotations are resolved against the locals/globals of
    `caller_frame`, walking up parent frames until a frame that defines the name is found.

    Raises
    ------
    TypeError
        If the produced dataclass type cannot be determined without calling `dataclass_fn`.
    """
    if is_dataclass_type(dataclass_fn):
        return dataclass_fn

    signature = inspect.signature(dataclass_fn)

    if isinstance(dataclass_fn, functools.partial):
        if is_dataclass_type(dataclass_fn.func):
            return dataclass_fn.func
        # partial to a function that should return a dataclass. Hopefully it has a return type
        # annotation, otherwise we'd have to call the function just to know the return type!
        # NOTE: recurse here, so it also works with `partial(partial(...))` and
        # `partial(some_function)`.
        return _get_dataclass_type_from_callable(
            dataclass_fn=dataclass_fn.func, caller_frame=caller_frame
        )

    if signature.return_annotation is inspect.Signature.empty:
        raise TypeError(
            f"Unable to determine what type of dataclass would be returned by the callable "
            f"{dataclass_fn!r}, because it doesn't have a return type annotation, and we don't "
            f"want to call it just to figure out what it produces."
        )

    if isinstance(signature.return_annotation, str):
        # NOTE(review): this value is unconditionally overwritten below; kept for parity with
        # the original control flow.
        dataclass_fn_type = signature.return_annotation
        if caller_frame is not None:
            # Travel up until we find the right frame where the subgroup is defined.
            while (
                caller_frame.f_back is not None
                # TODO: This will stop if it finds a variable with that name! But what if that
                # isn't actually a dataclass ?
                and signature.return_annotation not in caller_frame.f_locals
                and signature.return_annotation not in caller_frame.f_globals
            ):
                caller_frame = caller_frame.f_back

            caller_locals = caller_frame.f_locals
            caller_globals = caller_frame.f_globals

            try:
                # NOTE: This doesn't seem to be very often different than just calling
                # `get_type_hints`
                type_hints = typing.get_type_hints(
                    dataclass_fn, globalns=caller_globals, localns=caller_locals
                )
            except NameError as exc:
                # BUG FIX: was `assert False, (...)` — a debugging leftover that is stripped
                # under `python -O` (where execution would then crash below with an unrelated
                # NameError). Raise an explicit, informative error instead.
                raise TypeError(
                    f"Unable to resolve the string return annotation "
                    f"{signature.return_annotation!r} of {dataclass_fn!r} in the caller's scope."
                ) from exc
    else:
        type_hints = typing.get_type_hints(dataclass_fn)
    dataclass_fn_type = type_hints["return"]

    # Recursing here would be a bit extra, let's be real. Might be good enough to just assume
    # that the return annotation needs to be a dataclass.
    if not is_dataclass_type(dataclass_fn_type):
        # BUG FIX: was a bare `assert`, which is stripped under `python -O`; validate explicitly.
        raise TypeError(
            f"The return annotation of {dataclass_fn!r} resolved to {dataclass_fn_type!r}, "
            f"which is not a dataclass type."
        )
    return dataclass_fn_type
+
+
def is_lambda(obj: Any) -> bool:
    """Return True if `obj` is a function created by a ``lambda`` expression.

    Python has no dedicated runtime type for lambdas (they are ordinary functions), so this
    checks that `obj` is a function object whose ``__name__`` is the compiler-assigned
    ``"<lambda>"``. (The original docstring's attribution URL was garbled; this is the
    standard "check if a function is a lambda" idiom.)
    """
    _reference = lambda: 0  # noqa: E731  # gives us both the function type and the "<lambda>" name
    return isinstance(obj, type(_reference)) and obj.__name__ == _reference.__name__
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/parsing.py b/parrot/lib/python3.10/site-packages/simple_parsing/parsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab1aa52467adbc4d633161a54c9283de510f366d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/parsing.py
@@ -0,0 +1,1146 @@
+"""Simple, Elegant Argument parsing.
+
+@author: Fabrice Normandin
+"""
+from __future__ import annotations
+
+import argparse
+import dataclasses
+import functools
+import itertools
+import shlex
+import sys
+import typing
+from argparse import SUPPRESS, Action, HelpFormatter, Namespace, _
+from collections import defaultdict
+from logging import getLogger
+from pathlib import Path
+from typing import Any, Callable, Sequence, Type, overload
+
+from simple_parsing.helpers.subgroups import SubgroupKey
+from simple_parsing.wrappers.dataclass_wrapper import DataclassWrapperType
+
+from . import utils
+from .conflicts import ConflictResolution, ConflictResolver
+from .help_formatter import SimpleHelpFormatter
+from .helpers.serialization.serializable import read_file
+from .utils import (
+ Dataclass,
+ DataclassT,
+ dict_union,
+ is_dataclass_instance,
+ is_dataclass_type,
+)
+from .wrappers import DashVariant, DataclassWrapper, FieldWrapper
+from .wrappers.field_wrapper import ArgumentGenerationMode, NestedMode
+
+logger = getLogger(__name__)
+
+
class ParsingError(RuntimeError, SystemExit):
    """Error raised when argument parsing fails.

    Subclasses both `RuntimeError` and `SystemExit`, so an uncaught instance terminates the
    process (like `sys.exit`) while still being catchable as a `RuntimeError` in tests or
    library code.
    """

    pass
+
+
+class ArgumentParser(argparse.ArgumentParser):
+ """Creates an ArgumentParser instance.
+
+ Parameters
+ ----------
+ - conflict_resolution : ConflictResolution, optional
+
+ What kind of prefixing mechanism to use when reusing dataclasses
+ (argument groups).
+ For more info, check the docstring of the `ConflictResolution` Enum.
+
+ - add_option_string_dash_variants : DashVariant, optional
+
+ Whether or not to add option_string variants where the underscores in
+ attribute names are replaced with dashes.
+ For example, when set to DashVariant.UNDERSCORE_AND_DASH,
+ "--no-cache" and "--no_cache" can both be used to point to the same
+ attribute `no_cache` on some dataclass.
+
+ - argument_generation_mode : ArgumentGenerationMode, optional
+
+ How to generate the arguments. In the ArgumentGenerationMode.FLAT mode,
+ the default one, the arguments are flat when possible, ignoring
+ their nested structure and including it only on the presence of a
+ conflict.
+
+ In the ArgumentGenerationMode.NESTED mode, the arguments are always
+ composed reflecting their nested structure.
+
+ In the ArgumentGenerationMode.BOTH mode, both kind of arguments
+ are generated.
+
+ - nested_mode : NestedMode, optional
+
+ How to handle argument generation in for nested arguments
+ in the modes ArgumentGenerationMode.NESTED and ArgumentGenerationMode.BOTH.
+ In the NestedMode.DEFAULT mode, the nested arguments are generated
+ reflecting their full 'destination' path from the returning namespace.
+
+ In the NestedMode.WITHOUT_ROOT, the first level is removed. This is useful when
+ parser.add_arguments is only called once, and where the same prefix would be shared
+ by all arguments. For example, if you have a single dataclass MyArguments and
+ you call parser.add_arguments(MyArguments, "args"), the arguments could look like this:
+ '--args.input.path --args.output.path'.
+ We could prefer to remove the root level in such a case
+ so that the arguments get generated as
+ '--input.path --output.path'.
+
+ - formatter_class : Type[HelpFormatter], optional
+
+ The formatter class to use. By default, uses
+ `simple_parsing.SimpleHelpFormatter`, which is a combination of the
+ `argparse.ArgumentDefaultsHelpFormatter`,
+ `argparse.MetavarTypeHelpFormatter` and
+ `argparse.RawDescriptionHelpFormatter` classes.
+
+ - add_config_path_arg : bool, optional
+ When set to `True`, adds a `--config_path` argument, of type Path, which is used to parse
+ """
+
    def __init__(
        self,
        *args,
        parents: Sequence[ArgumentParser] = (),
        add_help: bool = True,
        conflict_resolution: ConflictResolution = ConflictResolution.AUTO,
        add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
        argument_generation_mode=ArgumentGenerationMode.FLAT,
        nested_mode: NestedMode = NestedMode.DEFAULT,
        formatter_class: type[HelpFormatter] = SimpleHelpFormatter,
        add_config_path_arg: bool | None = None,
        config_path: Path | str | Sequence[Path | str] | None = None,
        add_dest_to_option_strings: bool | None = None,
        **kwargs,
    ):
        """Initialize the parser. See the class docstring for a description of the parameters."""
        kwargs["formatter_class"] = formatter_class
        # Pass parents=[] since we override this mechanism below.
        # NOTE: We end up with the same parents.
        super().__init__(*args, parents=[], add_help=False, **kwargs)
        self.conflict_resolution = conflict_resolution

        # constructor arguments for the dataclass instances.
        # (a Dict[dest, [attribute, value]])
        # TODO: Stop using a defaultdict for the very important `self.constructor_arguments`!
        self.constructor_arguments: dict[str, dict[str, Any]] = defaultdict(dict)

        self._conflict_resolver = ConflictResolver(self.conflict_resolution)
        self._wrappers: list[DataclassWrapper] = []

        # Requesting the dest in the option strings implies generating both flat and nested
        # option strings.
        if add_dest_to_option_strings:
            argument_generation_mode = ArgumentGenerationMode.BOTH

        self._preprocessing_done: bool = False
        self.add_option_string_dash_variants = add_option_string_dash_variants
        self.argument_generation_mode = argument_generation_mode
        self.nested_mode = nested_mode

        # NOTE(review): these are *class* attributes of FieldWrapper — setting them here
        # affects every FieldWrapper, including those used by other parser instances created
        # afterwards. Presumably intentional, but it is shared global state.
        FieldWrapper.add_dash_variants = add_option_string_dash_variants
        FieldWrapper.argument_generation_mode = argument_generation_mode
        FieldWrapper.nested_mode = nested_mode
        self._parents = tuple(parents)

        # We passed add_help=False to the base class above, so re-create the help action
        # ourselves here (same -h/--help action argparse would normally add).
        self.add_help = add_help
        if self.add_help:
            prefix_chars = self.prefix_chars
            default_prefix = "-" if "-" in prefix_chars else prefix_chars[0]
            self._help_action = super().add_argument(
                default_prefix + "h",
                default_prefix * 2 + "help",
                action="help",
                default=SUPPRESS,
                help=_("show this help message and exit"),
            )

        # Normalize a single string path to a Path; a Sequence of paths is kept as-is.
        self.config_path = Path(config_path) if isinstance(config_path, str) else config_path
        if add_config_path_arg is None:
            # By default, add a config path argument if a config path was passed.
            add_config_path_arg = bool(config_path)
        self.add_config_path_arg = add_config_path_arg
+
+ # TODO: Remove, since the base class already has nicer type hints.
+ def add_argument(
+ self,
+ *name_or_flags: str,
+ **kwargs,
+ ) -> Action:
+ return super().add_argument(
+ *name_or_flags,
+ **kwargs,
+ )
+
+ @overload
+ def add_arguments(
+ self,
+ dataclass: type[DataclassT],
+ dest: str,
+ *,
+ prefix: str = "",
+ default: DataclassT | None = None,
+ dataclass_wrapper_class: type[DataclassWrapper] = DataclassWrapper,
+ ) -> DataclassWrapper[DataclassT]:
+ pass
+
+ @overload
+ def add_arguments(
+ self,
+ dataclass: type[Dataclass],
+ dest: str,
+ *,
+ prefix: str = "",
+ dataclass_wrapper_class: type[DataclassWrapperType] = DataclassWrapper,
+ ) -> DataclassWrapperType:
+ pass
+
+ @overload
+ def add_arguments(
+ self,
+ dataclass: DataclassT,
+ dest: str,
+ *,
+ prefix: str = "",
+ default: None = None,
+ dataclass_wrapper_class: type[DataclassWrapper] = DataclassWrapper,
+ ) -> DataclassWrapper[DataclassT]:
+ pass
+
+ def add_arguments(
+ self,
+ dataclass: type[DataclassT] | DataclassT,
+ dest: str,
+ *,
+ prefix: str = "",
+ default: DataclassT | None = None,
+ dataclass_wrapper_class: type[DataclassWrapperType] = DataclassWrapper,
+ ) -> DataclassWrapper[DataclassT] | DataclassWrapperType:
+ """Adds command-line arguments for the fields of `dataclass`.
+
+ Parameters
+ ----------
+ dataclass : Union[Type[Dataclass], Dataclass]
+ The dataclass whose fields are to be parsed from the command-line.
+ If an instance of a dataclass is given, it is used as the default
+ value if none is provided.
+ dest : str
+ The destination attribute of the `argparse.Namespace` where the
+ dataclass instance will be stored after calling `parse_args()`
+ prefix : str, optional
+ An optional prefix to add prepend to the names of the argparse
+ arguments which will be generated for this dataclass.
+ This can be useful when registering multiple distinct instances of
+ the same dataclass, by default ""
+ default : Dataclass, optional
+ An instance of the dataclass type to get default values from, by
+ default None
+ dataclass_wrapper_class : Type[DataclassWrapper], optional
+ The type of `DataclassWrapper` to use for this dataclass. This can be used to customize
+ how the arguments are generated. However, I'd suggest making a GitHub issue if you find
+ yourself using this often.
+
+ Returns
+ -------
+ The generated DataclassWrapper instance. Feel free to inspect / play around with this if
+ you want :)
+ """
+ if is_dataclass_instance(dataclass):
+ if default is not None:
+ raise ValueError("Can't use `default` when `dataclass` is a dataclass instance.")
+ dataclass = typing.cast(DataclassT, dataclass)
+ dataclass_type = type(dataclass)
+ default = dataclass
+ else:
+ if not is_dataclass_type(dataclass):
+ raise ValueError(
+ f"`dataclass` should be a dataclass type or instance. Got {dataclass}."
+ )
+ dataclass = typing.cast(Type[DataclassT], dataclass)
+ dataclass_type = dataclass
+ default = default
+
+ new_wrapper = self._add_arguments(
+ dataclass_type=dataclass_type,
+ name=dest,
+ prefix=prefix,
+ default=default,
+ dataclass_wrapper_class=dataclass_wrapper_class,
+ )
+ self._wrappers.append(new_wrapper)
+ return new_wrapper
+
    def parse_known_args(
        self,
        args: Sequence[str] | None = None,
        namespace: Namespace | None = None,
        attempt_to_reorder: bool = False,
    ):
        """Parse `args`, returning `(parsed namespace, list of unrecognized arg strings)`.

        In addition to the base argparse behaviour, this: applies file defaults from
        `self.config_path` and (optionally) a `--config_path` command-line argument, runs
        the preprocessing step (conflict/subgroup resolution, argument creation) before
        delegating to argparse, and runs postprocessing (dataclass instantiation) on the
        resulting namespace. When `attempt_to_reorder` is True and a subparser left args
        unparsed, the args are 'cycled' and parsing is retried once.
        """
        # NOTE: since the usual ArgumentParser.parse_args() calls
        # parse_known_args, we therefore just need to overload the
        # parse_known_args method to support both.
        if args is None:
            # args default to the system args
            args = sys.argv[1:]
        else:
            # make sure that args are mutable
            args = list(args)

        # default Namespace built from parser defaults
        if namespace is None:
            namespace = Namespace()
        if self.config_path:
            if isinstance(self.config_path, Path):
                config_paths = [self.config_path]
            else:
                config_paths = self.config_path
            for config_file in config_paths:
                self.set_defaults(config_file)

        if self.add_config_path_arg:
            # Use a bare-bones helper parser to extract --config_path first, so the defaults
            # from those files are applied before the real parsing pass below.
            temp_parser = ArgumentParser(
                add_config_path_arg=False,
                add_help=False,
                add_option_string_dash_variants=FieldWrapper.add_dash_variants,
                argument_generation_mode=FieldWrapper.argument_generation_mode,
                nested_mode=FieldWrapper.nested_mode,
            )
            temp_parser.add_argument(
                "--config_path",
                type=Path,
                nargs="*",
                default=self.config_path,
                help="Path to a config file containing default values to use.",
            )
            args_with_config_path, args = temp_parser.parse_known_args(args)
            config_path = args_with_config_path.config_path

            if config_path is not None:
                config_paths = config_path if isinstance(config_path, list) else [config_path]
                for config_file in config_paths:
                    self.set_defaults(config_file)

            # Adding it here just so it shows up in the help message. The default will be set in
            # the help string.
            self.add_argument(
                "--config_path",
                type=Path,
                default=config_path,
                help="Path to a config file containing default values to use.",
            )

        assert isinstance(args, list)
        self._preprocessing(args=args, namespace=namespace)

        logger.debug(f"Parser {id(self)} is parsing args: {args}, namespace: {namespace}")
        parsed_args, unparsed_args = super().parse_known_args(args, namespace)

        if unparsed_args and self._subparsers and attempt_to_reorder:
            logger.warning(
                f"Unparsed arguments when using subparsers. Will "
                f"attempt to automatically re-order the unparsed arguments "
                f"{unparsed_args}."
            )
            index_in_start = args.index(unparsed_args[0])
            # Simply 'cycle' the args to the right ordering.
            new_start_args = args[index_in_start:] + args[:index_in_start]
            parsed_args, unparsed_args = super().parse_known_args(new_start_args)

        # Instantiate the dataclasses from the parsed fields and store them on the namespace.
        parsed_args = self._postprocessing(parsed_args)
        return parsed_args, unparsed_args
+
+ def add_argument_group(
+ self,
+ title: str | None = None,
+ description: str | None = None,
+ prefix_chars=None,
+ argument_default=None,
+ conflict_handler=None,
+ ) -> argparse._ArgumentGroup:
+ return super().add_argument_group(
+ title=title,
+ description=description,
+ prefix_chars=prefix_chars or self.prefix_chars,
+ argument_default=argument_default or self.argument_default,
+ conflict_handler=conflict_handler or self.conflict_handler,
+ )
+
+ def print_help(self, file=None, args: Sequence[str] | None = None):
+ self._preprocessing(args=list(args) if args else [])
+ return super().print_help(file)
+
    def set_defaults(self, config_path: str | Path | None = None, **kwargs: Any) -> None:
        """Set the default argument values, either from a config file, or from the given kwargs.

        Defaults whose key matches the `dest` of a registered dataclass wrapper are applied to
        that wrapper (updating its fields' defaults) and recorded in
        `self.constructor_arguments`; all remaining kwargs fall through to the standard
        argparse `set_defaults` behaviour.
        """
        if config_path:
            defaults = read_file(config_path)
            if self.nested_mode == NestedMode.WITHOUT_ROOT and len(self._wrappers) == 1:
                # The file should have the same format as the command-line args, e.g. contain the
                # fields of the 'root' dataclass directly (e.g. "foo: 123"), rather a dict with
                # "config: foo: 123" where foo is a field of the root dataclass at dest 'config'.
                # Therefore, we add the prefix back here.
                defaults = {self._wrappers[0].dest: defaults}
                # We also assume that the kwargs are passed as foo=123
                kwargs = {self._wrappers[0].dest: kwargs}
            # Also include the values from **kwargs.
            # NOTE: `dict_union(defaults, kwargs)` — values from kwargs take precedence over
            # the file contents on key collisions.
            kwargs = dict_union(defaults, kwargs)

        # The kwargs that are set in the dataclasses, rather than on the namespace.
        kwarg_defaults_set_in_dataclasses = {}
        for wrapper in self._wrappers:
            if wrapper.dest in kwargs:
                default_for_dataclass = kwargs[wrapper.dest]

                # A string/Path value is itself treated as a config file to read the defaults
                # from.
                if isinstance(default_for_dataclass, (str, Path)):
                    default_for_dataclass = read_file(path=default_for_dataclass)
                elif not isinstance(default_for_dataclass, dict) and not dataclasses.is_dataclass(
                    default_for_dataclass
                ):
                    raise ValueError(
                        f"Got a default for field {wrapper.dest} that isn't a dataclass, dict or "
                        f"path: {default_for_dataclass}"
                    )

                # Set the .default attribute on the DataclassWrapper (which also updates the
                # defaults of the fields and any nested dataclass fields).
                wrapper.set_default(default_for_dataclass)

                # It's impossible for multiple wrappers in kwargs to have the same destination.
                assert wrapper.dest not in kwarg_defaults_set_in_dataclasses
                value_for_constructor_arguments = (
                    default_for_dataclass
                    if isinstance(default_for_dataclass, dict)
                    else dataclasses.asdict(default_for_dataclass)
                )
                kwarg_defaults_set_in_dataclasses[wrapper.dest] = value_for_constructor_arguments
                # Remove this from the **kwargs, so they don't get set on the namespace.
                kwargs.pop(wrapper.dest)
        # TODO: Stop using a defaultdict for the very important `self.constructor_arguments`!
        self.constructor_arguments = dict_union(
            self.constructor_arguments,
            kwarg_defaults_set_in_dataclasses,
            dict_factory=lambda: defaultdict(dict),
        )
        # For the rest of the values, use the default argparse behaviour (modifying the
        # self._defaults dictionary).
        super().set_defaults(**kwargs)
+
+ def equivalent_argparse_code(self, args: Sequence[str] | None = None) -> str:
+ """Returns the argparse code equivalent to that of `simple_parsing`.
+
+ TODO: Could be fun, pretty sure this is useless though.
+
+ Returns
+ -------
+ str
+ A string containing the auto-generated argparse code.
+ """
+ self._preprocessing(list(args) if args else [])
+ code = "parser = ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)"
+ for wrapper in self._wrappers:
+ code += "\n"
+ code += wrapper.equivalent_argparse_code()
+ code += "\n"
+ code += "args = parser.parse_args()\n"
+ code += "print(args)\n"
+ return code
+
    def _add_arguments(
        self,
        dataclass_type: type[DataclassT],
        name: str,
        *,
        prefix: str = "",
        dataclass_fn: Callable[..., DataclassT] | None = None,
        default: DataclassT | dict | None = None,
        dataclass_wrapper_class: type[DataclassWrapperType] = DataclassWrapper,
        parent: DataclassWrapper | None = None,
    ) -> DataclassWrapper[DataclassT] | DataclassWrapperType:
        """Create (but do not register) the `DataclassWrapper` for `dataclass_type` at dest
        `name`, applying any defaults previously stored via `set_defaults`.

        Raises `argparse.ArgumentError` if `name` is already used as the destination of a
        wrapper for the same dataclass type.
        """
        assert is_dataclass_type(dataclass_type)
        assert (
            default is None
            or is_dataclass_instance(default)
            or default is argparse.SUPPRESS
            or isinstance(default, dict)
        )
        assert dataclass_fn is None or callable(dataclass_fn)

        for wrapper in self._wrappers:
            if wrapper.dest == name:
                # NOTE(review): this only raises when the *same* dataclass type reuses the
                # dest — presumably a different dataclass at the same dest is handled
                # elsewhere (e.g. by the conflict resolver); confirm.
                if wrapper.dataclass == dataclass_type:
                    raise argparse.ArgumentError(
                        argument=None,
                        message=f"Destination attribute {name} is already used for "
                        f"dataclass of type {dataclass_type}. Make sure all destinations"
                        f" are unique. (new dataclass type: {dataclass_type})",
                    )
        # NOTE(review): `dataclass_type` was asserted to be a dataclass *type* above, so this
        # branch looks unreachable; presumably defensive handling for an instance slipping
        # through — confirm before removing.
        if not isinstance(dataclass_type, type):
            if default is None:
                default = dataclass_type
            dataclass_type = type(dataclass_type)

        dataclass_fn = dataclass_fn or dataclass_type
        # Create this object that holds the dataclass we will create arguments for and the
        # arguments that were passed.
        new_wrapper = dataclass_wrapper_class(
            dataclass=dataclass_type,
            name=name,
            prefix=prefix,
            default=default,
            parent=parent,
            dataclass_fn=dataclass_fn,
        )

        # Apply any default previously registered (via set_defaults) for this dest.
        if new_wrapper.dest in self._defaults:
            new_wrapper.set_default(self._defaults[new_wrapper.dest])
        if self.nested_mode == NestedMode.WITHOUT_ROOT and all(
            field.name in self._defaults for field in new_wrapper.fields
        ):
            # If we did .set_defaults before we knew what dataclass we're using, then we try to
            # still make use of those defaults:
            new_wrapper.set_default(
                {
                    k: v
                    for k, v in self._defaults.items()
                    if k in [f.name for f in dataclasses.fields(new_wrapper.dataclass)]
                }
            )

        return new_wrapper
+
    def _preprocessing(self, args: Sequence[str] = (), namespace: Namespace | None = None) -> None:
        """Resolve potential conflicts, resolve subgroups, and add all the arguments.

        Idempotent: runs only once per parser instance (guarded by
        `self._preprocessing_done`), since it registers the generated arguments on `self`.
        """
        logger.debug("\nPREPROCESSING\n")

        if self._preprocessing_done:
            return

        args = list(args)

        wrapped_dataclasses = self._wrappers.copy()
        # Fix the potential conflicts between dataclass fields with the same names.
        wrapped_dataclasses = self._conflict_resolver.resolve_and_flatten(wrapped_dataclasses)

        # Resolve which subgroup was chosen for each subgroup field (may add child wrappers).
        wrapped_dataclasses, chosen_subgroups = self._resolve_subgroups(
            wrappers=wrapped_dataclasses, args=args, namespace=namespace
        )

        # NOTE: We keep the subgroup fields in their dataclasses so they show up with the other
        # arguments.
        wrapped_dataclasses = _flatten_wrappers(wrapped_dataclasses)

        # Create one argument group per dataclass
        for wrapped_dataclass in wrapped_dataclasses:
            logger.debug(
                f"Parser {id(self)} is Adding arguments for dataclass: {wrapped_dataclass.dataclass} "
                f"at destinations {wrapped_dataclass.destinations}"
            )
            wrapped_dataclass.add_arguments(parser=self)

        self._wrappers = wrapped_dataclasses
        # Save this so we don't re-add all the arguments.
        self._preprocessing_done = True
+
    def _postprocessing(self, parsed_args: Namespace) -> Namespace:
        """Process the namespace by extract the fields and creating the objects.

        Instantiate the dataclasses from the parsed arguments and set them at
        their destination attribute in the namespace.

        Parameters
        ----------
        parsed_args : Namespace
            the result of calling `super().parse_args(...)` or
            `super().parse_known_args(...)`.
            TODO: Try and maybe return a nicer, typed version of parsed_args.


        Returns
        -------
        Namespace
            The original Namespace, with all the arguments corresponding to the
            dataclass fields removed, and with the added dataclass instances.
            Also keeps whatever arguments were added in the traditional fashion,
            i.e. with `parser.add_argument(...)`.
        """
        logger.debug("\nPOST PROCESSING\n")
        logger.debug(f"(raw) parsed args: {parsed_args}")

        self._remove_subgroups_from_namespace(parsed_args)
        # create the constructor arguments for each instance by consuming all
        # the relevant attributes from `parsed_args`
        wrappers = _flatten_wrappers(self._wrappers)

        # Start from the defaults accumulated by `set_defaults`, and make sure every
        # destination has at least an empty kwargs dict.
        constructor_arguments = self.constructor_arguments.copy()
        for wrapper in wrappers:
            for destination in wrapper.destinations:
                constructor_arguments.setdefault(destination, {})

        # Consume the parsed field values into per-destination constructor kwargs, then
        # build the dataclass instances and attach them to the namespace.
        parsed_args, constructor_arguments = self._fill_constructor_arguments_with_fields(
            parsed_args, wrappers=wrappers, initial_constructor_arguments=constructor_arguments
        )
        parsed_args = self._instantiate_dataclasses(
            parsed_args, wrappers=wrappers, constructor_arguments=constructor_arguments
        )
        return parsed_args
+
    def _resolve_subgroups(
        self,
        wrappers: list[DataclassWrapper],
        args: list[str],
        namespace: Namespace | None = None,
    ) -> tuple[list[DataclassWrapper], dict[str, SubgroupKey]]:
        """Iteratively add and resolve all the choice of argument subgroups, if any.

        This modifies the wrappers in-place, by possibly adding children to the wrappers in the
        list.
        Returns a tuple of (the modified wrappers, a dict mapping each resolved subgroup
        destination to the chosen subgroup key).

        Each round does the following:
        1. Resolve any conflicts using the conflict resolver. Two subgroups at the same nesting
           level, with the same name, get a different prefix, for example "--generator.optimizer"
           and "--discriminator.optimizer".
        2. Add all the subgroup choice arguments to a parser.
        3. Add the chosen dataclasses to the list of dataclasses to parse later in the main
           parser. This is done by wrapping the dataclass and adding it to the `wrappers`
           list.
        """

        unresolved_subgroups = _get_subgroup_fields(wrappers)
        # Dictionary of the subgroup choices that were resolved (key: subgroup dest, value: chosen
        # subgroup name).
        resolved_subgroups: dict[str, SubgroupKey] = {}

        if not unresolved_subgroups:
            # No subgroups to parse.
            return wrappers, {}

        # Use a temporary parser, to avoid parsing "vanilla argparse" arguments of `self` multiple
        # times.
        subgroup_choice_parser = argparse.ArgumentParser(
            add_help=False,
            # conflict_resolution=self.conflict_resolution,
            # add_option_string_dash_variants=self.add_option_string_dash_variants,
            # argument_generation_mode=self.argument_generation_mode,
            # nested_mode=self.nested_mode,
            formatter_class=self.formatter_class,
            # add_config_path_arg=self.add_config_path_arg,
            # config_path=self.config_path,
            # NOTE: We disallow abbreviations for subgroups for now. This prevents potential issues
            # for example if you have --a_or_b and A has a field --a then it will error out if you
            # pass --a=1 because 1 isn't a choice for the a_or_b argument (because --a matches it
            # with the abbreviation feature turned on).
            allow_abbrev=False,
        )

        # Each iteration of this loop resolves one level of (possibly nested) subgroups;
        # `itertools.count` is only used so the nesting level shows up in the debug logs.
        for current_nesting_level in itertools.count():
            # Do rounds of parsing with just the subgroup arguments, until all the subgroups
            # are resolved to a dataclass type.
            logger.debug(
                f"Starting subgroup parsing round {current_nesting_level}: {list(unresolved_subgroups.keys())}"
            )
            # Add all the unresolved subgroups arguments.
            for dest, subgroup_field in unresolved_subgroups.items():
                flags = subgroup_field.option_strings
                argument_options = subgroup_field.arg_options

                # A subgroup without a default must be passed on the command-line.
                if subgroup_field.subgroup_default is dataclasses.MISSING:
                    assert argument_options["required"]
                else:
                    assert argument_options["default"] is subgroup_field.subgroup_default
                    assert not is_dataclass_instance(argument_options["default"])

                # TODO: Do we really need to care about this "SUPPRESS" stuff here?
                if argparse.SUPPRESS in subgroup_field.parent.defaults:
                    assert argument_options["default"] is argparse.SUPPRESS
                    argument_options["default"] = argparse.SUPPRESS

                logger.debug(
                    f"Adding subgroup argument: add_argument(*{flags} **{str(argument_options)})"
                )
                subgroup_choice_parser.add_argument(*flags, **argument_options)

            # Parse `args` repeatedly until all the subgroup choices are resolved.
            parsed_args, unused_args = subgroup_choice_parser.parse_known_args(
                args=args, namespace=namespace
            )
            logger.debug(
                f"Nesting level {current_nesting_level}: args: {args}, "
                f"parsed_args: {parsed_args}, unused_args: {unused_args}"
            )

            for dest, subgroup_field in list(unresolved_subgroups.items()):
                # NOTE: There should always be a parsed value for the subgroup argument on the
                # namespace. This is because we added all the subgroup arguments before we get
                # here.
                subgroup_dict = subgroup_field.subgroup_choices
                chosen_subgroup_key: SubgroupKey = getattr(parsed_args, dest)
                assert chosen_subgroup_key in subgroup_dict

                # Changing the default value of the (now parsed) field for the subgroup choice,
                # just so it shows (default: {chosen_subgroup_key}) on the command-line.
                # Note: This really isn't required, we could have it just be the default value, but
                # it seems a bit more consistent with us then showing the --help string for the
                # chosen dataclass type (as we're doing below).
                # subgroup_field.set_default(chosen_subgroup_key)
                logger.debug(
                    f"resolved the subgroup at {dest!r}: will use the subgroup at key "
                    f"{chosen_subgroup_key!r}"
                )

                default_or_dataclass_fn = subgroup_dict[chosen_subgroup_key]
                if is_dataclass_instance(default_or_dataclass_fn):
                    # The chosen value in the subgroup dict is a frozen dataclass instance.
                    default = default_or_dataclass_fn
                    dataclass_fn = functools.partial(dataclasses.replace, default)
                    dataclass_type = type(default)
                else:
                    default = None
                    dataclass_fn = default_or_dataclass_fn
                    dataclass_type = subgroup_field.field.metadata["subgroup_dataclass_types"][
                        chosen_subgroup_key
                    ]

                assert default is None or is_dataclass_instance(default)
                assert callable(dataclass_fn)
                assert is_dataclass_type(dataclass_type)

                name = dest.split(".")[-1]
                parent_dataclass_wrapper = subgroup_field.parent
                # NOTE: Using self._add_arguments so it returns the modified wrapper and doesn't
                # affect the `self._wrappers` list.
                new_wrapper = self._add_arguments(
                    dataclass_type=dataclass_type,
                    name=name,
                    dataclass_fn=dataclass_fn,
                    default=default,
                    parent=parent_dataclass_wrapper,
                )
                # Make the new wrapper a child of the class which contains the field.
                # - it isn't already a child
                # - its parent is the parent dataclass wrapper
                # - the parent is already in the tree of DataclassWrappers.
                assert new_wrapper not in parent_dataclass_wrapper._children
                parent_dataclass_wrapper._children.append(new_wrapper)
                assert new_wrapper.parent is parent_dataclass_wrapper
                assert parent_dataclass_wrapper in _flatten_wrappers(wrappers)
                assert new_wrapper in _flatten_wrappers(wrappers)

                # Mark this subgroup as resolved.
                unresolved_subgroups.pop(dest)
                resolved_subgroups[dest] = chosen_subgroup_key
                # TODO: Should we remove the FieldWrapper for the subgroups now that it's been
                # resolved?

            # Find the new subgroup fields that weren't resolved before.
            # TODO: What if a name conflict occurs between a subgroup field and one of the new
            # fields below it? For example, something like --model model_a (and inside the `ModelA`
            # dataclass, there's a field called `model`. Then, this will cause a conflict!)
            # For now, I'm just going to wait and see how this plays out. I'm hoping that the
            # auto conflict resolution shouldn't run into any issues in this case.

            wrappers = self._conflict_resolver.resolve(wrappers)

            all_subgroup_fields = _get_subgroup_fields(wrappers)
            unresolved_subgroups = {
                k: v for k, v in all_subgroup_fields.items() if k not in resolved_subgroups
            }
            logger.debug(f"All subgroups: {list(all_subgroup_fields.keys())}")
            logger.debug(f"Resolved subgroups: {resolved_subgroups}")
            logger.debug(f"Unresolved subgroups: {list(unresolved_subgroups.keys())}")

            if not unresolved_subgroups:
                logger.debug("Done parsing all the subgroups!")
                break
            else:
                logger.debug(
                    f"Done parsing a round of subparsers at nesting level "
                    f"{current_nesting_level}. Moving to the next round which has "
                    f"{len(unresolved_subgroups)} unresolved subgroup choices."
                )
        return wrappers, resolved_subgroups
+
+ def _remove_subgroups_from_namespace(self, parsed_args: argparse.Namespace) -> None:
+ """Removes the subgroup choice results from the namespace.
+
+ Modifies the namespace in-place.
+ """
+ # find all subgroup fields
+ subgroup_fields = _get_subgroup_fields(self._wrappers)
+
+ if not subgroup_fields:
+ return
+ # IDEA: Store the choices in a `subgroups` dict on the namespace.
+ if not hasattr(parsed_args, "subgroups"):
+ parsed_args.subgroups = {}
+
+ for dest in subgroup_fields:
+ chosen_value = getattr(parsed_args, dest)
+ parsed_args.subgroups[dest] = chosen_value
+ delattr(parsed_args, dest)
+
+ def _instantiate_dataclasses(
+ self,
+ parsed_args: argparse.Namespace,
+ wrappers: list[DataclassWrapper],
+ constructor_arguments: dict[str, dict[str, Any]],
+ ) -> argparse.Namespace:
+ """Create the instances set them at their destination in the namespace.
+
+ We now have all the constructor arguments for each instance.
+ We can now sort out the dependencies, create the instances, and set them
+ as attributes of the Namespace.
+
+ Since the dataclasses might have nested children, and we need to pass
+ all the constructor arguments when calling the dataclass constructors,
+ we create the instances in a "bottom-up" fashion, creating the deepest
+ objects first, and then setting their value in the
+ `constructor_arguments` dict.
+
+ Parameters
+ ----------
+ parsed_args : argparse.Namespace
+ The 'raw' Namespace that is produced by `parse_args`.
+
+ wrappers : list[DataclassWrapper]
+ The (assumed flattened) list of dataclass wrappers that were created with
+ `add_arguments`.
+
+ constructor_arguments : dict[str, dict[str, Any]]
+ The partially populated dict of constructor arguments for each dataclass. This will be
+ consumed in order to create the dataclass instances for each DataclassWrapper.
+
+ Returns
+ -------
+ argparse.Namespace
+ The transformed namespace with the instances set at their
+ corresponding destinations.
+ """
+ constructor_arguments = constructor_arguments.copy()
+ # FIXME: There's a bug here happening with the `ALWAYS_MERGE` case: The namespace has the
+ # values, but the constructor arguments dict doesn't.
+
+ if self.conflict_resolution != ConflictResolution.ALWAYS_MERGE:
+ assert len(wrappers) == len(constructor_arguments), "should have one dict per wrapper"
+
+ # sort the wrappers so as to construct the leaf nodes first.
+ sorted_dc_wrappers: list[DataclassWrapper] = sorted(
+ wrappers, key=lambda w: w.nesting_level, reverse=True
+ )
+ assert len(sorted_dc_wrappers) == len(set(sorted_dc_wrappers))
+
+ for dc_wrapper in sorted_dc_wrappers:
+ logger.info(f"Instantiating the wrapper with destinations {dc_wrapper.destinations}")
+
+ for destination in dc_wrapper.destinations:
+ logger.info(f"Instantiating the dataclass at destination {destination}")
+ # Instantiate the dataclass by passing the constructor arguments
+ # to the constructor.
+ constructor = dc_wrapper.dataclass_fn
+ constructor_args = constructor_arguments.pop(destination)
+ # If the dataclass wrapper is marked as 'optional' and all the
+ # constructor args are None, then the instance is None.
+ value_for_dataclass_field: Any | dict[str, Any] | None
+ if argparse.SUPPRESS in dc_wrapper.defaults:
+ if constructor_args == {}:
+ value_for_dataclass_field = None
+ else:
+ # Don't create the dataclass instance. Instead, keep the value as a dict.
+ value_for_dataclass_field = constructor_args
+ else:
+ value_for_dataclass_field = _create_dataclass_instance(
+ dc_wrapper, constructor, constructor_args
+ )
+
+ if argparse.SUPPRESS in dc_wrapper.defaults and value_for_dataclass_field is None:
+ logger.debug(
+ f"Suppressing entire destination {destination} because none of its"
+ f"subattributes were specified on the command line."
+ )
+
+ elif dc_wrapper.parent is not None:
+ parent_key, attr = utils.split_dest(destination)
+ logger.debug(
+ f"Setting a value of {value_for_dataclass_field} at attribute {attr} in "
+ f"parent at key {parent_key}."
+ )
+ constructor_arguments[parent_key][attr] = value_for_dataclass_field
+
+ elif not hasattr(parsed_args, destination):
+ logger.debug(
+ f"setting attribute '{destination}' on the Namespace "
+ f"to a value of {value_for_dataclass_field}"
+ )
+ setattr(parsed_args, destination, value_for_dataclass_field)
+
+ else:
+ # There is a collision: namespace already has an entry at this destination.
+ existing = getattr(parsed_args, destination)
+ if dc_wrapper.dest in self._defaults:
+ logger.debug(
+ f"Overwriting defaults in the namespace at destination '{destination}' "
+ f"on the Namespace ({existing}) to a value of {value_for_dataclass_field}"
+ )
+ setattr(parsed_args, destination, value_for_dataclass_field)
+ else:
+ raise RuntimeError(
+ f"Namespace should not already have a '{destination}' "
+ f"attribute!\n"
+ f"The value would be overwritten:\n"
+ f"- existing value: {existing}\n"
+ f"- new value: {value_for_dataclass_field}"
+ )
+
+ # We should be consuming all the constructor arguments.
+ assert not constructor_arguments
+
+ return parsed_args
+
    def _fill_constructor_arguments_with_fields(
        self,
        parsed_args: argparse.Namespace,
        wrappers: list[DataclassWrapper],
        initial_constructor_arguments: dict[str, dict[str, Any]],
    ) -> tuple[argparse.Namespace, dict[str, dict[str, Any]]]:
        """Create the constructor arguments for each instance.

        Creates the arguments by consuming all the attributes from
        `parsed_args`.
        Here we imitate a custom action, by having the FieldWrappers be
        callables that set their value in the `constructor_args` attribute.

        Parameters
        ----------
        parsed_args : argparse.Namespace
            the argparse.Namespace returned from super().parse_args().

        wrappers : list[DataclassWrapper]
            The (assumed flattened) list of dataclass wrappers that were created with
            `add_arguments`.

        initial_constructor_arguments : dict[str, dict[str, Any]]
            The dict of constructor arguments to fill for each dataclass destination. A
            shallow copy of this dict is filled by each FieldWrapper (the nested
            per-destination dicts are shared with the input and updated in place).

        Returns
        -------
        tuple[argparse.Namespace, dict[str, dict[str, Any]]]
            The namespace without the consumed arguments, and the filled dict of
            constructor arguments for each dataclass destination.
        """

        if self.conflict_resolution != ConflictResolution.ALWAYS_MERGE:
            assert len(wrappers) == len(
                initial_constructor_arguments
            ), "should have one dict per wrapper"

        # The output
        constructor_arguments = initial_constructor_arguments.copy()

        parsed_arg_values = vars(parsed_args)
        deleted_values: dict[str, Any] = {}

        for wrapper in wrappers:
            for field in wrapper.fields:
                # Skip suppressed fields that weren't passed on the command-line.
                if argparse.SUPPRESS in wrapper.defaults and field.dest not in parsed_args:
                    continue

                if field.is_subgroup:
                    # Skip the subgroup fields, since we added a child DataclassWrapper for them.
                    logger.debug(f"Not calling the subgroup FieldWrapper for dest {field.dest}")
                    continue

                if not field.field.init:
                    # The field isn't an argument of the dataclass constructor.
                    continue

                # NOTE: If the field is reused (when using the ConflictResolution.ALWAYS_MERGE
                # strategy), then we store the multiple values in the `dest` of the first field.
                # They are then distributed in `constructor_arguments` using the
                # `field.destinations`, which gives the destination for each value.
                values = parsed_arg_values.pop(field.dest, field.default)
                deleted_values[field.dest] = values

                # call the "action" for the given attribute. This sets the right
                # value in the `constructor_arguments` dictionary.
                field(
                    parser=self,
                    namespace=parsed_args,
                    values=values,
                    constructor_arguments=constructor_arguments,
                )

        # "Clean up" the Namespace by returning a new Namespace without the
        # consumed attributes.
        leftover_args = argparse.Namespace(**parsed_arg_values)
        if deleted_values:
            logger.debug(f"deleted values: {deleted_values}")
            logger.debug(f"leftover args: {leftover_args}")

        return leftover_args, constructor_arguments
+
+ @property
+ def confilct_resolver_max_attempts(self) -> int:
+ return self._conflict_resolver.max_attempts
+
+ @confilct_resolver_max_attempts.setter
+ def confilct_resolver_max_attempts(self, value: int):
+ self._conflict_resolver.max_attempts = value
+
+
+# TODO: Change the order of arguments to put `args` as the second argument.
def parse(
    config_class: type[DataclassT],
    config_path: Path | str | None = None,
    args: str | Sequence[str] | None = None,
    default: DataclassT | None = None,
    dest: str = "config",
    *,
    prefix: str = "",
    nested_mode: NestedMode = NestedMode.WITHOUT_ROOT,
    conflict_resolution: ConflictResolution = ConflictResolution.AUTO,
    add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
    argument_generation_mode=ArgumentGenerationMode.FLAT,
    formatter_class: type[HelpFormatter] = SimpleHelpFormatter,
    add_config_path_arg: bool | None = None,
    **kwargs,
) -> DataclassT:
    """Parse the given dataclass from the command-line and return the instance.

    The keyword arguments mirror those of the `ArgumentParser` constructor (they are the
    same here, except for `nested_mode`, which has a different default value).

    When `config_path` is given, the values loaded from that file are used as defaults.
    """
    # Accept a single command-line string for convenience.
    if isinstance(args, str):
        args = shlex.split(args)

    parser = ArgumentParser(
        nested_mode=nested_mode,
        add_help=True,
        # add_config_path_arg=None,
        config_path=config_path,
        conflict_resolution=conflict_resolution,
        add_option_string_dash_variants=add_option_string_dash_variants,
        argument_generation_mode=argument_generation_mode,
        formatter_class=formatter_class,
        add_config_path_arg=add_config_path_arg,
        **kwargs,
    )
    parser.add_arguments(config_class, prefix=prefix, dest=dest, default=default)

    parsed_args = parser.parse_args(args)
    config: DataclassT = getattr(parsed_args, dest)
    return config
+
+
def parse_known_args(
    config_class: type[Dataclass],
    config_path: Path | str | None = None,
    args: str | Sequence[str] | None = None,
    default: Dataclass | None = None,
    dest: str = "config",
    attempt_to_reorder: bool = False,
    *,
    nested_mode: NestedMode = NestedMode.WITHOUT_ROOT,
    conflict_resolution: ConflictResolution = ConflictResolution.AUTO,
    add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
    argument_generation_mode=ArgumentGenerationMode.FLAT,
    formatter_class: type[HelpFormatter] = SimpleHelpFormatter,
    add_config_path_arg: bool | None = None,
) -> tuple[Dataclass, list[str]]:
    """Parse the given dataclass from the command-line, returning the leftover arguments.

    The keyword arguments mirror those of the `ArgumentParser` constructor (they are the
    same here, except for `nested_mode`, which has a different default value).

    When `config_path` is given, the values loaded from that file are used as defaults.
    """
    # Accept a single command-line string for convenience.
    argv = shlex.split(args) if isinstance(args, str) else args

    parser = ArgumentParser(
        nested_mode=nested_mode,
        add_help=True,
        config_path=config_path,
        conflict_resolution=conflict_resolution,
        add_option_string_dash_variants=add_option_string_dash_variants,
        argument_generation_mode=argument_generation_mode,
        formatter_class=formatter_class,
        add_config_path_arg=add_config_path_arg,
    )
    parser.add_arguments(config_class, dest=dest, default=default)

    parsed_args, unknown_args = parser.parse_known_args(
        argv, attempt_to_reorder=attempt_to_reorder
    )
    config: Dataclass = getattr(parsed_args, dest)
    return config, unknown_args
+
+
def _get_subgroup_fields(wrappers: list[DataclassWrapper]) -> dict[str, FieldWrapper]:
    """Collect every subgroup FieldWrapper in the wrapper tree(s), keyed by destination."""
    subgroup_fields = {}
    for wrapper in _flatten_wrappers(wrappers):
        for field in wrapper.fields:
            if not field.is_subgroup:
                continue
            # Each subgroup field should only be recorded once.
            assert field not in subgroup_fields.values()
            subgroup_fields[field.dest] = field
    return subgroup_fields
+
+
+def _remove_duplicates(wrappers: list[DataclassWrapper]) -> list[DataclassWrapper]:
+ return list(set(wrappers))
+
+
+def _assert_no_duplicates(wrappers: list[DataclassWrapper]) -> None:
+ if len(wrappers) != len(set(wrappers)):
+ raise RuntimeError(
+ "Duplicate wrappers found! This is a potentially nasty bug on our "
+ "part. Please make an issue at https://www.github.com/lebrice/SimpleParsing/issues "
+ )
+
+
def _flatten_wrappers(wrappers: list[DataclassWrapper]) -> list[DataclassWrapper]:
    """Takes a list of nodes, returns a flattened list of all nodes in the tree."""
    _assert_no_duplicates(wrappers)
    flattened: list[DataclassWrapper] = []
    # Walk each root followed by all of its descendants.
    for root in _unflatten_wrappers(wrappers):
        flattened.append(root)
        flattened.extend(root.descendants)
    return flattened
+
+
def _unflatten_wrappers(wrappers: list[DataclassWrapper]) -> list[DataclassWrapper]:
    """Given a list of nodes in one or more trees, returns only the root nodes.

    In our context, the roots are the dataclass arg groups that were added directly with
    `parser.add_arguments`.
    """
    _assert_no_duplicates(wrappers)
    roots = []
    for wrapper in wrappers:
        if wrapper.parent is None:
            roots.append(wrapper)
    return roots
+
+
def _create_dataclass_instance(
    wrapper: DataclassWrapper[DataclassT],
    constructor: Callable[..., DataclassT],
    constructor_args: dict[str, Any],
) -> DataclassT | None:
    """Build the dataclass instance, or return None for an untouched Optional field.

    When the wrapped field is Optional with a default of None and every constructor
    argument is still at its field default, no instance is created.
    TODO: (BUG!) This doesn't distinguish the case where the defaults are passed via the
    command-line from the case where no arguments are passed at all!
    """
    if wrapper.optional and wrapper.default is None:
        any_non_default = False
        for field_wrapper in wrapper.fields:
            arg_value = constructor_args[field_wrapper.name]
            default_value = field_wrapper.default
            logger.debug(
                f"field {field_wrapper.name}, arg value: {arg_value}, "
                f"default value: {default_value}"
            )
            if arg_value != default_value:
                # Value is not the default value, so an argument must have been passed.
                any_non_default = True
                break
        if not any_non_default:
            logger.debug(f"All fields for {wrapper.dest} were either at their default, or None.")
            return None
    logger.debug(f"Calling constructor: {constructor}(**{constructor_args})")
    return constructor(**constructor_args)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/py.typed b/parrot/lib/python3.10/site-packages/simple_parsing/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/replace.py b/parrot/lib/python3.10/site-packages/simple_parsing/replace.py
new file mode 100644
index 0000000000000000000000000000000000000000..db350fba5f590316d691b86c2b1d343f148fc87a
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/replace.py
@@ -0,0 +1,231 @@
+from __future__ import annotations
+
+import copy
+import dataclasses
+import logging
+from typing import Any, Mapping, overload
+
+from simple_parsing.annotation_utils.get_field_annotations import (
+ get_field_type_from_annotations,
+)
+from simple_parsing.helpers.subgroups import Key
+from simple_parsing.utils import (
+ DataclassT,
+ PossiblyNestedDict,
+ V,
+ contains_dataclass_type_arg,
+ is_dataclass_instance,
+ is_dataclass_type,
+ is_optional,
+ unflatten_split,
+)
+
+logger = logging.getLogger(__name__)
+
+
@overload
def replace(obj: DataclassT, changes_dict: dict[str, Any]) -> DataclassT:
    ...


@overload
def replace(obj: DataclassT, **changes) -> DataclassT:
    ...


def replace(obj: DataclassT, changes_dict: dict[str, Any] | None = None, **changes) -> DataclassT:
    """Replace some values in a dataclass, recursing into nested dataclass fields.

    Compared to `dataclasses.replace`, this has two major differences:
    1. Allows recursively replacing values in nested dataclass fields. When a dictionary is passed
       as the value, and the value of that field on `obj` is a dataclass, then `replace` is called
       recursively.

    2. Allows passing a dictionary of flattened changes, e.g. `{"a.b": 1}` instead of
       `{"a": {"b": 1}}`.

    ## Examples

    >>> import dataclasses
    >>> import simple_parsing
    >>> from typing import Union
    >>> @dataclasses.dataclass
    ... class A:
    ...    a: int = 0
    >>> @dataclasses.dataclass
    ... class B:
    ...     b: str = "b"
    >>> @dataclasses.dataclass
    ... class Config:
    ...     a_or_b: Union[A, B] = dataclasses.field(default_factory=A)

    >>> base_config = Config(a_or_b=A(a=1))


    NOTE: key difference with respect to `dataclasses.replace`:
    >>> dataclasses.replace(base_config, a_or_b={"a": 123})
    Config(a_or_b={'a': 123})
    >>> simple_parsing.replace(base_config, a_or_b={"a": 123})
    Config(a_or_b=A(a=123))

    Replace accepts either a dictionary of changes or keyword arguments:

    >>> simple_parsing.replace(base_config, {"a_or_b": B(b="bob")})
    Config(a_or_b=B(b='bob'))
    >>> simple_parsing.replace(base_config, a_or_b=B(b='bob'))
    Config(a_or_b=B(b='bob'))

    Changes can also be passed in a 'flat' format, which makes it easy to replace nested fields:
    >>> simple_parsing.replace(base_config, {"a_or_b.a": 2})
    Config(a_or_b=A(a=2))
    """
    if changes_dict and changes:
        raise ValueError("Cannot pass both `changes_dict` and `changes`")
    requested_changes = changes_dict or changes
    # Flat keys like {"a.b.c": 123} become nested dicts: {"a": {"b": {"c": 123}}}.
    requested_changes = unflatten_split(requested_changes)

    new_values = {}
    for fld in dataclasses.fields(obj):
        if fld.name not in requested_changes:
            continue
        if not fld.init:
            raise ValueError(f"Cannot replace value of non-init field {fld.name}.")

        requested = requested_changes.pop(fld.name)
        current = getattr(obj, fld.name)
        if is_dataclass_instance(current) and isinstance(requested, dict):
            # The field currently holds a dataclass and the change is a dict: recurse.
            new_values[fld.name] = replace(current, **requested)
        else:
            new_values[fld.name] = requested

    # Leftover keys that don't match a field of this dataclass are still passed along
    # (and `dataclasses.replace` will raise the usual error for unknown names).
    new_values.update(requested_changes)
    return dataclasses.replace(obj, **new_values)
+
+
def replace_subgroups(
    obj: DataclassT, selections: dict[str, Key | DataclassT] | None = None
) -> DataclassT:
    """Replace the chosen dataclass for subgroup, union, and optional-union fields.

    The `selections` dict can be in flat format (e.g. `{"a.b": ...}`) or nested format.
    Its values can be subgroup `Key`s, dataclass types, or dataclass instances.

    Returns a new object; `obj` is not modified.

    Raises
    ------
    ValueError
        If a selected field is non-init, its annotation contains no dataclass, or the
        selection value is invalid for that field.
    """
    keyword = "__key__"

    if not selections:
        return obj
    selections = _unflatten_selection_dict(selections, keyword, recursive=False)

    replace_kwargs = {}
    for field in dataclasses.fields(obj):
        if field.name not in selections:
            continue
        # FIX: only reject non-init fields that the caller actually tries to replace.
        # (Previously this raised for *any* non-init field of `obj`, even untouched
        # ones, which made the function unusable on dataclasses with non-init fields.
        # This also matches the behavior of `replace` above.)
        if not field.init:
            raise ValueError(f"Cannot replace value of non-init field {field.name}.")

        field_value = getattr(obj, field.name)
        field_annotation = get_field_type_from_annotations(obj.__class__, field.name)

        # Replacing a subgroup is only allowed when the type annotation contains a dataclass.
        if not contains_dataclass_type_arg(field_annotation):
            raise ValueError(
                f"The replaced subgroups contains no dataclass in its annotation {field_annotation}"
            )

        selection = selections.pop(field.name)
        if isinstance(selection, dict):
            # Nested format: `keyword` selects this level; the rest selects children.
            value_of_selection = selection.pop(keyword, None)
            child_selections = selection
        else:
            value_of_selection = selection
            child_selections = None

        if is_dataclass_type(value_of_selection):
            field_value = value_of_selection()
        elif is_dataclass_instance(value_of_selection):
            # Deep-copy so the caller's instance is never shared with the result.
            field_value = copy.deepcopy(value_of_selection)
        elif field.metadata.get("subgroups", None):
            assert isinstance(value_of_selection, str)
            subgroup_selection = field.metadata["subgroups"][value_of_selection]
            if is_dataclass_instance(subgroup_selection):
                # when the subgroup selection is a frozen dataclass instance
                field_value = subgroup_selection
            else:
                # when the subgroup selection is a dataclass type
                field_value = subgroup_selection()
        elif is_optional(field_annotation) and value_of_selection is None:
            field_value = None
        elif contains_dataclass_type_arg(field_annotation) and value_of_selection is None:
            # NOTE(review): assumes the field was declared with a `default_factory`;
            # confirm behavior for fields declared with a plain `default`.
            field_value = field.default_factory()
        else:
            raise ValueError(
                f"invalid selection key '{value_of_selection}' for field '{field.name}'"
            )

        if child_selections:
            replace_kwargs[field.name] = replace_subgroups(field_value, child_selections)
        else:
            replace_kwargs[field.name] = field_value

    return dataclasses.replace(obj, **replace_kwargs)
+
+
+def _unflatten_selection_dict(
+ flattened: Mapping[str, V], keyword: str = "__key__", sep: str = ".", recursive: bool = True
+) -> PossiblyNestedDict[str, V]:
+ """This function convert a flattened dict into a nested dict and it inserts the `keyword` as
+ the selection into the nested dict.
+
+ >>> _unflatten_selection_dict({'ab_or_cd': 'cd', 'ab_or_cd.c_or_d': 'd'})
+ {'ab_or_cd': {'__key__': 'cd', 'c_or_d': 'd'}}
+
+ >>> _unflatten_selection_dict({'lv1': 'a', 'lv1.lv2': 'b', 'lv1.lv2.lv3': 'c'})
+ {'lv1': {'__key__': 'a', 'lv2': {'__key__': 'b', 'lv3': 'c'}}}
+
+ >>> _unflatten_selection_dict({'lv1': 'a', 'lv1.lv2': 'b', 'lv1.lv2.lv3': 'c'}, recursive=False)
+ {'lv1': {'__key__': 'a', 'lv2': 'b', 'lv2.lv3': 'c'}}
+
+ >>> _unflatten_selection_dict({'ab_or_cd.c_or_d': 'd'})
+ {'ab_or_cd': {'c_or_d': 'd'}}
+
+ >>> _unflatten_selection_dict({"a": 1, "b": 2})
+ {'a': 1, 'b': 2}
+ """
+ dc = {}
+
+ unflatten_those_top_level_keys = set()
+ for k, v in flattened.items():
+ splited_keys = k.split(sep)
+ if len(splited_keys) >= 2:
+ unflatten_those_top_level_keys.add(splited_keys[0])
+
+ for k, v in flattened.items():
+ keys = k.split(sep)
+ top_level_key = keys[0]
+ rest_keys = keys[1:]
+ if top_level_key in unflatten_those_top_level_keys:
+ sub_dc = dc.get(top_level_key, {})
+ if len(rest_keys) == 0:
+ sub_dc[keyword] = v
+ else:
+ sub_dc[".".join(rest_keys)] = v
+ dc[top_level_key] = sub_dc
+ else:
+ dc[k] = v
+
+ if recursive:
+ for k in unflatten_those_top_level_keys:
+ v = dc.pop(k)
+ unflatten_v = _unflatten_selection_dict(v, recursive=recursive)
+ dc[k] = unflatten_v
+ return dc
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/utils.py b/parrot/lib/python3.10/site-packages/simple_parsing/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b36dcc8c73251995c444acc0ed3d55229830cac7
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/utils.py
@@ -0,0 +1,988 @@
+"""Utility functions used in various parts of the simple_parsing package."""
+from __future__ import annotations
+
+import argparse
+import builtins
+import dataclasses
+import enum
+import hashlib
+import inspect
+import itertools
+import re
+import sys
+import types
+import typing
+from collections import OrderedDict, defaultdict
+from collections import abc as c_abc
+from dataclasses import _MISSING_TYPE, MISSING, Field
+from enum import Enum
+from logging import getLogger
+from typing import (
+ Any,
+ Callable,
+ ClassVar,
+ Container,
+ Dict,
+ ForwardRef,
+ Iterable,
+ List,
+ Mapping,
+ MutableMapping,
+ Set,
+ Tuple,
+ TypeVar,
+ Union,
+ overload,
+)
+
+from typing_extensions import Literal, Protocol, TypeGuard, get_args, get_origin
+
+# There are cases where typing.Literal doesn't match typing_extensions.Literal:
+# https://github.com/python/typing_extensions/pull/148
+try:
+ from typing import Literal as LiteralAlt
+except ImportError:
+ LiteralAlt = Literal # type: ignore
+
+
+# NOTE: Copied from typing_inspect.
def is_typevar(t) -> bool:
    """Check whether `t` is exactly a `typing.TypeVar` instance (no subclasses)."""
    return type(t) is TypeVar
+
+
def get_bound(t):
    """Return the `__bound__` of a TypeVar, or None if it has none.

    Raises TypeError when `t` is not a TypeVar.
    """
    if not is_typevar(t):
        raise TypeError(f"type is not a `TypeVar`: {t}")
    return getattr(t, "__bound__", None)
+
+
def is_forward_ref(t) -> TypeGuard[typing.ForwardRef]:
    """Check whether `t` is a `typing.ForwardRef` (an unevaluated string annotation)."""
    return isinstance(t, typing.ForwardRef)
+
+
def get_forward_arg(fr: ForwardRef) -> str:
    """Return the source string wrapped by a `typing.ForwardRef`."""
    return fr.__forward_arg__
+
+
logger = getLogger(__name__)

# All built-in classes (int, str, list, ...), used to tell builtins apart from
# user-defined types.
builtin_types = [
    getattr(builtins, d) for d in dir(builtins) if isinstance(getattr(builtins, d), type)
]

# Generic type variables shared by the helpers in this module.
K = TypeVar("K")
T = TypeVar("T")
U = TypeVar("U")
V = TypeVar("V")
W = TypeVar("W")
+
class Dataclass(Protocol):
    """Structural protocol matched by any `@dataclasses.dataclass`-decorated class."""

    __dataclass_fields__: ClassVar[dict[str, Field]]
+
+
def is_dataclass_instance(obj: Any) -> TypeGuard[Dataclass]:
    """Check whether `obj` is an *instance* of a dataclass (not the class itself)."""
    return dataclasses.is_dataclass(type(obj)) and dataclasses.is_dataclass(obj)
+
+
def is_dataclass_type(obj: Any) -> TypeGuard[type[Dataclass]]:
    """Check whether `obj` is a dataclass *class* (not an instance)."""
    if not inspect.isclass(obj):
        return False
    return dataclasses.is_dataclass(obj)
+
+
# TypeVar bound to dataclass types, for APIs that return what they were given.
DataclassT = TypeVar("DataclassT", bound=Dataclass)

# "Simple" scalar values that can be parsed directly from the command line.
SimpleValueType = Union[bool, int, float, str]
SimpleIterable = Union[List[SimpleValueType], Dict[Any, SimpleValueType], Set[SimpleValueType]]

# Recursive aliases: values may themselves be nested dicts/mappings.
PossiblyNestedDict = Dict[K, Union[V, "PossiblyNestedDict[K, V]"]]
PossiblyNestedMapping = Mapping[K, Union[V, "PossiblyNestedMapping[K, V]"]]
+
+
def is_subparser_field(field: Field) -> bool:
    """Whether this field should be handled with argparse subparsers: either a
    (non-choice) Union whose members are all dataclasses, or a field carrying
    non-empty "subparsers" metadata."""
    if is_union(field.type) and not is_choice(field):
        return all(dataclasses.is_dataclass(arg) for arg in get_type_arguments(field.type))
    return bool(field.metadata.get("subparsers", {}))
+
+
class InconsistentArgumentError(RuntimeError):
    """Error raised when the number of arguments provided is inconsistent when parsing multiple
    instances from command line.

    NOTE: the redundant pass-through ``__init__`` was removed; construction is
    identical to ``RuntimeError``'s.
    """
+
+
# Precompiled word-boundary patterns (this runs once per field name, so hoist
# the compilation out of the function).
_CAMEL_BOUNDARY_1 = re.compile(r"(.)([A-Z][a-z]+)")
_CAMEL_BOUNDARY_2 = re.compile(r"([a-z0-9])([A-Z])")


def camel_case(name):
    """Convert a CamelCase `name` to snake_case, e.g. "SomeClass" -> "some_class".

    NOTE: despite the function's name, it converts *from* CamelCase.
    """
    s1 = _CAMEL_BOUNDARY_1.sub(r"\1_\2", name)
    return _CAMEL_BOUNDARY_2.sub(r"\1_\2", s1).lower()
+
+
# Strings accepted (case-insensitively, after stripping) as boolean flag values.
TRUE_STRINGS: list[str] = ["yes", "true", "t", "y", "1"]
FALSE_STRINGS: list[str] = ["no", "false", "f", "n", "0"]


def str2bool(raw_value: str | bool) -> bool:
    """Parse a boolean value from a command-line string.

    Booleans pass through unchanged; strings are matched (case-insensitively)
    against TRUE_STRINGS / FALSE_STRINGS, and anything else raises
    `argparse.ArgumentTypeError`. Adapted from
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse.
    """
    if isinstance(raw_value, bool):
        return raw_value
    normalized = raw_value.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    raise argparse.ArgumentTypeError(
        f"Boolean value expected for argument, received '{raw_value}'"
    )
+
+
def get_item_type(container_type: type[Container[T]]) -> T:
    """Returns the `type` of the items in the provided container `type`.

    When no type annotation is found, or no item type is found, returns
    `typing.Any`.
    NOTE: If a type with multiple arguments is passed, only the first type
    argument is returned.

    >>> from typing import List, Tuple
    >>> get_item_type(list)
    typing.Any
    >>> get_item_type(Tuple)
    typing.Any
    >>> get_item_type(List[int])
    <class 'int'>
    >>> get_item_type(List[Tuple[int, int]])
    typing.Tuple[int, int]
    >>> get_item_type(Tuple[int, str])
    <class 'int'>

    Arguments:
        container_type {Type} -- A type, preferably one from the Typing module (List, Tuple, etc).

    Returns:
        Type -- the type of the container's items, if found, else Any.
    """
    # Bare builtins and unsubscripted typing aliases carry no item annotation.
    if container_type in {list, set, tuple, List, Set, Tuple, Dict, Mapping, MutableMapping}:
        return Any
    type_arguments = getattr(container_type, "__args__", None)
    if type_arguments:
        # Only the first argument matters here (e.g. Tuple[int, str] -> int).
        return type_arguments[0]
    return Any
+
+
def get_argparse_type_for_container(
    container_type: type[Container[T]],
) -> type[T] | Callable[[str], T]:
    """Return the argparse `type=` callable to use for the items of `container_type`.

    Booleans map to `str2bool`, un-annotated containers fall back to `str`, and
    enum item types are parsed via `parse_enum`.

    TODO: This overlaps in a weird way with `get_parsing_fn`, which returns the
    'type' to use for a given annotation; this one just uses the first item
    annotation and doesn't deal with 'weird' item types.
    """
    item_type = get_item_type(container_type)
    if item_type is bool:
        return str2bool
    if item_type is Any:
        return str
    if is_enum(item_type):
        # Imported here, not at module level — presumably to avoid an import
        # cycle with the wrappers package (TODO: confirm).
        from simple_parsing.wrappers.field_parsing import parse_enum

        return parse_enum(item_type)
    return item_type
+
+
+def _mro(t: type) -> list[type]:
+ # TODO: This is mostly used in 'is_tuple' and such, and should be replaced with
+ # either the built-in 'get_origin' from typing, or from typing-inspect.
+ if t is None:
+ return []
+ if hasattr(t, "__mro__"):
+ return t.__mro__
+ elif get_origin(t) is type:
+ return []
+ elif hasattr(t, "mro") and callable(t.mro):
+ return t.mro()
+ return []
+
+
def is_literal(t: type) -> bool:
    """Returns True with `t` is a Literal type.

    >>> from typing_extensions import Literal
    >>> from typing import *
    >>> is_literal(list)
    False
    >>> is_literal("foo")
    False
    >>> is_literal(Literal[True, False])
    True
    >>> is_literal(Literal[1,2,3])
    True
    >>> is_literal(Literal["foo", "bar"])
    True
    >>> is_literal(Optional[Literal[1,2]])
    False
    """
    # Checked against both aliases because typing.Literal and
    # typing_extensions.Literal are distinct objects on some Python versions.
    return get_origin(t) in (Literal, LiteralAlt)
+
+
def is_list(t: type) -> bool:
    """Check whether `t` is `list`, a `List[...]` annotation, or a list subclass.

    >>> is_list(List[int])
    True
    >>> is_list(tuple)
    False
    """
    return list in _mro(t)
+
+
def is_tuple(t: type) -> bool:
    """Check whether `t` is `tuple`, a `Tuple[...]` annotation, or a tuple subclass.

    >>> is_tuple(Tuple[int, str])
    True
    >>> is_tuple(List[int])
    False
    """
    return tuple in _mro(t)
+
+
def is_dict(t: type) -> bool:
    """Check whether `t` is a dict / Mapping type, annotation, or subclass.

    >>> is_dict(Dict[int, float])
    True
    >>> is_dict(tuple)
    False
    """
    mro = _mro(t)
    return any(base in mro for base in (dict, Mapping, c_abc.Mapping))
+
+
def is_set(t: type) -> bool:
    """Check whether `t` is `set`, a `Set[...]` annotation, or a set subclass.

    >>> is_set(Set[int])
    True
    >>> is_set(dict)
    False
    """
    return set in _mro(t)
+
+
def is_dataclass_type_or_typevar(t: type) -> bool:
    """Whether `t` is a dataclass type, or a TypeVar whose bound is a dataclass type."""
    if dataclasses.is_dataclass(t):
        return True
    return is_typevar(t) and dataclasses.is_dataclass(get_bound(t))
+
+
def is_enum(t: type) -> bool:
    """Whether `t` is an `enum.Enum` subclass (or an annotation with Enum in its MRO)."""
    return issubclass(t, enum.Enum) if inspect.isclass(t) else Enum in _mro(t)
+
+
def is_bool(t: type) -> bool:
    """Whether `t` is `bool` or a subclass of it."""
    return bool in _mro(t)
+
+
def is_tuple_or_list(t: type) -> bool:
    """Whether `t` is (or annotates) a tuple or a list."""
    return is_tuple(t) or is_list(t)
+
+
def is_union(t: type) -> bool:
    """Whether `t` is a Union annotation (including PEP 604 `X | Y` on 3.10+).

    >>> is_union(Union[int, str])
    True
    >>> is_union(Tuple[int, str])
    False
    """
    # `types.UnionType` (the type of `int | str`) only exists on Python 3.10+;
    # the version check guards the attribute access.
    if sys.version_info[:2] >= (3, 10) and isinstance(t, types.UnionType):
        return True
    return getattr(t, "__origin__", "") == Union
+
+
def is_homogeneous_tuple_type(t: type[tuple]) -> bool:
    """Whether a Tuple annotation has a single item type (includes `Tuple[X, ...]`).

    >>> is_homogeneous_tuple_type(Tuple[int, int])
    True
    >>> is_homogeneous_tuple_type(Tuple[int, str])
    False
    >>> is_homogeneous_tuple_type(Tuple[int, ...])
    True
    """
    if not is_tuple(t):
        return False
    args = get_type_arguments(t)
    # Bare Tuple/tuple: homogeneous by convention.
    if not args:
        return True
    assert isinstance(args, tuple), args
    # `Tuple[X, ...]` (variadic) is homogeneous by definition.
    if len(args) == 2 and args[1] is Ellipsis:
        return True
    # Fixed-length tuple: homogeneous iff all item annotations are identical.
    # NOTE(review): may not handle complex nested item types — unverified.
    return len(set(args)) == 1
+
+
def is_choice(field: Field) -> bool:
    """Whether the field was declared with a non-empty `choices` custom argument."""
    custom_args = field.metadata.get("custom_args", {})
    return bool(custom_args.get("choices", {}))
+
+
def is_optional(t: type) -> bool:
    """Whether the annotation `t` admits `None`: an Optional / Union containing
    `None`, or a `Literal` that includes `None`.

    >>> is_optional(Union[str, None])
    True
    >>> is_optional(str)
    False
    >>> is_optional(Union[str, List])
    False
    """
    if is_union(t):
        return type(None) in get_type_arguments(t)
    if is_literal(t):
        return None in get_type_arguments(t)
    return False
+
+
def is_tuple_or_list_of_dataclasses(t: type) -> bool:
    """Whether `t` is a list/tuple annotation whose item type is a dataclass."""
    if not is_tuple_or_list(t):
        return False
    return is_dataclass_type_or_typevar(get_item_type(t))
+
+
def contains_dataclass_type_arg(t: type) -> bool:
    """Recursively check whether `t` is, contains, or unions over a dataclass type."""
    if is_dataclass_type_or_typevar(t) or is_tuple_or_list_of_dataclasses(t):
        return True
    if is_union(t):
        return any(contains_dataclass_type_arg(arg) for arg in get_type_arguments(t))
    return False
+
+
def get_dataclass_type_arg(t: type) -> type | None:
    """Return the first dataclass type found within `t` (or `t` itself), else None."""
    if not contains_dataclass_type_arg(t):
        return None
    if is_dataclass_type_or_typevar(t):
        return t
    if is_tuple_or_list(t) or is_union(t):
        candidates = (get_dataclass_type_arg(arg) for arg in get_type_arguments(t))
        return next(filter(None, candidates), None)
    return None
+
+
def get_type_arguments(container_type: type) -> tuple[type, ...]:
    """Type arguments of a generic annotation, e.g. (int,) for List[int]; () if none."""
    return get_args(container_type)
+
+
def get_type_name(some_type: type):
    """Readable name for a possibly-generic type, including its type arguments."""
    name = getattr(some_type, "__name__", str(some_type))
    args = get_type_arguments(some_type)
    if args:
        name += f"[{','.join(get_type_name(arg) for arg in args)}]"
    return name
+
+
def get_container_nargs(container_type: type) -> int | str:
    """Gets the value of argparse's 'nargs' appropriate for the given container type.

    Fixed-length tuple annotations map to their item count; lists, bare or
    variadic tuples, and tuples containing nested containers map to "*".
    Raises NotImplementedError for anything else.
    """
    if is_tuple(container_type):
        item_types: tuple[type, ...] = get_type_arguments(container_type)
        # Bare `Tuple`/`tuple`, or variadic `Tuple[X, ...]`: variable length.
        # TODO: Should a `Tuple[int]` annotation be interpreted as "a tuple of
        # an unknown number of ints"?
        if not item_types:
            return "*"
        if len(item_types) == 2 and item_types[1] is Ellipsis:
            return "*"
        # BUG/limitation: nested containers (e.g. Tuple[Tuple[int, str], ...])
        # can't be expressed with a single nargs value; fall back to "*" and
        # let a custom parsing function convert the entries appropriately.
        if any(is_list(item) or is_tuple(item) for item in item_types):
            return "*"
        return len(item_types)
    if is_list(container_type):
        return "*"
    raise NotImplementedError(f"Not sure what 'nargs' should be for type {container_type}")
+
+
def _parse_multiple_containers(
    container_type: type, append_action: bool = False
) -> Callable[[str], list[Any]]:
    """Build a parsing function for fields that may receive several containers.

    When `append_action` is True, successive calls accumulate into one shared
    container (tuple or list, mirroring `container_type`); otherwise each call
    returns only its own parsed values.
    """
    T = get_argparse_type_for_container(container_type)
    factory = tuple if is_tuple(container_type) else list

    # Shared accumulator closed over by `parse_fn`; only used with append_action.
    result = factory()

    def parse_fn(value: str):
        nonlocal result
        logger.debug(f"parsing multiple {container_type} of {T}s, value is: '{value}'")
        values = _parse_container(container_type)(value)
        logger.debug(f"parsing result is '{values}'")

        if append_action:
            # `+=` extends the list in place, or rebinds the nonlocal tuple.
            result += values
            return result
        else:
            return values

    return parse_fn
+
+
def _parse_container(container_type: type[Container]) -> Callable[[str], list[Any]]:
    """Build a function parsing one command-line string into a list/tuple of items.

    First tries `ast.literal_eval` (handles nested lists/tuples written as
    Python literals); on failure, falls back to splitting on "," or whitespace.
    """
    T = get_argparse_type_for_container(container_type)
    factory = tuple if is_tuple(container_type) else list
    import ast

    def _parse(value: str) -> list[Any]:
        logger.debug(f"Parsing a {container_type} of {T}s, value is: '{value}'")
        try:
            values = _parse_literal(value)
        except Exception as e:
            logger.debug(f"Exception while trying to parse '{value}' as a literal: {type(e)}: {e}")
            # if it doesn't work, fall back to the parse_fn.
            values = _fallback_parse(value)

        # we do the default 'argparse' action, which is to add the values to a bigger list of values.
        # result.extend(values)
        logger.debug(f"returning values: {values}")
        return values

    def _parse_literal(value: str) -> list[Any] | Any:
        """try to parse the string to a python expression directly.

        (useful for nested lists or tuples.)
        """
        literal = ast.literal_eval(value)
        logger.debug(f"Parsed literal: {literal}")
        if not isinstance(literal, (list, tuple)):
            # we were passed a single-element container, like "--some_list 1", which should give [1].
            # We therefore return the literal itself, and argparse will append it.
            return T(literal)
        else:
            container = literal
            values = factory(T(v) for v in container)
            return values

    def _fallback_parse(v: str) -> list[Any]:
        # Normalize whitespace and strip surrounding brackets before splitting.
        v = " ".join(v.split())
        if v.startswith("[") and v.endswith("]"):
            v = v[1:-1]

        # Prefer "," as the separator when present, else split on spaces.
        separator = " "
        for sep in [","]:  # TODO: maybe add support for other separators?
            if sep in v:
                separator = sep

        str_values = [v.strip() for v in v.split(separator)]
        T_values = [T(v_str) for v_str in str_values]
        values = factory(v for v in T_values)
        return values

    # Expose the item type's name so argparse error messages stay readable.
    _parse.__name__ = T.__name__
    return _parse
+
+
def setattr_recursive(obj: object, attribute_name: str, value: Any):
    """Set a possibly-nested attribute: "a.b.c" performs `obj.a.b.c = value`.

    Mirrors `getattr_recursive` (same dotted-path convention); uses
    `str.partition` for consistency with it, instead of split + re-join.
    """
    if "." not in attribute_name:
        setattr(obj, attribute_name, value)
    else:
        first, _, rest = attribute_name.partition(".")
        setattr_recursive(getattr(obj, first), rest, value)
+
+
def getattr_recursive(obj: object, attribute_name: str):
    """Get a possibly-nested attribute: "a.b.c" returns `obj.a.b.c`."""
    first, dot, rest = attribute_name.partition(".")
    if not dot:
        return getattr(obj, attribute_name)
    return getattr_recursive(getattr(obj, first), rest)
+
+
def split_dest(destination: str) -> tuple[str, str]:
    """Split a dotted dest into (parent path, attribute): "a.b.c" -> ("a.b", "c")."""
    parent, _, attribute = destination.rpartition(".")
    return parent, attribute
+
+
def get_nesting_level(possibly_nested_list):
    """Depth of list/tuple nesting: scalar -> 0, [] -> 1, [[1]] -> 2, etc."""
    if not isinstance(possibly_nested_list, (list, tuple)):
        return 0
    child_levels = [get_nesting_level(item) for item in possibly_nested_list]
    return 1 + max(child_levels, default=0)
+
+
def default_value(field: dataclasses.Field) -> T | _MISSING_TYPE:
    """Default value of a dataclass field: `field.default`, the result of its
    `default_factory`, or `dataclasses.MISSING` if it has neither."""
    if field.default is not dataclasses.MISSING:
        return field.default
    factory = field.default_factory  # type: ignore
    if factory is not dataclasses.MISSING:
        return factory()
    return dataclasses.MISSING
+
+
def trie(sentences: list[list[str]]) -> dict[str, str | dict]:
    """Build a trie (nested dicts keyed by words) from a list of sentences.

    Sentences beginning with the same word share a node; a branch followed by
    only one sentence collapses into the dotted join of that full sentence.
    """
    by_first_word: dict[str, list[list[str]]] = defaultdict(list)
    for sentence in sentences:
        by_first_word[sentence[0]].append(sentence)

    result: dict[str, str | dict] = {}
    for first_word, group in by_first_word.items():
        if len(group) == 1:
            result[first_word] = ".".join(group[0])
        else:
            result[first_word] = trie([sentence[1:] for sentence in group])
    return result
+
+
def keep_keys(d: dict, keys_to_keep: Iterable[str]) -> tuple[dict, dict]:
    """In place, drop every key of `d` that isn't in `keys_to_keep`.

    Returns (the same dict `d`, a new dict holding the removed entries).
    """
    removed = {}
    for key in list(d.keys()):  # copy, since `d` is mutated while iterating
        if key not in keys_to_keep:
            removed[key] = d.pop(key)
    return d, removed
+
+
def compute_identity(size: int = 16, **sample) -> str:
    """Deterministic identity for a kwargs dict: first `size` hex chars of sha256.

    Keys are processed in sorted order so the result is independent of
    insertion order; nested dicts are hashed recursively.
    """
    digest = hashlib.sha256()
    for key, value in sorted(sample.items()):
        digest.update(key.encode("utf8"))
        if isinstance(value, dict):
            digest.update(compute_identity(size, **value).encode("utf8"))
        else:
            digest.update(str(value).encode("utf8"))
    return digest.hexdigest()[:size]
+
+
def dict_intersection(*dicts: dict[K, V]) -> Iterable[tuple[K, tuple[V, ...]]]:
    """Yield (key, values-from-each-dict) for keys present in *every* given dict."""
    shared_keys = set(dicts[0]).intersection(*dicts[1:])
    for key in shared_keys:
        yield key, tuple(d[key] for d in dicts)
+
+
def field_dict(dataclass: Dataclass) -> dict[str, Field]:
    """Map field name -> `dataclasses.Field` for a dataclass, in declaration order."""
    return OrderedDict((f.name, f) for f in dataclasses.fields(dataclass))
+
+
def zip_dicts(*dicts: dict[K, V]) -> Iterable[tuple[K, tuple[V | None, ...]]]:
    """Yield (key, per-dict values) over the union of keys; missing entries are None."""
    union_of_keys = set(itertools.chain(*dicts))
    for key in union_of_keys:
        yield key, tuple(d.get(key) for d in dicts)
+
+
def dict_union(*dicts: dict[K, V], recurse: bool = True, dict_factory=dict) -> dict[K, V]:
    """Union of several dicts; later dicts win for conflicting keys.

    If `recurse` is True, keys whose values are all dicts are merged recursively.
    NOTE: The returned dictionary has keys sorted alphabetically.

    >>> from collections import OrderedDict
    >>> dict_union(OrderedDict(a=1, b=2), OrderedDict(b=5, c=6), dict_factory=OrderedDict)
    OrderedDict([('a', 1), ('b', 5), ('c', 6)])
    """
    result: dict = dict_factory()
    if not dicts:
        return result
    all_keys: set = set()
    all_keys.update(*dicts)
    for key in sorted(all_keys):
        values = [d[key] for d in dicts if key in d]
        if recurse and all(isinstance(v, dict) for v in values):
            # Every value for this key is itself a dict: merge them recursively.
            result[key] = dict_union(*values, recurse=True, dict_factory=dict_factory)
        else:
            # Otherwise the last non-merged value wins (dict values are still
            # skipped when recursing, matching the accumulation behavior).
            non_merged = [v for v in values if not (recurse and isinstance(v, dict))]
            result[key] = non_merged[-1]
    return result
+
+
def flatten(nested: PossiblyNestedMapping[K, V]) -> dict[tuple[K, ...], V]:
    """Flatten nested mappings into a dict keyed by tuples, one entry per level.

    >>> flatten({"a": {"b": 2, "c": 3}, "c": {"d": 3, "e": 4}})
    {('a', 'b'): 2, ('a', 'c'): 3, ('c', 'd'): 3, ('c', 'e'): 4}
    """
    result: dict[tuple[K, ...], V] = {}
    for key, value in nested.items():
        if not isinstance(value, c_abc.Mapping):
            result[(key,)] = value
            continue
        for sub_keys, sub_value in flatten(value).items():
            full_key = (key, *sub_keys)
            assert full_key not in result  # a collision would silently drop data
            result[full_key] = sub_value
    return result
+
+
def unflatten(flattened: Mapping[tuple[K, ...], V]) -> PossiblyNestedDict[K, V]:
    """Inverse of `flatten`: rebuild a nested dict from tuple-keyed entries.

    >>> unflatten({('a', 'b'): 2, ('a', 'c'): 3})
    {'a': {'b': 2, 'c': 3}}
    """
    nested: PossiblyNestedDict[K, V] = {}
    for key_path, value in flattened.items():
        *parents, leaf = key_path
        node = nested
        for part in parents:
            assert isinstance(node, dict)
            node = node.setdefault(part, {})
        assert isinstance(node, dict)
        node[leaf] = value
    return nested
+
+
def flatten_join(nested: PossiblyNestedMapping[str, V], sep: str = ".") -> dict[str, V]:
    """Flatten nested mappings, joining the key levels with `sep`.

    >>> flatten_join({'a': {'b': 2, 'c': 3}, 'c': {'d': 3, 'e': 4}})
    {'a.b': 2, 'a.c': 3, 'c.d': 3, 'c.e': 4}
    """
    return {sep.join(key_path): value for key_path, value in flatten(nested).items()}
+
+
def unflatten_split(
    flattened: Mapping[str, V], sep: str = ".", recursive: bool = False
) -> PossiblyNestedDict[str, V]:
    """Unflatten a flat dict whose keys use `sep` as a level separator.

    NOTE: expects the input to be flat; keys inside nested values are left alone.

    >>> unflatten_split({'a.b': 2, 'a.c': 3})
    {'a': {'b': 2, 'c': 3}}
    """
    tuple_keyed = {tuple(key.split(sep)): value for key, value in flattened.items()}
    return unflatten(tuple_keyed)
+
+
@overload
def getitem_recursive(d: PossiblyNestedDict[K, V], keys: Iterable[K]) -> V:
    ...


@overload
def getitem_recursive(d: PossiblyNestedDict[K, V], keys: Iterable[K], default: T) -> V | T:
    ...


def getitem_recursive(
    d: PossiblyNestedDict[K, V], keys: Iterable[K], default: T | _MISSING_TYPE = MISSING
) -> V | T:
    """Look up a nested value by its key path; `default` (if given) replaces KeyError."""
    key_path = tuple(keys)
    flat = flatten(d)
    if default is MISSING:
        return flat[key_path]
    return flat.get(key_path, default)
+
+
def all_subclasses(t: type[T]) -> set[type[T]]:
    """All subclasses of `t`, direct and transitive."""
    direct = t.__subclasses__()
    return set(direct).union(*(all_subclasses(sub) for sub in direct))
+
+
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__init__.py b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..43da961caa723261ee3f91ffec382497dbb31fc3
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__init__.py
@@ -0,0 +1,4 @@
+from .dataclass_wrapper import DataclassWrapper
+from .field_wrapper import DashVariant, FieldWrapper
+
+__all__ = ["DataclassWrapper", "FieldWrapper", "DashVariant"]
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c02cff7000595092c1926632672eb35840530a3
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/dataclass_wrapper.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/dataclass_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ef1f12054aee1a6249c3eae0cfdff951ed49dc5
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/dataclass_wrapper.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_metavar.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_metavar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb005d5ec9928042f341cdabf3d886dd04a6fa91
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_metavar.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_parsing.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_parsing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d68d2e7a5222ee334e51c25f63d9a0c70afc603
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_parsing.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_wrapper.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7eb935926c1d78484b89843c2cd4ea91ca0c61b5
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/field_wrapper.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/wrapper.cpython-310.pyc b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df47de49d77c67b5acf7b127ebe38765be254a20
Binary files /dev/null and b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/__pycache__/wrapper.cpython-310.pyc differ
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/dataclass_wrapper.py b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/dataclass_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..2efa98aff634aa555e2b450c0d154bc40eff8f10
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/dataclass_wrapper.py
@@ -0,0 +1,460 @@
+from __future__ import annotations
+
+import argparse
+import dataclasses
+import functools
+import textwrap
+from dataclasses import MISSING
+from logging import getLogger
+from typing import Any, Callable, Generic, TypeVar, cast
+
+from typing_extensions import Literal
+
+from simple_parsing import docstring, utils
+from simple_parsing.docstring import dp_parse, inspect_getdoc
+from simple_parsing.utils import Dataclass, DataclassT, is_dataclass_instance, is_dataclass_type
+from simple_parsing.wrappers.field_wrapper import FieldWrapper
+from simple_parsing.wrappers.wrapper import Wrapper
+
logger = getLogger(__name__)

MAX_DOCSTRING_DESC_LINES_HEIGHT: int = 50
"""Maximum number of lines of the class docstring to include in the autogenerated argument group
description.

If fields don't have docstrings or help text, then this is not used, and the entire docstring is
used as the description of the argument group.
"""

# TypeVar bound to DataclassWrapper, presumably used to annotate helpers that
# return instances of a DataclassWrapper subclass -- no usage visible here.
DataclassWrapperType = TypeVar("DataclassWrapperType", bound="DataclassWrapper")
+
+
class DataclassWrapper(Wrapper, Generic[DataclassT]):
    """Wraps a dataclass type so it can be added to an `argparse` parser as an argument group.

    On construction, walks the fields of `dataclass`: each "leaf" field gets a
    `FieldWrapper` (stored in `self.fields`), and each nested-dataclass field
    gets a child `DataclassWrapper` (stored in `self._children`).
    `add_arguments` then registers one argument group with one argument per
    wrapped field.
    """

    def __init__(
        self,
        dataclass: type[DataclassT],
        name: str,
        default: DataclassT | dict | None = None,
        prefix: str = "",
        parent: DataclassWrapper | None = None,
        _field: dataclasses.Field | None = None,
        field_wrapper_class: type[FieldWrapper] = FieldWrapper,
        dataclass_fn: Callable[..., DataclassT] | None = None,
    ):
        """Wrap `dataclass` (to be stored at attribute path `name`).

        Args:
            dataclass: The dataclass *type* to wrap.
            name: Name of the attribute (on the parent, or at the top level) holding this value.
            default: Default value(s) for the fields, as an instance or a dict.
            prefix: Prefix passed down to the generated `FieldWrapper`s.
            parent: The wrapper of the enclosing dataclass, if this is a nested field.
            _field: The `dataclasses.Field` of the parent that holds this child dataclass.
            field_wrapper_class: Class used to wrap each leaf field.
            dataclass_fn: Callable used to actually construct the value (defaults to the
                dataclass itself; may be a `functools.partial`, e.g. for subgroups).
        """
        super().__init__()
        self.dataclass = dataclass
        self._name = name
        assert is_dataclass_type(dataclass)  # FIXME: Remove
        if dataclass_fn:
            assert callable(dataclass_fn), dataclass_fn
        self.dataclass_fn = dataclass_fn or dataclass
        assert not is_dataclass_instance(self.dataclass_fn)  # FIXME: Remove

        self._default = default
        self.prefix = prefix
        self._parent = parent
        # the field of the parent, which contains this child dataclass.
        self._field = _field
        self.field_wrapper_class = field_wrapper_class

        # Wrappers for the "leaf" fields of this dataclass.
        self.fields: list[FieldWrapper] = []
        self.optional: bool = False

        self._destinations: list[str] = []
        self._required: bool = False
        self._explicit: bool = False
        self._dest: str = ""
        # Wrappers for nested-dataclass fields.
        self._children: list[DataclassWrapper] = []
        # the default value(s).
        # NOTE: This is a list only because of the `ConflictResolution.ALWAYS_MERGE` option.
        # NOTE(review): a falsy default (e.g. an empty dict) is treated as "no default" here
        # because of the truthiness check -- TODO confirm this is intended.
        self._defaults: list[DataclassT] = [default] if default else []

        dataclass_fields: tuple[dataclasses.Field, ...] = _get_dataclass_fields(dataclass)
        # Create an object for each field, which is used to compute (and hold) the arguments that
        # will then be passed to `argument_group.add_argument` later.
        # This could eventually be refactored into a stateless thing. But for now it isn't.
        for field in dataclass_fields:
            if not field.init or field.metadata.get("cmd", True) is False:
                # Don't add arguments for this field.
                continue

            if isinstance(field.type, str):
                # NOTE: Here we'd like to convert the fields type to an actual type, in case the
                # `from __future__ import annotations` feature is used.
                from simple_parsing.annotation_utils.get_field_annotations import (
                    get_field_type_from_annotations,
                )

                field_type = get_field_type_from_annotations(self.dataclass, field.name)
                # Modify the `type` of the Field object, in-place.
                field.type = field_type
            else:
                field_type = field.type

            # Manually overwrite the field default value with the corresponding attribute of the
            # default for the parent.
            field_default = dataclasses.MISSING
            if isinstance(dataclass_fn, functools.partial) and field.name in dataclass_fn.keywords:
                # NOTE: We need to override the default value of the field, because since the
                # dataclass_fn is a partial, and we always set the defaults for all fields in the
                # constructor arguments dict, those would be passed to the partial, and the value
                # for that argument in the partial (e.g. `dataclass_fn = partial(A, a=123)`) would
                # be unused when we call `dataclass_fn(**constructor_args[dataclass_dest])` later.
                field_default = dataclass_fn.keywords[field.name]
                # TODO: This is currently only really necessary in the case where the dataclass_fn
                # is a `functools.partial` (e.g. when using subgroups). But the idea of specifying
                # the default value and passing it here to the wrapper, rather than have the
                # wrappers "fetch" it from their field or their parent, makes sense!
                logger.debug(
                    f"Got a default value of {field_default} for field {field.name} from "
                    f"inspecting the dataclass function! ({dataclass_fn})"
                )
            elif isinstance(default, dict):
                if field.name in default:
                    field_default = default[field.name]
            elif default not in (None, argparse.SUPPRESS):
                field_default = getattr(default, field.name)

            if utils.is_tuple_or_list_of_dataclasses(field_type):
                raise NotImplementedError(
                    f"Field {field.name} is of type {field_type}, which isn't "
                    f"supported yet. (container of a dataclass type)"
                )

            if utils.is_subparser_field(field) or utils.is_choice(field):
                field_wrapper = self.field_wrapper_class(
                    field,
                    parent=self,
                    prefix=prefix,
                )
                if field_default is not dataclasses.MISSING:
                    field_wrapper.set_default(field_default)

                self.fields.append(field_wrapper)

            elif dataclasses.is_dataclass(field_type) and field.default is not None:
                # Non-optional dataclass field.
                # handle a nested dataclass attribute
                # NOTE(review): this rebinds the local `dataclass`/`name` variables
                # (the constructor arguments of the same names).
                dataclass, name = field_type, field.name
                # todo: Figure out if this is still necessary, or if `field_default` can be handled
                # the same way as above.
                if field_default is dataclasses.MISSING:
                    field_default = None
                child_wrapper = DataclassWrapper(
                    dataclass,
                    name,
                    parent=self,
                    _field=field,
                    default=field_default,
                )
                self._children.append(child_wrapper)

            elif utils.contains_dataclass_type_arg(field_type):
                # Extract the dataclass type from the annotation of the field.
                field_dataclass = utils.get_dataclass_type_arg(field_type)
                # todo: Figure out if this is still necessary, or if `field_default` can be handled
                # the same way as above.
                if field_default is dataclasses.MISSING:
                    field_default = None
                child_wrapper = DataclassWrapper(
                    field_dataclass,
                    name=field.name,
                    parent=self,
                    _field=field,
                    default=field_default,
                )
                # e.g. `Optional[SomeDataclass]`: the nested group is not required.
                child_wrapper.required = False
                child_wrapper.optional = True
                self._children.append(child_wrapper)

            else:
                # a "normal" attribute
                field_wrapper = self.field_wrapper_class(field, parent=self, prefix=self.prefix)
                logger.debug(
                    f"wrapped field at {field_wrapper.dest} has a default value of {field_wrapper.default}"
                )
                if field_default is not dataclasses.MISSING:
                    field_wrapper.set_default(field_default)

                self.fields.append(field_wrapper)

        logger.debug(f"The dataclass at attribute {self.dest} has default values: {self.defaults}")

    def add_arguments(self, parser: argparse.ArgumentParser):
        """Add an argument group (with one argument per wrapped field) to `parser`."""
        from ..parsing import ArgumentParser

        parser = cast(ArgumentParser, parser)

        group = parser.add_argument_group(title=self.title, description=self.description)

        for wrapped_field in self.fields:
            # Note: This should be true since we don't create a FieldWrapper for fields with
            # `cmd=False`.
            assert wrapped_field.field.metadata.get("cmd", True)

            if wrapped_field.is_subparser:
                # Subparser fields register themselves on the parser, not on the group.
                wrapped_field.add_subparsers(parser)
                continue

            arg_options = wrapped_field.arg_options

            if argparse.SUPPRESS in self.defaults:
                arg_options["default"] = argparse.SUPPRESS
            if wrapped_field.is_subgroup:
                # NOTE: Not skipping subgroup fields, because even though they will have been
                # resolved at this point, we still want them to show up in the --help message!
                logger.debug(
                    f"Adding a subgroup field {wrapped_field.name} just so it shows up in the "
                    f"--help text."
                )

            logger.info(f"group.add_argument(*{wrapped_field.option_strings}, **{arg_options})")
            # TODO: Perhaps we could hook into the `action` that is returned here to know if the
            # flag was passed or not for a given field.
            _ = group.add_argument(*wrapped_field.option_strings, **arg_options)

    def equivalent_argparse_code(self, leading="group") -> str:
        """Return Python source that would produce the same parser setup with plain argparse.

        Raises:
            NotImplementedError: if any field is a subparser field.
        """
        code = ""
        code += textwrap.dedent(
            f"""
            group = parser.add_argument_group(title="{self.title.strip()}", description="{self.description.strip()}")
            """
        )
        for wrapped_field in self.fields:
            if wrapped_field.is_subparser:
                # TODO:
                raise NotImplementedError("Subparsers equivalent is TODO.")
                # NOTE(review): everything below the raise is unreachable dead code; it also
                # references `self.subparsers_dict`, which is not defined on this class.
                code += textwrap.dedent(
                    f"""\
                    # add subparsers for each dataclass type in the field.
                    subparsers = parser.add_subparsers(
                        title={wrapped_field.name},
                        description={wrapped_field.help},
                        dest={wrapped_field.dest},
                    )
                    subparsers.required = True

                    for subcommand, dataclass_type in {self.subparsers_dict.items()}:
                        subparser = subparsers.add_parser(subcommand)
                        subparser = cast(ArgumentParser, subparser)
                        subparser.add_arguments(dataclass_type, dest=self.dest)
                    """
                )
            elif wrapped_field.arg_options:
                code += textwrap.dedent(wrapped_field.equivalent_argparse_code()) + "\n"
        return code

    @property
    def name(self) -> str:
        """Name of the attribute that holds this dataclass value."""
        return self._name

    @property
    def parent(self) -> DataclassWrapper | None:
        """Wrapper of the enclosing dataclass, or None at the top level."""
        return self._parent

    @property
    def defaults(self) -> list[DataclassT | dict[str, Any] | None | Literal[argparse.SUPPRESS]]:
        """Default value(s) for this dataclass (a list because of ALWAYS_MERGE).

        Lazily computed: falls back to the parent's defaults (taking the
        attribute with this wrapper's name), then to the field's own default.
        The result is cached in `self._defaults`.
        """
        if self._defaults:
            return self._defaults
        if self._field is None:
            return []
        assert self.parent is not None
        if self.parent.defaults:
            self._defaults = []
            for default in self.parent.defaults:
                if default not in (None, argparse.SUPPRESS):
                    default = getattr(default, self.name)
                self._defaults.append(default)
        else:
            default_field_value = utils.default_value(self._field)
            if default_field_value is MISSING:
                self._defaults = []
            else:
                self._defaults = [default_field_value]
        return self._defaults

    @defaults.setter
    def defaults(self, value: list[DataclassT]):
        self._defaults = value

    @property
    def default(self) -> DataclassT | None:
        """The single default value passed to the constructor or `set_default` (may be None)."""
        return self._default

    # @default.setter
    # def default(self, value: DataclassT) -> None:
    #     self._default = value

    def set_default(self, value: DataclassT | dict | None):
        """Sets the default values for the arguments of the fields of this dataclass.

        `value` may be a dataclass instance (converted to a dict of field values),
        a dict mapping field names to defaults, or None (clears nothing, no-op).

        Raises:
            RuntimeError: if `value` contains keys that match neither a wrapped
                field nor a nested dataclass (the special key "_type_" is ignored).
        """
        if value is not None and not isinstance(value, dict):
            field_default_values = dataclasses.asdict(value)
        else:
            field_default_values = value
        self._default = value
        if field_default_values is None:
            return
        # Track which keys were consumed, to report typos/unknown keys below.
        unknown_names = set(field_default_values)
        for field_wrapper in self.fields:
            if field_wrapper.name not in field_default_values:
                continue
            # Manually set the default value for this argument.
            field_default_value = field_default_values[field_wrapper.name]
            field_wrapper.set_default(field_default_value)
            unknown_names.remove(field_wrapper.name)
        for nested_dataclass_wrapper in self._children:
            if nested_dataclass_wrapper.name not in field_default_values:
                continue
            field_default_value = field_default_values[nested_dataclass_wrapper.name]
            nested_dataclass_wrapper.set_default(field_default_value)
            unknown_names.remove(nested_dataclass_wrapper.name)
        unknown_names.discard("_type_")
        if unknown_names:
            raise RuntimeError(
                f"{sorted(unknown_names)} are not fields of {self.dataclass} at path {self.dest!r}!"
            )

    @property
    def title(self) -> str:
        """Title of the argument group: the dataclass qualname plus its destination(s)."""
        names_string = f""" [{', '.join(f"'{dest}'" for dest in self.destinations)}]"""
        title = self.dataclass.__qualname__ + names_string
        return title

    @property
    def description(self) -> str:
        """Description of the argument group.

        Prefers the attribute docstring/comment on the field of the parent
        dataclass; otherwise derives it from the class docstring (shortened to
        `MAX_DOCSTRING_DESC_LINES_HEIGHT` lines when the fields already have
        their own help strings).
        """
        if self.parent and self._field:
            doc = docstring.get_attribute_docstring(self.parent.dataclass, self._field.name)
            if doc is not None:
                if doc.docstring_below:
                    return doc.docstring_below
                elif doc.comment_above:
                    return doc.comment_above
                elif doc.comment_inline:
                    return doc.comment_inline

        # NOTE: The class docstring may be EXTREMELY LARGE.

        class_docstring = inspect_getdoc(self.dataclass) or ""
        if not class_docstring:
            return ""

        doc = dp_parse(class_docstring)

        from simple_parsing.decorators import _description_from_docstring

        description = _description_from_docstring(doc)

        num_lines = len(description.splitlines())
        # NOTE(review): computed unconditionally, even when unused below.
        shortened_description = (
            "\n".join(description.splitlines()[:MAX_DOCSTRING_DESC_LINES_HEIGHT]) + " ..."
        )

        fields_have_docstrings = any(f._docstring.help_string for f in self.fields)
        docstring_is_huge = num_lines > MAX_DOCSTRING_DESC_LINES_HEIGHT
        if not fields_have_docstrings:
            # The fields don't have docstrings. Return the entire docstring, regardless of its
            # size.
            return description
        # Fields have docstrings, so there's probably some duplication between the docstring and
        # the dataclass fields help. Shorten the docstring, if needed.
        if docstring_is_huge:
            return shortened_description
        return description

    # @property
    # def prefix(self) -> str:
    #     return self._prefix

    # @prefix.setter
    # def prefix(self, value: str):
    #     self._prefix = value
    #     for child_wrapper in self._children:
    #         child_wrapper.prefix = value

    @property
    def required(self) -> bool:
        """Whether this group's arguments are required on the command line."""
        return self._required

    @required.setter
    def required(self, value: bool):
        # Propagates to all leaf fields and nested groups.
        self._required = value
        for field in self.fields:
            field.required = value
        for child_wrapper in self._children:
            child_wrapper.required = value

    @property
    def multiple(self) -> bool:
        """True when this wrapper fills more than one destination (after a merge)."""
        return len(self.destinations) > 1

    @property
    def descendants(self):
        """Yield all nested `DataclassWrapper`s, depth-first."""
        for child in self._children:
            yield child
            yield from child.descendants

    @property
    def dest(self):
        """Dotted attribute path from the root wrapper to this one (e.g. 'config.optim')."""
        lineage = []
        parent = self.parent
        while parent is not None:
            lineage.append(parent.name)
            parent = parent.parent
        lineage = list(reversed(lineage))
        lineage.append(self.name)
        _dest = ".".join(lineage)
        # logger.debug(f"getting dest, returning {_dest}")
        return _dest

    @property
    def destinations(self) -> list[str]:
        """All dotted destinations this wrapper fills (one per parent destination)."""
        if not self._destinations:
            if self.parent:
                self._destinations = [f"{d}.{self.name}" for d in self.parent.destinations]
            else:
                self._destinations = [self.name]
        return self._destinations

    @destinations.setter
    def destinations(self, value: list[str]):
        self._destinations = value

    def merge(self, other: DataclassWrapper):
        """Absorb all the relevant attributes from another wrapper.

        Used by the `ConflictResolution.ALWAYS_MERGE` strategy: destinations and
        defaults are accumulated, field defaults are cleared, and children are
        merged pairwise.

        Args:
            other (DataclassWrapper): Another instance to absorb into this one.
        """
        # logger.debug(f"merging \n{self}\n with \n{other}")
        logger.debug(f"self destinations: {self.destinations}")
        logger.debug(f"other destinations: {other.destinations}")
        # assert not set(self.destinations).intersection(set(other.destinations)), "shouldn't have overlap in destinations"
        # self.destinations.extend(other.destinations)
        for dest in other.destinations:
            if dest not in self.destinations:
                self.destinations.append(dest)
        logger.debug(f"destinations after merge: {self.destinations}")
        self.defaults.extend(other.defaults)

        # Unset the default value for all fields.
        # TODO: Shouldn't be needed anymore.
        for field_wrapper in self.fields:
            field_wrapper.set_default(None)

        for child, other_child in zip(self._children, other._children):
            child.merge(other_child)
+
+
def _get_dataclass_fields(dataclass: type[Dataclass]) -> tuple[dataclasses.Field, ...]:
    """Return the `Field` objects of `dataclass`, including `InitVar` pseudo-fields.

    `dataclasses.fields` only yields entries whose `_field_type` is
    `dataclasses._FIELD`, but we also want `dataclasses._FIELD_INITVAR`
    entries, so this partly mirrors the stdlib implementation.

    Raises:
        TypeError: if `dataclass` is not a dataclass type or instance.
    """
    fields_map = getattr(dataclass, dataclasses._FIELDS, None)
    if fields_map is None:
        # Same error type and message as `dataclasses.fields`.
        raise TypeError("must be called with a dataclass type or instance")
    wanted_kinds = (dataclasses._FIELD, dataclasses._FIELD_INITVAR)
    return tuple(f for f in fields_map.values() if f._field_type in wanted_kinds)
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_metavar.py b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_metavar.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a75e79555c04c1abf25db43e51d59a2acc09181
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_metavar.py
@@ -0,0 +1,75 @@
+import functools
+from logging import getLogger
+from typing import Any, Callable, Dict, List, Optional, Type, TypeVar
+
+from ..utils import get_type_arguments, is_optional, is_tuple, is_union
+
T = TypeVar("T")

logger = getLogger(__name__)

# Known metavar replacements, seeded with the primitive types, whose metavar
# is simply their type name (e.g. "int", "str").
_new_metavars: Dict[Type[T], Optional[str]] = {
    t: t.__name__
    for t in [str, float, int, bytes]
}
+
+
def log_results(fn: Callable[[Type], str]):
    """Decorator for metavar-producing functions.

    Currently a transparent pass-through (the debug logging is disabled); kept
    so logging can be re-enabled in one place.
    """

    @functools.wraps(fn)
    def _logged(t: Type) -> str:
        result = fn(t)
        return result

    return _logged
+
+
@log_results
def get_metavar(t: Type) -> Optional[str]:
    """Gets the metavar to be used for that type in help strings.

    This is crucial when using a `weird` auto-generated parsing functions for
    things like Union, Optional, Etc etc.

    type the type arguments that were passed to `get_parsing_fn` that
    produced the given parsing_fn.

    Returns None if the name shouldn't be changed.
    """
    # TODO: Maybe we can create the name for each returned call, a bit like how
    # we dynamically create the parsing function itself?
    # FIX: `__name__` is missing on typing constructs (Union, Optional, ...), so this
    # may be None; the annotation used to claim `str`, contradicting the docstring.
    new_name: Optional[str] = getattr(t, "__name__", None)

    optional = is_optional(t)

    if t in _new_metavars:
        return _new_metavars[t]

    elif is_union(t):
        args = get_type_arguments(t)
        metavars: List[str] = []
        for type_arg in args:
            if type_arg is type(None):  # noqa: E721
                continue
            metavars.append(get_metavar(type_arg))
        # `map(str, ...)` because a recursive call may return None for unnamed types.
        metavar = "|".join(map(str, metavars))
        if optional:
            return f"[{metavar}]"
        return metavar

    elif is_tuple(t):
        args = get_type_arguments(t)
        if not args:
            return get_metavar(Any)
        logger.debug(f"Tuple args: {args}")
        metavars = []
        for arg in args:
            if arg is Ellipsis:
                # Display the (repeated) last item type before the ellipsis.
                metavars.append(f"[{metavars[-1]}, ...]")
                break
            else:
                metavars.append(get_metavar(arg))
        # FIX: guard with `map(str, ...)` like the Union branch above — a recursive call
        # may return None, and `str.join` would raise a TypeError on it.
        return " ".join(map(str, metavars))

    return new_name
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_parsing.py b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_parsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ab74220e0f3b3ef88601a2d3a2c2f6084bb3dd5
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_parsing.py
@@ -0,0 +1,298 @@
+"""Functions that are to be used to parse a field.
+
+Somewhat analogous to the 'parse' function in the `helpers.serialization.parsing` package.
+"""
+import enum
+import functools
+from dataclasses import Field
+from logging import getLogger
+from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union
+
+from simple_parsing.utils import (
+ get_bound,
+ get_forward_arg,
+ get_type_arguments,
+ is_enum,
+ is_forward_ref,
+ is_homogeneous_tuple_type,
+ is_list,
+ is_tuple,
+ is_typevar,
+ is_union,
+ str2bool,
+)
+
+logger = getLogger(__name__)
+
T = TypeVar("T")
K = TypeVar("K")


# Dictionary mapping from types/type annotations to their parsing functions.
_parsing_fns: Dict[Type[T], Callable[[Any], T]] = {
    # the 'primitive' types are parsed using the type fn as a constructor.
    t: t
    for t in [str, float, int, bytes]
}
# bool needs a dedicated parser: `bool("False")` is truthy, so the constructor
# cannot be used directly on command-line strings.
_parsing_fns[bool] = str2bool
+
+
def get_parsing_fn_for_field(field: Field) -> Callable[[Any], T]:
    """Gets the parsing function for the field `field`.

    A custom parsing function stored in the field metadata (under the "type"
    key) takes precedence over the one derived from the field's annotation.
    """
    name = field.name
    field_type = field.type
    logger.debug(f"name = {name}, field_type = {field_type}")

    # If the user set a custom parsing function, we use it.
    custom_parsing_fn = field.metadata.get("type")
    if custom_parsing_fn is None:
        return get_parsing_fn(field_type)
    return custom_parsing_fn
+
+
def _register(t: Type, func: Callable) -> None:
    """Record `func` as the parsing function for `t`, unless one is already registered."""
    _parsing_fns.setdefault(t, func)
+
+
def register_parsing_fn(some_type: Type[T], function: Callable[[Any], T]) -> None:
    """Register a parsing function for the type `some_type`.

    An existing registration for `some_type` takes precedence and is not overwritten.
    """
    if some_type not in _parsing_fns:
        _parsing_fns[some_type] = function
+
+
+# This doesn't work as well as it did for serialization, in large part due to how
+# argparse uses the `type` function when parsing containers.
+# TODO: Replace this with a simpler function that just returns the 'arg_options' dict to
+# give for a given type annotation.
def get_parsing_fn(t: Type[T]) -> Callable[[Any], T]:
    """Gets a parsing function for the given type or type annotation.

    Args:
        t (Type[T]): A type or type annotation.

    Returns:
        Callable[[Any], T]: A function that will parse a value of the given type
        from the command-line when available, or a no-op function that
        will return the raw value, when a parsing fn cannot be found or
        constructed.
    """
    if t in _parsing_fns:
        logger.debug(f"The type {t} has a dedicated parsing function.")
        return _parsing_fns[t]

    elif t is Any:
        logger.debug(f"parsing an Any type: {t}")
        return no_op

    # TODO: Do we want to support parsing a Dict from command-line?
    # elif is_dict(t):
    #     logger.debug(f"parsing a Dict field: {t}")
    #     args = get_type_arguments(t)
    #     if len(args) != 2:
    #         args = (Any, Any)
    #     return parse_dict(*args)

    # TODO: This would require some sort of 'postprocessing' step to convert a
    # list to a Set or something like that.
    # elif is_set(t):
    #     logger.debug(f"parsing a Set field: {t}")
    #     args = get_type_arguments(t)
    #     if len(args) != 1:
    #         args = (Any,)
    #     return parse_set(args[0])

    elif is_tuple(t):
        logger.debug(f"parsing a Tuple field: {t}")
        args = get_type_arguments(t)
        if is_homogeneous_tuple_type(t):
            if not args:
                args = (str, ...)
            parsing_fn = get_parsing_fn(args[0])
        else:
            parsing_fn = parse_tuple(args)
            # Only rename the freshly-created closure: builtins such as `int`
            # (returned by the homogeneous branch above) reject setting `__name__`.
            parsing_fn.__name__ = str(t)
        return parsing_fn

    elif is_list(t):
        logger.debug(f"parsing a List field: {t}")
        args = get_type_arguments(t)
        assert len(args) == 1
        return parse_list(args[0])

    elif is_union(t):
        logger.debug(f"parsing a Union field: {t}")
        args = get_type_arguments(t)
        return parse_union(*args)

    elif is_enum(t):
        logger.debug(f"Parsing an Enum field of type {t}")
        return parse_enum(t)

    if is_forward_ref(t):
        forward_arg = get_forward_arg(t)
        # BUG FIX: the loop variable used to be named `t`, clobbering the argument.
        # When no registered type matched, the code below (`is_typevar(t)` and the
        # final `return t`) then operated on the *last registered type* instead of
        # the forward reference that was actually passed in.
        for registered_type, fn in _parsing_fns.items():
            if getattr(registered_type, "__name__", str(registered_type)) == forward_arg:
                return fn

    if is_typevar(t):
        bound = get_bound(t)
        logger.debug(f"parsing a typevar: {t}, bound type is {bound}.")
        if bound is not None:
            return get_parsing_fn(bound)

    logger.debug(
        f"Couldn't find a parsing function for type {t}, will try " f"to use the type directly."
    )
    return t
+
+
def try_functions(*funcs: Callable[[Any], T]) -> Callable[[Any], Union[T, Any]]:
    """Tries to use the functions in succession, else raises a ValueError."""

    def _try_functions(val: Any) -> Union[T, Any]:
        logger.debug(f"Debugging the 'raw value' of {val}, will try functions {funcs}")
        exceptions: List[Exception] = []
        for func in funcs:
            try:
                parsed = func(val)
            except Exception as ex:
                # Remember the failure and move on to the next candidate.
                exceptions.append(ex)
            else:
                logger.debug(
                    f"Successfully used the function {func} to get a parsed value of {parsed}."
                )
                return parsed
        # Every candidate failed: log and raise with the collected exceptions.
        message = f"Couldn't parse value {val}, returning the value as-is. (exceptions: {exceptions})"
        logger.error(message)
        raise ValueError(message)

    joined_names = " and ".join(str(getattr(func, "__name__", func)) for func in funcs)
    _try_functions.__name__ = "Try<" + joined_names + ">"
    return _try_functions
+
+
def parse_union(*types: Type[T]) -> Callable[[Any], Union[T, Any]]:
    """Create a parsing function for a `Union[...]` annotation.

    Each non-None member type is tried in order; the first parse that succeeds
    wins. When `None` is one of the members (i.e. `Optional[...]`), each member
    parser also lets `None` pass through unparsed.
    """
    # Partition the Union into None and non-None types.
    non_none_types = [t for t in types if t is not type(None)]
    optional = len(non_none_types) != len(types)

    parsing_fns: List[Callable[[Any], T]] = []
    for t in non_none_types:
        parsing_fns.append(parse_optional(t) if optional else get_parsing_fn(t))

    # Try using each of the non-None types, in succession. Worst case, return the value.
    f = try_functions(*parsing_fns)
    from simple_parsing.wrappers.field_metavar import get_metavar

    f.__name__ = get_metavar(Union[tuple(non_none_types)])  # type: ignore
    return f
+
+
def parse_optional(t: Type[T]) -> Callable[[Optional[Any]], Optional[T]]:
    """Wrap the parsing function for `t` so that a `None` value passes through unparsed."""
    parse_fn = get_parsing_fn(t)

    def _parse_optional(val: Optional[Any]) -> Optional[T]:
        if val is None:
            return None
        return parse_fn(val)

    return _parse_optional
+
+
def parse_tuple(tuple_item_types: Tuple[Type[T], ...]) -> Callable[[List[T]], Tuple[T, ...]]:
    """Makes a parsing function for creating tuples from the command-line args.

    Can handle tuples with different item types, for instance:
    - `Tuple[int, Foo, str, float, ...]`.

    Returns:
        Callable[[List[T]], Tuple[T, ...]]: A parsing function for creating tuples.
    """
    # Note, if there are more values than types in the tuple type, then the
    # last type is used.
    # TODO: support the Ellipsis?
    if not tuple_item_types:
        tuple_item_types = (Any, Ellipsis)

    # How many times `_parse_tuple` has been called so far. argparse calls the
    # `type` function once per received value, and this counter selects which
    # tuple item type the current value corresponds to.
    # NOTE(review): the counter is never reset, so this assumes the returned
    # function is only used for a single parse -- TODO confirm.
    calls_count: int = 0

    def _parse_tuple(val: Any) -> T:
        # Parse ONE incoming value with the parsing fn of the matching item type
        # (argparse assembles the full tuple/list itself via `nargs`).
        nonlocal calls_count
        logger.debug(f"Parsing a Tuple with item types {tuple_item_types}, raw value is {val}.")
        parsing_fn_index = calls_count

        if Ellipsis in tuple_item_types:
            ellipsis_index = tuple_item_types.index(Ellipsis)
            logger.debug(f"Ellipsis is at index {ellipsis_index}")
            # If this function is being called for the 'Ellipsis' type argument
            # or higher, just use the last type argument before the ellipsis.
            # NOTE: AFAIK, using something like Tuple[t1, t2, ...] is impossible
            # and it can only be something like Tuple[t1, ...], meaning an
            # unknown number of arguments of type `t1`.
            if parsing_fn_index >= ellipsis_index:
                parsing_fn_index = ellipsis_index - 1

        item_type = tuple_item_types[parsing_fn_index]
        parsing_fn = get_parsing_fn(item_type)
        parsed_value = parsing_fn(val)

        calls_count += 1

        return parsed_value

    return _parse_tuple
+
+
def parse_list(list_item_type: Type[T]) -> Callable[[Any], T]:
    """Return the function used to parse a *single item* of a `List[...]` field.

    FIX: the return annotation used to be `T`, but this returns the item-type's
    parsing *callable* (argparse calls it once per value and assembles the list
    itself -- see the module note about how argparse uses the `type` function
    for containers).
    """
    return get_parsing_fn(list_item_type)
+
+
def no_op(v: T) -> T:
    """Identity parsing function: gives back the value unchanged.

    Args:
        v: Any value.

    Returns:
        The exact same object, untouched.
    """
    return v
+
+
+E = TypeVar("E", bound=enum.Enum)
+
+
def parse_enum(enum_type: Type[E]) -> Callable[[str], E]:
    """Returns a function to use to parse an enum of type `enum_type` from a string.

    Parameters
    ----------
    - enum_type : Type[enum.Enum]

        The type of enum to create a parsing function for.

    Returns
    -------
    Callable[[str], E]
        A function that parses an enum object of type `enum_type` from a string.
    """
    # Save the function, since the same type will always be parsed the same way. Also
    # makes testing easier.
    cached = _parsing_fns.get(enum_type)
    if cached is not None:
        return cached

    # NOTE: Use `functools.wraps` so that fn name is the enum, so the metavar shows up
    # just like the enum on the command-line, and not like
    # "(...).parse_enum.._parse_enum" or something.
    @functools.wraps(enum_type)
    def _parse_enum(v: str) -> E:
        # Look the member up by name, e.g. "RED" -> Color.RED.
        return enum_type[v]

    _parsing_fns[enum_type] = _parse_enum
    return _parse_enum
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_wrapper.py b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a4d18604426003475ac2505953be1d62dfeae77
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/field_wrapper.py
@@ -0,0 +1,1090 @@
+from __future__ import annotations
+
+import argparse
+import dataclasses
+import inspect
+import sys
+import typing
+from enum import Enum, auto
+from logging import getLogger
+from typing import Any, Callable, ClassVar, Hashable, Union, cast
+
+from typing_extensions import Literal
+
+from simple_parsing.help_formatter import TEMPORARY_TOKEN
+
+from .. import docstring, utils
+from ..helpers.custom_actions import BooleanOptionalAction
+from ..utils import Dataclass
+from .field_metavar import get_metavar
+from .field_parsing import get_parsing_fn
+from .wrapper import Wrapper
+
+if typing.TYPE_CHECKING:
+ from simple_parsing import ArgumentParser
+
+ from .dataclass_wrapper import DataclassWrapper
+
+logger = getLogger(__name__)
+
+
class ArgumentGenerationMode(Enum):
    """Enum for argument generation modes.

    Controls whether generated command-line flags use the flat attribute name,
    the fully qualified (nested) destination path, or both.
    """

    FLAT = auto()
    """Tries to generate flat arguments, removing the argument destination path when possible."""

    NESTED = auto()
    """Generates arguments with their full destination path."""

    BOTH = auto()
    """Generates both the flat and nested arguments."""
+
+
class NestedMode(Enum):
    """Controls how nested arguments are generated.

    Only relevant when `ArgumentGenerationMode` produces nested arguments.
    """

    DEFAULT = auto()
    """By default, the full destination path is used."""

    WITHOUT_ROOT = auto()
    """The full destination path is used, but the first level is removed.

    Useful because sometimes the first level is uninformative (i.e. 'args').
    """
+
+
class DashVariant(Enum):
    """Specifies whether to prefer only '_', both '_'/'-', or only '-', for cmd-line-flags.

    - AUTO (default):
        Currently, UNDERSCORE.

    - UNDERSCORE:

    - UNDERSCORE_AND_DASH:

    - DASH:
    """

    # NOTE: `AUTO` and `UNDERSCORE` share the value `False`, so by Python's Enum
    # aliasing rules `UNDERSCORE` is an alias of `AUTO` (the first member with
    # that value). Comparing either name compares equal to the other.
    AUTO = False
    UNDERSCORE = False
    UNDERSCORE_AND_DASH = True
    DASH = "only"
+
+
+class FieldWrapper(Wrapper):
+ """The FieldWrapper class acts a bit like an 'argparse.Action' class, which essentially just
+ creates the `option_strings` and `arg_options` that get passed to the
+ `add_argument(*option_strings, **arg_options)` function of the `argparse._ArgumentGroup` (in
+ this case represented by the `parent` attribute, an instance of the class `DataclassWrapper`).
+
+ The `option_strings`, `required`, `help`, `metavar`, `default`, etc.
+ attributes just autogenerate the argument of the same name of the
+ above-mentioned `add_argument` function. The `arg_options` attribute fills
+ in the rest and may overwrite these values, depending on the type of field.
+
+ The `field` argument is the actually wrapped `dataclasses.Field` instance.
+ """
+
+ # Whether or not `simple_parsing` should add option_string variants where
+ # underscores in attribute names are replaced with dashes.
+ # For example, when set to DashVariant.UNDERSCORE_AND_DASH,
+ # "--no-cache" and "--no_cache" could both
+ # be used to point to the same attribute `no_cache` on some dataclass.
+ # TODO: This can often make "--help" messages a bit crowded
+ add_dash_variants: ClassVar[DashVariant] = DashVariant.AUTO
+
+ # Whether to follow a flat or nested argument structure.
+ argument_generation_mode: ClassVar[ArgumentGenerationMode] = ArgumentGenerationMode.FLAT
+
+ # Controls how nested arguments are generated.
+ nested_mode: ClassVar[NestedMode] = NestedMode.DEFAULT
+
    def __init__(
        self, field: dataclasses.Field, parent: DataclassWrapper | None = None, prefix: str = ""
    ):
        """Wrap `field`, a `dataclasses.Field` of the dataclass wrapped by `parent`.

        Args:
            field: The wrapped `dataclasses.Field` instance.
            parent: The `DataclassWrapper` for the dataclass that owns this field.
            prefix: Prefix prepended to the generated option strings.
        """
        super().__init__()
        self.field: dataclasses.Field = field
        self.prefix: str = prefix
        self._parent: Any = parent
        # Holders used to 'cache' the properties.
        # (could've used cached_property with Python 3.8).
        self._option_strings: set[str] | None = None
        self._required: bool | None = None

        try:
            self._docstring = docstring.get_attribute_docstring(
                self.parent.dataclass, self.field.name
            )
        except (SystemExit, Exception) as e:
            # NOTE(review): `SystemExit` is caught explicitly here (it does not
            # derive from `Exception`) — presumably docstring extraction can
            # trigger it; confirm before narrowing this handler.
            logger.debug(f"Couldn't find attribute docstring for field {self.name}, {e}")
            self._docstring = docstring.AttributeDocString()

        self._help: str | None = None
        self._metavar: str | None = None
        # Single default value, or a list of defaults when this field is reused
        # across multiple dataclass instances.
        self._default: Any | list[Any] | None = None
        self._dest: str | None = None
        # the argparse-related options:
        self._arg_options: dict[str, Any] = {}
        self._dest_field: FieldWrapper | None = None
        self._type: type[Any] | None = None

        # stores the resulting values for each of the destination attributes.
        self._results: dict[str, Any] = {}
+
+ @property
+ def arg_options(self) -> dict[str, Any]:
+ """Dictionary of values to be passed to the `add_argument` method.
+
+ The main feature of this package is to infer these arguments
+ automatically using features of the built-in `dataclasses` package, as
+ well as Python's type annotations.
+
+ By passing additional keyword arguments to the `field()`
+ function, the autogenerated arguments can be overwritten,
+ giving access to all of the usual argparse features know and love.
+
+ NOTE: When passing an `action` keyword argument, we remove all the
+ autogenerated options that aren't required by the Action class
+ constructor.
+ For example, when specifying a custom `action` like "store_true" or
+ "store_false", the `type` argument autogenerated here shouldn't be
+ passed to the constructor of the `argparse._StoreFalseAction`, so we
+ discard it.
+ """
+ if self._arg_options:
+ return self._arg_options
+ # get the auto-generated options.
+ options = self.get_arg_options()
+ # overwrite the auto-generated options with given ones, if any.
+ options.update(self.custom_arg_options)
+ # only keep the arguments used by the Action constructor.
+ action = options.get("action", "store")
+ self._arg_options = only_keep_action_args(options, action)
+ return self._arg_options
+
    def __call__(
        self,
        parser: argparse.ArgumentParser,
        namespace: argparse.Namespace,
        values: Any,
        constructor_arguments: dict[str, dict[str, Any]],
        option_string: str | None = None,
    ):
        """Immitates a custom Action, which sets the corresponding value from `values` at the right
        destination in the `constructor_arguments` of the parser.

        TODO: Doesn't seem currently possible to check whether the argument was passed in the
        first place.

        Args:
            parser (argparse.ArgumentParser): the `simple_parsing.ArgumentParser` used.
            namespace (argparse.Namespace): (unused).
            values (Any): The parsed values for the argument.
            constructor_arguments: The dict of constructor arguments for each dataclass.
            option_string (Optional[str], optional): (unused). Defaults to None.
        """
        from simple_parsing import ArgumentParser

        parser = cast(ArgumentParser, parser)

        # When this field is shared between multiple dataclass instances
        # (merged arguments), replicate the parsed value once per destination.
        if self.is_reused:
            values = self.duplicate_if_needed(values)
            logger.debug(f"(replicated the parsed values: '{values}')")
        else:
            values = [values]

        self._results = {}

        for destination, value in zip(self.destinations, values):
            if self.is_subgroup:
                # Subgroup fields are resolved elsewhere; write nothing here.
                logger.debug(f"Ignoring the FieldWrapper for subgroup at dest {self.dest}")
                return

            parent_dest, attribute = utils.split_dest(destination)
            value = self.postprocess(value)

            self._results[destination] = value

            # if destination.endswith(f"_{i}"):
            # attribute = attribute[:-2]
            # constructor_arguments[parent_dest][attribute] = value

            # TODO: Need to decide which one to do here. Seems easier to always set all the values.
            logger.debug(f"constructor_arguments[{parent_dest}][{attribute}] = {value}")
            constructor_arguments[parent_dest][attribute] = value

        # NOTE(review): this block looks unreachable — when `self.is_subgroup` is
        # true the loop above returns on its first iteration, and `value` here is
        # the loop variable (NameError if there were no destinations). Left as-is;
        # confirm before removing.
        if self.is_subgroup:
            if not hasattr(namespace, "subgroups"):
                namespace.subgroups = {}

            if isinstance(value, str) and value in self.subgroup_choices.keys():
                # We've just parsed the name of the chosen subgroup.
                # NOTE: There can't be any ambiguity here, since the keys of the dictionary are
                # string, and the values are always dataclass types. We don't need to worry
                # about having to deal with {"bob": "alice", "alice": "foo"}-type weirdness.
                namespace.subgroups[self.dest] = value
                logger.info(f"Chosen subgroup for '{self.dest}': '{value}'")
+
    def get_arg_options(self) -> dict[str, Any]:
        """Create the `parser.add_arguments` kwargs for this field.

        Inspects the field's type annotation (choice / optional / union / enum /
        list / tuple / bool / plain) and builds the corresponding argparse
        options (`type`, `nargs`, `choices`, `default`, ...).

        TODO: Refactor this, following https://github.com/lebrice/SimpleParsing/issues/150
        """
        if not self.field.init:
            # Don't add command-line arguments for fields that have `init=False`.
            return {}
        _arg_options: dict[str, Any] = {}

        # Not sure why, but argparse doesn't allow using a different dest for a positional arg.
        # _Appears_ trivial to support within argparse.
        if not self.field.metadata.get("positional"):
            _arg_options["required"] = self.required
            _arg_options["dest"] = self.dest
        elif not self.required:
            # For positional arguments that aren't required we need to set
            # nargs='?' to make them optional.
            _arg_options["nargs"] = "?"
        _arg_options["default"] = self.default
        _arg_options["metavar"] = get_metavar(self.type)

        if self.help:
            _arg_options["help"] = self.help
        elif self.default is not None:
            # issue 64: Need to add a temporary 'help' string, so that the formatter
            # automatically adds the (default: '123'). We then remove it.
            _arg_options["help"] = TEMPORARY_TOKEN

        # TODO: Possible duplication between utils.is_foo(Field) and self.is_foo where foo in
        # [choice, optional, list, tuple, dataclass, etc.]
        if self.is_choice:
            choices = self.choices
            assert choices
            # Choices are compared as strings; conversion back to the actual
            # values happens later, in `postprocess`.
            item_type = str
            _arg_options["type"] = item_type
            _arg_options["choices"] = choices
            # TODO: Refactor this. is_choice and is_list are both contributing, so it's unclear.
            if utils.is_list(self.type):
                _arg_options["nargs"] = argparse.ZERO_OR_MORE
                # We use the default 'metavar' generated by argparse.
                _arg_options.pop("metavar", None)

        elif utils.is_optional(self.type) or self.field.default is None:
            _arg_options["required"] = False

            if utils.is_optional(self.type):
                type_arguments = utils.get_args(self.type)
                # NOTE: `Optional[T]` is always translated to `Union[T, NoneType]`,
                # so there is always at least one non-None type argument.
                # (NOTE(review): the original comment was garbled here — stripped
                # angle-bracketed text.)
                assert type_arguments
                non_none_types = [
                    t
                    for t in type_arguments
                    if t is not type(None)  # noqa: E721
                ]  # noqa: E721
                assert non_none_types
                if len(non_none_types) == 1:
                    wrapped_type = non_none_types[0]
                else:
                    # Construct the type annotation for the non-optional version of the type.
                    wrapped_type = Union[tuple(non_none_types)]  # type: ignore
            else:
                assert self.field.default is None
                # If the default value is None, then type annotation is incorrect (i.e. not
                # `Optional[T]` or `T | None`). We allow it, as discussed in issue #132:
                # https://github.com/lebrice/SimpleParsing/issues/132, and treat the type
                # annotation as the type of the field.
                wrapped_type = self.type

            if utils.is_tuple(wrapped_type):
                # TODO: ISSUE 42: better handle optional/nested tuples.
                # For now we just assume that the item types are 'simple'.

                # IDEA: This could probably be a static method that takes in the type
                # annotation, and uses a recursive call to fetch the arg options of the
                # item type, and uses some of the entries from that dict to construct
                # the arg options of the parent?
                # NOTE: We'd have to return a different value for the `type` argument
                # depending on the nesting, e.g. for Tuple[int, int] vs Tuple[int, str]
                # vs Tuple[Tuple[int, str], Tuple[int, str]] (re-using the simpler
                # case twice), since we want to support passing --foo '(a, 1)' '(b, 4)'.
                # (NOTE(review): the original comment was garbled here by stripped
                # angle-bracketed text.)
                _arg_options["type"] = get_parsing_fn(wrapped_type)
                _arg_options["nargs"] = utils.get_container_nargs(wrapped_type)

            elif utils.is_list(wrapped_type):
                _arg_options["type"] = utils.get_argparse_type_for_container(wrapped_type)
                _arg_options["nargs"] = "*"
                # NOTE: Can't set 'const', since we'd get:
                # ValueError: nargs must be '?' to supply const
                # _arg_options["const"] = []
            else:
                _arg_options["type"] = get_parsing_fn(wrapped_type)
                # TODO: Should the 'nargs' really be '?' here?
                _arg_options["nargs"] = "?"

        elif self.is_union:
            logger.debug("Parsing a Union type!")
            _arg_options["type"] = get_parsing_fn(self.type)

        elif self.is_enum:
            logger.debug(f"Adding an Enum attribute '{self.name}'")
            # we actually parse enums as string, and convert them back to enums
            # in the `process` method.
            logger.debug(f"self.choices = {self.choices}")
            assert issubclass(self.type, Enum)
            _arg_options["choices"] = list(e.name for e in self.type)
            _arg_options["type"] = str
            # if the default value is an Enum, we convert it to a string.
            if self.default:

                def enum_to_str(e):
                    return e.name if isinstance(e, Enum) else e

                if self.is_reused:
                    _arg_options["default"] = [enum_to_str(default) for default in self.default]
                else:
                    _arg_options["default"] = enum_to_str(self.default)

        elif self.is_list:
            logger.debug(f"Adding a List attribute '{self.name}': {self.type}")
            _arg_options["nargs"] = "*"

            if self.is_reused:
                # TODO: Only the 'single-level' lists (not reused) use the new
                # `get_parsing_fn` function (for now).
                type_fn = utils._parse_multiple_containers(self.type)
                type_fn.__name__ = utils.get_type_name(self.type)
                _arg_options["type"] = type_fn
            else:
                _arg_options["type"] = utils.get_argparse_type_for_container(self.type)

        elif utils.is_tuple(self.type):
            logger.debug(f"Adding a Tuple attribute '{self.name}' with type {self.type}")
            _arg_options["nargs"] = utils.get_container_nargs(self.type)
            _arg_options["type"] = get_parsing_fn(self.type)

            if self.is_reused:
                type_fn = utils._parse_multiple_containers(self.type)
                type_fn.__name__ = utils.get_type_name(self.type)
                _arg_options["type"] = type_fn

        elif utils.is_bool(self.type):
            if self.is_reused:
                _arg_options["type"] = utils.str2bool
                _arg_options["type"].__name__ = "bool"
                _arg_options["metavar"] = "bool"
                _arg_options["nargs"] = "?"
            else:
                # NOTE: also pass the prefix to the boolean optional action, because it needs to add it
                # to the generated negative flags as well.
                _arg_options["action"] = BooleanOptionalAction
                _arg_options["_conflict_prefix"] = self.prefix

        else:
            # "Plain" / simple argument.
            # For the metavar, use a custom passed value, if present, else do
            # not put any value in (which uses the usual value from argparse).
            if self.metavar:
                _arg_options["metavar"] = self.metavar
            else:
                # Remove the 'metavar' that we auto-generated above.
                _arg_options.pop("metavar", None)
            _arg_options["type"] = self.custom_arg_options.get("type", get_parsing_fn(self.type))

        # A reused field consumes one value per instance, so override nargs last.
        if self.is_reused:
            if self.required:
                _arg_options["nargs"] = "+"
            else:
                _arg_options["nargs"] = "*"

        return _arg_options
+
    def duplicate_if_needed(self, parsed_values: Any) -> list[Any]:
        """Duplicates the passed argument values if needed, such that each instance gets a value.

        For example, if we expected 3 values for an argument, and a single value was passed,
        then we duplicate it so that each of the three instances get the same value.

        Args:
            parsed_values (Any): The parsed value(s)

        Raises:
            utils.InconsistentArgumentError: If the number of arguments passed is
            inconsistent (neither 1 or the number of instances)

        Returns:
            List[Any]: The list of parsed values, of the right length.
        """
        num_instances_to_parse = len(self.destinations)
        logger.debug(f"num to parse: {num_instances_to_parse}")
        logger.debug(f"(raw) parsed values: '{parsed_values}'")

        # Only reused fields (ConflictResolution.ALWAYS_MERGE) get here.
        assert self.is_reused
        assert (
            num_instances_to_parse > 1
        ), "multiple is true but we're expected to instantiate only one instance"

        if utils.is_list(self.type) and isinstance(parsed_values, tuple):
            parsed_values = list(parsed_values)

        if not self.is_tuple and not self.is_list and isinstance(parsed_values, list):
            nesting_level = utils.get_nesting_level(parsed_values)
            # A singly-wrapped list of exactly the right length is unwrapped and
            # used as-is: one value per instance.
            if (
                nesting_level == 2
                and len(parsed_values) == 1
                and len(parsed_values[0]) == num_instances_to_parse
            ):
                result: list = parsed_values[0]
                return result

        if not isinstance(parsed_values, (list, tuple)):
            parsed_values = [parsed_values]

        if len(parsed_values) == num_instances_to_parse:
            return parsed_values
        elif len(parsed_values) == 1:
            # Broadcast the single value to every instance.
            return parsed_values * num_instances_to_parse
        else:
            raise utils.InconsistentArgumentError(
                f"The field '{self.name}' contains {len(parsed_values)} values,"
                f" but either 1 or {num_instances_to_parse} values were "
                f"expected."
            )
+
    def postprocess(self, raw_parsed_value: Any) -> Any:
        """Applies any conversions to the 'raw' parsed value before it is used in the constructor
        of the dataclass.

        Args:
            raw_parsed_value (Any): The 'raw' parsed value.

        Returns:
            Any: The processed value
        """
        if self.is_enum:
            logger.debug(
                f"field postprocessing for Enum field '{self.name}' with value:"
                f" {raw_parsed_value}'"
            )
            # Enums are parsed as member-name strings; convert back to the member.
            if isinstance(raw_parsed_value, str):
                raw_parsed_value = self.type[raw_parsed_value]  # type: ignore
            return raw_parsed_value

        elif self.is_choice:
            # Map the parsed key(s) back through the choice dict, if one exists.
            choice_dict = self.choice_dict
            if choice_dict:
                key_type = type(next(iter(choice_dict.keys())))
                if self.is_list and isinstance(raw_parsed_value[0], key_type):
                    return [choice_dict[value] for value in raw_parsed_value]
                elif isinstance(raw_parsed_value, key_type):
                    return choice_dict[raw_parsed_value]
            return raw_parsed_value

        elif self.is_tuple:
            logger.debug("we're parsing a tuple!")
            # argparse always returns lists by default. If the field was of a
            # Tuple type, we just transform the list to a Tuple.
            # (If it's already a tuple, fall through to the final return below.)
            if not isinstance(raw_parsed_value, tuple):
                return tuple(raw_parsed_value)

        elif self.is_bool:
            return raw_parsed_value

        elif self.is_list:
            if isinstance(raw_parsed_value, tuple):
                return list(raw_parsed_value)
            else:
                return raw_parsed_value

        elif self.is_subparser:
            return raw_parsed_value

        elif utils.is_optional(self.type):
            item_type = utils.get_args(self.type)[0]
            if utils.is_tuple(item_type) and isinstance(raw_parsed_value, list):
                # TODO: Make sure that this doesn't cause issues with NamedTuple types.
                return tuple(raw_parsed_value)

        elif self.type not in utils.builtin_types:
            # TODO: what if we actually got an auto-generated parsing function?
            try:
                # if the field has a weird type, we try to call it directly.
                return self.type(raw_parsed_value)
            except Exception as e:
                # Best-effort: fall back to the raw value rather than crashing.
                logger.debug(
                    f"Unable to instantiate the field '{self.name}' of type "
                    f"'{self.type}' by using the type as a constructor. "
                    f"Returning the raw parsed value instead "
                    f"({raw_parsed_value}, of type {type(raw_parsed_value)}). "
                    f"(Caught Exception: {e})"
                )
                return raw_parsed_value

        logger.debug(
            f"field postprocessing for field {self.name} of type '{self.type}' and with "
            f"value '{raw_parsed_value}'"
        )
        return raw_parsed_value
+
+ @property
+ def is_reused(self) -> bool:
+ return len(self.destinations) > 1
+
+ @property
+ def action(self) -> str | type[argparse.Action]:
+ """The `action` argument to be passed to `add_argument(...)`."""
+ return self.custom_arg_options.get("action", "store")
+
+ @property
+ def action_str(self) -> str:
+ if isinstance(self.action, str):
+ return self.action
+ return self.action.__name__
+
+ @property
+ def custom_arg_options(self) -> dict[str, Any]:
+ """Custom argparse options that overwrite those in `arg_options`.
+
+ Can be set by using the `field` function, passing in a keyword argument
+ that would usually be passed to the parser.add_argument(
+ *option_strings, **kwargs) method.
+ """
+ return self.field.metadata.get("custom_args", {})
+
+ @property
+ def destinations(self) -> list[str]:
+ return [f"{parent_dest}.{self.name}" for parent_dest in self.parent.destinations]
+
    @property
    def option_strings(self) -> list[str]:
        """Generates the `option_strings` argument to the `add_argument` call.

        `parser.add_argument(*name_or_flags, **arg_options)`

        ## Notes:
        - Additional names for the same argument can be added via the `field`
        function.
        - Whenever the name of an attribute includes underscores ("_"), the same
        argument can be passed by using dashes ("-") instead. This also includes
        aliases.
        - If an alias contained leading dashes, either single or double, the
        same number of dashes will be used, even in the case where a prefix is
        added.

        For an illustration of this, see the aliases example.
        """

        dashes: list[str] = []  # contains the leading dashes.
        options: list[str] = []  # contains the name following the dashes.

        def add_args(dash: str, candidates: list[str]) -> None:
            # Append each candidate name paired with the given dash prefix.
            for candidate in candidates:
                options.append(candidate)
                dashes.append(dash)

        # Handle user passing us "True" or "only" directly.
        add_dash_variants = DashVariant(FieldWrapper.add_dash_variants)

        gen_mode = type(self).argument_generation_mode
        nested_mode = type(self).nested_mode

        # Single-letter names get a single dash (e.g. "-x"), others a double dash.
        dash = "-" if len(self.name) == 1 else "--"
        option = f"{self.prefix}{self.name}"
        nested_option = (
            self.dest if nested_mode == NestedMode.DEFAULT else ".".join(self.dest.split(".")[1:])
        )
        if add_dash_variants == DashVariant.DASH:
            option = option.replace("_", "-")
            nested_option = nested_option.replace("_", "-")

        if self.field.metadata.get("positional"):
            # Can't be positional AND have flags at same time. Also, need dest to be be this and not just option.
            return [self.dest]

        if gen_mode == ArgumentGenerationMode.FLAT:
            candidates = [option]
        elif gen_mode == ArgumentGenerationMode.NESTED:
            candidates = [nested_option]
        else:
            candidates = [option, nested_option]

        add_args(dash, candidates)

        if dash == "-":
            # also add a double-dash option:
            add_args("--", candidates)

        # add all the aliases that were passed to the `field` function.
        for alias in self.aliases:
            if alias.startswith("--"):
                dash = "--"
                name = alias[2:]
            elif alias.startswith("-"):
                dash = "-"
                name = alias[1:]
            else:
                dash = "-" if len(alias) == 1 else "--"
                name = alias
            option = f"{self.prefix}{name}"

            dashes.append(dash)
            options.append(option)

        # Additionally, add all name variants with the "_" replaced with "-".
        # For example, "--no-cache" will correctly set the `no_cache` attribute,
        # even if an alias isn't explicitly created.

        if add_dash_variants == DashVariant.UNDERSCORE_AND_DASH:
            additional_options = [option.replace("_", "-") for option in options if "_" in option]
            additional_dashes = [
                "-" if len(option) == 1 else "--" for option in additional_options
            ]
            options.extend(additional_options)
            dashes.extend(additional_dashes)

        # remove duplicates by creating a set.
        option_strings = {f"{dash}{option}" for dash, option in zip(dashes, options)}
        # TODO: possibly sort the option strings, if argparse doesn't do it
        # already.
        # NOTE: sorted by length, so the shortest flag appears first.
        return list(sorted(option_strings, key=len))
+
+ # @property
+ # def prefix(self) -> str:
+ # return self._prefix
+
+ @property
+ def aliases(self) -> list[str]:
+ return self.field.metadata.get("alias", [])
+
    @property
    def dest(self) -> str:
        """Where the attribute will be stored in the Namespace."""
        self._dest = super().dest
        # TODO: If a custom `dest` was passed, and it is a `Field` instance,
        # find the corresponding FieldWrapper and use its `dest` instead of ours.
        if self.dest_field:
            self._dest = self.dest_field.dest
            # NOTE: side effect — the proxied "dest" entry is removed from the
            # stored custom options so it isn't forwarded to `add_argument`.
            self.custom_arg_options.pop("dest", None)
        return self._dest
+
    @property
    def is_proxy(self) -> bool:
        # True when this wrapper only forwards to another field's destination.
        return self.dest_field is not None

    @property
    def dest_field(self) -> FieldWrapper | None:
        """Return the `FieldWrapper` for which `self` is a proxy (same dest). When a `dest`
        argument is passed to `field()`, and its value is a `Field`, that indicates that this Field
        is just a proxy for another.

        In such a case, we replace the dest of `self` with that of the other wrapper's we then find
        the corresponding FieldWrapper and use its `dest` instead of ours.

        Returns None when no `dest=<Field>` was passed, or when no matching
        wrapper is found in the lineage.
        """
        if self._dest_field is not None:
            return self._dest_field
        custom_dest = self.custom_arg_options.get("dest")
        if isinstance(custom_dest, dataclasses.Field):
            # Search every field of every ancestor wrapper for the proxied Field.
            all_fields: list[FieldWrapper] = []
            for parent in self.lineage():
                all_fields.extend(parent.fields)  # type: ignore
            for other_wrapper in all_fields:
                # Identity comparison: we want the wrapper for this exact Field.
                if custom_dest is other_wrapper.field:
                    self._dest_field = other_wrapper
                    break
        return self._dest_field
+
+ @property
+ def nargs(self):
+ return self.custom_arg_options.get("nargs", None)
+
+ # @property
+ # def const(self):
+ # return self.custom_arg_options.get("const", None)
+
    @property
    def default(self) -> Any:
        """Either a single default value, when parsing a single argument, or the list of default
        values, when this argument is reused multiple times (which only happens with the
        `ConflictResolution.ALWAYS_MERGE` option).

        In order of increasing priority, this could either be:
        1. The default attribute of the field
        2. the value of the corresponding attribute on the parent,
        if it has a default value
        """

        if self._default is not None:
            # If a default value was set manually from the outside (e.g. from the DataclassWrapper)
            # then use that value.
            default = self._default
        elif self.is_subgroup:
            default = self.subgroup_default
        elif any(
            parent_default not in (None, argparse.SUPPRESS)
            for parent_default in self.parent.defaults
        ):
            # if the dataclass with this field has a default value - either when a value was
            # passed for the `default` argument of `add_arguments` or when the parent is a nested
            # dataclass field with a default factory - we use the corresponding attribute on that
            # default instance.
            def _get_value(dataclass_default: utils.Dataclass | dict, name: str) -> Any:
                # The parent default can be either a dataclass instance or a dict.
                if isinstance(dataclass_default, dict):
                    return dataclass_default.get(name)
                return getattr(dataclass_default, name)

            defaults = [
                _get_value(parent_default, self.field.name)
                for parent_default in self.parent.defaults
                if parent_default not in (None, argparse.SUPPRESS)
            ]
            if len(self.parent.defaults) == 1:
                default = defaults[0]
            else:
                default = defaults
        # Try to get the default from the field, if possible.
        elif self.field.default is not dataclasses.MISSING:
            default = self.field.default
        elif self.field.default_factory is not dataclasses.MISSING:
            # Use the _default attribute to keep the result, so we can avoid calling the default
            # factory another time.
            # TODO: If the default factory is a function that returns None, it will still get
            # called multiple times. We need to set a sentinel value as the initial value of the
            # self._default attribute, so that we can correctly check whether we've already called
            # the default_factory before.
            if self._default is None:
                self._default = self.field.default_factory()
            default = self._default
        # field doesn't have a default value set.
        elif self.action == "store_true":
            default = False
        elif self.action == "store_false":
            # NOTE: The boolean parsing when default is `True` is really un-intuitive, and should
            # change in the future. See https://github.com/lebrice/SimpleParsing/issues/68
            default = True
        else:
            default = None

        # If this field is being reused, then we package up the `default` in a list.
        # TODO: Get rid of this. makes the code way uglier for no good reason.
        if self.is_reused and default is not None:
            n_destinations = len(self.destinations)
            assert n_destinations >= 1
            # BUG: This second part (the `or` part) is weird. Probably only applies when using
            # Lists of lists with the Reuse option, which is most likely not even supported..
            if utils.is_tuple_or_list(self.field.type) and len(default) != n_destinations:
                # The field is of a list type field,
                default = [default] * n_destinations
            elif not isinstance(default, list):
                default = [default] * n_destinations
            assert len(default) == n_destinations, (
                f"Not the same number of default values and destinations. "
                f"(default: {default}, # of destinations: {n_destinations})"
            )

        return default
+
+ def set_default(self, value: Any):
+ logger.debug(f"The field {self.name} has its default manually set to a value of {value}.")
+ self._default = value
+
    @property
    def required(self) -> bool:
        """Whether a value must be passed on the command-line for this field.

        Checks, in order: a manual override, subgroup defaults, store_* actions,
        optional annotations, the parent's requiredness, `nargs`, and finally
        the presence of a default value.
        """
        if self._required is not None:
            return self._required
        if self.is_subgroup:
            # Required only when the subgroup has no usable default.
            return self.subgroup_default in (None, dataclasses.MISSING)
        if self.action_str.startswith("store_"):
            # all the store_* actions do not require a value.
            return False
        if self.is_optional:
            return False
        if self.parent.required:
            # if the parent dataclass is required, then this attribute is too.
            # TODO: does that make sense though?
            return True
        if self.nargs in {"?", "*"}:
            return False
        if self.nargs == "+":
            return True
        if self.default is None and argparse.SUPPRESS not in self.parent.defaults:
            return True
        if self.is_reused:
            # if we're reusing this argument, the default value might be a list
            # of `MISSING` values.
            return any(v == dataclasses.MISSING for v in self.default)
        return False

    @required.setter
    def required(self, value: bool):
        # Manual override; takes precedence over all the inference above.
        self._required = value
+
    @property
    def type(self) -> type[Any]:
        """Returns the wrapped field's type annotation."""
        # TODO: Refactor this. Really ugly.
        if self._type is None:
            self._type = self.field.type
            if isinstance(self._type, str):
                # The type of the field might be a string when using `from __future__ import annotations`.
                # NOTE: Here we'd like to convert the fields type to an actual type, in case the
                # `from __future__ import annotations` feature is used.
                # This should also resolve most forward references.
                from simple_parsing.annotation_utils.get_field_annotations import (
                    get_field_type_from_annotations,
                )

                field_type = get_field_type_from_annotations(
                    self.parent.dataclass, self.field.name
                )
                self._type = field_type
            elif isinstance(self._type, dataclasses.InitVar):
                # InitVar wraps the actual annotation; unwrap it.
                self._type = self._type.type
        return self._type
+
+ def __str__(self):
+ return f""""""
+
+ @property
+ def is_choice(self) -> bool:
+ return self.choices is not None
+
+ @property
+ def choices(self) -> list | None:
+ """The list of possible values that can be passed on the command-line for this field, or
+ None."""
+
+ if "choices" in self.custom_arg_options:
+ return self.custom_arg_options["choices"]
+ if "choices" in self.field.metadata:
+ return list(self.field.metadata["choices"])
+ if "choice_dict" in self.field.metadata:
+ return list(self.field.metadata["choice_dict"].keys())
+ if utils.is_literal(self.type):
+ literal_values = list(utils.get_args(self.type))
+ literal_value_names = [
+ v.name if isinstance(v, Enum) else str(v) for v in literal_values
+ ]
+ return literal_value_names
+ return None
+
+ @property
+ def choice_dict(self) -> dict[str, Any] | None:
+ if "choice_dict" in self.field.metadata:
+ return self.field.metadata["choice_dict"]
+ if utils.is_literal(self.type):
+ literal_values = list(utils.get_args(self.type))
+ assert literal_values, "Literal always has at least one argument."
+ # We map from literal values (as strings) to the actual values.
+ # e.g. from BLUE -> Color.Blue
+ return {(v.name if isinstance(v, Enum) else str(v)): v for v in literal_values}
+ return None
+
+ @property
+ def help(self) -> str | None:
+ if self._help:
+ return self._help
+ if self.field.metadata.get("help"):
+ return self.field.metadata.get("help")
+
+ self._help = (
+ self._docstring.docstring_below
+ or self._docstring.comment_above
+ or self._docstring.comment_inline
+ or self._docstring.desc_from_cls_docstring
+ )
+ # NOTE: Need to make sure this doesn't interfere with the default value added to the help
+ # string.
+ if self._help == "":
+ self._help = None
+ return self._help
+
+ @help.setter
+ def help(self, value: str):
+ self._help = value
+
+ @property
+ def metavar(self) -> str | None:
+ """Returns the 'metavar' when set using one of the `field` functions, else None."""
+ if self._metavar:
+ return self._metavar
+ self._metavar = self.custom_arg_options.get("metavar")
+ return self._metavar
+
+ @metavar.setter
+ def metavar(self, value: str):
+ self._metavar = value
+
+ @property
+ def name(self) -> str:
+ return self.field.name
+
+ @property
+ def is_list(self):
+ return utils.is_list(self.type)
+
    @property
    def is_enum(self) -> bool:
        """Whether `self.type` is an Enum type (per `utils.is_enum`)."""
        return utils.is_enum(self.type)
+
    @property
    def is_tuple(self) -> bool:
        """Whether `self.type` is a tuple type (per `utils.is_tuple`)."""
        return utils.is_tuple(self.type)
+
    @property
    def is_bool(self) -> bool:
        """Whether `self.type` is a bool type (per `utils.is_bool`)."""
        return utils.is_bool(self.type)
+
    @property
    def is_optional(self) -> bool:
        """Whether the field's annotated type is Optional[...]."""
        # NOTE: checks the field's raw annotation (`self.field.type`), unlike the
        # `is_list`/`is_enum`/etc. properties above which check `self.type`.
        return utils.is_optional(self.field.type)
+
    @property
    def is_union(self) -> bool:
        """Whether the field's annotated type is a Union[...]."""
        # Like `is_optional`, this checks the field's raw annotation (`self.field.type`).
        return utils.is_union(self.field.type)
+
+ @property
+ def is_subparser(self) -> bool:
+ return utils.is_subparser_field(self.field) and "subgroups" not in self.field.metadata
+
    @property
    def is_subgroup(self) -> bool:
        """Whether this field's metadata contains subgroup definitions."""
        return "subgroups" in self.field.metadata
+
+ @property
+ def subgroup_choices(self) -> dict[Hashable, Callable[[], Dataclass] | Dataclass]:
+ if not self.is_subgroup:
+ raise RuntimeError(f"Field {self.field} doesn't have subgroups! ")
+ return self.field.metadata["subgroups"]
+
+ @property
+ def subgroup_default(self) -> Hashable | Literal[dataclasses.MISSING] | None:
+ if not self.is_subgroup:
+ raise RuntimeError(f"Field {self.field} doesn't have subgroups! ")
+ return self.field.metadata.get("subgroup_default")
+
    @property
    def type_arguments(self) -> tuple[type, ...] | None:
        """The type arguments of `self.type` (e.g. `(int,)` for `list[int]`), or None."""
        return utils.get_type_arguments(self.type)
+
    @property
    def parent(self) -> DataclassWrapper:
        """The DataclassWrapper this field wrapper is nested under."""
        return self._parent
+
+ @property
+ def subparsers_dict(self) -> dict[str, type] | None:
+ """The dict of subparsers, which is created either when using a Union[,
+
+ ] type annotation, or when using the `subparsers()` function.
+ """
+ if self.field.metadata.get("subparsers"):
+ return self.field.metadata["subparsers"]
+ elif self.is_union:
+ type_arguments = utils.get_type_arguments(self.field.type)
+ if type_arguments and any(map(utils.is_dataclass_type_or_typevar, type_arguments)):
+ return {
+ utils.get_type_name(dataclass_type).lower(): dataclass_type
+ for dataclass_type in type_arguments
+ }
+
    def add_subparsers(self, parser: ArgumentParser):
        """Add a subparser (subcommand) to `parser` for each dataclass type of this field.

        A subcommand is only marked `required` when the field has no default
        value (neither `default` nor `default_factory`).
        """
        assert self.is_subparser

        # add subparsers for each dataclass type in the field.
        default_value = self.field.default
        if default_value is dataclasses.MISSING:
            if self.field.default_factory is not dataclasses.MISSING:
                default_value = self.field.default_factory()

        add_subparser_kwargs = dict(
            title=self.name,
            description=self.help,
            dest=self.dest,
            parser_class=type(parser),
            required=(default_value is dataclasses.MISSING),
        )

        # `add_subparsers` only accepts the `required` keyword from Python 3.7 on;
        # on 3.6 it has to be set as an attribute after creation.
        if sys.version_info[:2] == (3, 6):
            required = add_subparser_kwargs.pop("required")
            subparsers = parser.add_subparsers(**add_subparser_kwargs)
            subparsers.required = required
        else:
            subparsers = parser.add_subparsers(**add_subparser_kwargs)

        if default_value is not dataclasses.MISSING:
            parser.set_defaults(**{self.dest: default_value})
        # subparsers.required = default_value is dataclasses.MISSING
        for subcommand, dataclass_type in self.subparsers_dict.items():
            logger.debug(f"adding subparser '{subcommand}' for type {dataclass_type}")
            subparser = subparsers.add_parser(subcommand)
            # Just for typing correctness, as we didn't explicitly change
            # the return type of subparsers.add_parser method.)
            subparser = cast("ArgumentParser", subparser)
            subparser.add_arguments(dataclass_type, dest=self.dest)
+
+ def equivalent_argparse_code(self):
+ arg_options = self.arg_options.copy()
+ arg_options_string = f"{{'type': {arg_options.pop('type', str).__qualname__}"
+ arg_options_string += str(arg_options).replace("{", ", ").replace(TEMPORARY_TOKEN, " ")
+ return f"group.add_argument(*{self.option_strings}, **{arg_options_string})"
+
+
+def only_keep_action_args(options: dict[str, Any], action: str | Any) -> dict[str, Any]:
+ """Remove all the arguments in `options` that aren't required by the Action.
+
+ Parameters
+ ----------
+ options : Dict[str, Any]
+ A dictionary of options that would usually be passed to
+ `add_arguments(*option_strings, **options)`.
+ action : Union[str, Any]
+ The action class or name.
+
+ Returns
+ -------
+ Dict[str, Any]
+ [description]
+ """
+ # TODO: explicitly test these custom actions?
+ argparse_action_classes: dict[str, type[argparse.Action]] = {
+ "store": argparse._StoreAction,
+ "store_const": argparse._StoreConstAction,
+ "store_true": argparse._StoreTrueAction,
+ "store_false": argparse._StoreFalseAction,
+ "append": argparse._AppendAction,
+ "append_const": argparse._AppendConstAction,
+ "count": argparse._CountAction,
+ "help": argparse._HelpAction,
+ "version": argparse._VersionAction,
+ "parsers": argparse._SubParsersAction,
+ }
+ if action not in argparse_action_classes:
+ # the provided `action` is not a standard argparse-action.
+ # We don't remove any of the provided options.
+ return options
+
+ # Remove all the keys that aren't needed by the action constructor:
+ action_class = argparse_action_classes[action]
+ argspec = inspect.getfullargspec(action_class)
+
+ if argspec.varargs is not None or argspec.varkw is not None:
+ # if the constructor takes variable arguments, pass all the options.
+ logger.debug("Constructor takes var args. returning all options.")
+ return options
+
+ args_to_keep = argspec.args + ["action"]
+
+ kept_options, deleted_options = utils.keep_keys(options, args_to_keep)
+ if deleted_options:
+ logger.debug(
+ f"Some auto-generated options were deleted, as they were "
+ f"not required by the Action constructor: {deleted_options}."
+ )
+ if deleted_options:
+ logger.debug(f"Kept options: \t{kept_options.keys()}")
+ logger.debug(f"Removed options: \t{deleted_options.keys()}")
+ return kept_options
diff --git a/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/wrapper.py b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff91928508baa2e332382405dd41a13a1efe8cd2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/simple_parsing/wrappers/wrapper.py
@@ -0,0 +1,49 @@
+"""Abstract Wrapper base-class for the FieldWrapper and DataclassWrapper."""
+
+from abc import ABC, abstractmethod
+from typing import List, Optional
+
+
class Wrapper(ABC):
    """Abstract base class for the FieldWrapper and DataclassWrapper.

    Provides the shared `dest` / `lineage` / `nesting_level` logic, which is
    derived from the `name` and `parent` properties that subclasses implement.
    """

    def __init__(self):
        # Cache slot for the last destination computed by `dest`.
        self._dest: Optional[str] = None

    @abstractmethod
    def equivalent_argparse_code(self) -> str:
        """Return a string of plain-argparse code equivalent to this wrapper."""

    @property
    @abstractmethod
    def name(self) -> str:
        """The name of the wrapped field or dataclass."""

    @property
    @abstractmethod
    def parent(self) -> Optional["Wrapper"]:
        """The wrapper this one is nested under, or None at the root."""

    @property
    def dest(self) -> str:
        """Where the attribute will be stored in the Namespace.

        A dotted path from the root wrapper down to this one, e.g. "config.optimizer.lr".
        """
        lineage_names: List[str] = [w.name for w in self.lineage()]
        # `lineage()` goes child -> root, so reverse to get root-first order.
        self._dest = ".".join(reversed([self.name] + lineage_names))
        return self._dest

    def lineage(self) -> List["Wrapper"]:
        """Return this wrapper's ancestors, from the immediate parent up to the root."""
        lineage: List[Wrapper] = []
        parent = self.parent
        while parent is not None:
            lineage.append(parent)
            parent = parent.parent
        return lineage

    @property
    def nesting_level(self) -> int:
        """The number of ancestors of this wrapper (0 for a top-level wrapper)."""
        # FIX: removed an unreachable duplicate implementation that followed
        # this return statement in the original.
        return len(self.lineage())