repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/adapters.py
src/requests/adapters.py
""" requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. """ import os.path import socket # noqa: F401 import typing import warnings from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError from urllib3.exceptions import HTTPError as _HTTPError from urllib3.exceptions import InvalidHeader as _InvalidHeader from urllib3.exceptions import ( LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ) from urllib3.exceptions import ProxyError as _ProxyError from urllib3.exceptions import ReadTimeoutError, ResponseError from urllib3.exceptions import SSLError as _SSLError from urllib3.poolmanager import PoolManager, proxy_from_url from urllib3.util import Timeout as TimeoutSauce from urllib3.util import parse_url from urllib3.util.retry import Retry from .auth import _basic_auth_str from .compat import basestring, urlparse from .cookies import extract_cookies_to_jar from .exceptions import ( ConnectionError, ConnectTimeout, InvalidHeader, InvalidProxyURL, InvalidSchema, InvalidURL, ProxyError, ReadTimeout, RetryError, SSLError, ) from .models import Response from .structures import CaseInsensitiveDict from .utils import ( DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, get_auth_from_url, get_encoding_from_headers, prepend_scheme_if_needed, select_proxy, urldefragauth, ) try: from urllib3.contrib.socks import SOCKSProxyManager except ImportError: def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") if typing.TYPE_CHECKING: from .models import PreparedRequest DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None def _urllib3_request_context( request: "PreparedRequest", verify: "bool | str | None", client_cert: "typing.Tuple[str, str] | str | None", poolmanager: "PoolManager", ) -> "(typing.Dict[str, typing.Any], typing.Dict[str, typing.Any])": host_params = {} pool_kwargs = {} 
parsed_request_url = urlparse(request.url) scheme = parsed_request_url.scheme.lower() port = parsed_request_url.port cert_reqs = "CERT_REQUIRED" if verify is False: cert_reqs = "CERT_NONE" elif isinstance(verify, str): if not os.path.isdir(verify): pool_kwargs["ca_certs"] = verify else: pool_kwargs["ca_cert_dir"] = verify pool_kwargs["cert_reqs"] = cert_reqs if client_cert is not None: if isinstance(client_cert, tuple) and len(client_cert) == 2: pool_kwargs["cert_file"] = client_cert[0] pool_kwargs["key_file"] = client_cert[1] else: # According to our docs, we allow users to specify just the client # cert path pool_kwargs["cert_file"] = client_cert host_params = { "scheme": scheme, "host": parsed_request_url.hostname, "port": port, } return host_params, pool_kwargs class BaseAdapter: """The Base Transport Adapter""" def __init__(self): super().__init__() def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError def close(self): """Cleans up adapter specific items.""" raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. 
Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session <Session>` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. :param pool_block: Whether the connection pool should block for connections. Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = [ "max_retries", "config", "_pool_connections", "_pool_maxsize", "_pool_block", ] def __init__( self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK, ): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super().__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return {attr: getattr(self, attr, None) for attr in self.__attrs__} def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # self.poolmanager uses a lambda function, which isn't pickleable. 
self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager( self._pool_connections, self._pool_maxsize, block=self._pool_block ) def init_poolmanager( self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs ): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. """ # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager( num_pools=connections, maxsize=maxsize, block=block, **pool_kwargs, ) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
:returns: ProxyManager :rtype: urllib3.ProxyManager """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] elif proxy.lower().startswith("socks"): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, username=username, password=password, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs, ) else: proxy_headers = self.proxy_headers(proxy) manager = self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs, ) return manager def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith("https") and verify: cert_loc = None # Allow self-specified cert location. 
if verify is not True: cert_loc = verify if not cert_loc: cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) if not cert_loc or not os.path.exists(cert_loc): raise OSError( f"Could not find a suitable TLS CA certificate bundle, " f"invalid path: {cert_loc}" ) conn.cert_reqs = "CERT_REQUIRED" if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = "CERT_NONE" conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise OSError( f"Could not find the TLS certificate file, " f"invalid path: {conn.cert_file}" ) if conn.key_file and not os.path.exists(conn.key_file): raise OSError( f"Could not find the TLS key file, invalid path: {conn.key_file}" ) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. :param resp: The urllib3 response object. :rtype: requests.Response """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, "status", None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode("utf-8") else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. 
response.request = req response.connection = self return response def build_connection_pool_key_attributes(self, request, verify, cert=None): """Build the PoolKey attributes used by urllib3 to return a connection. This looks at the PreparedRequest, the user-specified verify value, and the value of the cert parameter to determine what PoolKey values to use to select a connection from a given urllib3 Connection Pool. The SSL related pool key arguments are not consistently set. As of this writing, use the following to determine what keys may be in that dictionary: * If ``verify`` is ``True``, ``"ssl_context"`` will be set and will be the default Requests SSL Context * If ``verify`` is ``False``, ``"ssl_context"`` will not be set but ``"cert_reqs"`` will be set * If ``verify`` is a string, (i.e., it is a user-specified trust bundle) ``"ca_certs"`` will be set if the string is not a directory recognized by :py:func:`os.path.isdir`, otherwise ``"ca_cert_dir"`` will be set. * If ``"cert"`` is specified, ``"cert_file"`` will always be set. If ``"cert"`` is a tuple with a second item, ``"key_file"`` will also be present To override these settings, one may subclass this class, call this method and use the above logic to change parameters as desired. For example, if one wishes to use a custom :py:class:`ssl.SSLContext` one must both set ``"ssl_context"`` and based on what else they require, alter the other keys to ensure the desired behaviour. :param request: The PreparedReqest being sent over the connection. :type request: :class:`~requests.models.PreparedRequest` :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. :param cert: (optional) Any user-provided SSL certificate for client authentication (a.k.a., mTLS). 
This may be a string (i.e., just the path to a file which holds both certificate and key) or a tuple of length 2 with the certificate file path and key file path. :returns: A tuple of two dictionaries. The first is the "host parameters" portion of the Pool Key including scheme, hostname, and port. The second is a dictionary of SSLContext related parameters. """ return _urllib3_request_context(request, verify, cert, self.poolmanager) def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): """Returns a urllib3 connection for the given request and TLS settings. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` object to be sent over the connection. :param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. :param proxies: (optional) The proxies dictionary to apply to the request. :param cert: (optional) Any user-provided SSL certificate to be used for client authentication (a.k.a., mTLS). :rtype: urllib3.ConnectionPool """ proxy = select_proxy(request.url, proxies) try: host_params, pool_kwargs = self.build_connection_pool_key_attributes( request, verify, cert, ) except ValueError as e: raise InvalidURL(e, request=request) if proxy: proxy = prepend_scheme_if_needed(proxy, "http") proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL( "Please check proxy URL. It is malformed " "and could be missing the host." 
) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_host( **host_params, pool_kwargs=pool_kwargs ) else: # Only scheme should be lower case conn = self.poolmanager.connection_from_host( **host_params, pool_kwargs=pool_kwargs ) return conn def get_connection(self, url, proxies=None): """DEPRECATED: Users should move to `get_connection_with_tls_context` for all subclasses of HTTPAdapter using Requests>=2.32.2. Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool """ warnings.warn( ( "`get_connection` has been deprecated in favor of " "`get_connection_with_tls_context`. Custom HTTPAdapter subclasses " "will need to migrate for Requests>=2.32.2. Please see " "https://github.com/psf/requests/pull/6710 for more details." ), DeprecationWarning, ) proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, "http") proxy_url = parse_url(proxy) if not proxy_url.host: raise InvalidProxyURL( "Please check proxy URL. It is malformed " "and could be missing the host." ) proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. 
Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = proxy and scheme != "https" using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith("socks") url = request.path_url if url.startswith("//"): # Don't confuse urllib3 url = f"/{url.lstrip('/')}" if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The url of the proxy being used for this request. 
:rtype: dict """ headers = {} username, password = get_auth_from_url(proxy) if username: headers["Proxy-Authorization"] = _basic_auth_str(username, password) return headers def send( self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None ): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ try: conn = self.get_connection_with_tls_context( request, verify, proxies=proxies, cert=cert ) except LocationValueError as e: raise InvalidURL(e, request=request) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers( request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, ) chunked = not (request.body is None or "Content-Length" in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError: raise ValueError( f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, " f"or a single float to set both timeouts to the same value." 
) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout, chunked=chunked, ) except (ProtocolError, OSError) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. raise SSLError(e, request=request) raise ConnectionError(e, request=request) except ClosedPoolError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): # This branch is for urllib3 versions earlier than v1.22 raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) elif isinstance(e, _InvalidHeader): raise InvalidHeader(e, request=request) else: raise return self.build_response(request, resp)
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/_internal_utils.py
src/requests/_internal_utils.py
""" requests._internal_utils ~~~~~~~~~~~~~~ Provides utility functions that are consumed internally by Requests which depend on extremely few external helpers (such as compat) """ import re from .compat import builtin_str _VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$") _VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$") _VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$") _VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$") _HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR) _HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE) HEADER_VALIDATORS = { bytes: _HEADER_VALIDATORS_BYTE, str: _HEADER_VALIDATORS_STR, } def to_native_string(string, encoding="ascii"): """Given a string object, regardless of type, returns a representation of that string in the native string type, encoding and decoding where necessary. This assumes ASCII unless told otherwise. """ if isinstance(string, builtin_str): out = string else: out = string.decode(encoding) return out def unicode_is_ascii(u_string): """Determine if unicode string only contains ASCII characters. :param str u_string: unicode string to check. Must be unicode and not Python 2 `str`. :rtype: bool """ assert isinstance(u_string, str) try: u_string.encode("ascii") return True except UnicodeEncodeError: return False
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/__init__.py
src/requests/__init__.py
# __ # /__) _ _ _ _ _/ _ # / ( (- (/ (/ (- _) / _) # / """ Requests HTTP Library ~~~~~~~~~~~~~~~~~~~~~ Requests is an HTTP library, written in Python, for human beings. Basic GET usage: >>> import requests >>> r = requests.get('https://www.python.org') >>> r.status_code 200 >>> b'Python is a programming language' in r.content True ... or POST: >>> payload = dict(key1='value1', key2='value2') >>> r = requests.post('https://httpbin.org/post', data=payload) >>> print(r.text) { ... "form": { "key1": "value1", "key2": "value2" }, ... } The other HTTP methods are supported - see `requests.api`. Full documentation is at <https://requests.readthedocs.io>. :copyright: (c) 2017 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ import warnings import urllib3 from .exceptions import RequestsDependencyWarning try: from charset_normalizer import __version__ as charset_normalizer_version except ImportError: charset_normalizer_version = None try: from chardet import __version__ as chardet_version except ImportError: chardet_version = None def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): urllib3_version = urllib3_version.split(".") assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. # Sometimes, urllib3 only reports its version as 16.1. if len(urllib3_version) == 2: urllib3_version.append("0") # Check urllib3 for compatibility. major, minor, patch = urllib3_version # noqa: F811 major, minor, patch = int(major), int(minor), int(patch) # urllib3 >= 1.21.1 assert major >= 1 if major == 1: assert minor >= 21 # Check charset_normalizer for compatibility. 
if chardet_version: major, minor, patch = chardet_version.split(".")[:3] major, minor, patch = int(major), int(minor), int(patch) # chardet_version >= 3.0.2, < 6.0.0 assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) elif charset_normalizer_version: major, minor, patch = charset_normalizer_version.split(".")[:3] major, minor, patch = int(major), int(minor), int(patch) # charset_normalizer >= 2.0.0 < 4.0.0 assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) else: warnings.warn( "Unable to find acceptable character detection dependency " "(chardet or charset_normalizer).", RequestsDependencyWarning, ) def _check_cryptography(cryptography_version): # cryptography < 1.3.4 try: cryptography_version = list(map(int, cryptography_version.split("."))) except ValueError: return if cryptography_version < [1, 3, 4]: warning = "Old version of cryptography ({}) may cause slowdown.".format( cryptography_version ) warnings.warn(warning, RequestsDependencyWarning) # Check imported dependencies for compatibility. try: check_compatibility( urllib3.__version__, chardet_version, charset_normalizer_version ) except (AssertionError, ValueError): warnings.warn( "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " "version!".format( urllib3.__version__, chardet_version, charset_normalizer_version ), RequestsDependencyWarning, ) # Attempt to enable urllib3's fallback for SNI support # if the standard library doesn't support SNI or the # 'ssl' library isn't available. try: try: import ssl except ImportError: ssl = None if not getattr(ssl, "HAS_SNI", False): from urllib3.contrib import pyopenssl pyopenssl.inject_into_urllib3() # Check cryptography version from cryptography import __version__ as cryptography_version _check_cryptography(cryptography_version) except ImportError: pass # urllib3's DependencyWarnings should be silenced. 
from urllib3.exceptions import DependencyWarning warnings.simplefilter("ignore", DependencyWarning) # Set default logging handler to avoid "No handler found" warnings. import logging from logging import NullHandler from . import packages, utils from .__version__ import ( __author__, __author_email__, __build__, __cake__, __copyright__, __description__, __license__, __title__, __url__, __version__, ) from .api import delete, get, head, options, patch, post, put, request from .exceptions import ( ConnectionError, ConnectTimeout, FileModeWarning, HTTPError, JSONDecodeError, ReadTimeout, RequestException, Timeout, TooManyRedirects, URLRequired, ) from .models import PreparedRequest, Request, Response from .sessions import Session, session from .status_codes import codes logging.getLogger(__name__).addHandler(NullHandler()) # FileModeWarnings go off per the default. warnings.simplefilter("default", FileModeWarning, append=True)
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/structures.py
src/requests/structures.py
""" requests.structures ~~~~~~~~~~~~~~~~~~~ Data structures that power Requests. """ from collections import OrderedDict from .compat import Mapping, MutableMapping class CaseInsensitiveDict(MutableMapping): """A case-insensitive ``dict``-like object. Implements all methods and operations of ``MutableMapping`` as well as dict's ``copy``. Also provides ``lower_items``. All keys are expected to be strings. The structure remembers the case of the last key to be set, and ``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` will contain case-sensitive keys. However, querying and contains testing is case insensitive:: cid = CaseInsensitiveDict() cid['Accept'] = 'application/json' cid['aCCEPT'] == 'application/json' # True list(cid) == ['Accept'] # True For example, ``headers['content-encoding']`` will return the value of a ``'Content-Encoding'`` response header, regardless of how the header name was originally stored. If the constructor, ``.update``, or equality comparison operations are given keys that have equal ``.lower()``s, the behavior is undefined. """ def __init__(self, data=None, **kwargs): self._store = OrderedDict() if data is None: data = {} self.update(data, **kwargs) def __setitem__(self, key, value): # Use the lowercased key for lookups, but store the actual # key alongside the value. 
self._store[key.lower()] = (key, value) def __getitem__(self, key): return self._store[key.lower()][1] def __delitem__(self, key): del self._store[key.lower()] def __iter__(self): return (casedkey for casedkey, mappedvalue in self._store.values()) def __len__(self): return len(self._store) def lower_items(self): """Like iteritems(), but with all lowercase keys.""" return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items()) def __eq__(self, other): if isinstance(other, Mapping): other = CaseInsensitiveDict(other) else: return NotImplemented # Compare insensitively return dict(self.lower_items()) == dict(other.lower_items()) # Copy is required def copy(self): return CaseInsensitiveDict(self._store.values()) def __repr__(self): return str(dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" def __init__(self, name=None): self.name = name super().__init__() def __repr__(self): return f"<lookup '{self.name}'>" def __getitem__(self, key): # We allow fall-through here, so values default to None return self.__dict__.get(key, None) def get(self, key, default=None): return self.__dict__.get(key, default)
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/sessions.py
src/requests/sessions.py
""" requests.sessions ~~~~~~~~~~~~~~~~~ This module provides a Session object to manage and persist settings across requests (cookies, auth, proxies). """ import os import sys import time from collections import OrderedDict from datetime import timedelta from ._internal_utils import to_native_string from .adapters import HTTPAdapter from .auth import _basic_auth_str from .compat import Mapping, cookielib, urljoin, urlparse from .cookies import ( RequestsCookieJar, cookiejar_from_dict, extract_cookies_to_jar, merge_cookies, ) from .exceptions import ( ChunkedEncodingError, ContentDecodingError, InvalidSchema, TooManyRedirects, ) from .hooks import default_hooks, dispatch_hook # formerly defined here, reexposed here for backward compatibility from .models import ( # noqa: F401 DEFAULT_REDIRECT_LIMIT, REDIRECT_STATI, PreparedRequest, Request, ) from .status_codes import codes from .structures import CaseInsensitiveDict from .utils import ( # noqa: F401 DEFAULT_PORTS, default_headers, get_auth_from_url, get_environ_proxies, get_netrc_auth, requote_uri, resolve_proxies, rewind_body, should_bypass_proxies, to_key_val_list, ) # Preferred clock, based on which one is more accurate on a given system. if sys.platform == "win32": preferred_clock = time.perf_counter else: preferred_clock = time.time def merge_setting(request_setting, session_setting, dict_class=OrderedDict): """Determines appropriate setting for a given request, taking into account the explicit setting on that request, and the setting in the session. If a setting is a dictionary, they will be merged together using `dict_class` """ if session_setting is None: return request_setting if request_setting is None: return session_setting # Bypass if not a dictionary (e.g. 
verify) if not ( isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) ): return request_setting merged_setting = dict_class(to_key_val_list(session_setting)) merged_setting.update(to_key_val_list(request_setting)) # Remove keys that are set to None. Extract keys first to avoid altering # the dictionary during iteration. none_keys = [k for (k, v) in merged_setting.items() if v is None] for key in none_keys: del merged_setting[key] return merged_setting def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): """Properly merges both requests and session hooks. This is necessary because when request_hooks == {'response': []}, the merge breaks Session hooks entirely. """ if session_hooks is None or session_hooks.get("response") == []: return request_hooks if request_hooks is None or request_hooks.get("response") == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class) class SessionRedirectMixin: def get_redirect_target(self, resp): """Receives a Response. Returns a redirect URI or ``None``""" # Due to the nature of how requests processes redirects this method will # be called at least once upon the original response and at least twice # on each subsequent redirect response (if any). # If a custom mixin is used to handle this logic, it may be advantageous # to cache the redirect location onto the response object as a private # attribute. if resp.is_redirect: location = resp.headers["location"] # Currently the underlying http module on py3 decode headers # in latin1, but empirical evidence suggests that latin1 is very # rarely used with non-ASCII characters in HTTP headers. # It is more likely to get UTF8 header rather than latin1. # This causes incorrect handling of UTF8 encoded location headers. # To solve this, we re-encode the location in latin1. 
location = location.encode("latin1") return to_native_string(location, "utf8") return None def should_strip_auth(self, old_url, new_url): """Decide whether Authorization header should be removed when redirecting""" old_parsed = urlparse(old_url) new_parsed = urlparse(new_url) if old_parsed.hostname != new_parsed.hostname: return True # Special case: allow http -> https redirect when using the standard # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. if ( old_parsed.scheme == "http" and old_parsed.port in (80, None) and new_parsed.scheme == "https" and new_parsed.port in (443, None) ): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) if ( not changed_scheme and old_parsed.port in default_port and new_parsed.port in default_port ): return False # Standard case: root URI must match return changed_port or changed_scheme def resolve_redirects( self, resp, req, stream=False, timeout=None, verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs, ): """Receives a Response. Returns a generator of Responses or Requests.""" hist = [] # keep track of history url = self.get_redirect_target(resp) previous_fragment = urlparse(req.url).fragment while url: prepared_request = req.copy() # Update history and keep track of redirects. 
# resp.history must ignore the original request in this loop hist.append(resp) resp.history = hist[1:] try: resp.content # Consume socket so it can be released except (ChunkedEncodingError, ContentDecodingError, RuntimeError): resp.raw.read(decode_content=False) if len(resp.history) >= self.max_redirects: raise TooManyRedirects( f"Exceeded {self.max_redirects} redirects.", response=resp ) # Release the connection back into the pool. resp.close() # Handle redirection without scheme (see: RFC 1808 Section 4) if url.startswith("//"): parsed_rurl = urlparse(resp.url) url = ":".join([to_native_string(parsed_rurl.scheme), url]) # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) parsed = urlparse(url) if parsed.fragment == "" and previous_fragment: parsed = parsed._replace(fragment=previous_fragment) elif parsed.fragment: previous_fragment = parsed.fragment url = parsed.geturl() # Facilitate relative 'location' headers, as allowed by RFC 7231. # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') # Compliant with RFC3986, we percent encode the url. if not parsed.netloc: url = urljoin(resp.url, requote_uri(url)) else: url = requote_uri(url) prepared_request.url = to_native_string(url) self.rebuild_method(prepared_request, resp) # https://github.com/psf/requests/issues/1084 if resp.status_code not in ( codes.temporary_redirect, codes.permanent_redirect, ): # https://github.com/psf/requests/issues/3490 purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") for header in purged_headers: prepared_request.headers.pop(header, None) prepared_request.body = None headers = prepared_request.headers headers.pop("Cookie", None) # Extract any cookies sent on the response to the cookiejar # in the new request. Because we've mutated our copied prepared # request, use the old one that we haven't yet touched. 
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) merge_cookies(prepared_request._cookies, self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) # Rebuild auth and proxy information. proxies = self.rebuild_proxies(prepared_request, proxies) self.rebuild_auth(prepared_request, resp) # A failed tell() sets `_body_position` to `object()`. This non-None # value ensures `rewindable` will be True, allowing us to raise an # UnrewindableBodyError, instead of hanging the connection. rewindable = prepared_request._body_position is not None and ( "Content-Length" in headers or "Transfer-Encoding" in headers ) # Attempt to rewind consumed file-like object. if rewindable: rewind_body(prepared_request) # Override the original request. req = prepared_request if yield_requests: yield req else: resp = self.send( req, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies, allow_redirects=False, **adapter_kwargs, ) extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) # extract redirect url, if any, for the next loop url = self.get_redirect_target(resp) yield resp def rebuild_auth(self, prepared_request, response): """When being redirected we may want to strip authentication from the request to avoid leaking credentials. This method intelligently removes and reapplies authentication where possible to avoid credential loss. """ headers = prepared_request.headers url = prepared_request.url if "Authorization" in headers and self.should_strip_auth( response.request.url, url ): # If we get redirected to a new host, we should strip out any # authentication headers. del headers["Authorization"] # .netrc might have more auth for us on our new host. new_auth = get_netrc_auth(url) if self.trust_env else None if new_auth is not None: prepared_request.prepare_auth(new_auth) def rebuild_proxies(self, prepared_request, proxies): """This method re-evaluates the proxy configuration by considering the environment variables. 
If we are redirected to a URL covered by NO_PROXY, we strip the proxy configuration. Otherwise, we set missing proxy keys for this URL (in case they were stripped by a previous redirect). This method also replaces the Proxy-Authorization header where necessary. :rtype: dict """ headers = prepared_request.headers scheme = urlparse(prepared_request.url).scheme new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) if "Proxy-Authorization" in headers: del headers["Proxy-Authorization"] try: username, password = get_auth_from_url(new_proxies[scheme]) except KeyError: username, password = None, None # urllib3 handles proxy authorization for us in the standard adapter. # Avoid appending this to TLS tunneled requests where it may be leaked. if not scheme.startswith("https") and username and password: headers["Proxy-Authorization"] = _basic_auth_str(username, password) return new_proxies def rebuild_method(self, prepared_request, response): """When being redirected we may want to change the method of the request based on certain specs or browser behavior. """ method = prepared_request.method # https://tools.ietf.org/html/rfc7231#section-6.4.4 if response.status_code == codes.see_other and method != "HEAD": method = "GET" # Do what the browsers do, despite standards... # First, turn 302s into GETs. if response.status_code == codes.found and method != "HEAD": method = "GET" # Second, if a POST is responded to with a 301, turn it into a GET. # This bizarre behaviour is explained in Issue 1704. if response.status_code == codes.moved and method == "POST": method = "GET" prepared_request.method = method class Session(SessionRedirectMixin): """A Requests session. Provides cookie persistence, connection-pooling, and configuration. Basic Usage:: >>> import requests >>> s = requests.Session() >>> s.get('https://httpbin.org/get') <Response [200]> Or as a context manager:: >>> with requests.Session() as s: ... 
s.get('https://httpbin.org/get') <Response [200]> """ __attrs__ = [ "headers", "cookies", "auth", "proxies", "hooks", "params", "verify", "cert", "adapters", "stream", "trust_env", "max_redirects", ] def __init__(self): #: A case-insensitive dictionary of headers to be sent on each #: :class:`Request <Request>` sent from this #: :class:`Session <Session>`. self.headers = default_headers() #: Default Authentication tuple or object to attach to #: :class:`Request <Request>`. self.auth = None #: Dictionary mapping protocol or protocol and host to the URL of the proxy #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to #: be used on each :class:`Request <Request>`. self.proxies = {} #: Event-handling hooks. self.hooks = default_hooks() #: Dictionary of querystring data to attach to each #: :class:`Request <Request>`. The dictionary values may be lists for #: representing multivalued query parameters. self.params = {} #: Stream response content default. self.stream = False #: SSL Verification default. #: Defaults to `True`, requiring requests to verify the TLS certificate at the #: remote end. #: If verify is set to `False`, requests will accept any TLS certificate #: presented by the server, and will ignore hostname mismatches and/or #: expired certificates, which will make your application vulnerable to #: man-in-the-middle (MitM) attacks. #: Only set this to `False` for testing. self.verify = True #: SSL client certificate default, if String, path to ssl client #: cert file (.pem). If Tuple, ('cert', 'key') pair. self.cert = None #: Maximum number of redirects allowed. If the request exceeds this #: limit, a :class:`TooManyRedirects` exception is raised. #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is #: 30. self.max_redirects = DEFAULT_REDIRECT_LIMIT #: Trust environment settings for proxy configuration, default #: authentication and similar. 
self.trust_env = True #: A CookieJar containing all currently outstanding cookies set on this #: session. By default it is a #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but #: may be any other ``cookielib.CookieJar`` compatible object. self.cookies = cookiejar_from_dict({}) # Default connection adapters. self.adapters = OrderedDict() self.mount("https://", HTTPAdapter()) self.mount("http://", HTTPAdapter()) def __enter__(self): return self def __exit__(self, *args): self.close() def prepare_request(self, request): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. The :class:`PreparedRequest` has settings merged from the :class:`Request <Request>` instance and those of the :class:`Session`. :param request: :class:`Request` instance to prepare with this session's settings. :rtype: requests.PreparedRequest """ cookies = request.cookies or {} # Bootstrap CookieJar. if not isinstance(cookies, cookielib.CookieJar): cookies = cookiejar_from_dict(cookies) # Merge with session cookies merged_cookies = merge_cookies( merge_cookies(RequestsCookieJar(), self.cookies), cookies ) # Set environment's basic authentication if not explicitly set. 
auth = request.auth if self.trust_env and not auth and not self.auth: auth = get_netrc_auth(request.url) p = PreparedRequest() p.prepare( method=request.method.upper(), url=request.url, files=request.files, data=request.data, json=request.json, headers=merge_setting( request.headers, self.headers, dict_class=CaseInsensitiveDict ), params=merge_setting(request.params, self.params), auth=merge_setting(auth, self.auth), cookies=merged_cookies, hooks=merge_hooks(request.hooks, self.hooks), ) return p def request( self, method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None, ): """Constructs a :class:`Request <Request>`, prepares it and sends it. Returns :class:`Response <Response>` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How many seconds to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. 
:type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol or protocol and hostname to the URL of the proxy. :param hooks: (optional) Dictionary mapping hook name to one event or list of events, event must be callable. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Defaults to ``True``. When set to ``False``, requests will accept any TLS certificate presented by the server, and will ignore hostname mismatches and/or expired certificates, which will make your application vulnerable to man-in-the-middle (MitM) attacks. Setting verify to ``False`` may be useful during local development or testing. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. :rtype: requests.Response """ # Create the Request. req = Request( method=method.upper(), url=url, headers=headers, files=files, data=data or {}, json=json, params=params or {}, auth=auth, cookies=cookies, hooks=hooks, ) prep = self.prepare_request(req) proxies = proxies or {} settings = self.merge_environment_settings( prep.url, proxies, stream, verify, cert ) # Send the request. send_kwargs = { "timeout": timeout, "allow_redirects": allow_redirects, } send_kwargs.update(settings) resp = self.send(prep, **send_kwargs) return resp def get(self, url, **kwargs): r"""Sends a GET request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault("allow_redirects", True) return self.request("GET", url, **kwargs) def options(self, url, **kwargs): r"""Sends a OPTIONS request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. 
:param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault("allow_redirects", True) return self.request("OPTIONS", url, **kwargs) def head(self, url, **kwargs): r"""Sends a HEAD request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ kwargs.setdefault("allow_redirects", False) return self.request("HEAD", url, **kwargs) def post(self, url, data=None, json=None, **kwargs): r"""Sends a POST request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param json: (optional) json to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("POST", url, data=data, json=json, **kwargs) def put(self, url, data=None, **kwargs): r"""Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("PUT", url, data=data, **kwargs) def patch(self, url, data=None, **kwargs): r"""Sends a PATCH request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary, list of tuples, bytes, or file-like object to send in the body of the :class:`Request`. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("PATCH", url, data=data, **kwargs) def delete(self, url, **kwargs): r"""Sends a DELETE request. Returns :class:`Response` object. 
:param url: URL for the new :class:`Request` object. :param \*\*kwargs: Optional arguments that ``request`` takes. :rtype: requests.Response """ return self.request("DELETE", url, **kwargs) def send(self, request, **kwargs): """Send a given PreparedRequest. :rtype: requests.Response """ # Set defaults that the hooks can utilize to ensure they always have # the correct parameters to reproduce the previous request. kwargs.setdefault("stream", self.stream) kwargs.setdefault("verify", self.verify) kwargs.setdefault("cert", self.cert) if "proxies" not in kwargs: kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) # It's possible that users might accidentally send a Request object. # Guard against that specific failure case. if isinstance(request, Request): raise ValueError("You can only send PreparedRequests.") # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop("allow_redirects", True) stream = kwargs.get("stream") hooks = request.hooks # Get the appropriate adapter to use adapter = self.get_adapter(url=request.url) # Start time (approximately) of the request start = preferred_clock() # Send the request r = adapter.send(request, **kwargs) # Total elapsed time of the request (approximately) elapsed = preferred_clock() - start r.elapsed = timedelta(seconds=elapsed) # Response manipulation hooks r = dispatch_hook("response", hooks, r, **kwargs) # Persist cookies if r.history: # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) extract_cookies_to_jar(self.cookies, request, r.raw) # Resolve redirects if allowed. if allow_redirects: # Redirect resolving generator. gen = self.resolve_redirects(r, request, **kwargs) history = [resp for resp in gen] else: history = [] # Shuffle things around if there's history. 
if history: # Insert the first (original) request at the start history.insert(0, r) # Get the last request made r = history.pop() r.history = history # If redirects aren't being followed, store the response on the Request for Response.next(). if not allow_redirects: try: r._next = next( self.resolve_redirects(r, request, yield_requests=True, **kwargs) ) except StopIteration: pass if not stream: r.content return r def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. no_proxy = proxies.get("no_proxy") if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for k, v in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration # and be compatible with cURL. if verify is True or verify is None: verify = ( os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or verify ) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} def get_adapter(self, url): """ Returns the appropriate connection adapter for the given URL. :rtype: requests.adapters.BaseAdapter """ for prefix, adapter in self.adapters.items(): if url.lower().startswith(prefix.lower()): return adapter # Nothing matches :-/ raise InvalidSchema(f"No connection adapters were found for {url!r}") def close(self): """Closes all adapters and as such the session""" for v in self.adapters.values(): v.close() def mount(self, prefix, adapter): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by prefix length. 
""" self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) def __getstate__(self): state = {attr: getattr(self, attr, None) for attr in self.__attrs__} return state def __setstate__(self, state): for attr, value in state.items(): setattr(self, attr, value) def session(): """ Returns a :class:`Session` for context-management. .. deprecated:: 1.0.0 This method has been deprecated since version 1.0.0 and is only kept for backwards compatibility. New code should use :class:`~requests.sessions.Session` to create a session. This may be removed at a future date. :rtype: Session """ return Session()
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/certs.py
src/requests/certs.py
#!/usr/bin/env python """ requests.certs ~~~~~~~~~~~~~~ This module returns the preferred default CA certificate bundle. There is only one — the one from the certifi package. If you are packaging Requests, e.g., for a Linux distribution or a managed environment, you can change the definition of where() to return a separately packaged CA bundle. """ from certifi import where if __name__ == "__main__": print(where())
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/hooks.py
src/requests/hooks.py
""" requests.hooks ~~~~~~~~~~~~~~ This module provides the capabilities for the Requests hooks system. Available hooks: ``response``: The response generated from a Request. """ HOOKS = ["response"] def default_hooks(): return {event: [] for event in HOOKS} # TODO: response is the only one def dispatch_hook(key, hooks, hook_data, **kwargs): """Dispatches a hook dictionary on a given piece of data.""" hooks = hooks or {} hooks = hooks.get(key) if hooks: if hasattr(hooks, "__call__"): hooks = [hooks] for hook in hooks: _hook_data = hook(hook_data, **kwargs) if _hook_data is not None: hook_data = _hook_data return hook_data
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/auth.py
src/requests/auth.py
""" requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import hashlib import os import re import threading import time import warnings from base64 import b64encode from ._internal_utils import to_native_string from .compat import basestring, str, urlparse from .cookies import extract_cookies_to_jar from .utils import parse_dict_header CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" CONTENT_TYPE_MULTI_PART = "multipart/form-data" def _basic_auth_str(username, password): """Returns a Basic Auth string.""" # "I want us to put a big-ol' comment on top of it that # says that this behaviour is dumb but we need to preserve # it because people are relying on it." # - Lukasa # # These are here solely to maintain backwards compatibility # for things like ints. This will be removed in 3.0.0. if not isinstance(username, basestring): warnings.warn( "Non-string usernames will no longer be supported in Requests " "3.0.0. Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(username), category=DeprecationWarning, ) username = str(username) if not isinstance(password, basestring): warnings.warn( "Non-string passwords will no longer be supported in Requests " "3.0.0. 
Please convert the object you've passed in ({!r}) to " "a string or bytes object in the near future to avoid " "problems.".format(type(password)), category=DeprecationWarning, ) password = str(password) # -- End Removal -- if isinstance(username, str): username = username.encode("latin1") if isinstance(password, str): password = password.encode("latin1") authstr = "Basic " + to_native_string( b64encode(b":".join((username, password))).strip() ) return authstr class AuthBase: """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError("Auth hooks must be callable.") class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __eq__(self, other): return all( [ self.username == getattr(other, "username", None), self.password == getattr(other, "password", None), ] ) def __ne__(self, other): return not self == other def __call__(self, r): r.headers["Authorization"] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password # Keep state in per-thread local storage self._thread_local = threading.local() def init_per_thread_state(self): # Ensure state is initialized just once per-thread if not hasattr(self._thread_local, "init"): self._thread_local.init = True self._thread_local.last_nonce = "" self._thread_local.nonce_count = 0 self._thread_local.chal = {} self._thread_local.pos = None self._thread_local.num_401_calls = None def build_digest_header(self, method, url): """ :rtype: str """ 
realm = self._thread_local.chal["realm"] nonce = self._thread_local.chal["nonce"] qop = self._thread_local.chal.get("qop") algorithm = self._thread_local.chal.get("algorithm") opaque = self._thread_local.chal.get("opaque") hash_utf8 = None if algorithm is None: _algorithm = "MD5" else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == "MD5" or _algorithm == "MD5-SESS": def md5_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == "SHA": def sha_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 elif _algorithm == "SHA-256": def sha256_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.sha256(x).hexdigest() hash_utf8 = sha256_utf8 elif _algorithm == "SHA-512": def sha512_utf8(x): if isinstance(x, str): x = x.encode("utf-8") return hashlib.sha512(x).hexdigest() hash_utf8 = sha512_utf8 KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731 if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) #: path is request-uri defined in RFC 2616 which should not be empty path = p_parsed.path or "/" if p_parsed.query: path += f"?{p_parsed.query}" A1 = f"{self.username}:{realm}:{self.password}" A2 = f"{method}:{path}" HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self._thread_local.last_nonce: self._thread_local.nonce_count += 1 else: self._thread_local.nonce_count = 1 ncvalue = f"{self._thread_local.nonce_count:08x}" s = str(self._thread_local.nonce_count).encode("utf-8") s += nonce.encode("utf-8") s += time.ctime().encode("utf-8") s += os.urandom(8) cnonce = hashlib.sha1(s).hexdigest()[:16] if _algorithm == "MD5-SESS": HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}") if not qop: respdig = KD(HA1, f"{nonce}:{HA2}") elif qop == "auth" or "auth" in qop.split(","): noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}" respdig = KD(HA1, 
noncebit) else: # XXX handle auth-int. return None self._thread_local.last_nonce = nonce # XXX should the partial digests be encoded too? base = ( f'username="{self.username}", realm="{realm}", nonce="{nonce}", ' f'uri="{path}", response="{respdig}"' ) if opaque: base += f', opaque="{opaque}"' if algorithm: base += f', algorithm="{algorithm}"' if entdig: base += f', digest="{entdig}"' if qop: base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"' return f"Digest {base}" def handle_redirect(self, r, **kwargs): """Reset num_401_calls counter on redirects.""" if r.is_redirect: self._thread_local.num_401_calls = 1 def handle_401(self, r, **kwargs): """ Takes the given response and tries digest-auth, if needed. :rtype: requests.Response """ # If response is not 4xx, do not auth # See https://github.com/psf/requests/issues/3772 if not 400 <= r.status_code < 500: self._thread_local.num_401_calls = 1 return r if self._thread_local.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. r.request.body.seek(self._thread_local.pos) s_auth = r.headers.get("www-authenticate", "") if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2: self._thread_local.num_401_calls += 1 pat = re.compile(r"digest ", flags=re.IGNORECASE) self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. 
r.content r.close() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers["Authorization"] = self.build_digest_header( prep.method, prep.url ) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r self._thread_local.num_401_calls = 1 return r def __call__(self, r): # Initialize per-thread state, if needed self.init_per_thread_state() # If we have a saved nonce, skip the 401 if self._thread_local.last_nonce: r.headers["Authorization"] = self.build_digest_header(r.method, r.url) try: self._thread_local.pos = r.body.tell() except AttributeError: # In the case of HTTPDigestAuth being reused and the body of # the previous request was a file-like object, pos has the # file position of the previous body. Ensure it's set to # None. self._thread_local.pos = None r.register_hook("response", self.handle_401) r.register_hook("response", self.handle_redirect) self._thread_local.num_401_calls = 1 return r def __eq__(self, other): return all( [ self.username == getattr(other, "username", None), self.password == getattr(other, "password", None), ] ) def __ne__(self, other): return not self == other
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/__version__.py
src/requests/__version__.py
# .-. .-. .-. . . .-. .-. .-. .-. # |( |- |.| | | |- `-. | `-. # ' ' `-' `-`.`-' `-' `-' ' `-' __title__ = "requests" __description__ = "Python HTTP for Humans." __url__ = "https://requests.readthedocs.io" __version__ = "2.32.5" __build__ = 0x023205 __author__ = "Kenneth Reitz" __author_email__ = "me@kennethreitz.org" __license__ = "Apache-2.0" __copyright__ = "Copyright Kenneth Reitz" __cake__ = "\u2728 \U0001f370 \u2728"
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/help.py
src/requests/help.py
"""Module containing bug report helper(s).""" import json import platform import ssl import sys import idna import urllib3 from . import __version__ as requests_version try: import charset_normalizer except ImportError: charset_normalizer = None try: import chardet except ImportError: chardet = None try: from urllib3.contrib import pyopenssl except ImportError: pyopenssl = None OpenSSL = None cryptography = None else: import cryptography import OpenSSL def _implementation(): """Return a dict with the Python implementation and version. Provide both the name and the version of the Python implementation currently running. For example, on CPython 3.10.3 it will return {'name': 'CPython', 'version': '3.10.3'}. This function works best on CPython and PyPy: in particular, it probably doesn't work for Jython or IronPython. Future investigation should be done to work out the correct shape of the code for those platforms. """ implementation = platform.python_implementation() if implementation == "CPython": implementation_version = platform.python_version() elif implementation == "PyPy": implementation_version = "{}.{}.{}".format( sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro, ) if sys.pypy_version_info.releaselevel != "final": implementation_version = "".join( [implementation_version, sys.pypy_version_info.releaselevel] ) elif implementation == "Jython": implementation_version = platform.python_version() # Complete Guess elif implementation == "IronPython": implementation_version = platform.python_version() # Complete Guess else: implementation_version = "Unknown" return {"name": implementation, "version": implementation_version} def info(): """Generate information for a bug report.""" try: platform_info = { "system": platform.system(), "release": platform.release(), } except OSError: platform_info = { "system": "Unknown", "release": "Unknown", } implementation_info = _implementation() urllib3_info = {"version": urllib3.__version__} 
charset_normalizer_info = {"version": None} chardet_info = {"version": None} if charset_normalizer: charset_normalizer_info = {"version": charset_normalizer.__version__} if chardet: chardet_info = {"version": chardet.__version__} pyopenssl_info = { "version": None, "openssl_version": "", } if OpenSSL: pyopenssl_info = { "version": OpenSSL.__version__, "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}", } cryptography_info = { "version": getattr(cryptography, "__version__", ""), } idna_info = { "version": getattr(idna, "__version__", ""), } system_ssl = ssl.OPENSSL_VERSION_NUMBER system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""} return { "platform": platform_info, "implementation": implementation_info, "system_ssl": system_ssl_info, "using_pyopenssl": pyopenssl is not None, "using_charset_normalizer": chardet is None, "pyOpenSSL": pyopenssl_info, "urllib3": urllib3_info, "chardet": chardet_info, "charset_normalizer": charset_normalizer_info, "cryptography": cryptography_info, "idna": idna_info, "requests": { "version": requests_version, }, } def main(): """Pretty-print the bug information as JSON.""" print(json.dumps(info(), sort_keys=True, indent=2)) if __name__ == "__main__": main()
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/status_codes.py
src/requests/status_codes.py
r""" The ``codes`` object defines a mapping from common names for HTTP statuses to their numerical codes, accessible either as attributes or as dictionary items. Example:: >>> import requests >>> requests.codes['temporary_redirect'] 307 >>> requests.codes.teapot 418 >>> requests.codes['\o/'] 200 Some codes have multiple names, and both upper- and lower-case versions of the names are allowed. For example, ``codes.ok``, ``codes.OK``, and ``codes.okay`` all correspond to the HTTP status code 200. """ from .structures import LookupDict _codes = { # Informational. 100: ("continue",), 101: ("switching_protocols",), 102: ("processing", "early-hints"), 103: ("checkpoint",), 122: ("uri_too_long", "request_uri_too_long"), 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"), 201: ("created",), 202: ("accepted",), 203: ("non_authoritative_info", "non_authoritative_information"), 204: ("no_content",), 205: ("reset_content", "reset"), 206: ("partial_content", "partial"), 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"), 208: ("already_reported",), 226: ("im_used",), # Redirection. 300: ("multiple_choices",), 301: ("moved_permanently", "moved", "\\o-"), 302: ("found",), 303: ("see_other", "other"), 304: ("not_modified",), 305: ("use_proxy",), 306: ("switch_proxy",), 307: ("temporary_redirect", "temporary_moved", "temporary"), 308: ( "permanent_redirect", "resume_incomplete", "resume", ), # "resume" and "resume_incomplete" to be removed in 3.0 # Client Error. 
400: ("bad_request", "bad"), 401: ("unauthorized",), 402: ("payment_required", "payment"), 403: ("forbidden",), 404: ("not_found", "-o-"), 405: ("method_not_allowed", "not_allowed"), 406: ("not_acceptable",), 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"), 408: ("request_timeout", "timeout"), 409: ("conflict",), 410: ("gone",), 411: ("length_required",), 412: ("precondition_failed", "precondition"), 413: ("request_entity_too_large", "content_too_large"), 414: ("request_uri_too_large", "uri_too_long"), 415: ("unsupported_media_type", "unsupported_media", "media_type"), 416: ( "requested_range_not_satisfiable", "requested_range", "range_not_satisfiable", ), 417: ("expectation_failed",), 418: ("im_a_teapot", "teapot", "i_am_a_teapot"), 421: ("misdirected_request",), 422: ("unprocessable_entity", "unprocessable", "unprocessable_content"), 423: ("locked",), 424: ("failed_dependency", "dependency"), 425: ("unordered_collection", "unordered", "too_early"), 426: ("upgrade_required", "upgrade"), 428: ("precondition_required", "precondition"), 429: ("too_many_requests", "too_many"), 431: ("header_fields_too_large", "fields_too_large"), 444: ("no_response", "none"), 449: ("retry_with", "retry"), 450: ("blocked_by_windows_parental_controls", "parental_controls"), 451: ("unavailable_for_legal_reasons", "legal_reasons"), 499: ("client_closed_request",), # Server Error. 
500: ("internal_server_error", "server_error", "/o\\", "✗"), 501: ("not_implemented",), 502: ("bad_gateway",), 503: ("service_unavailable", "unavailable"), 504: ("gateway_timeout",), 505: ("http_version_not_supported", "http_version"), 506: ("variant_also_negotiates",), 507: ("insufficient_storage",), 509: ("bandwidth_limit_exceeded", "bandwidth"), 510: ("not_extended",), 511: ("network_authentication_required", "network_auth", "network_authentication"), } codes = LookupDict(name="status_codes") def _init(): for code, titles in _codes.items(): for title in titles: setattr(codes, title, code) if not title.startswith(("\\", "/")): setattr(codes, title.upper(), code) def doc(code): names = ", ".join(f"``{n}``" for n in _codes[code]) return "* %d: %s" % (code, names) global __doc__ __doc__ = ( __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes)) if __doc__ is not None else None ) _init()
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/src/requests/packages.py
src/requests/packages.py
import sys from .compat import chardet # This code exists for backwards compatibility reasons. # I don't like it either. Just look the other way. :) for package in ("urllib3", "idna"): locals()[package] = __import__(package) # This traversal is apparently necessary such that the identities are # preserved (requests.packages.urllib3.* is urllib3.*) for mod in list(sys.modules): if mod == package or mod.startswith(f"{package}."): sys.modules[f"requests.packages.{mod}"] = sys.modules[mod] if chardet is not None: target = chardet.__name__ for mod in list(sys.modules): if mod == target or mod.startswith(f"{target}."): imported_mod = sys.modules[mod] sys.modules[f"requests.packages.{mod}"] = imported_mod mod = mod.replace(target, "chardet") sys.modules[f"requests.packages.{mod}"] = imported_mod
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_testserver.py
tests/test_testserver.py
import socket import threading import time import pytest from tests.testserver.server import Server import requests class TestTestServer: def test_basic(self): """messages are sent and received properly""" question = b"success?" answer = b"yeah, success" def handler(sock): text = sock.recv(1000) assert text == question sock.sendall(answer) with Server(handler) as (host, port): sock = socket.socket() sock.connect((host, port)) sock.sendall(question) text = sock.recv(1000) assert text == answer sock.close() def test_server_closes(self): """the server closes when leaving the context manager""" with Server.basic_response_server() as (host, port): sock = socket.socket() sock.connect((host, port)) sock.close() with pytest.raises(socket.error): new_sock = socket.socket() new_sock.connect((host, port)) def test_text_response(self): """the text_response_server sends the given text""" server = Server.text_response_server( "HTTP/1.1 200 OK\r\n" "Content-Length: 6\r\n" "\r\nroflol" ) with server as (host, port): r = requests.get(f"http://{host}:{port}") assert r.status_code == 200 assert r.text == "roflol" assert r.headers["Content-Length"] == "6" def test_basic_response(self): """the basic response server returns an empty http response""" with Server.basic_response_server() as (host, port): r = requests.get(f"http://{host}:{port}") assert r.status_code == 200 assert r.text == "" assert r.headers["Content-Length"] == "0" def test_basic_waiting_server(self): """the server waits for the block_server event to be set before closing""" block_server = threading.Event() with Server.basic_response_server(wait_to_close_event=block_server) as ( host, port, ): sock = socket.socket() sock.connect((host, port)) sock.sendall(b"send something") time.sleep(2.5) sock.sendall(b"still alive") block_server.set() # release server block def test_multiple_requests(self): """multiple requests can be served""" requests_to_handle = 5 server = 
Server.basic_response_server(requests_to_handle=requests_to_handle) with server as (host, port): server_url = f"http://{host}:{port}" for _ in range(requests_to_handle): r = requests.get(server_url) assert r.status_code == 200 # the (n+1)th request fails with pytest.raises(requests.exceptions.ConnectionError): r = requests.get(server_url) @pytest.mark.skip(reason="this fails non-deterministically under pytest-xdist") def test_request_recovery(self): """can check the requests content""" # TODO: figure out why this sometimes fails when using pytest-xdist. server = Server.basic_response_server(requests_to_handle=2) first_request = b"put your hands up in the air" second_request = b"put your hand down in the floor" with server as address: sock1 = socket.socket() sock2 = socket.socket() sock1.connect(address) sock1.sendall(first_request) sock1.close() sock2.connect(address) sock2.sendall(second_request) sock2.close() assert server.handler_results[0] == first_request assert server.handler_results[1] == second_request def test_requests_after_timeout_are_not_received(self): """the basic response handler times out when receiving requests""" server = Server.basic_response_server(request_timeout=1) with server as address: sock = socket.socket() sock.connect(address) time.sleep(1.5) sock.sendall(b"hehehe, not received") sock.close() assert server.handler_results[0] == b"" def test_request_recovery_with_bigger_timeout(self): """a biggest timeout can be specified""" server = Server.basic_response_server(request_timeout=3) data = b"bananadine" with server as address: sock = socket.socket() sock.connect(address) time.sleep(1.5) sock.sendall(data) sock.close() assert server.handler_results[0] == data def test_server_finishes_on_error(self): """the server thread exits even if an exception exits the context manager""" server = Server.basic_response_server() with pytest.raises(Exception): with server: raise Exception() assert len(server.handler_results) == 0 # if the server thread 
fails to finish, the test suite will hang # and get killed by the jenkins timeout. def test_server_finishes_when_no_connections(self): """the server thread exits even if there are no connections""" server = Server.basic_response_server() with server: pass assert len(server.handler_results) == 0 # if the server thread fails to finish, the test suite will hang # and get killed by the jenkins timeout.
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_adapters.py
tests/test_adapters.py
import requests.adapters def test_request_url_trims_leading_path_separators(): """See also https://github.com/psf/requests/issues/6643.""" a = requests.adapters.HTTPAdapter() p = requests.Request(method="GET", url="http://127.0.0.1:10000//v:h").prepare() assert "/v:h" == a.request_url(p, {})
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_hooks.py
tests/test_hooks.py
import pytest from requests import hooks def hook(value): return value[1:] @pytest.mark.parametrize( "hooks_list, result", ( (hook, "ata"), ([hook, lambda x: None, hook], "ta"), ), ) def test_hooks(hooks_list, result): assert hooks.dispatch_hook("response", {"response": hooks_list}, "Data") == result def test_default_hooks(): assert hooks.default_hooks() == {"response": []}
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_utils.py
tests/test_utils.py
import copy import filecmp import os import tarfile import zipfile from collections import deque from io import BytesIO from unittest import mock import pytest from requests import compat from requests._internal_utils import unicode_is_ascii from requests.cookies import RequestsCookieJar from requests.structures import CaseInsensitiveDict from requests.utils import ( _parse_content_type_header, add_dict_to_cookiejar, address_in_network, dotted_netmask, extract_zipped_paths, get_auth_from_url, get_encoding_from_headers, get_encodings_from_content, get_environ_proxies, get_netrc_auth, guess_filename, guess_json_utf, is_ipv4_address, is_valid_cidr, iter_slices, parse_dict_header, parse_header_links, prepend_scheme_if_needed, requote_uri, select_proxy, set_environ, should_bypass_proxies, super_len, to_key_val_list, to_native_string, unquote_header_value, unquote_unreserved, urldefragauth, ) from .compat import StringIO, cStringIO class TestSuperLen: @pytest.mark.parametrize( "stream, value", ( (StringIO.StringIO, "Test"), (BytesIO, b"Test"), pytest.param( cStringIO, "Test", marks=pytest.mark.skipif("cStringIO is None") ), ), ) def test_io_streams(self, stream, value): """Ensures that we properly deal with different kinds of IO streams.""" assert super_len(stream()) == 0 assert super_len(stream(value)) == 4 def test_super_len_correctly_calculates_len_of_partially_read_file(self): """Ensure that we handle partially consumed file like objects.""" s = StringIO.StringIO() s.write("foobarbogus") assert super_len(s) == 0 @pytest.mark.parametrize("error", [IOError, OSError]) def test_super_len_handles_files_raising_weird_errors_in_tell(self, error): """If tell() raises errors, assume the cursor is at position zero.""" class BoomFile: def __len__(self): return 5 def tell(self): raise error() assert super_len(BoomFile()) == 0 @pytest.mark.parametrize("error", [IOError, OSError]) def test_super_len_tell_ioerror(self, error): """Ensure that if tell gives an IOError super_len 
doesn't fail""" class NoLenBoomFile: def tell(self): raise error() def seek(self, offset, whence): pass assert super_len(NoLenBoomFile()) == 0 def test_string(self): assert super_len("Test") == 4 @pytest.mark.parametrize( "mode, warnings_num", ( ("r", 1), ("rb", 0), ), ) def test_file(self, tmpdir, mode, warnings_num, recwarn): file_obj = tmpdir.join("test.txt") file_obj.write("Test") with file_obj.open(mode) as fd: assert super_len(fd) == 4 assert len(recwarn) == warnings_num def test_tarfile_member(self, tmpdir): file_obj = tmpdir.join("test.txt") file_obj.write("Test") tar_obj = str(tmpdir.join("test.tar")) with tarfile.open(tar_obj, "w") as tar: tar.add(str(file_obj), arcname="test.txt") with tarfile.open(tar_obj) as tar: member = tar.extractfile("test.txt") assert super_len(member) == 4 def test_super_len_with__len__(self): foo = [1, 2, 3, 4] len_foo = super_len(foo) assert len_foo == 4 def test_super_len_with_no__len__(self): class LenFile: def __init__(self): self.len = 5 assert super_len(LenFile()) == 5 def test_super_len_with_tell(self): foo = StringIO.StringIO("12345") assert super_len(foo) == 5 foo.read(2) assert super_len(foo) == 3 def test_super_len_with_fileno(self): with open(__file__, "rb") as f: length = super_len(f) file_data = f.read() assert length == len(file_data) def test_super_len_with_no_matches(self): """Ensure that objects without any length methods default to 0""" assert super_len(object()) == 0 class TestGetNetrcAuth: def test_works(self, tmp_path, monkeypatch): netrc_path = tmp_path / ".netrc" monkeypatch.setenv("NETRC", str(netrc_path)) with open(netrc_path, "w") as f: f.write("machine example.com login aaaa password bbbb\n") auth = get_netrc_auth("http://example.com/thing") assert auth == ("aaaa", "bbbb") def test_not_vulnerable_to_bad_url_parsing(self, tmp_path, monkeypatch): netrc_path = tmp_path / ".netrc" monkeypatch.setenv("NETRC", str(netrc_path)) with open(netrc_path, "w") as f: f.write("machine example.com login aaaa password 
bbbb\n") auth = get_netrc_auth("http://example.com:@evil.com/&apos;") assert auth is None class TestToKeyValList: @pytest.mark.parametrize( "value, expected", ( ([("key", "val")], [("key", "val")]), ((("key", "val"),), [("key", "val")]), ({"key": "val"}, [("key", "val")]), (None, None), ), ) def test_valid(self, value, expected): assert to_key_val_list(value) == expected def test_invalid(self): with pytest.raises(ValueError): to_key_val_list("string") class TestUnquoteHeaderValue: @pytest.mark.parametrize( "value, expected", ( (None, None), ("Test", "Test"), ('"Test"', "Test"), ('"Test\\\\"', "Test\\"), ('"\\\\Comp\\Res"', "\\Comp\\Res"), ), ) def test_valid(self, value, expected): assert unquote_header_value(value) == expected def test_is_filename(self): assert unquote_header_value('"\\\\Comp\\Res"', True) == "\\\\Comp\\Res" class TestGetEnvironProxies: """Ensures that IP addresses are correctly matches with ranges in no_proxy variable. """ @pytest.fixture(autouse=True, params=["no_proxy", "NO_PROXY"]) def no_proxy(self, request, monkeypatch): monkeypatch.setenv( request.param, "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1" ) @pytest.mark.parametrize( "url", ( "http://192.168.0.1:5000/", "http://192.168.0.1/", "http://172.16.1.1/", "http://172.16.1.1:5000/", "http://localhost.localdomain:5000/v1.0/", ), ) def test_bypass(self, url): assert get_environ_proxies(url, no_proxy=None) == {} @pytest.mark.parametrize( "url", ( "http://192.168.1.1:5000/", "http://192.168.1.1/", "http://www.requests.com/", ), ) def test_not_bypass(self, url): assert get_environ_proxies(url, no_proxy=None) != {} @pytest.mark.parametrize( "url", ( "http://192.168.1.1:5000/", "http://192.168.1.1/", "http://www.requests.com/", ), ) def test_bypass_no_proxy_keyword(self, url): no_proxy = "192.168.1.1,requests.com" assert get_environ_proxies(url, no_proxy=no_proxy) == {} @pytest.mark.parametrize( "url", ( "http://192.168.0.1:5000/", "http://192.168.0.1/", "http://172.16.1.1/", 
"http://172.16.1.1:5000/", "http://localhost.localdomain:5000/v1.0/", ), ) def test_not_bypass_no_proxy_keyword(self, url, monkeypatch): # This is testing that the 'no_proxy' argument overrides the # environment variable 'no_proxy' monkeypatch.setenv("http_proxy", "http://proxy.example.com:3128/") no_proxy = "192.168.1.1,requests.com" assert get_environ_proxies(url, no_proxy=no_proxy) != {} class TestIsIPv4Address: def test_valid(self): assert is_ipv4_address("8.8.8.8") @pytest.mark.parametrize("value", ("8.8.8.8.8", "localhost.localdomain")) def test_invalid(self, value): assert not is_ipv4_address(value) class TestIsValidCIDR: def test_valid(self): assert is_valid_cidr("192.168.1.0/24") @pytest.mark.parametrize( "value", ( "8.8.8.8", "192.168.1.0/a", "192.168.1.0/128", "192.168.1.0/-1", "192.168.1.999/24", ), ) def test_invalid(self, value): assert not is_valid_cidr(value) class TestAddressInNetwork: def test_valid(self): assert address_in_network("192.168.1.1", "192.168.1.0/24") def test_invalid(self): assert not address_in_network("172.16.0.1", "192.168.1.0/24") class TestGuessFilename: @pytest.mark.parametrize( "value", (1, type("Fake", (object,), {"name": 1})()), ) def test_guess_filename_invalid(self, value): assert guess_filename(value) is None @pytest.mark.parametrize( "value, expected_type", ( (b"value", compat.bytes), (b"value".decode("utf-8"), compat.str), ), ) def test_guess_filename_valid(self, value, expected_type): obj = type("Fake", (object,), {"name": value})() result = guess_filename(obj) assert result == value assert isinstance(result, expected_type) class TestExtractZippedPaths: @pytest.mark.parametrize( "path", ( "/", __file__, pytest.__file__, "/etc/invalid/location", ), ) def test_unzipped_paths_unchanged(self, path): assert path == extract_zipped_paths(path) def test_zipped_paths_extracted(self, tmpdir): zipped_py = tmpdir.join("test.zip") with zipfile.ZipFile(zipped_py.strpath, "w") as f: f.write(__file__) _, name = 
os.path.splitdrive(__file__) zipped_path = os.path.join(zipped_py.strpath, name.lstrip(r"\/")) extracted_path = extract_zipped_paths(zipped_path) assert extracted_path != zipped_path assert os.path.exists(extracted_path) assert filecmp.cmp(extracted_path, __file__) def test_invalid_unc_path(self): path = r"\\localhost\invalid\location" assert extract_zipped_paths(path) == path class TestContentEncodingDetection: def test_none(self): encodings = get_encodings_from_content("") assert not len(encodings) @pytest.mark.parametrize( "content", ( # HTML5 meta charset attribute '<meta charset="UTF-8">', # HTML4 pragma directive '<meta http-equiv="Content-type" content="text/html;charset=UTF-8">', # XHTML 1.x served with text/html MIME type '<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />', # XHTML 1.x served as XML '<?xml version="1.0" encoding="UTF-8"?>', ), ) def test_pragmas(self, content): encodings = get_encodings_from_content(content) assert len(encodings) == 1 assert encodings[0] == "UTF-8" def test_precedence(self): content = """ <?xml version="1.0" encoding="XML"?> <meta charset="HTML5"> <meta http-equiv="Content-type" content="text/html;charset=HTML4" /> """.strip() assert get_encodings_from_content(content) == ["HTML5", "HTML4", "XML"] class TestGuessJSONUTF: @pytest.mark.parametrize( "encoding", ( "utf-32", "utf-8-sig", "utf-16", "utf-8", "utf-16-be", "utf-16-le", "utf-32-be", "utf-32-le", ), ) def test_encoded(self, encoding): data = "{}".encode(encoding) assert guess_json_utf(data) == encoding def test_bad_utf_like_encoding(self): assert guess_json_utf(b"\x00\x00\x00\x00") is None @pytest.mark.parametrize( ("encoding", "expected"), ( ("utf-16-be", "utf-16"), ("utf-16-le", "utf-16"), ("utf-32-be", "utf-32"), ("utf-32-le", "utf-32"), ), ) def test_guess_by_bom(self, encoding, expected): data = "\ufeff{}".encode(encoding) assert guess_json_utf(data) == expected USER = PASSWORD = "%!*'();:@&=+$,/?#[] " ENCODED_USER = compat.quote(USER, "") 
ENCODED_PASSWORD = compat.quote(PASSWORD, "") @pytest.mark.parametrize( "url, auth", ( ( f"http://{ENCODED_USER}:{ENCODED_PASSWORD}@request.com/url.html#test", (USER, PASSWORD), ), ("http://user:pass@complex.url.com/path?query=yes", ("user", "pass")), ( "http://user:pass%20pass@complex.url.com/path?query=yes", ("user", "pass pass"), ), ("http://user:pass pass@complex.url.com/path?query=yes", ("user", "pass pass")), ( "http://user%25user:pass@complex.url.com/path?query=yes", ("user%user", "pass"), ), ( "http://user:pass%23pass@complex.url.com/path?query=yes", ("user", "pass#pass"), ), ("http://complex.url.com/path?query=yes", ("", "")), ), ) def test_get_auth_from_url(url, auth): assert get_auth_from_url(url) == auth @pytest.mark.parametrize( "uri, expected", ( ( # Ensure requoting doesn't break expectations "http://example.com/fiz?buz=%25ppicture", "http://example.com/fiz?buz=%25ppicture", ), ( # Ensure we handle unquoted percent signs in redirects "http://example.com/fiz?buz=%ppicture", "http://example.com/fiz?buz=%25ppicture", ), ), ) def test_requote_uri_with_unquoted_percents(uri, expected): """See: https://github.com/psf/requests/issues/2356""" assert requote_uri(uri) == expected @pytest.mark.parametrize( "uri, expected", ( ( # Illegal bytes "http://example.com/?a=%--", "http://example.com/?a=%--", ), ( # Reserved characters "http://example.com/?a=%300", "http://example.com/?a=00", ), ), ) def test_unquote_unreserved(uri, expected): assert unquote_unreserved(uri) == expected @pytest.mark.parametrize( "mask, expected", ( (8, "255.0.0.0"), (24, "255.255.255.0"), (25, "255.255.255.128"), ), ) def test_dotted_netmask(mask, expected): assert dotted_netmask(mask) == expected http_proxies = { "http": "http://http.proxy", "http://some.host": "http://some.host.proxy", } all_proxies = { "all": "socks5://http.proxy", "all://some.host": "socks5://some.host.proxy", } mixed_proxies = { "http": "http://http.proxy", "http://some.host": "http://some.host.proxy", "all": 
"socks5://http.proxy", } @pytest.mark.parametrize( "url, expected, proxies", ( ("hTTp://u:p@Some.Host/path", "http://some.host.proxy", http_proxies), ("hTTp://u:p@Other.Host/path", "http://http.proxy", http_proxies), ("hTTp:///path", "http://http.proxy", http_proxies), ("hTTps://Other.Host", None, http_proxies), ("file:///etc/motd", None, http_proxies), ("hTTp://u:p@Some.Host/path", "socks5://some.host.proxy", all_proxies), ("hTTp://u:p@Other.Host/path", "socks5://http.proxy", all_proxies), ("hTTp:///path", "socks5://http.proxy", all_proxies), ("hTTps://Other.Host", "socks5://http.proxy", all_proxies), ("http://u:p@other.host/path", "http://http.proxy", mixed_proxies), ("http://u:p@some.host/path", "http://some.host.proxy", mixed_proxies), ("https://u:p@other.host/path", "socks5://http.proxy", mixed_proxies), ("https://u:p@some.host/path", "socks5://http.proxy", mixed_proxies), ("https://", "socks5://http.proxy", mixed_proxies), # XXX: unsure whether this is reasonable behavior ("file:///etc/motd", "socks5://http.proxy", all_proxies), ), ) def test_select_proxies(url, expected, proxies): """Make sure we can select per-host proxies correctly.""" assert select_proxy(url, proxies) == expected @pytest.mark.parametrize( "value, expected", ( ('foo="is a fish", bar="as well"', {"foo": "is a fish", "bar": "as well"}), ("key_without_value", {"key_without_value": None}), ), ) def test_parse_dict_header(value, expected): assert parse_dict_header(value) == expected @pytest.mark.parametrize( "value, expected", ( ("application/xml", ("application/xml", {})), ( "application/json ; charset=utf-8", ("application/json", {"charset": "utf-8"}), ), ( "application/json ; Charset=utf-8", ("application/json", {"charset": "utf-8"}), ), ("text/plain", ("text/plain", {})), ( "multipart/form-data; boundary = something ; boundary2='something_else' ; no_equals ", ( "multipart/form-data", { "boundary": "something", "boundary2": "something_else", "no_equals": True, }, ), ), ( 
'multipart/form-data; boundary = something ; boundary2="something_else" ; no_equals ', ( "multipart/form-data", { "boundary": "something", "boundary2": "something_else", "no_equals": True, }, ), ), ( "multipart/form-data; boundary = something ; 'boundary2=something_else' ; no_equals ", ( "multipart/form-data", { "boundary": "something", "boundary2": "something_else", "no_equals": True, }, ), ), ( 'multipart/form-data; boundary = something ; "boundary2=something_else" ; no_equals ', ( "multipart/form-data", { "boundary": "something", "boundary2": "something_else", "no_equals": True, }, ), ), ("application/json ; ; ", ("application/json", {})), ), ) def test__parse_content_type_header(value, expected): assert _parse_content_type_header(value) == expected @pytest.mark.parametrize( "value, expected", ( (CaseInsensitiveDict(), None), ( CaseInsensitiveDict({"content-type": "application/json; charset=utf-8"}), "utf-8", ), (CaseInsensitiveDict({"content-type": "text/plain"}), "ISO-8859-1"), ), ) def test_get_encoding_from_headers(value, expected): assert get_encoding_from_headers(value) == expected @pytest.mark.parametrize( "value, length", ( ("", 0), ("T", 1), ("Test", 4), ("Cont", 0), ("Other", -5), ("Content", None), ), ) def test_iter_slices(value, length): if length is None or (length <= 0 and len(value) > 0): # Reads all content at once assert len(list(iter_slices(value, length))) == 1 else: assert len(list(iter_slices(value, 1))) == length @pytest.mark.parametrize( "value, expected", ( ( '<http:/.../front.jpeg>; rel=front; type="image/jpeg"', [{"url": "http:/.../front.jpeg", "rel": "front", "type": "image/jpeg"}], ), ("<http:/.../front.jpeg>", [{"url": "http:/.../front.jpeg"}]), ("<http:/.../front.jpeg>;", [{"url": "http:/.../front.jpeg"}]), ( '<http:/.../front.jpeg>; type="image/jpeg",<http://.../back.jpeg>;', [ {"url": "http:/.../front.jpeg", "type": "image/jpeg"}, {"url": "http://.../back.jpeg"}, ], ), ("", []), ), ) def test_parse_header_links(value, expected): 
assert parse_header_links(value) == expected @pytest.mark.parametrize( "value, expected", ( ("example.com/path", "http://example.com/path"), ("//example.com/path", "http://example.com/path"), ("example.com:80", "http://example.com:80"), ( "http://user:pass@example.com/path?query", "http://user:pass@example.com/path?query", ), ("http://user@example.com/path?query", "http://user@example.com/path?query"), ), ) def test_prepend_scheme_if_needed(value, expected): assert prepend_scheme_if_needed(value, "http") == expected @pytest.mark.parametrize( "value, expected", ( ("T", "T"), (b"T", "T"), ("T", "T"), ), ) def test_to_native_string(value, expected): assert to_native_string(value) == expected @pytest.mark.parametrize( "url, expected", ( ("http://u:p@example.com/path?a=1#test", "http://example.com/path?a=1"), ("http://example.com/path", "http://example.com/path"), ("//u:p@example.com/path", "//example.com/path"), ("//example.com/path", "//example.com/path"), ("example.com/path", "//example.com/path"), ("scheme:u:p@example.com/path", "scheme://example.com/path"), ), ) def test_urldefragauth(url, expected): assert urldefragauth(url) == expected @pytest.mark.parametrize( "url, expected", ( ("http://192.168.0.1:5000/", True), ("http://192.168.0.1/", True), ("http://172.16.1.1/", True), ("http://172.16.1.1:5000/", True), ("http://localhost.localdomain:5000/v1.0/", True), ("http://google.com:6000/", True), ("http://172.16.1.12/", False), ("http://172.16.1.12:5000/", False), ("http://google.com:5000/v1.0/", False), ("file:///some/path/on/disk", True), ), ) def test_should_bypass_proxies(url, expected, monkeypatch): """Tests for function should_bypass_proxies to check if proxy can be bypassed or not """ monkeypatch.setenv( "no_proxy", "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1, google.com:6000", ) monkeypatch.setenv( "NO_PROXY", "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1, google.com:6000", ) assert should_bypass_proxies(url, no_proxy=None) == 
expected @pytest.mark.parametrize( "url, expected", ( ("http://172.16.1.1/", "172.16.1.1"), ("http://172.16.1.1:5000/", "172.16.1.1"), ("http://user:pass@172.16.1.1", "172.16.1.1"), ("http://user:pass@172.16.1.1:5000", "172.16.1.1"), ("http://hostname/", "hostname"), ("http://hostname:5000/", "hostname"), ("http://user:pass@hostname", "hostname"), ("http://user:pass@hostname:5000", "hostname"), ), ) def test_should_bypass_proxies_pass_only_hostname(url, expected): """The proxy_bypass function should be called with a hostname or IP without a port number or auth credentials. """ with mock.patch("requests.utils.proxy_bypass") as proxy_bypass: should_bypass_proxies(url, no_proxy=None) proxy_bypass.assert_called_once_with(expected) @pytest.mark.parametrize( "cookiejar", ( compat.cookielib.CookieJar(), RequestsCookieJar(), ), ) def test_add_dict_to_cookiejar(cookiejar): """Ensure add_dict_to_cookiejar works for non-RequestsCookieJar CookieJars """ cookiedict = {"test": "cookies", "good": "cookies"} cj = add_dict_to_cookiejar(cookiejar, cookiedict) cookies = {cookie.name: cookie.value for cookie in cj} assert cookiedict == cookies @pytest.mark.parametrize( "value, expected", ( ("test", True), ("æíöû", False), ("ジェーピーニック", False), ), ) def test_unicode_is_ascii(value, expected): assert unicode_is_ascii(value) is expected @pytest.mark.parametrize( "url, expected", ( ("http://192.168.0.1:5000/", True), ("http://192.168.0.1/", True), ("http://172.16.1.1/", True), ("http://172.16.1.1:5000/", True), ("http://localhost.localdomain:5000/v1.0/", True), ("http://172.16.1.12/", False), ("http://172.16.1.12:5000/", False), ("http://google.com:5000/v1.0/", False), ), ) def test_should_bypass_proxies_no_proxy(url, expected, monkeypatch): """Tests for function should_bypass_proxies to check if proxy can be bypassed or not using the 'no_proxy' argument """ no_proxy = "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1" # Test 'no_proxy' argument assert should_bypass_proxies(url, 
no_proxy=no_proxy) == expected @pytest.mark.skipif(os.name != "nt", reason="Test only on Windows") @pytest.mark.parametrize( "url, expected, override", ( ("http://192.168.0.1:5000/", True, None), ("http://192.168.0.1/", True, None), ("http://172.16.1.1/", True, None), ("http://172.16.1.1:5000/", True, None), ("http://localhost.localdomain:5000/v1.0/", True, None), ("http://172.16.1.22/", False, None), ("http://172.16.1.22:5000/", False, None), ("http://google.com:5000/v1.0/", False, None), ("http://mylocalhostname:5000/v1.0/", True, "<local>"), ("http://192.168.0.1/", False, ""), ), ) def test_should_bypass_proxies_win_registry(url, expected, override, monkeypatch): """Tests for function should_bypass_proxies to check if proxy can be bypassed or not with Windows registry settings """ if override is None: override = "192.168.*;127.0.0.1;localhost.localdomain;172.16.1.1" import winreg class RegHandle: def Close(self): pass ie_settings = RegHandle() proxyEnableValues = deque([1, "1"]) def OpenKey(key, subkey): return ie_settings def QueryValueEx(key, value_name): if key is ie_settings: if value_name == "ProxyEnable": # this could be a string (REG_SZ) or a 32-bit number (REG_DWORD) proxyEnableValues.rotate() return [proxyEnableValues[0]] elif value_name == "ProxyOverride": return [override] monkeypatch.setenv("http_proxy", "") monkeypatch.setenv("https_proxy", "") monkeypatch.setenv("ftp_proxy", "") monkeypatch.setenv("no_proxy", "") monkeypatch.setenv("NO_PROXY", "") monkeypatch.setattr(winreg, "OpenKey", OpenKey) monkeypatch.setattr(winreg, "QueryValueEx", QueryValueEx) assert should_bypass_proxies(url, None) == expected @pytest.mark.skipif(os.name != "nt", reason="Test only on Windows") def test_should_bypass_proxies_win_registry_bad_values(monkeypatch): """Tests for function should_bypass_proxies to check if proxy can be bypassed or not with Windows invalid registry settings. 
""" import winreg class RegHandle: def Close(self): pass ie_settings = RegHandle() def OpenKey(key, subkey): return ie_settings def QueryValueEx(key, value_name): if key is ie_settings: if value_name == "ProxyEnable": # Invalid response; Should be an int or int-y value return [""] elif value_name == "ProxyOverride": return ["192.168.*;127.0.0.1;localhost.localdomain;172.16.1.1"] monkeypatch.setenv("http_proxy", "") monkeypatch.setenv("https_proxy", "") monkeypatch.setenv("no_proxy", "") monkeypatch.setenv("NO_PROXY", "") monkeypatch.setattr(winreg, "OpenKey", OpenKey) monkeypatch.setattr(winreg, "QueryValueEx", QueryValueEx) assert should_bypass_proxies("http://172.16.1.1/", None) is False @pytest.mark.parametrize( "env_name, value", ( ("no_proxy", "192.168.0.0/24,127.0.0.1,localhost.localdomain"), ("no_proxy", None), ("a_new_key", "192.168.0.0/24,127.0.0.1,localhost.localdomain"), ("a_new_key", None), ), ) def test_set_environ(env_name, value): """Tests set_environ will set environ values and will restore the environ.""" environ_copy = copy.deepcopy(os.environ) with set_environ(env_name, value): assert os.environ.get(env_name) == value assert os.environ == environ_copy def test_set_environ_raises_exception(): """Tests set_environ will raise exceptions in context when the value parameter is None.""" with pytest.raises(Exception) as exception: with set_environ("test1", None): raise Exception("Expected exception") assert "Expected exception" in str(exception.value) @pytest.mark.skipif(os.name != "nt", reason="Test only on Windows") def test_should_bypass_proxies_win_registry_ProxyOverride_value(monkeypatch): """Tests for function should_bypass_proxies to check if proxy can be bypassed or not with Windows ProxyOverride registry value ending with a semicolon. 
""" import winreg class RegHandle: def Close(self): pass ie_settings = RegHandle() def OpenKey(key, subkey): return ie_settings def QueryValueEx(key, value_name): if key is ie_settings: if value_name == "ProxyEnable": return [1] elif value_name == "ProxyOverride": return [ "192.168.*;127.0.0.1;localhost.localdomain;172.16.1.1;<-loopback>;" ] monkeypatch.setenv("NO_PROXY", "") monkeypatch.setenv("no_proxy", "") monkeypatch.setattr(winreg, "OpenKey", OpenKey) monkeypatch.setattr(winreg, "QueryValueEx", QueryValueEx) assert should_bypass_proxies("http://example.com/", None) is False
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/compat.py
tests/compat.py
import warnings try: import StringIO except ImportError: import io as StringIO try: from cStringIO import StringIO as cStringIO except ImportError: cStringIO = None def u(s): warnings.warn( ( "This helper function is no longer relevant in Python 3. " "Usage of this alias should be discontinued as it will be " "removed in a future release of Requests." ), DeprecationWarning, ) return s
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/conftest.py
tests/conftest.py
try: from http.server import HTTPServer, SimpleHTTPRequestHandler except ImportError: from BaseHTTPServer import HTTPServer from SimpleHTTPServer import SimpleHTTPRequestHandler import ssl import threading import pytest from requests.compat import urljoin def prepare_url(value): # Issue #1483: Make sure the URL always has a trailing slash httpbin_url = value.url.rstrip("/") + "/" def inner(*suffix): return urljoin(httpbin_url, "/".join(suffix)) return inner @pytest.fixture def httpbin(httpbin): return prepare_url(httpbin) @pytest.fixture def httpbin_secure(httpbin_secure): return prepare_url(httpbin_secure) @pytest.fixture def nosan_server(tmp_path_factory): # delay importing until the fixture in order to make it possible # to deselect the test via command-line when trustme is not available import trustme tmpdir = tmp_path_factory.mktemp("certs") ca = trustme.CA() # only commonName, no subjectAltName server_cert = ca.issue_cert(common_name="localhost") ca_bundle = str(tmpdir / "ca.pem") ca.cert_pem.write_to_path(ca_bundle) context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) server_cert.configure_cert(context) server = HTTPServer(("localhost", 0), SimpleHTTPRequestHandler) server.socket = context.wrap_socket(server.socket, server_side=True) server_thread = threading.Thread(target=server.serve_forever) server_thread.start() yield "localhost", server.server_address[1], ca_bundle server.shutdown() server_thread.join()
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/utils.py
tests/utils.py
import contextlib import os @contextlib.contextmanager def override_environ(**kwargs): save_env = dict(os.environ) for key, value in kwargs.items(): if value is None: del os.environ[key] else: os.environ[key] = value try: yield finally: os.environ.clear() os.environ.update(save_env)
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_lowlevel.py
tests/test_lowlevel.py
import threading import pytest from tests.testserver.server import Server, consume_socket_content import requests from requests.compat import JSONDecodeError from .utils import override_environ def echo_response_handler(sock): """Simple handler that will take request and echo it back to requester.""" request_content = consume_socket_content(sock, timeout=0.5) text_200 = ( b"HTTP/1.1 200 OK\r\n" b"Content-Length: %d\r\n\r\n" b"%s" ) % (len(request_content), request_content) sock.send(text_200) def test_chunked_upload(): """can safely send generators""" close_server = threading.Event() server = Server.basic_response_server(wait_to_close_event=close_server) data = iter([b"a", b"b", b"c"]) with server as (host, port): url = f"http://{host}:{port}/" r = requests.post(url, data=data, stream=True) close_server.set() # release server block assert r.status_code == 200 assert r.request.headers["Transfer-Encoding"] == "chunked" def test_chunked_encoding_error(): """get a ChunkedEncodingError if the server returns a bad response""" def incomplete_chunked_response_handler(sock): request_content = consume_socket_content(sock, timeout=0.5) # The server never ends the request and doesn't provide any valid chunks sock.send( b"HTTP/1.1 200 OK\r\n" b"Transfer-Encoding: chunked\r\n" ) return request_content close_server = threading.Event() server = Server(incomplete_chunked_response_handler) with server as (host, port): url = f"http://{host}:{port}/" with pytest.raises(requests.exceptions.ChunkedEncodingError): requests.get(url) close_server.set() # release server block def test_chunked_upload_uses_only_specified_host_header(): """Ensure we use only the specified Host header for chunked requests.""" close_server = threading.Event() server = Server(echo_response_handler, wait_to_close_event=close_server) data = iter([b"a", b"b", b"c"]) custom_host = "sample-host" with server as (host, port): url = f"http://{host}:{port}/" r = requests.post(url, data=data, headers={"Host": custom_host}, 
stream=True) close_server.set() # release server block expected_header = b"Host: %s\r\n" % custom_host.encode("utf-8") assert expected_header in r.content assert r.content.count(b"Host: ") == 1 def test_chunked_upload_doesnt_skip_host_header(): """Ensure we don't omit all Host headers with chunked requests.""" close_server = threading.Event() server = Server(echo_response_handler, wait_to_close_event=close_server) data = iter([b"a", b"b", b"c"]) with server as (host, port): expected_host = f"{host}:{port}" url = f"http://{host}:{port}/" r = requests.post(url, data=data, stream=True) close_server.set() # release server block expected_header = b"Host: %s\r\n" % expected_host.encode("utf-8") assert expected_header in r.content assert r.content.count(b"Host: ") == 1 def test_conflicting_content_lengths(): """Ensure we correctly throw an InvalidHeader error if multiple conflicting Content-Length headers are returned. """ def multiple_content_length_response_handler(sock): request_content = consume_socket_content(sock, timeout=0.5) response = ( b"HTTP/1.1 200 OK\r\n" b"Content-Type: text/plain\r\n" b"Content-Length: 16\r\n" b"Content-Length: 32\r\n\r\n" b"-- Bad Actor -- Original Content\r\n" ) sock.send(response) return request_content close_server = threading.Event() server = Server(multiple_content_length_response_handler) with server as (host, port): url = f"http://{host}:{port}/" with pytest.raises(requests.exceptions.InvalidHeader): requests.get(url) close_server.set() def test_digestauth_401_count_reset_on_redirect(): """Ensure we correctly reset num_401_calls after a successful digest auth, followed by a 302 redirect to another digest auth prompt. See https://github.com/psf/requests/issues/1979. 
""" text_401 = (b'HTTP/1.1 401 UNAUTHORIZED\r\n' b'Content-Length: 0\r\n' b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"' b', opaque="372825293d1c26955496c80ed6426e9e", ' b'realm="me@kennethreitz.com", qop=auth\r\n\r\n') text_302 = (b'HTTP/1.1 302 FOUND\r\n' b'Content-Length: 0\r\n' b'Location: /\r\n\r\n') text_200 = (b'HTTP/1.1 200 OK\r\n' b'Content-Length: 0\r\n\r\n') expected_digest = (b'Authorization: Digest username="user", ' b'realm="me@kennethreitz.com", ' b'nonce="6bf5d6e4da1ce66918800195d6b9130d", uri="/"') auth = requests.auth.HTTPDigestAuth('user', 'pass') def digest_response_handler(sock): # Respond to initial GET with a challenge. request_content = consume_socket_content(sock, timeout=0.5) assert request_content.startswith(b"GET / HTTP/1.1") sock.send(text_401) # Verify we receive an Authorization header in response, then redirect. request_content = consume_socket_content(sock, timeout=0.5) assert expected_digest in request_content sock.send(text_302) # Verify Authorization isn't sent to the redirected host, # then send another challenge. request_content = consume_socket_content(sock, timeout=0.5) assert b'Authorization:' not in request_content sock.send(text_401) # Verify Authorization is sent correctly again, and return 200 OK. request_content = consume_socket_content(sock, timeout=0.5) assert expected_digest in request_content sock.send(text_200) return request_content close_server = threading.Event() server = Server(digest_response_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}/' r = requests.get(url, auth=auth) # Verify server succeeded in authenticating. assert r.status_code == 200 # Verify Authorization was sent in final request. assert 'Authorization' in r.request.headers assert r.request.headers['Authorization'].startswith('Digest ') # Verify redirect happened as we expected. 
assert r.history[0].status_code == 302 close_server.set() def test_digestauth_401_only_sent_once(): """Ensure we correctly respond to a 401 challenge once, and then stop responding if challenged again. """ text_401 = (b'HTTP/1.1 401 UNAUTHORIZED\r\n' b'Content-Length: 0\r\n' b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"' b', opaque="372825293d1c26955496c80ed6426e9e", ' b'realm="me@kennethreitz.com", qop=auth\r\n\r\n') expected_digest = (b'Authorization: Digest username="user", ' b'realm="me@kennethreitz.com", ' b'nonce="6bf5d6e4da1ce66918800195d6b9130d", uri="/"') auth = requests.auth.HTTPDigestAuth('user', 'pass') def digest_failed_response_handler(sock): # Respond to initial GET with a challenge. request_content = consume_socket_content(sock, timeout=0.5) assert request_content.startswith(b"GET / HTTP/1.1") sock.send(text_401) # Verify we receive an Authorization header in response, then # challenge again. request_content = consume_socket_content(sock, timeout=0.5) assert expected_digest in request_content sock.send(text_401) # Verify the client didn't respond to second challenge. request_content = consume_socket_content(sock, timeout=0.5) assert request_content == b'' return request_content close_server = threading.Event() server = Server(digest_failed_response_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}/' r = requests.get(url, auth=auth) # Verify server didn't authenticate us. assert r.status_code == 401 assert r.history[0].status_code == 401 close_server.set() def test_digestauth_only_on_4xx(): """Ensure we only send digestauth on 4xx challenges. See https://github.com/psf/requests/issues/3772. 
""" text_200_chal = (b'HTTP/1.1 200 OK\r\n' b'Content-Length: 0\r\n' b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"' b', opaque="372825293d1c26955496c80ed6426e9e", ' b'realm="me@kennethreitz.com", qop=auth\r\n\r\n') auth = requests.auth.HTTPDigestAuth('user', 'pass') def digest_response_handler(sock): # Respond to GET with a 200 containing www-authenticate header. request_content = consume_socket_content(sock, timeout=0.5) assert request_content.startswith(b"GET / HTTP/1.1") sock.send(text_200_chal) # Verify the client didn't respond with auth. request_content = consume_socket_content(sock, timeout=0.5) assert request_content == b'' return request_content close_server = threading.Event() server = Server(digest_response_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}/' r = requests.get(url, auth=auth) # Verify server didn't receive auth from us. assert r.status_code == 200 assert len(r.history) == 0 close_server.set() _schemes_by_var_prefix = [ ('http', ['http']), ('https', ['https']), ('all', ['http', 'https']), ] _proxy_combos = [] for prefix, schemes in _schemes_by_var_prefix: for scheme in schemes: _proxy_combos.append((f"{prefix}_proxy", scheme)) _proxy_combos += [(var.upper(), scheme) for var, scheme in _proxy_combos] @pytest.mark.parametrize("var,scheme", _proxy_combos) def test_use_proxy_from_environment(httpbin, var, scheme): url = f"{scheme}://httpbin.org" fake_proxy = Server() # do nothing with the requests; just close the socket with fake_proxy as (host, port): proxy_url = f"socks5://{host}:{port}" kwargs = {var: proxy_url} with override_environ(**kwargs): # fake proxy's lack of response will cause a ConnectionError with pytest.raises(requests.exceptions.ConnectionError): requests.get(url) # the fake proxy received a request assert len(fake_proxy.handler_results) == 1 # it had actual content (not checking for SOCKS protocol for now) assert len(fake_proxy.handler_results[0]) > 0 def 
test_redirect_rfc1808_to_non_ascii_location(): path = 'š' expected_path = b'%C5%A1' redirect_request = [] # stores the second request to the server def redirect_resp_handler(sock): consume_socket_content(sock, timeout=0.5) location = f'//{host}:{port}/{path}' sock.send( ( b'HTTP/1.1 301 Moved Permanently\r\n' b'Content-Length: 0\r\n' b'Location: %s\r\n' b'\r\n' ) % location.encode('utf8') ) redirect_request.append(consume_socket_content(sock, timeout=0.5)) sock.send(b'HTTP/1.1 200 OK\r\n\r\n') close_server = threading.Event() server = Server(redirect_resp_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}' r = requests.get(url=url, allow_redirects=True) assert r.status_code == 200 assert len(r.history) == 1 assert r.history[0].status_code == 301 assert redirect_request[0].startswith(b'GET /' + expected_path + b' HTTP/1.1') assert r.url == '{}/{}'.format(url, expected_path.decode('ascii')) close_server.set() def test_fragment_not_sent_with_request(): """Verify that the fragment portion of a URI isn't sent to the server.""" close_server = threading.Event() server = Server(echo_response_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}/path/to/thing/#view=edit&token=hunter2' r = requests.get(url) raw_request = r.content assert r.status_code == 200 headers, body = raw_request.split(b'\r\n\r\n', 1) status_line, headers = headers.split(b'\r\n', 1) assert status_line == b'GET /path/to/thing/ HTTP/1.1' for frag in (b'view', b'edit', b'token', b'hunter2'): assert frag not in headers assert frag not in body close_server.set() def test_fragment_update_on_redirect(): """Verify we only append previous fragment if one doesn't exist on new location. If a new fragment is encountered in a Location header, it should be added to all subsequent requests. 
""" def response_handler(sock): consume_socket_content(sock, timeout=0.5) sock.send( b'HTTP/1.1 302 FOUND\r\n' b'Content-Length: 0\r\n' b'Location: /get#relevant-section\r\n\r\n' ) consume_socket_content(sock, timeout=0.5) sock.send( b'HTTP/1.1 302 FOUND\r\n' b'Content-Length: 0\r\n' b'Location: /final-url/\r\n\r\n' ) consume_socket_content(sock, timeout=0.5) sock.send( b'HTTP/1.1 200 OK\r\n\r\n' ) close_server = threading.Event() server = Server(response_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}/path/to/thing/#view=edit&token=hunter2' r = requests.get(url) assert r.status_code == 200 assert len(r.history) == 2 assert r.history[0].request.url == url # Verify we haven't overwritten the location with our previous fragment. assert r.history[1].request.url == f'http://{host}:{port}/get#relevant-section' # Verify previous fragment is used and not the original. assert r.url == f'http://{host}:{port}/final-url/#relevant-section' close_server.set() def test_json_decode_compatibility_for_alt_utf_encodings(): def response_handler(sock): consume_socket_content(sock, timeout=0.5) sock.send( b'HTTP/1.1 200 OK\r\n' b'Content-Length: 18\r\n\r\n' b'\xff\xfe{\x00"\x00K0"\x00=\x00"\x00\xab0"\x00\r\n' ) close_server = threading.Event() server = Server(response_handler, wait_to_close_event=close_server) with server as (host, port): url = f'http://{host}:{port}/' r = requests.get(url) r.encoding = None with pytest.raises(requests.exceptions.JSONDecodeError) as excinfo: r.json() assert isinstance(excinfo.value, requests.exceptions.RequestException) assert isinstance(excinfo.value, JSONDecodeError) assert r.text not in str(excinfo.value)
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/__init__.py
tests/__init__.py
"""Requests test package initialisation.""" import warnings try: from urllib3.exceptions import SNIMissingWarning # urllib3 1.x sets SNIMissingWarning to only go off once, # while this test suite requires it to always fire # so that it occurs during test_requests.test_https_warnings warnings.simplefilter("always", SNIMissingWarning) except ImportError: # urllib3 2.0 removed that warning and errors out instead SNIMissingWarning = None
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_packages.py
tests/test_packages.py
import requests def test_can_access_urllib3_attribute(): requests.packages.urllib3 def test_can_access_idna_attribute(): requests.packages.idna def test_can_access_chardet_attribute(): requests.packages.chardet
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_help.py
tests/test_help.py
from unittest import mock from requests.help import info def test_system_ssl(): """Verify we're actually setting system_ssl when it should be available.""" assert info()["system_ssl"]["version"] != "" class VersionedPackage: def __init__(self, version): self.__version__ = version def test_idna_without_version_attribute(): """Older versions of IDNA don't provide a __version__ attribute, verify that if we have such a package, we don't blow up. """ with mock.patch("requests.help.idna", new=None): assert info()["idna"] == {"version": ""} def test_idna_with_version_attribute(): """Verify we're actually setting idna version when it should be available.""" with mock.patch("requests.help.idna", new=VersionedPackage("2.6")): assert info()["idna"] == {"version": "2.6"}
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_structures.py
tests/test_structures.py
import pytest from requests.structures import CaseInsensitiveDict, LookupDict class TestCaseInsensitiveDict: @pytest.fixture(autouse=True) def setup(self): """CaseInsensitiveDict instance with "Accept" header.""" self.case_insensitive_dict = CaseInsensitiveDict() self.case_insensitive_dict["Accept"] = "application/json" def test_list(self): assert list(self.case_insensitive_dict) == ["Accept"] possible_keys = pytest.mark.parametrize( "key", ("accept", "ACCEPT", "aCcEpT", "Accept") ) @possible_keys def test_getitem(self, key): assert self.case_insensitive_dict[key] == "application/json" @possible_keys def test_delitem(self, key): del self.case_insensitive_dict[key] assert key not in self.case_insensitive_dict def test_lower_items(self): assert list(self.case_insensitive_dict.lower_items()) == [ ("accept", "application/json") ] def test_repr(self): assert repr(self.case_insensitive_dict) == "{'Accept': 'application/json'}" def test_copy(self): copy = self.case_insensitive_dict.copy() assert copy is not self.case_insensitive_dict assert copy == self.case_insensitive_dict @pytest.mark.parametrize( "other, result", ( ({"AccePT": "application/json"}, True), ({}, False), (None, False), ), ) def test_instance_equality(self, other, result): assert (self.case_insensitive_dict == other) is result class TestLookupDict: @pytest.fixture(autouse=True) def setup(self): """LookupDict instance with "bad_gateway" attribute.""" self.lookup_dict = LookupDict("test") self.lookup_dict.bad_gateway = 502 def test_repr(self): assert repr(self.lookup_dict) == "<lookup 'test'>" get_item_parameters = pytest.mark.parametrize( "key, value", ( ("bad_gateway", 502), ("not_a_key", None), ), ) @get_item_parameters def test_getitem(self, key, value): assert self.lookup_dict[key] == value @get_item_parameters def test_get(self, key, value): assert self.lookup_dict.get(key) == value
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/test_requests.py
tests/test_requests.py
"""Tests for Requests.""" import collections import contextlib import io import json import os import pickle import re import tempfile import threading import warnings from unittest import mock import pytest import urllib3 from urllib3.util import Timeout as Urllib3Timeout import requests from requests.adapters import HTTPAdapter from requests.auth import HTTPDigestAuth, _basic_auth_str from requests.compat import ( JSONDecodeError, Morsel, MutableMapping, builtin_str, cookielib, getproxies, is_urllib3_1, urlparse, ) from requests.cookies import cookiejar_from_dict, morsel_to_cookie from requests.exceptions import ( ChunkedEncodingError, ConnectionError, ConnectTimeout, ContentDecodingError, InvalidHeader, InvalidProxyURL, InvalidSchema, InvalidURL, MissingSchema, ProxyError, ReadTimeout, RequestException, RetryError, ) from requests.exceptions import SSLError as RequestsSSLError from requests.exceptions import Timeout, TooManyRedirects, UnrewindableBodyError from requests.hooks import default_hooks from requests.models import PreparedRequest, urlencode from requests.sessions import SessionRedirectMixin from requests.structures import CaseInsensitiveDict from . 
import SNIMissingWarning from .compat import StringIO from .testserver.server import TLSServer, consume_socket_content from .utils import override_environ # Requests to this URL should always fail with a connection timeout (nothing # listening on that port) TARPIT = "http://10.255.255.1" # This is to avoid waiting the timeout of using TARPIT INVALID_PROXY = "http://localhost:1" try: from ssl import SSLContext del SSLContext HAS_MODERN_SSL = True except ImportError: HAS_MODERN_SSL = False try: requests.pyopenssl HAS_PYOPENSSL = True except AttributeError: HAS_PYOPENSSL = False class TestRequests: digest_auth_algo = ("MD5", "SHA-256", "SHA-512") def test_entry_points(self): requests.session requests.session().get requests.session().head requests.get requests.head requests.put requests.patch requests.post # Not really an entry point, but people rely on it. from requests.packages.urllib3.poolmanager import PoolManager # noqa:F401 @pytest.mark.parametrize( "exception, url", ( (MissingSchema, "hiwpefhipowhefopw"), (InvalidSchema, "localhost:3128"), (InvalidSchema, "localhost.localdomain:3128/"), (InvalidSchema, "10.122.1.1:3128/"), (InvalidURL, "http://"), (InvalidURL, "http://*example.com"), (InvalidURL, "http://.example.com"), ), ) def test_invalid_url(self, exception, url): with pytest.raises(exception): requests.get(url) def test_basic_building(self): req = requests.Request() req.url = "http://kennethreitz.org/" req.data = {"life": "42"} pr = req.prepare() assert pr.url == req.url assert pr.body == "life=42" @pytest.mark.parametrize("method", ("GET", "HEAD")) def test_no_content_length(self, httpbin, method): req = requests.Request(method, httpbin(method.lower())).prepare() assert "Content-Length" not in req.headers @pytest.mark.parametrize("method", ("POST", "PUT", "PATCH", "OPTIONS")) def test_no_body_content_length(self, httpbin, method): req = requests.Request(method, httpbin(method.lower())).prepare() assert req.headers["Content-Length"] == "0" 
@pytest.mark.parametrize("method", ("POST", "PUT", "PATCH", "OPTIONS")) def test_empty_content_length(self, httpbin, method): req = requests.Request(method, httpbin(method.lower()), data="").prepare() assert req.headers["Content-Length"] == "0" def test_override_content_length(self, httpbin): headers = {"Content-Length": "not zero"} r = requests.Request("POST", httpbin("post"), headers=headers).prepare() assert "Content-Length" in r.headers assert r.headers["Content-Length"] == "not zero" def test_path_is_not_double_encoded(self): request = requests.Request("GET", "http://0.0.0.0/get/test case").prepare() assert request.path_url == "/get/test%20case" @pytest.mark.parametrize( "url, expected", ( ( "http://example.com/path#fragment", "http://example.com/path?a=b#fragment", ), ( "http://example.com/path?key=value#fragment", "http://example.com/path?key=value&a=b#fragment", ), ), ) def test_params_are_added_before_fragment(self, url, expected): request = requests.Request("GET", url, params={"a": "b"}).prepare() assert request.url == expected def test_params_original_order_is_preserved_by_default(self): param_ordered_dict = collections.OrderedDict( (("z", 1), ("a", 1), ("k", 1), ("d", 1)) ) session = requests.Session() request = requests.Request( "GET", "http://example.com/", params=param_ordered_dict ) prep = session.prepare_request(request) assert prep.url == "http://example.com/?z=1&a=1&k=1&d=1" def test_params_bytes_are_encoded(self): request = requests.Request( "GET", "http://example.com", params=b"test=foo" ).prepare() assert request.url == "http://example.com/?test=foo" def test_binary_put(self): request = requests.Request( "PUT", "http://example.com", data="ööö".encode() ).prepare() assert isinstance(request.body, bytes) def test_whitespaces_are_removed_from_url(self): # Test for issue #3696 request = requests.Request("GET", " http://example.com").prepare() assert request.url == "http://example.com/" @pytest.mark.parametrize("scheme", ("http://", "HTTP://", 
"hTTp://", "HttP://")) def test_mixed_case_scheme_acceptable(self, httpbin, scheme): s = requests.Session() s.proxies = getproxies() parts = urlparse(httpbin("get")) url = scheme + parts.netloc + parts.path r = requests.Request("GET", url) r = s.send(r.prepare()) assert r.status_code == 200, f"failed for scheme {scheme}" def test_HTTP_200_OK_GET_ALTERNATIVE(self, httpbin): r = requests.Request("GET", httpbin("get")) s = requests.Session() s.proxies = getproxies() r = s.send(r.prepare()) assert r.status_code == 200 def test_HTTP_302_ALLOW_REDIRECT_GET(self, httpbin): r = requests.get(httpbin("redirect", "1")) assert r.status_code == 200 assert r.history[0].status_code == 302 assert r.history[0].is_redirect def test_HTTP_307_ALLOW_REDIRECT_POST(self, httpbin): r = requests.post( httpbin("redirect-to"), data="test", params={"url": "post", "status_code": 307}, ) assert r.status_code == 200 assert r.history[0].status_code == 307 assert r.history[0].is_redirect assert r.json()["data"] == "test" def test_HTTP_307_ALLOW_REDIRECT_POST_WITH_SEEKABLE(self, httpbin): byte_str = b"test" r = requests.post( httpbin("redirect-to"), data=io.BytesIO(byte_str), params={"url": "post", "status_code": 307}, ) assert r.status_code == 200 assert r.history[0].status_code == 307 assert r.history[0].is_redirect assert r.json()["data"] == byte_str.decode("utf-8") def test_HTTP_302_TOO_MANY_REDIRECTS(self, httpbin): try: requests.get(httpbin("relative-redirect", "50")) except TooManyRedirects as e: url = httpbin("relative-redirect", "20") assert e.request.url == url assert e.response.url == url assert len(e.response.history) == 30 else: pytest.fail("Expected redirect to raise TooManyRedirects but it did not") def test_HTTP_302_TOO_MANY_REDIRECTS_WITH_PARAMS(self, httpbin): s = requests.session() s.max_redirects = 5 try: s.get(httpbin("relative-redirect", "50")) except TooManyRedirects as e: url = httpbin("relative-redirect", "45") assert e.request.url == url assert e.response.url == url assert 
len(e.response.history) == 5 else: pytest.fail( "Expected custom max number of redirects to be respected but was not" ) def test_http_301_changes_post_to_get(self, httpbin): r = requests.post(httpbin("status", "301")) assert r.status_code == 200 assert r.request.method == "GET" assert r.history[0].status_code == 301 assert r.history[0].is_redirect def test_http_301_doesnt_change_head_to_get(self, httpbin): r = requests.head(httpbin("status", "301"), allow_redirects=True) print(r.content) assert r.status_code == 200 assert r.request.method == "HEAD" assert r.history[0].status_code == 301 assert r.history[0].is_redirect def test_http_302_changes_post_to_get(self, httpbin): r = requests.post(httpbin("status", "302")) assert r.status_code == 200 assert r.request.method == "GET" assert r.history[0].status_code == 302 assert r.history[0].is_redirect def test_http_302_doesnt_change_head_to_get(self, httpbin): r = requests.head(httpbin("status", "302"), allow_redirects=True) assert r.status_code == 200 assert r.request.method == "HEAD" assert r.history[0].status_code == 302 assert r.history[0].is_redirect def test_http_303_changes_post_to_get(self, httpbin): r = requests.post(httpbin("status", "303")) assert r.status_code == 200 assert r.request.method == "GET" assert r.history[0].status_code == 303 assert r.history[0].is_redirect def test_http_303_doesnt_change_head_to_get(self, httpbin): r = requests.head(httpbin("status", "303"), allow_redirects=True) assert r.status_code == 200 assert r.request.method == "HEAD" assert r.history[0].status_code == 303 assert r.history[0].is_redirect def test_header_and_body_removal_on_redirect(self, httpbin): purged_headers = ("Content-Length", "Content-Type") ses = requests.Session() req = requests.Request("POST", httpbin("post"), data={"test": "data"}) prep = ses.prepare_request(req) resp = ses.send(prep) # Mimic a redirect response resp.status_code = 302 resp.headers["location"] = "get" # Run request through resolve_redirects 
next_resp = next(ses.resolve_redirects(resp, prep)) assert next_resp.request.body is None for header in purged_headers: assert header not in next_resp.request.headers def test_transfer_enc_removal_on_redirect(self, httpbin): purged_headers = ("Transfer-Encoding", "Content-Type") ses = requests.Session() req = requests.Request("POST", httpbin("post"), data=(b"x" for x in range(1))) prep = ses.prepare_request(req) assert "Transfer-Encoding" in prep.headers # Create Response to avoid https://github.com/kevin1024/pytest-httpbin/issues/33 resp = requests.Response() resp.raw = io.BytesIO(b"the content") resp.request = prep setattr(resp.raw, "release_conn", lambda *args: args) # Mimic a redirect response resp.status_code = 302 resp.headers["location"] = httpbin("get") # Run request through resolve_redirect next_resp = next(ses.resolve_redirects(resp, prep)) assert next_resp.request.body is None for header in purged_headers: assert header not in next_resp.request.headers def test_fragment_maintained_on_redirect(self, httpbin): fragment = "#view=edit&token=hunter2" r = requests.get(httpbin("redirect-to?url=get") + fragment) assert len(r.history) > 0 assert r.history[0].request.url == httpbin("redirect-to?url=get") + fragment assert r.url == httpbin("get") + fragment def test_HTTP_200_OK_GET_WITH_PARAMS(self, httpbin): heads = {"User-agent": "Mozilla/5.0"} r = requests.get(httpbin("user-agent"), headers=heads) assert heads["User-agent"] in r.text assert r.status_code == 200 def test_HTTP_200_OK_GET_WITH_MIXED_PARAMS(self, httpbin): heads = {"User-agent": "Mozilla/5.0"} r = requests.get( httpbin("get") + "?test=true", params={"q": "test"}, headers=heads ) assert r.status_code == 200 def test_set_cookie_on_301(self, httpbin): s = requests.session() url = httpbin("cookies/set?foo=bar") s.get(url) assert s.cookies["foo"] == "bar" def test_cookie_sent_on_redirect(self, httpbin): s = requests.session() s.get(httpbin("cookies/set?foo=bar")) r = s.get(httpbin("redirect/1")) # 
redirects to httpbin('get') assert "Cookie" in r.json()["headers"] def test_cookie_removed_on_expire(self, httpbin): s = requests.session() s.get(httpbin("cookies/set?foo=bar")) assert s.cookies["foo"] == "bar" s.get( httpbin("response-headers"), params={"Set-Cookie": "foo=deleted; expires=Thu, 01-Jan-1970 00:00:01 GMT"}, ) assert "foo" not in s.cookies def test_cookie_quote_wrapped(self, httpbin): s = requests.session() s.get(httpbin('cookies/set?foo="bar:baz"')) assert s.cookies["foo"] == '"bar:baz"' def test_cookie_persists_via_api(self, httpbin): s = requests.session() r = s.get(httpbin("redirect/1"), cookies={"foo": "bar"}) assert "foo" in r.request.headers["Cookie"] assert "foo" in r.history[0].request.headers["Cookie"] def test_request_cookie_overrides_session_cookie(self, httpbin): s = requests.session() s.cookies["foo"] = "bar" r = s.get(httpbin("cookies"), cookies={"foo": "baz"}) assert r.json()["cookies"]["foo"] == "baz" # Session cookie should not be modified assert s.cookies["foo"] == "bar" def test_request_cookies_not_persisted(self, httpbin): s = requests.session() s.get(httpbin("cookies"), cookies={"foo": "baz"}) # Sending a request with cookies should not add cookies to the session assert not s.cookies def test_generic_cookiejar_works(self, httpbin): cj = cookielib.CookieJar() cookiejar_from_dict({"foo": "bar"}, cj) s = requests.session() s.cookies = cj r = s.get(httpbin("cookies")) # Make sure the cookie was sent assert r.json()["cookies"]["foo"] == "bar" # Make sure the session cj is still the custom one assert s.cookies is cj def test_param_cookiejar_works(self, httpbin): cj = cookielib.CookieJar() cookiejar_from_dict({"foo": "bar"}, cj) s = requests.session() r = s.get(httpbin("cookies"), cookies=cj) # Make sure the cookie was sent assert r.json()["cookies"]["foo"] == "bar" def test_cookielib_cookiejar_on_redirect(self, httpbin): """Tests resolve_redirect doesn't fail when merging cookies with non-RequestsCookieJar cookiejar. 
See GH #3579 """ cj = cookiejar_from_dict({"foo": "bar"}, cookielib.CookieJar()) s = requests.Session() s.cookies = cookiejar_from_dict({"cookie": "tasty"}) # Prepare request without using Session req = requests.Request("GET", httpbin("headers"), cookies=cj) prep_req = req.prepare() # Send request and simulate redirect resp = s.send(prep_req) resp.status_code = 302 resp.headers["location"] = httpbin("get") redirects = s.resolve_redirects(resp, prep_req) resp = next(redirects) # Verify CookieJar isn't being converted to RequestsCookieJar assert isinstance(prep_req._cookies, cookielib.CookieJar) assert isinstance(resp.request._cookies, cookielib.CookieJar) assert not isinstance(resp.request._cookies, requests.cookies.RequestsCookieJar) cookies = {} for c in resp.request._cookies: cookies[c.name] = c.value assert cookies["foo"] == "bar" assert cookies["cookie"] == "tasty" def test_requests_in_history_are_not_overridden(self, httpbin): resp = requests.get(httpbin("redirect/3")) urls = [r.url for r in resp.history] req_urls = [r.request.url for r in resp.history] assert urls == req_urls def test_history_is_always_a_list(self, httpbin): """Show that even with redirects, Response.history is always a list.""" resp = requests.get(httpbin("get")) assert isinstance(resp.history, list) resp = requests.get(httpbin("redirect/1")) assert isinstance(resp.history, list) assert not isinstance(resp.history, tuple) def test_headers_on_session_with_None_are_not_sent(self, httpbin): """Do not send headers in Session.headers with None values.""" ses = requests.Session() ses.headers["Accept-Encoding"] = None req = requests.Request("GET", httpbin("get")) prep = ses.prepare_request(req) assert "Accept-Encoding" not in prep.headers def test_headers_preserve_order(self, httpbin): """Preserve order when headers provided as OrderedDict.""" ses = requests.Session() ses.headers = collections.OrderedDict() ses.headers["Accept-Encoding"] = "identity" ses.headers["First"] = "1" ses.headers["Second"] 
= "2" headers = collections.OrderedDict([("Third", "3"), ("Fourth", "4")]) headers["Fifth"] = "5" headers["Second"] = "222" req = requests.Request("GET", httpbin("get"), headers=headers) prep = ses.prepare_request(req) items = list(prep.headers.items()) assert items[0] == ("Accept-Encoding", "identity") assert items[1] == ("First", "1") assert items[2] == ("Second", "222") assert items[3] == ("Third", "3") assert items[4] == ("Fourth", "4") assert items[5] == ("Fifth", "5") @pytest.mark.parametrize("key", ("User-agent", "user-agent")) def test_user_agent_transfers(self, httpbin, key): heads = {key: "Mozilla/5.0 (github.com/psf/requests)"} r = requests.get(httpbin("user-agent"), headers=heads) assert heads[key] in r.text def test_HTTP_200_OK_HEAD(self, httpbin): r = requests.head(httpbin("get")) assert r.status_code == 200 def test_HTTP_200_OK_PUT(self, httpbin): r = requests.put(httpbin("put")) assert r.status_code == 200 def test_BASICAUTH_TUPLE_HTTP_200_OK_GET(self, httpbin): auth = ("user", "pass") url = httpbin("basic-auth", "user", "pass") r = requests.get(url, auth=auth) assert r.status_code == 200 r = requests.get(url) assert r.status_code == 401 s = requests.session() s.auth = auth r = s.get(url) assert r.status_code == 200 @pytest.mark.parametrize( "username, password", ( ("user", "pass"), ("имя".encode(), "пароль".encode()), (42, 42), (None, None), ), ) def test_set_basicauth(self, httpbin, username, password): auth = (username, password) url = httpbin("get") r = requests.Request("GET", url, auth=auth) p = r.prepare() assert p.headers["Authorization"] == _basic_auth_str(username, password) def test_basicauth_encodes_byte_strings(self): """Ensure b'test' formats as the byte string "test" rather than the unicode string "b'test'" in Python 3. 
""" auth = (b"\xc5\xafsername", b"test\xc6\xb6") r = requests.Request("GET", "http://localhost", auth=auth) p = r.prepare() assert p.headers["Authorization"] == "Basic xa9zZXJuYW1lOnRlc3TGtg==" @pytest.mark.parametrize( "url, exception", ( # Connecting to an unknown domain should raise a ConnectionError ("http://doesnotexist.google.com", ConnectionError), # Connecting to an invalid port should raise a ConnectionError ("http://localhost:1", ConnectionError), # Inputing a URL that cannot be parsed should raise an InvalidURL error ("http://fe80::5054:ff:fe5a:fc0", InvalidURL), ), ) def test_errors(self, url, exception): with pytest.raises(exception): requests.get(url, timeout=1) def test_proxy_error(self): # any proxy related error (address resolution, no route to host, etc) should result in a ProxyError with pytest.raises(ProxyError): requests.get( "http://localhost:1", proxies={"http": "non-resolvable-address"} ) def test_proxy_error_on_bad_url(self, httpbin, httpbin_secure): with pytest.raises(InvalidProxyURL): requests.get(httpbin_secure(), proxies={"https": "http:/badproxyurl:3128"}) with pytest.raises(InvalidProxyURL): requests.get(httpbin(), proxies={"http": "http://:8080"}) with pytest.raises(InvalidProxyURL): requests.get(httpbin_secure(), proxies={"https": "https://"}) with pytest.raises(InvalidProxyURL): requests.get(httpbin(), proxies={"http": "http:///example.com:8080"}) def test_respect_proxy_env_on_send_self_prepared_request(self, httpbin): with override_environ(http_proxy=INVALID_PROXY): with pytest.raises(ProxyError): session = requests.Session() request = requests.Request("GET", httpbin()) session.send(request.prepare()) def test_respect_proxy_env_on_send_session_prepared_request(self, httpbin): with override_environ(http_proxy=INVALID_PROXY): with pytest.raises(ProxyError): session = requests.Session() request = requests.Request("GET", httpbin()) prepared = session.prepare_request(request) session.send(prepared) def 
test_respect_proxy_env_on_send_with_redirects(self, httpbin): with override_environ(http_proxy=INVALID_PROXY): with pytest.raises(ProxyError): session = requests.Session() url = httpbin("redirect/1") print(url) request = requests.Request("GET", url) session.send(request.prepare()) def test_respect_proxy_env_on_get(self, httpbin): with override_environ(http_proxy=INVALID_PROXY): with pytest.raises(ProxyError): session = requests.Session() session.get(httpbin()) def test_respect_proxy_env_on_request(self, httpbin): with override_environ(http_proxy=INVALID_PROXY): with pytest.raises(ProxyError): session = requests.Session() session.request(method="GET", url=httpbin()) def test_proxy_authorization_preserved_on_request(self, httpbin): proxy_auth_value = "Bearer XXX" session = requests.Session() session.headers.update({"Proxy-Authorization": proxy_auth_value}) resp = session.request(method="GET", url=httpbin("get")) sent_headers = resp.json().get("headers", {}) assert sent_headers.get("Proxy-Authorization") == proxy_auth_value @pytest.mark.parametrize( "url,has_proxy_auth", ( ("http://example.com", True), ("https://example.com", False), ), ) def test_proxy_authorization_not_appended_to_https_request( self, url, has_proxy_auth ): session = requests.Session() proxies = { "http": "http://test:pass@localhost:8080", "https": "http://test:pass@localhost:8090", } req = requests.Request("GET", url) prep = req.prepare() session.rebuild_proxies(prep, proxies) assert ("Proxy-Authorization" in prep.headers) is has_proxy_auth def test_basicauth_with_netrc(self, httpbin): auth = ("user", "pass") wrong_auth = ("wronguser", "wrongpass") url = httpbin("basic-auth", "user", "pass") old_auth = requests.sessions.get_netrc_auth try: def get_netrc_auth_mock(url): return auth requests.sessions.get_netrc_auth = get_netrc_auth_mock # Should use netrc and work. r = requests.get(url) assert r.status_code == 200 # Given auth should override and fail. 
r = requests.get(url, auth=wrong_auth) assert r.status_code == 401 s = requests.session() # Should use netrc and work. r = s.get(url) assert r.status_code == 200 # Given auth should override and fail. s.auth = wrong_auth r = s.get(url) assert r.status_code == 401 finally: requests.sessions.get_netrc_auth = old_auth def test_basicauth_with_netrc_leak(self, httpbin): url1 = httpbin("basic-auth", "user", "pass") url = url1[len("http://") :] domain = url.split(":")[0] url = f"http://example.com:@{url}" netrc_file = "" with tempfile.NamedTemporaryFile(mode="w", delete=False) as fp: fp.write("machine example.com\n") fp.write("login wronguser\n") fp.write("password wrongpass\n") fp.write(f"machine {domain}\n") fp.write("login user\n") fp.write("password pass\n") fp.close() netrc_file = fp.name old_netrc = os.environ.get("NETRC", "") os.environ["NETRC"] = netrc_file try: # Should use netrc # Make sure that we don't use the example.com credentails # for the request r = requests.get(url) assert r.status_code == 200 finally: os.environ["NETRC"] = old_netrc os.unlink(netrc_file) def test_DIGEST_HTTP_200_OK_GET(self, httpbin): for authtype in self.digest_auth_algo: auth = HTTPDigestAuth("user", "pass") url = httpbin("digest-auth", "auth", "user", "pass", authtype, "never") r = requests.get(url, auth=auth) assert r.status_code == 200 r = requests.get(url) assert r.status_code == 401 print(r.headers["WWW-Authenticate"]) s = requests.session() s.auth = HTTPDigestAuth("user", "pass") r = s.get(url) assert r.status_code == 200 def test_DIGEST_AUTH_RETURNS_COOKIE(self, httpbin): for authtype in self.digest_auth_algo: url = httpbin("digest-auth", "auth", "user", "pass", authtype) auth = HTTPDigestAuth("user", "pass") r = requests.get(url) assert r.cookies["fake"] == "fake_value" r = requests.get(url, auth=auth) assert r.status_code == 200 def test_DIGEST_AUTH_SETS_SESSION_COOKIES(self, httpbin): for authtype in self.digest_auth_algo: url = httpbin("digest-auth", "auth", "user", 
"pass", authtype) auth = HTTPDigestAuth("user", "pass") s = requests.Session() s.get(url, auth=auth) assert s.cookies["fake"] == "fake_value" def test_DIGEST_STREAM(self, httpbin): for authtype in self.digest_auth_algo: auth = HTTPDigestAuth("user", "pass") url = httpbin("digest-auth", "auth", "user", "pass", authtype) r = requests.get(url, auth=auth, stream=True) assert r.raw.read() != b"" r = requests.get(url, auth=auth, stream=False) assert r.raw.read() == b"" def test_DIGESTAUTH_WRONG_HTTP_401_GET(self, httpbin): for authtype in self.digest_auth_algo: auth = HTTPDigestAuth("user", "wrongpass") url = httpbin("digest-auth", "auth", "user", "pass", authtype) r = requests.get(url, auth=auth) assert r.status_code == 401 r = requests.get(url) assert r.status_code == 401 s = requests.session() s.auth = auth r = s.get(url) assert r.status_code == 401 def test_DIGESTAUTH_QUOTES_QOP_VALUE(self, httpbin): for authtype in self.digest_auth_algo: auth = HTTPDigestAuth("user", "pass") url = httpbin("digest-auth", "auth", "user", "pass", authtype) r = requests.get(url, auth=auth) assert '"auth"' in r.request.headers["Authorization"] def test_POSTBIN_GET_POST_FILES(self, httpbin): url = httpbin("post") requests.post(url).raise_for_status() post1 = requests.post(url, data={"some": "data"}) assert post1.status_code == 200 with open("requirements-dev.txt") as f: post2 = requests.post(url, files={"some": f}) assert post2.status_code == 200 post4 = requests.post(url, data='[{"some": "json"}]') assert post4.status_code == 200 with pytest.raises(ValueError): requests.post(url, files=["bad file data"]) def test_invalid_files_input(self, httpbin): url = httpbin("post") post = requests.post(url, files={"random-file-1": None, "random-file-2": 1}) assert b'name="random-file-1"' not in post.request.body assert b'name="random-file-2"' in post.request.body def test_POSTBIN_SEEKED_OBJECT_WITH_NO_ITER(self, httpbin): class TestStream: def __init__(self, data): self.data = data.encode() 
self.length = len(self.data) self.index = 0 def __len__(self): return self.length def read(self, size=None): if size: ret = self.data[self.index : self.index + size] self.index += size else: ret = self.data[self.index :] self.index = self.length return ret def tell(self): return self.index def seek(self, offset, where=0): if where == 0: self.index = offset elif where == 1: self.index += offset elif where == 2: self.index = self.length + offset test = TestStream("test") post1 = requests.post(httpbin("post"), data=test) assert post1.status_code == 200 assert post1.json()["data"] == "test" test = TestStream("test") test.seek(2) post2 = requests.post(httpbin("post"), data=test) assert post2.status_code == 200 assert post2.json()["data"] == "st" def test_POSTBIN_GET_POST_FILES_WITH_DATA(self, httpbin): url = httpbin("post") requests.post(url).raise_for_status() post1 = requests.post(url, data={"some": "data"}) assert post1.status_code == 200 with open("requirements-dev.txt") as f: post2 = requests.post(url, data={"some": "data"}, files={"some": f}) assert post2.status_code == 200 post4 = requests.post(url, data='[{"some": "json"}]') assert post4.status_code == 200 with pytest.raises(ValueError): requests.post(url, files=["bad file data"]) def test_post_with_custom_mapping(self, httpbin): class CustomMapping(MutableMapping): def __init__(self, *args, **kwargs): self.data = dict(*args, **kwargs) def __delitem__(self, key): del self.data[key] def __getitem__(self, key): return self.data[key] def __setitem__(self, key, value): self.data[key] = value def __iter__(self):
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
true
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/testserver/__init__.py
tests/testserver/__init__.py
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/tests/testserver/server.py
tests/testserver/server.py
import select import socket import ssl import threading def consume_socket_content(sock, timeout=0.5): chunks = 65536 content = b"" while True: more_to_read = select.select([sock], [], [], timeout)[0] if not more_to_read: break new_content = sock.recv(chunks) if not new_content: break content += new_content return content class Server(threading.Thread): """Dummy server using for unit testing""" WAIT_EVENT_TIMEOUT = 5 def __init__( self, handler=None, host="localhost", port=0, requests_to_handle=1, wait_to_close_event=None, ): super().__init__() self.handler = handler or consume_socket_content self.handler_results = [] self.host = host self.port = port self.requests_to_handle = requests_to_handle self.wait_to_close_event = wait_to_close_event self.ready_event = threading.Event() self.stop_event = threading.Event() @classmethod def text_response_server(cls, text, request_timeout=0.5, **kwargs): def text_response_handler(sock): request_content = consume_socket_content(sock, timeout=request_timeout) sock.send(text.encode("utf-8")) return request_content return Server(text_response_handler, **kwargs) @classmethod def basic_response_server(cls, **kwargs): return cls.text_response_server( "HTTP/1.1 200 OK\r\n" + "Content-Length: 0\r\n\r\n", **kwargs ) def run(self): try: self.server_sock = self._create_socket_and_bind() # in case self.port = 0 self.port = self.server_sock.getsockname()[1] self.ready_event.set() self._handle_requests() if self.wait_to_close_event: self.wait_to_close_event.wait(self.WAIT_EVENT_TIMEOUT) finally: self.ready_event.set() # just in case of exception self._close_server_sock_ignore_errors() self.stop_event.set() def _create_socket_and_bind(self): sock = socket.socket() sock.bind((self.host, self.port)) sock.listen() return sock def _close_server_sock_ignore_errors(self): try: self.server_sock.close() except OSError: pass def _handle_requests(self): for _ in range(self.requests_to_handle): sock = self._accept_connection() if not sock: break 
handler_result = self.handler(sock) self.handler_results.append(handler_result) sock.close() def _accept_connection(self): try: ready, _, _ = select.select( [self.server_sock], [], [], self.WAIT_EVENT_TIMEOUT ) if not ready: return None return self.server_sock.accept()[0] except OSError: return None def __enter__(self): self.start() if not self.ready_event.wait(self.WAIT_EVENT_TIMEOUT): raise RuntimeError("Timeout waiting for server to be ready.") return self.host, self.port def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: self.stop_event.wait(self.WAIT_EVENT_TIMEOUT) else: if self.wait_to_close_event: # avoid server from waiting for event timeouts # if an exception is found in the main thread self.wait_to_close_event.set() # ensure server thread doesn't get stuck waiting for connections self._close_server_sock_ignore_errors() self.join() return False # allow exceptions to propagate class TLSServer(Server): def __init__( self, *, handler=None, host="localhost", port=0, requests_to_handle=1, wait_to_close_event=None, cert_chain=None, keyfile=None, mutual_tls=False, cacert=None, ): super().__init__( handler=handler, host=host, port=port, requests_to_handle=requests_to_handle, wait_to_close_event=wait_to_close_event, ) self.cert_chain = cert_chain self.keyfile = keyfile self.ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) self.ssl_context.load_cert_chain(self.cert_chain, keyfile=self.keyfile) self.mutual_tls = mutual_tls self.cacert = cacert if mutual_tls: # For simplicity, we're going to assume that the client cert is # issued by the same CA as our Server certificate self.ssl_context.verify_mode = ssl.CERT_OPTIONAL self.ssl_context.load_verify_locations(self.cacert) def _create_socket_and_bind(self): sock = socket.socket() sock = self.ssl_context.wrap_socket(sock, server_side=True) sock.bind((self.host, self.port)) sock.listen() return sock
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/docs/conf.py
docs/conf.py
# -*- coding: utf-8 -*- # # Requests documentation build configuration file, created by # sphinx-quickstart on Fri Feb 19 00:05:47 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # Insert Requests' path into the system. sys.path.insert(0, os.path.abspath("..")) sys.path.insert(0, os.path.abspath("_themes")) import requests # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = u"Requests" copyright = u'MMXVIX. A Kenneth Reitz Project' author = u"Kenneth Reitz" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. 
# # The short X.Y version. version = requests.__version__ # The full version, including alpha/beta/rc tags. release = requests.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "flask_theme_support.FlaskyStyle" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. html_theme_options = { "show_powered_by": False, "github_user": "requests", "github_repo": "requests", "github_banner": True, "show_related": False, "note_bg": "#FFF59C", } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = False # Custom sidebar templates, maps document names to template names. 
html_sidebars = { "index": ["sidebarintro.html", "sourcelink.html", "searchbox.html", "hacks.html"], "**": [ "sidebarlogo.html", "localtoc.html", "relations.html", "sourcelink.html", "searchbox.html", "hacks.html", ], } # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = "Requestsdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "Requests.tex", u"Requests Documentation", u"Kenneth Reitz", "manual") ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "requests", u"Requests Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "Requests", u"Requests Documentation", author, "Requests", "One line description of project.", "Miscellaneous", ) ] # Documents to append as an appendix to all manuals. 
# texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright # The basename for the epub file. It defaults to the project name. # epub_basename = project # The HTML theme for the epub output. Since the default themes are not # optimized for small screen space, using the same theme for HTML and epub # output is usually not wise. This defaults to 'epub', a theme designed to save # visual space. # epub_theme = 'epub' # The language of the text. It defaults to the language option # or 'en' if the language is not set. # epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. # epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. # epub_identifier = '' # A unique identification for the text. # epub_uid = '' # A tuple containing the cover image and cover page html template filenames. # epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. # epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. # epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ["search.html"] # The depth of the table of contents in toc.ncx. # epub_tocdepth = 3 # Allow duplicate toc entries. 
# epub_tocdup = True # Choose between 'default' and 'includehidden'. # epub_tocscope = 'default' # Fix unsupported image types using the Pillow. # epub_fix_images = False # Scale large images. # epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. # epub_show_urls = 'inline' # If false, no index is generated. # epub_use_index = True intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "urllib3": ("https://urllib3.readthedocs.io/en/latest", None), }
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
psf/requests
https://github.com/psf/requests/blob/70298332899f25826e35e42f8d83425124f755a5/docs/_themes/flask_theme_support.py
docs/_themes/flask_theme_support.py
# flasky extensions. flasky pygments style based on tango style from pygments.style import Style from pygments.token import Keyword, Name, Comment, String, Error, \ Number, Operator, Generic, Whitespace, Punctuation, Other, Literal class FlaskyStyle(Style): background_color = "#f8f8f8" default_style = "" styles = { # No corresponding class for the following: #Text: "", # class: '' Whitespace: "underline #f8f8f8", # class: 'w' Error: "#a40000 border:#ef2929", # class: 'err' Other: "#000000", # class 'x' Comment: "italic #8f5902", # class: 'c' Comment.Preproc: "noitalic", # class: 'cp' Keyword: "bold #004461", # class: 'k' Keyword.Constant: "bold #004461", # class: 'kc' Keyword.Declaration: "bold #004461", # class: 'kd' Keyword.Namespace: "bold #004461", # class: 'kn' Keyword.Pseudo: "bold #004461", # class: 'kp' Keyword.Reserved: "bold #004461", # class: 'kr' Keyword.Type: "bold #004461", # class: 'kt' Operator: "#582800", # class: 'o' Operator.Word: "bold #004461", # class: 'ow' - like keywords Punctuation: "bold #000000", # class: 'p' # because special names such as Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. 
Name: "#000000", # class: 'n' Name.Attribute: "#c4a000", # class: 'na' - to be revised Name.Builtin: "#004461", # class: 'nb' Name.Builtin.Pseudo: "#3465a4", # class: 'bp' Name.Class: "#000000", # class: 'nc' - to be revised Name.Constant: "#000000", # class: 'no' - to be revised Name.Decorator: "#888", # class: 'nd' - to be revised Name.Entity: "#ce5c00", # class: 'ni' Name.Exception: "bold #cc0000", # class: 'ne' Name.Function: "#000000", # class: 'nf' Name.Property: "#000000", # class: 'py' Name.Label: "#f57900", # class: 'nl' Name.Namespace: "#000000", # class: 'nn' - to be revised Name.Other: "#000000", # class: 'nx' Name.Tag: "bold #004461", # class: 'nt' - like a keyword Name.Variable: "#000000", # class: 'nv' - to be revised Name.Variable.Class: "#000000", # class: 'vc' - to be revised Name.Variable.Global: "#000000", # class: 'vg' - to be revised Name.Variable.Instance: "#000000", # class: 'vi' - to be revised Number: "#990000", # class: 'm' Literal: "#000000", # class: 'l' Literal.Date: "#000000", # class: 'ld' String: "#4e9a06", # class: 's' String.Backtick: "#4e9a06", # class: 'sb' String.Char: "#4e9a06", # class: 'sc' String.Doc: "italic #8f5902", # class: 'sd' - like a comment String.Double: "#4e9a06", # class: 's2' String.Escape: "#4e9a06", # class: 'se' String.Heredoc: "#4e9a06", # class: 'sh' String.Interpol: "#4e9a06", # class: 'si' String.Other: "#4e9a06", # class: 'sx' String.Regex: "#4e9a06", # class: 'sr' String.Single: "#4e9a06", # class: 's1' String.Symbol: "#4e9a06", # class: 'ss' Generic: "#000000", # class: 'g' Generic.Deleted: "#a40000", # class: 'gd' Generic.Emph: "italic #000000", # class: 'ge' Generic.Error: "#ef2929", # class: 'gr' Generic.Heading: "bold #000080", # class: 'gh' Generic.Inserted: "#00A000", # class: 'gi' Generic.Output: "#888", # class: 'go' Generic.Prompt: "#745334", # class: 'gp' Generic.Strong: "bold #000000", # class: 'gs' Generic.Subheading: "bold #800080", # class: 'gu' Generic.Traceback: "bold #a40000", # 
class: 'gt' }
python
Apache-2.0
70298332899f25826e35e42f8d83425124f755a5
2026-01-04T14:39:22.525405Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/setup.py
setup.py
from setuptools import find_packages, setup with open("README.md", "r", encoding="utf-8") as fh: long_description = fh.read() setup( name="openmanus", version="0.1.0", author="mannaandpoem and OpenManus Team", author_email="mannaandpoem@gmail.com", description="A versatile agent that can solve various tasks using multiple tools", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/FoundationAgents/OpenManus", packages=find_packages(), install_requires=[ "pydantic~=2.10.4", "openai>=1.58.1,<1.67.0", "tenacity~=9.0.0", "pyyaml~=6.0.2", "loguru~=0.7.3", "numpy", "datasets>=3.2,<3.5", "html2text~=2024.2.26", "gymnasium>=1.0,<1.2", "pillow>=10.4,<11.2", "browsergym~=0.13.3", "uvicorn~=0.34.0", "unidiff~=0.7.5", "browser-use~=0.1.40", "googlesearch-python~=1.3.0", "aiofiles~=24.1.0", "pydantic_core>=2.27.2,<2.28.0", "colorama~=0.4.6", ], classifiers=[ "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.12", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires=">=3.12", entry_points={ "console_scripts": [ "openmanus=main:main", ], }, )
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/sandbox_main.py
sandbox_main.py
import argparse import asyncio from app.agent.sandbox_agent import SandboxManus from app.logger import logger async def main(): # Parse command line arguments parser = argparse.ArgumentParser(description="Run Manus agent with a prompt") parser.add_argument( "--prompt", type=str, required=False, help="Input prompt for the agent" ) args = parser.parse_args() # Create and initialize Manus agent agent = await SandboxManus.create() try: # Use command line prompt if provided, otherwise ask for input prompt = args.prompt if args.prompt else input("Enter your prompt: ") if not prompt.strip(): logger.warning("Empty prompt provided.") return logger.warning("Processing your request...") await agent.run(prompt) logger.info("Request processing completed.") except KeyboardInterrupt: logger.warning("Operation interrupted.") finally: # Ensure agent resources are cleaned up before exiting await agent.cleanup() if __name__ == "__main__": asyncio.run(main())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/run_mcp.py
run_mcp.py
#!/usr/bin/env python import argparse import asyncio import sys from app.agent.mcp import MCPAgent from app.config import config from app.logger import logger class MCPRunner: """Runner class for MCP Agent with proper path handling and configuration.""" def __init__(self): self.root_path = config.root_path self.server_reference = config.mcp_config.server_reference self.agent = MCPAgent() async def initialize( self, connection_type: str, server_url: str | None = None, ) -> None: """Initialize the MCP agent with the appropriate connection.""" logger.info(f"Initializing MCPAgent with {connection_type} connection...") if connection_type == "stdio": await self.agent.initialize( connection_type="stdio", command=sys.executable, args=["-m", self.server_reference], ) else: # sse await self.agent.initialize(connection_type="sse", server_url=server_url) logger.info(f"Connected to MCP server via {connection_type}") async def run_interactive(self) -> None: """Run the agent in interactive mode.""" print("\nMCP Agent Interactive Mode (type 'exit' to quit)\n") while True: user_input = input("\nEnter your request: ") if user_input.lower() in ["exit", "quit", "q"]: break response = await self.agent.run(user_input) print(f"\nAgent: {response}") async def run_single_prompt(self, prompt: str) -> None: """Run the agent with a single prompt.""" await self.agent.run(prompt) async def run_default(self) -> None: """Run the agent in default mode.""" prompt = input("Enter your prompt: ") if not prompt.strip(): logger.warning("Empty prompt provided.") return logger.warning("Processing your request...") await self.agent.run(prompt) logger.info("Request processing completed.") async def cleanup(self) -> None: """Clean up agent resources.""" await self.agent.cleanup() logger.info("Session ended") def parse_args() -> argparse.Namespace: """Parse command line arguments.""" parser = argparse.ArgumentParser(description="Run the MCP Agent") parser.add_argument( "--connection", "-c", choices=["stdio", 
"sse"], default="stdio", help="Connection type: stdio or sse", ) parser.add_argument( "--server-url", default="http://127.0.0.1:8000/sse", help="URL for SSE connection", ) parser.add_argument( "--interactive", "-i", action="store_true", help="Run in interactive mode" ) parser.add_argument("--prompt", "-p", help="Single prompt to execute and exit") return parser.parse_args() async def run_mcp() -> None: """Main entry point for the MCP runner.""" args = parse_args() runner = MCPRunner() try: await runner.initialize(args.connection, args.server_url) if args.prompt: await runner.run_single_prompt(args.prompt) elif args.interactive: await runner.run_interactive() else: await runner.run_default() except KeyboardInterrupt: logger.info("Program interrupted by user") except Exception as e: logger.error(f"Error running MCPAgent: {str(e)}", exc_info=True) sys.exit(1) finally: await runner.cleanup() if __name__ == "__main__": asyncio.run(run_mcp())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/main.py
main.py
import argparse import asyncio from app.agent.manus import Manus from app.logger import logger async def main(): # Parse command line arguments parser = argparse.ArgumentParser(description="Run Manus agent with a prompt") parser.add_argument( "--prompt", type=str, required=False, help="Input prompt for the agent" ) args = parser.parse_args() # Create and initialize Manus agent agent = await Manus.create() try: # Use command line prompt if provided, otherwise ask for input prompt = args.prompt if args.prompt else input("Enter your prompt: ") if not prompt.strip(): logger.warning("Empty prompt provided.") return logger.warning("Processing your request...") await agent.run(prompt) logger.info("Request processing completed.") except KeyboardInterrupt: logger.warning("Operation interrupted.") finally: # Ensure agent resources are cleaned up before exiting await agent.cleanup() if __name__ == "__main__": asyncio.run(main())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/run_flow.py
run_flow.py
import asyncio import time from app.agent.data_analysis import DataAnalysis from app.agent.manus import Manus from app.config import config from app.flow.flow_factory import FlowFactory, FlowType from app.logger import logger async def run_flow(): agents = { "manus": Manus(), } if config.run_flow_config.use_data_analysis_agent: agents["data_analysis"] = DataAnalysis() try: prompt = input("Enter your prompt: ") if prompt.strip().isspace() or not prompt: logger.warning("Empty prompt provided.") return flow = FlowFactory.create_flow( flow_type=FlowType.PLANNING, agents=agents, ) logger.warning("Processing your request...") try: start_time = time.time() result = await asyncio.wait_for( flow.execute(prompt), timeout=3600, # 60 minute timeout for the entire execution ) elapsed_time = time.time() - start_time logger.info(f"Request processed in {elapsed_time:.2f} seconds") logger.info(result) except asyncio.TimeoutError: logger.error("Request processing timed out after 1 hour") logger.info( "Operation terminated due to timeout. Please try a simpler request." ) except KeyboardInterrupt: logger.info("Operation cancelled by user.") except Exception as e: logger.error(f"Error: {str(e)}") if __name__ == "__main__": asyncio.run(run_flow())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/run_mcp_server.py
run_mcp_server.py
# coding: utf-8 # A shortcut to launch OpenManus MCP server, where its introduction also solves other import issues. from app.mcp.server import MCPServer, parse_args if __name__ == "__main__": args = parse_args() # Create and run server (maintaining original flow) server = MCPServer() server.run(transport=args.transport)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/llm.py
app/llm.py
import math from typing import Dict, List, Optional, Union import tiktoken from openai import ( APIError, AsyncAzureOpenAI, AsyncOpenAI, AuthenticationError, OpenAIError, RateLimitError, ) from openai.types.chat import ChatCompletion, ChatCompletionMessage from tenacity import ( retry, retry_if_exception_type, stop_after_attempt, wait_random_exponential, ) from app.bedrock import BedrockClient from app.config import LLMSettings, config from app.exceptions import TokenLimitExceeded from app.logger import logger # Assuming a logger is set up in your app from app.schema import ( ROLE_VALUES, TOOL_CHOICE_TYPE, TOOL_CHOICE_VALUES, Message, ToolChoice, ) REASONING_MODELS = ["o1", "o3-mini"] MULTIMODAL_MODELS = [ "gpt-4-vision-preview", "gpt-4o", "gpt-4o-mini", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307", ] class TokenCounter: # Token constants BASE_MESSAGE_TOKENS = 4 FORMAT_TOKENS = 2 LOW_DETAIL_IMAGE_TOKENS = 85 HIGH_DETAIL_TILE_TOKENS = 170 # Image processing constants MAX_SIZE = 2048 HIGH_DETAIL_TARGET_SHORT_SIDE = 768 TILE_SIZE = 512 def __init__(self, tokenizer): self.tokenizer = tokenizer def count_text(self, text: str) -> int: """Calculate tokens for a text string""" return 0 if not text else len(self.tokenizer.encode(text)) def count_image(self, image_item: dict) -> int: """ Calculate tokens for an image based on detail level and dimensions For "low" detail: fixed 85 tokens For "high" detail: 1. Scale to fit in 2048x2048 square 2. Scale shortest side to 768px 3. Count 512px tiles (170 tokens each) 4. 
Add 85 tokens """ detail = image_item.get("detail", "medium") # For low detail, always return fixed token count if detail == "low": return self.LOW_DETAIL_IMAGE_TOKENS # For medium detail (default in OpenAI), use high detail calculation # OpenAI doesn't specify a separate calculation for medium # For high detail, calculate based on dimensions if available if detail == "high" or detail == "medium": # If dimensions are provided in the image_item if "dimensions" in image_item: width, height = image_item["dimensions"] return self._calculate_high_detail_tokens(width, height) return ( self._calculate_high_detail_tokens(1024, 1024) if detail == "high" else 1024 ) def _calculate_high_detail_tokens(self, width: int, height: int) -> int: """Calculate tokens for high detail images based on dimensions""" # Step 1: Scale to fit in MAX_SIZE x MAX_SIZE square if width > self.MAX_SIZE or height > self.MAX_SIZE: scale = self.MAX_SIZE / max(width, height) width = int(width * scale) height = int(height * scale) # Step 2: Scale so shortest side is HIGH_DETAIL_TARGET_SHORT_SIDE scale = self.HIGH_DETAIL_TARGET_SHORT_SIDE / min(width, height) scaled_width = int(width * scale) scaled_height = int(height * scale) # Step 3: Count number of 512px tiles tiles_x = math.ceil(scaled_width / self.TILE_SIZE) tiles_y = math.ceil(scaled_height / self.TILE_SIZE) total_tiles = tiles_x * tiles_y # Step 4: Calculate final token count return ( total_tiles * self.HIGH_DETAIL_TILE_TOKENS ) + self.LOW_DETAIL_IMAGE_TOKENS def count_content(self, content: Union[str, List[Union[str, dict]]]) -> int: """Calculate tokens for message content""" if not content: return 0 if isinstance(content, str): return self.count_text(content) token_count = 0 for item in content: if isinstance(item, str): token_count += self.count_text(item) elif isinstance(item, dict): if "text" in item: token_count += self.count_text(item["text"]) elif "image_url" in item: token_count += self.count_image(item) return token_count def 
count_tool_calls(self, tool_calls: List[dict]) -> int: """Calculate tokens for tool calls""" token_count = 0 for tool_call in tool_calls: if "function" in tool_call: function = tool_call["function"] token_count += self.count_text(function.get("name", "")) token_count += self.count_text(function.get("arguments", "")) return token_count def count_message_tokens(self, messages: List[dict]) -> int: """Calculate the total number of tokens in a message list""" total_tokens = self.FORMAT_TOKENS # Base format tokens for message in messages: tokens = self.BASE_MESSAGE_TOKENS # Base tokens per message # Add role tokens tokens += self.count_text(message.get("role", "")) # Add content tokens if "content" in message: tokens += self.count_content(message["content"]) # Add tool calls tokens if "tool_calls" in message: tokens += self.count_tool_calls(message["tool_calls"]) # Add name and tool_call_id tokens tokens += self.count_text(message.get("name", "")) tokens += self.count_text(message.get("tool_call_id", "")) total_tokens += tokens return total_tokens class LLM: _instances: Dict[str, "LLM"] = {} def __new__( cls, config_name: str = "default", llm_config: Optional[LLMSettings] = None ): if config_name not in cls._instances: instance = super().__new__(cls) instance.__init__(config_name, llm_config) cls._instances[config_name] = instance return cls._instances[config_name] def __init__( self, config_name: str = "default", llm_config: Optional[LLMSettings] = None ): if not hasattr(self, "client"): # Only initialize if not already initialized llm_config = llm_config or config.llm llm_config = llm_config.get(config_name, llm_config["default"]) self.model = llm_config.model self.max_tokens = llm_config.max_tokens self.temperature = llm_config.temperature self.api_type = llm_config.api_type self.api_key = llm_config.api_key self.api_version = llm_config.api_version self.base_url = llm_config.base_url # Add token counting related attributes self.total_input_tokens = 0 
self.total_completion_tokens = 0 self.max_input_tokens = ( llm_config.max_input_tokens if hasattr(llm_config, "max_input_tokens") else None ) # Initialize tokenizer try: self.tokenizer = tiktoken.encoding_for_model(self.model) except KeyError: # If the model is not in tiktoken's presets, use cl100k_base as default self.tokenizer = tiktoken.get_encoding("cl100k_base") if self.api_type == "azure": self.client = AsyncAzureOpenAI( base_url=self.base_url, api_key=self.api_key, api_version=self.api_version, ) elif self.api_type == "aws": self.client = BedrockClient() else: self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url) self.token_counter = TokenCounter(self.tokenizer) def count_tokens(self, text: str) -> int: """Calculate the number of tokens in a text""" if not text: return 0 return len(self.tokenizer.encode(text)) def count_message_tokens(self, messages: List[dict]) -> int: return self.token_counter.count_message_tokens(messages) def update_token_count(self, input_tokens: int, completion_tokens: int = 0) -> None: """Update token counts""" # Only track tokens if max_input_tokens is set self.total_input_tokens += input_tokens self.total_completion_tokens += completion_tokens logger.info( f"Token usage: Input={input_tokens}, Completion={completion_tokens}, " f"Cumulative Input={self.total_input_tokens}, Cumulative Completion={self.total_completion_tokens}, " f"Total={input_tokens + completion_tokens}, Cumulative Total={self.total_input_tokens + self.total_completion_tokens}" ) def check_token_limit(self, input_tokens: int) -> bool: """Check if token limits are exceeded""" if self.max_input_tokens is not None: return (self.total_input_tokens + input_tokens) <= self.max_input_tokens # If max_input_tokens is not set, always return True return True def get_limit_error_message(self, input_tokens: int) -> str: """Generate error message for token limit exceeded""" if ( self.max_input_tokens is not None and (self.total_input_tokens + input_tokens) > 
self.max_input_tokens ): return f"Request may exceed input token limit (Current: {self.total_input_tokens}, Needed: {input_tokens}, Max: {self.max_input_tokens})" return "Token limit exceeded" @staticmethod def format_messages( messages: List[Union[dict, Message]], supports_images: bool = False ) -> List[dict]: """ Format messages for LLM by converting them to OpenAI message format. Args: messages: List of messages that can be either dict or Message objects supports_images: Flag indicating if the target model supports image inputs Returns: List[dict]: List of formatted messages in OpenAI format Raises: ValueError: If messages are invalid or missing required fields TypeError: If unsupported message types are provided Examples: >>> msgs = [ ... Message.system_message("You are a helpful assistant"), ... {"role": "user", "content": "Hello"}, ... Message.user_message("How are you?") ... ] >>> formatted = LLM.format_messages(msgs) """ formatted_messages = [] for message in messages: # Convert Message objects to dictionaries if isinstance(message, Message): message = message.to_dict() if isinstance(message, dict): # If message is a dict, ensure it has required fields if "role" not in message: raise ValueError("Message dict must contain 'role' field") # Process base64 images if present and model supports images if supports_images and message.get("base64_image"): # Initialize or convert content to appropriate format if not message.get("content"): message["content"] = [] elif isinstance(message["content"], str): message["content"] = [ {"type": "text", "text": message["content"]} ] elif isinstance(message["content"], list): # Convert string items to proper text objects message["content"] = [ ( {"type": "text", "text": item} if isinstance(item, str) else item ) for item in message["content"] ] # Add the image to content message["content"].append( { "type": "image_url", "image_url": { "url": f"data:image/jpeg;base64,{message['base64_image']}" }, } ) # Remove the base64_image 
field del message["base64_image"] # If model doesn't support images but message has base64_image, handle gracefully elif not supports_images and message.get("base64_image"): # Just remove the base64_image field and keep the text content del message["base64_image"] if "content" in message or "tool_calls" in message: formatted_messages.append(message) # else: do not include the message else: raise TypeError(f"Unsupported message type: {type(message)}") # Validate all messages have required fields for msg in formatted_messages: if msg["role"] not in ROLE_VALUES: raise ValueError(f"Invalid role: {msg['role']}") return formatted_messages @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), retry=retry_if_exception_type( (OpenAIError, Exception, ValueError) ), # Don't retry TokenLimitExceeded ) async def ask( self, messages: List[Union[dict, Message]], system_msgs: Optional[List[Union[dict, Message]]] = None, stream: bool = True, temperature: Optional[float] = None, ) -> str: """ Send a prompt to the LLM and get the response. 
Args: messages: List of conversation messages system_msgs: Optional system messages to prepend stream (bool): Whether to stream the response temperature (float): Sampling temperature for the response Returns: str: The generated response Raises: TokenLimitExceeded: If token limits are exceeded ValueError: If messages are invalid or response is empty OpenAIError: If API call fails after retries Exception: For unexpected errors """ try: # Check if the model supports images supports_images = self.model in MULTIMODAL_MODELS # Format system and user messages with image support check if system_msgs: system_msgs = self.format_messages(system_msgs, supports_images) messages = system_msgs + self.format_messages(messages, supports_images) else: messages = self.format_messages(messages, supports_images) # Calculate input token count input_tokens = self.count_message_tokens(messages) # Check if token limits are exceeded if not self.check_token_limit(input_tokens): error_message = self.get_limit_error_message(input_tokens) # Raise a special exception that won't be retried raise TokenLimitExceeded(error_message) params = { "model": self.model, "messages": messages, } if self.model in REASONING_MODELS: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens params["temperature"] = ( temperature if temperature is not None else self.temperature ) if not stream: # Non-streaming request response = await self.client.chat.completions.create( **params, stream=False ) if not response.choices or not response.choices[0].message.content: raise ValueError("Empty or invalid response from LLM") # Update token counts self.update_token_count( response.usage.prompt_tokens, response.usage.completion_tokens ) return response.choices[0].message.content # Streaming request, For streaming, update estimated token count before making the request self.update_token_count(input_tokens) response = await self.client.chat.completions.create(**params, stream=True) 
collected_messages = [] completion_text = "" async for chunk in response: chunk_message = chunk.choices[0].delta.content or "" collected_messages.append(chunk_message) completion_text += chunk_message print(chunk_message, end="", flush=True) print() # Newline after streaming full_response = "".join(collected_messages).strip() if not full_response: raise ValueError("Empty response from streaming LLM") # estimate completion tokens for streaming response completion_tokens = self.count_tokens(completion_text) logger.info( f"Estimated completion tokens for streaming response: {completion_tokens}" ) self.total_completion_tokens += completion_tokens return full_response except TokenLimitExceeded: # Re-raise token limit errors without logging raise except ValueError: logger.exception(f"Validation error") raise except OpenAIError as oe: logger.exception(f"OpenAI API error") if isinstance(oe, AuthenticationError): logger.error("Authentication failed. Check API key.") elif isinstance(oe, RateLimitError): logger.error("Rate limit exceeded. Consider increasing retry attempts.") elif isinstance(oe, APIError): logger.error(f"API error: {oe}") raise except Exception: logger.exception(f"Unexpected error in ask") raise @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), retry=retry_if_exception_type( (OpenAIError, Exception, ValueError) ), # Don't retry TokenLimitExceeded ) async def ask_with_images( self, messages: List[Union[dict, Message]], images: List[Union[str, dict]], system_msgs: Optional[List[Union[dict, Message]]] = None, stream: bool = False, temperature: Optional[float] = None, ) -> str: """ Send a prompt with images to the LLM and get the response. 
Args: messages: List of conversation messages images: List of image URLs or image data dictionaries system_msgs: Optional system messages to prepend stream (bool): Whether to stream the response temperature (float): Sampling temperature for the response Returns: str: The generated response Raises: TokenLimitExceeded: If token limits are exceeded ValueError: If messages are invalid or response is empty OpenAIError: If API call fails after retries Exception: For unexpected errors """ try: # For ask_with_images, we always set supports_images to True because # this method should only be called with models that support images if self.model not in MULTIMODAL_MODELS: raise ValueError( f"Model {self.model} does not support images. Use a model from {MULTIMODAL_MODELS}" ) # Format messages with image support formatted_messages = self.format_messages(messages, supports_images=True) # Ensure the last message is from the user to attach images if not formatted_messages or formatted_messages[-1]["role"] != "user": raise ValueError( "The last message must be from the user to attach images" ) # Process the last user message to include images last_message = formatted_messages[-1] # Convert content to multimodal format if needed content = last_message["content"] multimodal_content = ( [{"type": "text", "text": content}] if isinstance(content, str) else content if isinstance(content, list) else [] ) # Add images to content for image in images: if isinstance(image, str): multimodal_content.append( {"type": "image_url", "image_url": {"url": image}} ) elif isinstance(image, dict) and "url" in image: multimodal_content.append({"type": "image_url", "image_url": image}) elif isinstance(image, dict) and "image_url" in image: multimodal_content.append(image) else: raise ValueError(f"Unsupported image format: {image}") # Update the message with multimodal content last_message["content"] = multimodal_content # Add system messages if provided if system_msgs: all_messages = ( 
self.format_messages(system_msgs, supports_images=True) + formatted_messages ) else: all_messages = formatted_messages # Calculate tokens and check limits input_tokens = self.count_message_tokens(all_messages) if not self.check_token_limit(input_tokens): raise TokenLimitExceeded(self.get_limit_error_message(input_tokens)) # Set up API parameters params = { "model": self.model, "messages": all_messages, "stream": stream, } # Add model-specific parameters if self.model in REASONING_MODELS: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens params["temperature"] = ( temperature if temperature is not None else self.temperature ) # Handle non-streaming request if not stream: response = await self.client.chat.completions.create(**params) if not response.choices or not response.choices[0].message.content: raise ValueError("Empty or invalid response from LLM") self.update_token_count(response.usage.prompt_tokens) return response.choices[0].message.content # Handle streaming request self.update_token_count(input_tokens) response = await self.client.chat.completions.create(**params) collected_messages = [] async for chunk in response: chunk_message = chunk.choices[0].delta.content or "" collected_messages.append(chunk_message) print(chunk_message, end="", flush=True) print() # Newline after streaming full_response = "".join(collected_messages).strip() if not full_response: raise ValueError("Empty response from streaming LLM") return full_response except TokenLimitExceeded: raise except ValueError as ve: logger.error(f"Validation error in ask_with_images: {ve}") raise except OpenAIError as oe: logger.error(f"OpenAI API error: {oe}") if isinstance(oe, AuthenticationError): logger.error("Authentication failed. Check API key.") elif isinstance(oe, RateLimitError): logger.error("Rate limit exceeded. 
Consider increasing retry attempts.") elif isinstance(oe, APIError): logger.error(f"API error: {oe}") raise except Exception as e: logger.error(f"Unexpected error in ask_with_images: {e}") raise @retry( wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6), retry=retry_if_exception_type( (OpenAIError, Exception, ValueError) ), # Don't retry TokenLimitExceeded ) async def ask_tool( self, messages: List[Union[dict, Message]], system_msgs: Optional[List[Union[dict, Message]]] = None, timeout: int = 300, tools: Optional[List[dict]] = None, tool_choice: TOOL_CHOICE_TYPE = ToolChoice.AUTO, # type: ignore temperature: Optional[float] = None, **kwargs, ) -> ChatCompletionMessage | None: """ Ask LLM using functions/tools and return the response. Args: messages: List of conversation messages system_msgs: Optional system messages to prepend timeout: Request timeout in seconds tools: List of tools to use tool_choice: Tool choice strategy temperature: Sampling temperature for the response **kwargs: Additional completion arguments Returns: ChatCompletionMessage: The model's response Raises: TokenLimitExceeded: If token limits are exceeded ValueError: If tools, tool_choice, or messages are invalid OpenAIError: If API call fails after retries Exception: For unexpected errors """ try: # Validate tool_choice if tool_choice not in TOOL_CHOICE_VALUES: raise ValueError(f"Invalid tool_choice: {tool_choice}") # Check if the model supports images supports_images = self.model in MULTIMODAL_MODELS # Format messages if system_msgs: system_msgs = self.format_messages(system_msgs, supports_images) messages = system_msgs + self.format_messages(messages, supports_images) else: messages = self.format_messages(messages, supports_images) # Calculate input token count input_tokens = self.count_message_tokens(messages) # If there are tools, calculate token count for tool descriptions tools_tokens = 0 if tools: for tool in tools: tools_tokens += self.count_tokens(str(tool)) 
input_tokens += tools_tokens # Check if token limits are exceeded if not self.check_token_limit(input_tokens): error_message = self.get_limit_error_message(input_tokens) # Raise a special exception that won't be retried raise TokenLimitExceeded(error_message) # Validate tools if provided if tools: for tool in tools: if not isinstance(tool, dict) or "type" not in tool: raise ValueError("Each tool must be a dict with 'type' field") # Set up the completion request params = { "model": self.model, "messages": messages, "tools": tools, "tool_choice": tool_choice, "timeout": timeout, **kwargs, } if self.model in REASONING_MODELS: params["max_completion_tokens"] = self.max_tokens else: params["max_tokens"] = self.max_tokens params["temperature"] = ( temperature if temperature is not None else self.temperature ) params["stream"] = False # Always use non-streaming for tool requests response: ChatCompletion = await self.client.chat.completions.create( **params ) # Check if response is valid if not response.choices or not response.choices[0].message: print(response) # raise ValueError("Invalid or empty response from LLM") return None # Update token counts self.update_token_count( response.usage.prompt_tokens, response.usage.completion_tokens ) return response.choices[0].message except TokenLimitExceeded: # Re-raise token limit errors without logging raise except ValueError as ve: logger.error(f"Validation error in ask_tool: {ve}") raise except OpenAIError as oe: logger.error(f"OpenAI API error: {oe}") if isinstance(oe, AuthenticationError): logger.error("Authentication failed. Check API key.") elif isinstance(oe, RateLimitError): logger.error("Rate limit exceeded. Consider increasing retry attempts.") elif isinstance(oe, APIError): logger.error(f"API error: {oe}") raise except Exception as e: logger.error(f"Unexpected error in ask_tool: {e}") raise
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/schema.py
app/schema.py
from enum import Enum from typing import Any, List, Literal, Optional, Union from pydantic import BaseModel, Field class Role(str, Enum): """Message role options""" SYSTEM = "system" USER = "user" ASSISTANT = "assistant" TOOL = "tool" ROLE_VALUES = tuple(role.value for role in Role) ROLE_TYPE = Literal[ROLE_VALUES] # type: ignore class ToolChoice(str, Enum): """Tool choice options""" NONE = "none" AUTO = "auto" REQUIRED = "required" TOOL_CHOICE_VALUES = tuple(choice.value for choice in ToolChoice) TOOL_CHOICE_TYPE = Literal[TOOL_CHOICE_VALUES] # type: ignore class AgentState(str, Enum): """Agent execution states""" IDLE = "IDLE" RUNNING = "RUNNING" FINISHED = "FINISHED" ERROR = "ERROR" class Function(BaseModel): name: str arguments: str class ToolCall(BaseModel): """Represents a tool/function call in a message""" id: str type: str = "function" function: Function class Message(BaseModel): """Represents a chat message in the conversation""" role: ROLE_TYPE = Field(...) # type: ignore content: Optional[str] = Field(default=None) tool_calls: Optional[List[ToolCall]] = Field(default=None) name: Optional[str] = Field(default=None) tool_call_id: Optional[str] = Field(default=None) base64_image: Optional[str] = Field(default=None) def __add__(self, other) -> List["Message"]: """支持 Message + list 或 Message + Message 的操作""" if isinstance(other, list): return [self] + other elif isinstance(other, Message): return [self, other] else: raise TypeError( f"unsupported operand type(s) for +: '{type(self).__name__}' and '{type(other).__name__}'" ) def __radd__(self, other) -> List["Message"]: """支持 list + Message 的操作""" if isinstance(other, list): return other + [self] else: raise TypeError( f"unsupported operand type(s) for +: '{type(other).__name__}' and '{type(self).__name__}'" ) def to_dict(self) -> dict: """Convert message to dictionary format""" message = {"role": self.role} if self.content is not None: message["content"] = self.content if self.tool_calls is not None: 
message["tool_calls"] = [tool_call.dict() for tool_call in self.tool_calls] if self.name is not None: message["name"] = self.name if self.tool_call_id is not None: message["tool_call_id"] = self.tool_call_id if self.base64_image is not None: message["base64_image"] = self.base64_image return message @classmethod def user_message( cls, content: str, base64_image: Optional[str] = None ) -> "Message": """Create a user message""" return cls(role=Role.USER, content=content, base64_image=base64_image) @classmethod def system_message(cls, content: str) -> "Message": """Create a system message""" return cls(role=Role.SYSTEM, content=content) @classmethod def assistant_message( cls, content: Optional[str] = None, base64_image: Optional[str] = None ) -> "Message": """Create an assistant message""" return cls(role=Role.ASSISTANT, content=content, base64_image=base64_image) @classmethod def tool_message( cls, content: str, name, tool_call_id: str, base64_image: Optional[str] = None ) -> "Message": """Create a tool message""" return cls( role=Role.TOOL, content=content, name=name, tool_call_id=tool_call_id, base64_image=base64_image, ) @classmethod def from_tool_calls( cls, tool_calls: List[Any], content: Union[str, List[str]] = "", base64_image: Optional[str] = None, **kwargs, ): """Create ToolCallsMessage from raw tool calls. 
Args: tool_calls: Raw tool calls from LLM content: Optional message content base64_image: Optional base64 encoded image """ formatted_calls = [ {"id": call.id, "function": call.function.model_dump(), "type": "function"} for call in tool_calls ] return cls( role=Role.ASSISTANT, content=content, tool_calls=formatted_calls, base64_image=base64_image, **kwargs, ) class Memory(BaseModel): messages: List[Message] = Field(default_factory=list) max_messages: int = Field(default=100) def add_message(self, message: Message) -> None: """Add a message to memory""" self.messages.append(message) # Optional: Implement message limit if len(self.messages) > self.max_messages: self.messages = self.messages[-self.max_messages :] def add_messages(self, messages: List[Message]) -> None: """Add multiple messages to memory""" self.messages.extend(messages) # Optional: Implement message limit if len(self.messages) > self.max_messages: self.messages = self.messages[-self.max_messages :] def clear(self) -> None: """Clear all messages""" self.messages.clear() def get_recent_messages(self, n: int) -> List[Message]: """Get n most recent messages""" return self.messages[-n:] def to_dict_list(self) -> List[dict]: """Convert messages to list of dicts""" return [msg.to_dict() for msg in self.messages]
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/exceptions.py
app/exceptions.py
class ToolError(Exception): """Raised when a tool encounters an error.""" def __init__(self, message): self.message = message class OpenManusError(Exception): """Base exception for all OpenManus errors""" class TokenLimitExceeded(OpenManusError): """Exception raised when the token limit is exceeded"""
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/logger.py
app/logger.py
import sys from datetime import datetime from loguru import logger as _logger from app.config import PROJECT_ROOT _print_level = "INFO" def define_log_level(print_level="INFO", logfile_level="DEBUG", name: str = None): """Adjust the log level to above level""" global _print_level _print_level = print_level current_date = datetime.now() formatted_date = current_date.strftime("%Y%m%d%H%M%S") log_name = ( f"{name}_{formatted_date}" if name else formatted_date ) # name a log with prefix name _logger.remove() _logger.add(sys.stderr, level=print_level) _logger.add(PROJECT_ROOT / f"logs/{log_name}.log", level=logfile_level) return _logger logger = define_log_level() if __name__ == "__main__": logger.info("Starting application") logger.debug("Debug message") logger.warning("Warning message") logger.error("Error message") logger.critical("Critical message") try: raise ValueError("Test error") except Exception as e: logger.exception(f"An error occurred: {e}")
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/config.py
app/config.py
"""Application configuration for OpenManus.

Loads ``config/config.toml`` (falling back to ``config/config.example.toml``)
into typed pydantic settings models, and exposes a thread-safe ``Config``
singleton plus the module-level ``config`` instance.
"""

import json
import threading
import tomllib
from pathlib import Path
from typing import Dict, List, Optional

from pydantic import BaseModel, Field


def get_project_root() -> Path:
    """Get the project root directory"""
    # This file lives in <root>/app/, so root is two levels up.
    return Path(__file__).resolve().parent.parent


PROJECT_ROOT = get_project_root()
WORKSPACE_ROOT = PROJECT_ROOT / "workspace"


class LLMSettings(BaseModel):
    """Settings for one named LLM endpoint (the ``[llm]`` TOML sections)."""

    model: str = Field(..., description="Model name")
    base_url: str = Field(..., description="API base URL")
    api_key: str = Field(..., description="API key")
    max_tokens: int = Field(4096, description="Maximum number of tokens per request")
    max_input_tokens: Optional[int] = Field(
        None,
        description="Maximum input tokens to use across all requests (None for unlimited)",
    )
    temperature: float = Field(1.0, description="Sampling temperature")
    api_type: str = Field(..., description="Azure, Openai, or Ollama")
    api_version: str = Field(..., description="Azure Openai version if AzureOpenai")


class ProxySettings(BaseModel):
    """Optional proxy used by the browser integration."""

    server: Optional[str] = Field(None, description="Proxy server address")
    username: Optional[str] = Field(None, description="Proxy username")
    password: Optional[str] = Field(None, description="Proxy password")


class SearchSettings(BaseModel):
    """Web-search engine selection and retry policy."""

    engine: str = Field(default="Google", description="Search engine the llm to use")
    fallback_engines: List[str] = Field(
        default_factory=lambda: ["DuckDuckGo", "Baidu", "Bing"],
        description="Fallback search engines to try if the primary engine fails",
    )
    retry_delay: int = Field(
        default=60,
        description="Seconds to wait before retrying all engines again after they all fail",
    )
    max_retries: int = Field(
        default=3,
        description="Maximum number of times to retry all engines when all fail",
    )
    lang: str = Field(
        default="en",
        description="Language code for search results (e.g., en, zh, fr)",
    )
    country: str = Field(
        default="us",
        description="Country code for search results (e.g., us, cn, uk)",
    )


class RunflowSettings(BaseModel):
    """Feature switches for the run-flow entry point."""

    use_data_analysis_agent: bool = Field(
        default=False, description="Enable data analysis agent in run flow"
    )


class BrowserSettings(BaseModel):
    """Browser automation options (headless mode, proxy, CDP/WSS endpoints)."""

    headless: bool = Field(False, description="Whether to run browser in headless mode")
    disable_security: bool = Field(
        True, description="Disable browser security features"
    )
    extra_chromium_args: List[str] = Field(
        default_factory=list, description="Extra arguments to pass to the browser"
    )
    chrome_instance_path: Optional[str] = Field(
        None, description="Path to a Chrome instance to use"
    )
    wss_url: Optional[str] = Field(
        None, description="Connect to a browser instance via WebSocket"
    )
    cdp_url: Optional[str] = Field(
        None, description="Connect to a browser instance via CDP"
    )
    proxy: Optional[ProxySettings] = Field(
        None, description="Proxy settings for the browser"
    )
    max_content_length: int = Field(
        2000, description="Maximum length for content retrieval operations"
    )


class SandboxSettings(BaseModel):
    """Configuration for the execution sandbox"""

    use_sandbox: bool = Field(False, description="Whether to use the sandbox")
    image: str = Field("python:3.12-slim", description="Base image")
    work_dir: str = Field("/workspace", description="Container working directory")
    memory_limit: str = Field("512m", description="Memory limit")
    cpu_limit: float = Field(1.0, description="CPU limit")
    timeout: int = Field(300, description="Default command timeout (seconds)")
    network_enabled: bool = Field(
        False, description="Whether network access is allowed"
    )


class DaytonaSettings(BaseModel):
    """Daytona remote-sandbox provider settings; only the API key is required."""

    daytona_api_key: str
    daytona_server_url: Optional[str] = Field(
        "https://app.daytona.io/api", description=""
    )
    daytona_target: Optional[str] = Field("us", description="enum ['eu', 'us']")
    sandbox_image_name: Optional[str] = Field("whitezxj/sandbox:0.1.0", description="")
    sandbox_entrypoint: Optional[str] = Field(
        "/usr/bin/supervisord -n -c /etc/supervisor/conf.d/supervisord.conf",
        description="",
    )
    # sandbox_id: Optional[str] = Field(
    #     None, description="ID of the daytona sandbox to use, if any"
    # )
    VNC_password: Optional[str] = Field(
        "123456", description="VNC password for the vnc service in sandbox"
    )


class MCPServerConfig(BaseModel):
    """Configuration for a single MCP server"""

    type: str = Field(..., description="Server connection type (sse or stdio)")
    url: Optional[str] = Field(None, description="Server URL for SSE connections")
    command: Optional[str] = Field(None, description="Command for stdio connections")
    args: List[str] = Field(
        default_factory=list, description="Arguments for stdio command"
    )


class MCPSettings(BaseModel):
    """Configuration for MCP (Model Context Protocol)"""

    server_reference: str = Field(
        "app.mcp.server", description="Module reference for the MCP server"
    )
    servers: Dict[str, MCPServerConfig] = Field(
        default_factory=dict, description="MCP server configurations"
    )

    @classmethod
    def load_server_config(cls) -> Dict[str, MCPServerConfig]:
        """Load MCP server configuration from JSON file"""
        # Reads config/mcp.json; a missing file yields an empty mapping,
        # but a malformed file is escalated as ValueError.
        config_path = PROJECT_ROOT / "config" / "mcp.json"
        try:
            config_file = config_path if config_path.exists() else None
            if not config_file:
                return {}
            with config_file.open() as f:
                data = json.load(f)
            servers = {}
            for server_id, server_config in data.get("mcpServers", {}).items():
                servers[server_id] = MCPServerConfig(
                    type=server_config["type"],
                    url=server_config.get("url"),
                    command=server_config.get("command"),
                    args=server_config.get("args", []),
                )
            return servers
        except Exception as e:
            raise ValueError(f"Failed to load MCP server config: {e}")


class AppConfig(BaseModel):
    """Aggregate of all parsed settings; built once by Config._load_initial_config."""

    llm: Dict[str, LLMSettings]
    sandbox: Optional[SandboxSettings] = Field(
        None, description="Sandbox configuration"
    )
    browser_config: Optional[BrowserSettings] = Field(
        None, description="Browser configuration"
    )
    search_config: Optional[SearchSettings] = Field(
        None, description="Search configuration"
    )
    mcp_config: Optional[MCPSettings] = Field(None, description="MCP configuration")
    run_flow_config: Optional[RunflowSettings] = Field(
        None, description="Run flow configuration"
    )
    daytona_config: Optional[DaytonaSettings] = Field(
        None, description="Daytona configuration"
    )

    # Pydantic model configuration (v1-style inner class); unrelated to the
    # application-level Config singleton below despite the shared name.
    class Config:
        arbitrary_types_allowed = True


class Config:
    """Thread-safe singleton that parses the TOML config on first use."""

    _instance = None
    _lock = threading.Lock()
    _initialized = False

    def __new__(cls):
        # Double-checked locking: cheap unlocked test, then re-check under
        # the lock before creating the single instance.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # Same double-checked pattern so repeated Config() calls do not
        # re-parse the configuration file.
        if not self._initialized:
            with self._lock:
                if not self._initialized:
                    self._config = None
                    self._load_initial_config()
                    self._initialized = True

    @staticmethod
    def _get_config_path() -> Path:
        # Prefer the real config; fall back to the checked-in example.
        root = PROJECT_ROOT
        config_path = root / "config" / "config.toml"
        if config_path.exists():
            return config_path
        example_path = root / "config" / "config.example.toml"
        if example_path.exists():
            return example_path
        raise FileNotFoundError("No configuration file found in config directory")

    def _load_config(self) -> dict:
        # tomllib requires binary mode.
        config_path = self._get_config_path()
        with config_path.open("rb") as f:
            return tomllib.load(f)

    def _load_initial_config(self):
        """Parse the raw TOML into an AppConfig and store it on self._config."""
        raw_config = self._load_config()
        base_llm = raw_config.get("llm", {})
        # Sub-tables of [llm] (dict values) are per-model overrides; scalar
        # keys on [llm] itself form the shared defaults.
        llm_overrides = {
            k: v for k, v in raw_config.get("llm", {}).items() if isinstance(v, dict)
        }

        default_settings = {
            "model": base_llm.get("model"),
            "base_url": base_llm.get("base_url"),
            "api_key": base_llm.get("api_key"),
            "max_tokens": base_llm.get("max_tokens", 4096),
            "max_input_tokens": base_llm.get("max_input_tokens"),
            "temperature": base_llm.get("temperature", 1.0),
            "api_type": base_llm.get("api_type", ""),
            "api_version": base_llm.get("api_version", ""),
        }

        # handle browser config.
        browser_config = raw_config.get("browser", {})
        browser_settings = None

        if browser_config:
            # handle proxy settings.
            proxy_config = browser_config.get("proxy", {})
            proxy_settings = None

            if proxy_config and proxy_config.get("server"):
                proxy_settings = ProxySettings(
                    **{
                        k: v
                        for k, v in proxy_config.items()
                        if k in ["server", "username", "password"] and v
                    }
                )

            # filter valid browser config parameters.
            valid_browser_params = {
                k: v
                for k, v in browser_config.items()
                if k in BrowserSettings.__annotations__ and v is not None
            }

            # if there is proxy settings, add it to the parameters.
            if proxy_settings:
                valid_browser_params["proxy"] = proxy_settings

            # only create BrowserSettings when there are valid parameters.
            if valid_browser_params:
                browser_settings = BrowserSettings(**valid_browser_params)

        search_config = raw_config.get("search", {})
        search_settings = None
        if search_config:
            search_settings = SearchSettings(**search_config)

        # Sandbox/daytona always get a settings object (model defaults apply
        # when the TOML section is absent).
        sandbox_config = raw_config.get("sandbox", {})
        if sandbox_config:
            sandbox_settings = SandboxSettings(**sandbox_config)
        else:
            sandbox_settings = SandboxSettings()

        daytona_config = raw_config.get("daytona", {})
        if daytona_config:
            daytona_settings = DaytonaSettings(**daytona_config)
        else:
            daytona_settings = DaytonaSettings()

        mcp_config = raw_config.get("mcp", {})
        mcp_settings = None
        if mcp_config:
            # Load server configurations from JSON
            mcp_config["servers"] = MCPSettings.load_server_config()
            mcp_settings = MCPSettings(**mcp_config)
        else:
            mcp_settings = MCPSettings(servers=MCPSettings.load_server_config())

        run_flow_config = raw_config.get("runflow")
        if run_flow_config:
            run_flow_settings = RunflowSettings(**run_flow_config)
        else:
            run_flow_settings = RunflowSettings()
        config_dict = {
            "llm": {
                # "default" carries the shared settings; each named override
                # is defaults merged with its section (section wins).
                "default": default_settings,
                **{
                    name: {**default_settings, **override_config}
                    for name, override_config in llm_overrides.items()
                },
            },
            "sandbox": sandbox_settings,
            "browser_config": browser_settings,
            "search_config": search_settings,
            "mcp_config": mcp_settings,
            "run_flow_config": run_flow_settings,
            "daytona_config": daytona_settings,
        }

        self._config = AppConfig(**config_dict)

    @property
    def llm(self) -> Dict[str, LLMSettings]:
        return self._config.llm

    @property
    def sandbox(self) -> SandboxSettings:
        return self._config.sandbox

    @property
    def daytona(self) -> DaytonaSettings:
        return self._config.daytona_config

    @property
    def browser_config(self) -> Optional[BrowserSettings]:
        return self._config.browser_config

    @property
    def search_config(self) -> Optional[SearchSettings]:
        return self._config.search_config

    @property
    def mcp_config(self) -> MCPSettings:
        """Get the MCP configuration"""
        return self._config.mcp_config

    @property
    def run_flow_config(self) -> RunflowSettings:
        """Get the Run Flow configuration"""
        return self._config.run_flow_config

    @property
    def workspace_root(self) -> Path:
        """Get the workspace root directory"""
        return WORKSPACE_ROOT

    @property
    def root_path(self) -> Path:
        """Get the root path of the application"""
        return PROJECT_ROOT


# Shared singleton instance used across the application.
config = Config()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/__init__.py
app/__init__.py
# Python version check: 3.11-3.13
import sys

# sys.version_info is a 5-tuple (major, minor, micro, releaselevel, serial),
# so comparing it directly against (3, 13) flags every 3.13.x release as
# unsupported (e.g. (3, 13, 1, 'final', 0) > (3, 13)).  Compare only the
# (major, minor) prefix, and print just major.minor.micro in the warning.
if sys.version_info[:2] < (3, 11) or sys.version_info[:2] > (3, 13):
    print(
        "Warning: Unsupported Python version {ver}, please use 3.11-3.13".format(
            ver=".".join(map(str, sys.version_info[:3]))
        )
    )
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/bedrock.py
app/bedrock.py
import json
import sys
import time
import uuid
from datetime import datetime
from typing import Dict, List, Literal, Optional

import boto3


# Global variables to track the current tool use ID across function calls
# Tmp solution
# NOTE(review): module-level mutable state shared by all clients; concurrent
# conversations would interleave tool-use IDs — confirm single-threaded use.
CURRENT_TOOLUSE_ID = None


# Class to handle OpenAI-style response formatting
class OpenAIResponse:
    def __init__(self, data):
        # Recursively convert nested dicts and lists to OpenAIResponse objects
        for key, value in data.items():
            if isinstance(value, dict):
                value = OpenAIResponse(value)
            elif isinstance(value, list):
                value = [
                    OpenAIResponse(item) if isinstance(item, dict) else item
                    for item in value
                ]
            setattr(self, key, value)

    def model_dump(self, *args, **kwargs):
        # Convert object to dict and add timestamp
        # NOTE(review): returns self.__dict__ itself (not a copy) and mutates
        # it by adding "created_at"; nested OpenAIResponse values are not
        # converted back to dicts.
        data = self.__dict__
        data["created_at"] = datetime.now().isoformat()
        return data


# Main client class for interacting with Amazon Bedrock
class BedrockClient:
    def __init__(self):
        # Initialize Bedrock client, you need to configure AWS env first
        # (credentials/region come from the standard AWS environment).
        try:
            self.client = boto3.client("bedrock-runtime")
            self.chat = Chat(self.client)
        except Exception as e:
            # Fatal: the whole process exits if the client cannot be built.
            print(f"Error initializing Bedrock client: {e}")
            sys.exit(1)


# Chat interface class (mirrors the OpenAI SDK's client.chat.completions shape)
class Chat:
    def __init__(self, client):
        self.completions = ChatCompletions(client)


# Core class handling chat completions functionality
class ChatCompletions:
    def __init__(self, client):
        self.client = client

    def _convert_openai_tools_to_bedrock_format(self, tools):
        # Convert OpenAI function calling format to Bedrock tool format
        # (only entries with type == "function" are translated; others are
        # silently dropped).
        bedrock_tools = []
        for tool in tools:
            if tool.get("type") == "function":
                function = tool.get("function", {})
                bedrock_tool = {
                    "toolSpec": {
                        "name": function.get("name", ""),
                        "description": function.get("description", ""),
                        "inputSchema": {
                            "json": {
                                "type": "object",
                                "properties": function.get("parameters", {}).get(
                                    "properties", {}
                                ),
                                "required": function.get("parameters", {}).get(
                                    "required", []
                                ),
                            }
                        },
                    }
                }
                bedrock_tools.append(bedrock_tool)
        return bedrock_tools

    def _convert_openai_messages_to_bedrock_format(self, messages):
        # Convert OpenAI message format to Bedrock message format.
        # Returns (system_prompt, bedrock_messages); only the last "system"
        # message survives since each one overwrites system_prompt.
        bedrock_messages = []
        system_prompt = []
        for message in messages:
            if message.get("role") == "system":
                system_prompt = [{"text": message.get("content")}]
            elif message.get("role") == "user":
                bedrock_message = {
                    "role": message.get("role", "user"),
                    "content": [{"text": message.get("content")}],
                }
                bedrock_messages.append(bedrock_message)
            elif message.get("role") == "assistant":
                bedrock_message = {
                    "role": "assistant",
                    "content": [{"text": message.get("content")}],
                }
                openai_tool_calls = message.get("tool_calls", [])
                if openai_tool_calls:
                    # Only the FIRST tool call of the message is converted;
                    # any additional parallel tool calls are ignored.
                    bedrock_tool_use = {
                        "toolUseId": openai_tool_calls[0]["id"],
                        "name": openai_tool_calls[0]["function"]["name"],
                        "input": json.loads(
                            openai_tool_calls[0]["function"]["arguments"]
                        ),
                    }
                    bedrock_message["content"].append({"toolUse": bedrock_tool_use})
                    global CURRENT_TOOLUSE_ID
                    CURRENT_TOOLUSE_ID = openai_tool_calls[0]["id"]
                bedrock_messages.append(bedrock_message)
            elif message.get("role") == "tool":
                # Tool results become user-role messages carrying a toolResult
                # tied to the most recently seen tool-use ID.
                bedrock_message = {
                    "role": "user",
                    "content": [
                        {
                            "toolResult": {
                                "toolUseId": CURRENT_TOOLUSE_ID,
                                "content": [{"text": message.get("content")}],
                            }
                        }
                    ],
                }
                bedrock_messages.append(bedrock_message)
            else:
                raise ValueError(f"Invalid role: {message.get('role')}")
        return system_prompt, bedrock_messages

    def _convert_bedrock_response_to_openai_format(self, bedrock_response):
        # Convert Bedrock response format to OpenAI format
        content = ""
        if bedrock_response.get("output", {}).get("message", {}).get("content"):
            content_array = bedrock_response["output"]["message"]["content"]
            content = "".join(item.get("text", "") for item in content_array)
        if content == "":
            # Empty content is replaced with "." so downstream consumers
            # always see a non-empty string.
            content = "."

        # Handle tool calls in response
        openai_tool_calls = []
        if bedrock_response.get("output", {}).get("message", {}).get("content"):
            for content_item in bedrock_response["output"]["message"]["content"]:
                if content_item.get("toolUse"):
                    bedrock_tool_use = content_item["toolUse"]
                    global CURRENT_TOOLUSE_ID
                    CURRENT_TOOLUSE_ID = bedrock_tool_use["toolUseId"]
                    openai_tool_call = {
                        "id": CURRENT_TOOLUSE_ID,
                        "type": "function",
                        "function": {
                            "name": bedrock_tool_use["name"],
                            "arguments": json.dumps(bedrock_tool_use["input"]),
                        },
                    }
                    openai_tool_calls.append(openai_tool_call)

        # Construct final OpenAI format response
        openai_format = {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "created": int(time.time()),
            "object": "chat.completion",
            "system_fingerprint": None,
            "choices": [
                {
                    "finish_reason": bedrock_response.get("stopReason", "end_turn"),
                    "index": 0,
                    "message": {
                        "content": content,
                        "role": bedrock_response.get("output", {})
                        .get("message", {})
                        .get("role", "assistant"),
                        "tool_calls": openai_tool_calls
                        if openai_tool_calls != []
                        else None,
                        "function_call": None,
                    },
                }
            ],
            "usage": {
                "completion_tokens": bedrock_response.get("usage", {}).get(
                    "outputTokens", 0
                ),
                "prompt_tokens": bedrock_response.get("usage", {}).get(
                    "inputTokens", 0
                ),
                "total_tokens": bedrock_response.get("usage", {}).get("totalTokens", 0),
            },
        }
        return OpenAIResponse(openai_format)

    async def _invoke_bedrock(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: int,
        temperature: float,
        tools: Optional[List[dict]] = None,
        tool_choice: Literal["none", "auto", "required"] = "auto",  # accepted but unused
        **kwargs,
    ) -> OpenAIResponse:
        # Non-streaming invocation of Bedrock model
        (
            system_prompt,
            bedrock_messages,
        ) = self._convert_openai_messages_to_bedrock_format(messages)
        response = self.client.converse(
            modelId=model,
            system=system_prompt,
            messages=bedrock_messages,
            inferenceConfig={"temperature": temperature, "maxTokens": max_tokens},
            toolConfig={"tools": tools} if tools else None,
        )
        openai_response = self._convert_bedrock_response_to_openai_format(response)
        return openai_response

    async def _invoke_bedrock_stream(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: int,
        temperature: float,
        tools: Optional[List[dict]] = None,
        tool_choice: Literal["none", "auto", "required"] = "auto",  # accepted but unused
        **kwargs,
    ) -> OpenAIResponse:
        # Streaming invocation of Bedrock model.  Events are echoed to stdout
        # as they arrive and accumulated into a converse()-shaped dict, which
        # is then converted to the OpenAI format in one pass.
        (
            system_prompt,
            bedrock_messages,
        ) = self._convert_openai_messages_to_bedrock_format(messages)
        response = self.client.converse_stream(
            modelId=model,
            system=system_prompt,
            messages=bedrock_messages,
            inferenceConfig={"temperature": temperature, "maxTokens": max_tokens},
            toolConfig={"tools": tools} if tools else None,
        )

        # Initialize response structure
        bedrock_response = {
            "output": {"message": {"role": "", "content": []}},
            "stopReason": "",
            "usage": {},
            "metrics": {},
        }
        bedrock_response_text = ""
        bedrock_response_tool_input = ""

        # Process streaming response.
        # NOTE(review): this assumes text arrives as content block 0 and a
        # tool-use as content block 1 (see contentBlockIndex checks and the
        # content[1] write) — confirm against the ConverseStream event order.
        stream = response.get("stream")
        if stream:
            for event in stream:
                if event.get("messageStart", {}).get("role"):
                    bedrock_response["output"]["message"]["role"] = event[
                        "messageStart"
                    ]["role"]
                if event.get("contentBlockDelta", {}).get("delta", {}).get("text"):
                    bedrock_response_text += event["contentBlockDelta"]["delta"]["text"]
                    print(
                        event["contentBlockDelta"]["delta"]["text"], end="", flush=True
                    )
                if event.get("contentBlockStop", {}).get("contentBlockIndex") == 0:
                    bedrock_response["output"]["message"]["content"].append(
                        {"text": bedrock_response_text}
                    )
                if event.get("contentBlockStart", {}).get("start", {}).get("toolUse"):
                    bedrock_tool_use = event["contentBlockStart"]["start"]["toolUse"]
                    tool_use = {
                        "toolUseId": bedrock_tool_use["toolUseId"],
                        "name": bedrock_tool_use["name"],
                    }
                    bedrock_response["output"]["message"]["content"].append(
                        {"toolUse": tool_use}
                    )
                    global CURRENT_TOOLUSE_ID
                    CURRENT_TOOLUSE_ID = bedrock_tool_use["toolUseId"]
                if event.get("contentBlockDelta", {}).get("delta", {}).get("toolUse"):
                    bedrock_response_tool_input += event["contentBlockDelta"]["delta"][
                        "toolUse"
                    ]["input"]
                    print(
                        event["contentBlockDelta"]["delta"]["toolUse"]["input"],
                        end="",
                        flush=True,
                    )
                if event.get("contentBlockStop", {}).get("contentBlockIndex") == 1:
                    bedrock_response["output"]["message"]["content"][1]["toolUse"][
                        "input"
                    ] = json.loads(bedrock_response_tool_input)
        print()
        openai_response = self._convert_bedrock_response_to_openai_format(
            bedrock_response
        )
        return openai_response

    def create(
        self,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: int,
        temperature: float,
        stream: Optional[bool] = True,
        tools: Optional[List[dict]] = None,
        tool_choice: Literal["none", "auto", "required"] = "auto",
        **kwargs,
    ) -> OpenAIResponse:
        # Main entry point for chat completion.
        # Note: create() itself is synchronous; it returns the coroutine from
        # the async _invoke_* helper, which the caller is expected to await.
        bedrock_tools = []
        if tools is not None:
            bedrock_tools = self._convert_openai_tools_to_bedrock_format(tools)
        if stream:
            return self._invoke_bedrock_stream(
                model,
                messages,
                max_tokens,
                temperature,
                bedrock_tools,
                tool_choice,
                **kwargs,
            )
        else:
            return self._invoke_bedrock(
                model,
                messages,
                max_tokens,
                temperature,
                bedrock_tools,
                tool_choice,
                **kwargs,
            )
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/flow/flow_factory.py
app/flow/flow_factory.py
from enum import Enum
from typing import Dict, List, Union

from app.agent.base import BaseAgent
from app.flow.base import BaseFlow
from app.flow.planning import PlanningFlow


class FlowType(str, Enum):
    PLANNING = "planning"


class FlowFactory:
    """Factory for creating different types of flows with support for multiple agents"""

    # Registry mapping each supported flow type to its implementation class.
    _FLOW_REGISTRY = {
        FlowType.PLANNING: PlanningFlow,
    }

    @staticmethod
    def create_flow(
        flow_type: FlowType,
        agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]],
        **kwargs,
    ) -> BaseFlow:
        """Instantiate the flow class registered for ``flow_type``.

        Raises:
            ValueError: if ``flow_type`` has no registered implementation.
        """
        try:
            flow_class = FlowFactory._FLOW_REGISTRY[flow_type]
        except KeyError:
            raise ValueError(f"Unknown flow type: {flow_type}") from None
        return flow_class(agents, **kwargs)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/flow/planning.py
app/flow/planning.py
import json
import re
import time
from enum import Enum
from typing import Dict, List, Optional, Union

from pydantic import Field

from app.agent.base import BaseAgent
from app.flow.base import BaseFlow
from app.llm import LLM
from app.logger import logger
from app.schema import AgentState, Message, ToolChoice
from app.tool import PlanningTool

# Matches an agent/type tag like "[SEARCH]" or "[CODE]" at the front of a step.
_STEP_TYPE_PATTERN = re.compile(r"\[([A-Z_]+)\]")


class PlanStepStatus(str, Enum):
    """Enum class defining possible statuses of a plan step"""

    NOT_STARTED = "not_started"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"
    BLOCKED = "blocked"

    @classmethod
    def get_all_statuses(cls) -> list[str]:
        """Return a list of all possible step status values"""
        return [status.value for status in cls]

    @classmethod
    def get_active_statuses(cls) -> list[str]:
        """Return a list of values representing active statuses (not started or in progress)"""
        return [cls.NOT_STARTED.value, cls.IN_PROGRESS.value]

    @classmethod
    def get_status_marks(cls) -> Dict[str, str]:
        """Return a mapping of statuses to their marker symbols"""
        return {
            cls.COMPLETED.value: "[✓]",
            cls.IN_PROGRESS.value: "[→]",
            cls.BLOCKED.value: "[!]",
            cls.NOT_STARTED.value: "[ ]",
        }


class PlanningFlow(BaseFlow):
    """A flow that manages planning and execution of tasks using agents.

    The flow asks the LLM to build a plan via the `planning` tool, then walks
    the plan step by step, dispatching each step to an executor agent and
    recording progress back into the planning tool's storage.
    """

    llm: LLM = Field(default_factory=lambda: LLM())
    planning_tool: PlanningTool = Field(default_factory=PlanningTool)
    executor_keys: List[str] = Field(default_factory=list)
    active_plan_id: str = Field(default_factory=lambda: f"plan_{int(time.time())}")
    current_step_index: Optional[int] = None

    def __init__(
        self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data
    ):
        # Set executor keys before super().__init__
        if "executors" in data:
            data["executor_keys"] = data.pop("executors")

        # Set plan ID if provided
        if "plan_id" in data:
            data["active_plan_id"] = data.pop("plan_id")

        # Initialize the planning tool if not provided
        if "planning_tool" not in data:
            planning_tool = PlanningTool()
            data["planning_tool"] = planning_tool

        # Call parent's init with the processed data
        super().__init__(agents, **data)

        # Set executor_keys to all agent keys if not specified
        if not self.executor_keys:
            self.executor_keys = list(self.agents.keys())

    def get_executor(self, step_type: Optional[str] = None) -> BaseAgent:
        """
        Get an appropriate executor agent for the current step.
        Can be extended to select agents based on step type/requirements.
        """
        # If step type is provided and matches an agent key, use that agent
        if step_type and step_type in self.agents:
            return self.agents[step_type]

        # Otherwise use the first available executor or fall back to primary agent
        for key in self.executor_keys:
            if key in self.agents:
                return self.agents[key]

        # Fallback to primary agent
        return self.primary_agent

    async def execute(self, input_text: str) -> str:
        """Execute the planning flow with agents.

        Creates a plan from *input_text* (when non-empty), then loops over the
        plan's steps until none are active, returning the concatenated step
        results plus a final summary.
        """
        try:
            if not self.primary_agent:
                raise ValueError("No primary agent available")

            # Create initial plan if input provided
            if input_text:
                await self._create_initial_plan(input_text)

                # Verify plan was created successfully
                if self.active_plan_id not in self.planning_tool.plans:
                    logger.error(
                        f"Plan creation failed. Plan ID {self.active_plan_id} not found in planning tool."
                    )
                    return f"Failed to create plan for: {input_text}"

            result = ""
            while True:
                # Get current step to execute
                self.current_step_index, step_info = await self._get_current_step_info()

                # Exit if no more steps or plan completed
                if self.current_step_index is None:
                    result += await self._finalize_plan()
                    break

                # Execute current step with appropriate agent
                step_type = step_info.get("type") if step_info else None
                executor = self.get_executor(step_type)
                step_result = await self._execute_step(executor, step_info)
                result += step_result + "\n"

                # Check if agent wants to terminate
                if hasattr(executor, "state") and executor.state == AgentState.FINISHED:
                    break

            return result
        except Exception as e:
            logger.error(f"Error in PlanningFlow: {str(e)}")
            return f"Execution failed: {str(e)}"

    async def _create_initial_plan(self, request: str) -> None:
        """Create an initial plan based on the request using the flow's LLM and PlanningTool."""
        logger.info(f"Creating initial plan with ID: {self.active_plan_id}")

        system_message_content = (
            "You are a planning assistant. Create a concise, actionable plan with clear steps. "
            "Focus on key milestones rather than detailed sub-steps. "
            "Optimize for clarity and efficiency."
        )

        agents_description = []
        for key in self.executor_keys:
            if key in self.agents:
                agents_description.append(
                    {
                        "name": key.upper(),
                        "description": self.agents[key].description,
                    }
                )

        if len(agents_description) > 1:
            # Add description of agents to select.
            # FIX: interpolate the agent COUNT, not the raw list of dicts
            # (the full descriptions already follow via json.dumps).
            system_message_content += (
                f"\nNow we have {len(agents_description)} agents. "
                f"The information of them are below: {json.dumps(agents_description)}\n"
                "When creating steps in the planning tool, please specify the agent names using the format '[agent_name]'."
            )

        # Create a system message for plan creation
        system_message = Message.system_message(system_message_content)

        # Create a user message with the request
        user_message = Message.user_message(
            f"Create a reasonable plan with clear steps to accomplish the task: {request}"
        )

        # Call LLM with PlanningTool
        response = await self.llm.ask_tool(
            messages=[user_message],
            system_msgs=[system_message],
            tools=[self.planning_tool.to_param()],
            tool_choice=ToolChoice.AUTO,
        )

        # Process tool calls if present
        if response.tool_calls:
            for tool_call in response.tool_calls:
                if tool_call.function.name == "planning":
                    # Parse the arguments
                    args = tool_call.function.arguments
                    if isinstance(args, str):
                        try:
                            args = json.loads(args)
                        except json.JSONDecodeError:
                            logger.error(f"Failed to parse tool arguments: {args}")
                            continue

                    # Ensure plan_id is set correctly and execute the tool
                    args["plan_id"] = self.active_plan_id

                    # Execute the tool via ToolCollection instead of directly
                    result = await self.planning_tool.execute(**args)

                    logger.info(f"Plan creation result: {str(result)}")
                    return

        # If execution reached here, create a default plan
        logger.warning("Creating default plan")

        # Create default plan using the ToolCollection
        await self.planning_tool.execute(
            **{
                "command": "create",
                "plan_id": self.active_plan_id,
                "title": f"Plan for: {request[:50]}{'...' if len(request) > 50 else ''}",
                "steps": ["Analyze request", "Execute task", "Verify results"],
            }
        )

    async def _get_current_step_info(self) -> tuple[Optional[int], Optional[dict]]:
        """
        Parse the current plan to identify the first non-completed step's index and info.
        Returns (None, None) if no active step is found.
        """
        if (
            not self.active_plan_id
            or self.active_plan_id not in self.planning_tool.plans
        ):
            logger.error(f"Plan with ID {self.active_plan_id} not found")
            return None, None

        try:
            # Direct access to plan data from planning tool storage
            plan_data = self.planning_tool.plans[self.active_plan_id]
            steps = plan_data.get("steps", [])
            step_statuses = plan_data.get("step_statuses", [])

            # Find first non-completed step
            for i, step in enumerate(steps):
                if i >= len(step_statuses):
                    status = PlanStepStatus.NOT_STARTED.value
                else:
                    status = step_statuses[i]

                if status in PlanStepStatus.get_active_statuses():
                    # Extract step type/category if available
                    step_info = {"text": step}

                    # Try to extract step type from the text (e.g., [SEARCH] or [CODE]).
                    # Pattern is precompiled at module level instead of importing
                    # `re` inside this loop on every scan.
                    type_match = _STEP_TYPE_PATTERN.search(step)
                    if type_match:
                        step_info["type"] = type_match.group(1).lower()

                    # Mark current step as in_progress
                    try:
                        await self.planning_tool.execute(
                            command="mark_step",
                            plan_id=self.active_plan_id,
                            step_index=i,
                            step_status=PlanStepStatus.IN_PROGRESS.value,
                        )
                    except Exception as e:
                        logger.warning(f"Error marking step as in_progress: {e}")
                        # Update step status directly if needed
                        if i < len(step_statuses):
                            step_statuses[i] = PlanStepStatus.IN_PROGRESS.value
                        else:
                            while len(step_statuses) < i:
                                step_statuses.append(PlanStepStatus.NOT_STARTED.value)
                            step_statuses.append(PlanStepStatus.IN_PROGRESS.value)

                        plan_data["step_statuses"] = step_statuses

                    return i, step_info

            return None, None  # No active step found

        except Exception as e:
            logger.warning(f"Error finding current step index: {e}")
            return None, None

    async def _execute_step(self, executor: BaseAgent, step_info: dict) -> str:
        """Execute the current step with the specified agent using agent.run()."""
        # Prepare context for the agent with current plan status
        plan_status = await self._get_plan_text()
        step_text = step_info.get("text", f"Step {self.current_step_index}")

        # Create a prompt for the agent to execute the current step
        # NOTE(review): string reconstructed from a whitespace-mangled dump;
        # exact original line breaks inside the prompt are assumed.
        step_prompt = f"""
        CURRENT PLAN STATUS:
        {plan_status}

        YOUR CURRENT TASK:
        You are now working on step {self.current_step_index}: "{step_text}"

        Please only execute this current step using the appropriate tools. When you're done, provide a summary of what you accomplished.
        """

        # Use agent.run() to execute the step
        try:
            step_result = await executor.run(step_prompt)

            # Mark the step as completed after successful execution
            await self._mark_step_completed()

            return step_result
        except Exception as e:
            logger.error(f"Error executing step {self.current_step_index}: {e}")
            return f"Error executing step {self.current_step_index}: {str(e)}"

    async def _mark_step_completed(self) -> None:
        """Mark the current step as completed."""
        if self.current_step_index is None:
            return

        try:
            # Mark the step as completed
            await self.planning_tool.execute(
                command="mark_step",
                plan_id=self.active_plan_id,
                step_index=self.current_step_index,
                step_status=PlanStepStatus.COMPLETED.value,
            )
            logger.info(
                f"Marked step {self.current_step_index} as completed in plan {self.active_plan_id}"
            )
        except Exception as e:
            logger.warning(f"Failed to update plan status: {e}")
            # Update step status directly in planning tool storage
            if self.active_plan_id in self.planning_tool.plans:
                plan_data = self.planning_tool.plans[self.active_plan_id]
                step_statuses = plan_data.get("step_statuses", [])

                # Ensure the step_statuses list is long enough
                while len(step_statuses) <= self.current_step_index:
                    step_statuses.append(PlanStepStatus.NOT_STARTED.value)

                # Update the status
                step_statuses[self.current_step_index] = PlanStepStatus.COMPLETED.value
                plan_data["step_statuses"] = step_statuses

    async def _get_plan_text(self) -> str:
        """Get the current plan as formatted text."""
        try:
            result = await self.planning_tool.execute(
                command="get", plan_id=self.active_plan_id
            )
            return result.output if hasattr(result, "output") else str(result)
        except Exception as e:
            logger.error(f"Error getting plan: {e}")
            return self._generate_plan_text_from_storage()

    def _generate_plan_text_from_storage(self) -> str:
        """Generate plan text directly from storage if the planning tool fails."""
        try:
            if self.active_plan_id not in self.planning_tool.plans:
                return f"Error: Plan with ID {self.active_plan_id} not found"

            plan_data = self.planning_tool.plans[self.active_plan_id]
            title = plan_data.get("title", "Untitled Plan")
            steps = plan_data.get("steps", [])
            step_statuses = plan_data.get("step_statuses", [])
            step_notes = plan_data.get("step_notes", [])

            # Ensure step_statuses and step_notes match the number of steps
            while len(step_statuses) < len(steps):
                step_statuses.append(PlanStepStatus.NOT_STARTED.value)
            while len(step_notes) < len(steps):
                step_notes.append("")

            # Count steps by status
            status_counts = {status: 0 for status in PlanStepStatus.get_all_statuses()}

            for status in step_statuses:
                if status in status_counts:
                    status_counts[status] += 1

            completed = status_counts[PlanStepStatus.COMPLETED.value]
            total = len(steps)
            progress = (completed / total) * 100 if total > 0 else 0

            plan_text = f"Plan: {title} (ID: {self.active_plan_id})\n"
            plan_text += "=" * len(plan_text) + "\n\n"

            plan_text += (
                f"Progress: {completed}/{total} steps completed ({progress:.1f}%)\n"
            )
            plan_text += f"Status: {status_counts[PlanStepStatus.COMPLETED.value]} completed, {status_counts[PlanStepStatus.IN_PROGRESS.value]} in progress, "
            plan_text += f"{status_counts[PlanStepStatus.BLOCKED.value]} blocked, {status_counts[PlanStepStatus.NOT_STARTED.value]} not started\n\n"
            plan_text += "Steps:\n"

            status_marks = PlanStepStatus.get_status_marks()

            for i, (step, status, notes) in enumerate(
                zip(steps, step_statuses, step_notes)
            ):
                # Use status marks to indicate step status
                status_mark = status_marks.get(
                    status, status_marks[PlanStepStatus.NOT_STARTED.value]
                )

                plan_text += f"{i}. {status_mark} {step}\n"
                if notes:
                    plan_text += f"   Notes: {notes}\n"

            return plan_text
        except Exception as e:
            logger.error(f"Error generating plan text from storage: {e}")
            return f"Error: Unable to retrieve plan with ID {self.active_plan_id}"

    async def _finalize_plan(self) -> str:
        """Finalize the plan and provide a summary using the flow's LLM directly."""
        plan_text = await self._get_plan_text()

        # Create a summary using the flow's LLM directly
        try:
            system_message = Message.system_message(
                "You are a planning assistant. Your task is to summarize the completed plan."
            )

            user_message = Message.user_message(
                f"The plan has been completed. Here is the final plan status:\n\n{plan_text}\n\nPlease provide a summary of what was accomplished and any final thoughts."
            )

            response = await self.llm.ask(
                messages=[user_message], system_msgs=[system_message]
            )

            return f"Plan completed:\n\n{response}"
        except Exception as e:
            logger.error(f"Error finalizing plan with LLM: {e}")

            # Fallback to using an agent for the summary
            try:
                agent = self.primary_agent
                summary_prompt = f"""
                The plan has been completed. Here is the final plan status:

                {plan_text}

                Please provide a summary of what was accomplished and any final thoughts.
                """
                summary = await agent.run(summary_prompt)
                return f"Plan completed:\n\n{summary}"
            except Exception as e2:
                logger.error(f"Error finalizing plan with agent: {e2}")
                return "Plan completed. Error generating summary."
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/flow/__init__.py
app/flow/__init__.py
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/flow/base.py
app/flow/base.py
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union

from pydantic import BaseModel

from app.agent.base import BaseAgent


class BaseFlow(BaseModel, ABC):
    """Base class for execution flows supporting multiple agents.

    Normalizes whatever the caller passes (a single agent, a list, or a
    keyed dict) into ``self.agents`` and designates one entry as primary.
    Subclasses implement :meth:`execute`.
    """

    # Keyed registry of participating agents; populated by __init__.
    agents: Dict[str, BaseAgent]
    tools: Optional[List] = None
    # Key into `agents` naming the primary agent; defaults to the first one.
    primary_agent_key: Optional[str] = None

    class Config:
        # BaseAgent is not a pydantic model, so allow arbitrary field types.
        arbitrary_types_allowed = True

    def __init__(
        self, agents: Union[BaseAgent, List[BaseAgent], Dict[str, BaseAgent]], **data
    ):
        """Normalize *agents* into a dict and pick a primary key before
        delegating validation to pydantic's ``BaseModel.__init__``."""
        # Handle different ways of providing agents
        if isinstance(agents, BaseAgent):
            agents_dict = {"default": agents}
        elif isinstance(agents, list):
            # Lists get synthetic keys "agent_0", "agent_1", ...
            agents_dict = {f"agent_{i}": agent for i, agent in enumerate(agents)}
        else:
            agents_dict = agents

        # If primary agent not specified, use first agent
        primary_key = data.get("primary_agent_key")
        if not primary_key and agents_dict:
            # Relies on dict insertion order: first-provided agent wins.
            primary_key = next(iter(agents_dict))
            data["primary_agent_key"] = primary_key

        # Set the agents dictionary
        data["agents"] = agents_dict

        # Initialize using BaseModel's init
        super().__init__(**data)

    @property
    def primary_agent(self) -> Optional[BaseAgent]:
        """Get the primary agent for the flow"""
        return self.agents.get(self.primary_agent_key)

    def get_agent(self, key: str) -> Optional[BaseAgent]:
        """Get a specific agent by key"""
        return self.agents.get(key)

    def add_agent(self, key: str, agent: BaseAgent) -> None:
        """Add a new agent to the flow"""
        self.agents[key] = agent

    @abstractmethod
    async def execute(self, input_text: str) -> str:
        """Execute the flow with given input"""
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/utils/files_utils.py
app/utils/files_utils.py
import os

# Files to exclude from operations
EXCLUDED_FILES = {
    ".DS_Store",
    ".gitignore",
    "package-lock.json",
    "postcss.config.js",
    "postcss.config.mjs",
    "jsconfig.json",
    "components.json",
    "tsconfig.tsbuildinfo",
    "tsconfig.json",
}

# Directories to exclude from operations
EXCLUDED_DIRS = {"node_modules", ".next", "dist", "build", ".git"}

# File extensions to exclude from operations
EXCLUDED_EXT = {
    ".ico",
    ".svg",
    ".png",
    ".jpg",
    ".jpeg",
    ".gif",
    ".bmp",
    ".tiff",
    ".webp",
    ".db",
    ".sql",
}


def should_exclude_file(rel_path: str) -> bool:
    """Check if a file should be excluded based on path, name, or extension.

    Args:
        rel_path: Relative path of the file to check

    Returns:
        True if the file should be excluded, False otherwise
    """
    # Check filename
    filename = os.path.basename(rel_path)
    if filename in EXCLUDED_FILES:
        return True

    # Check directory: compare whole path components, not substrings, so a
    # directory like "mynode_modulesx" is not mistaken for "node_modules".
    dir_path = os.path.dirname(rel_path)
    components = dir_path.replace("\\", "/").split("/")
    if any(component in EXCLUDED_DIRS for component in components):
        return True

    # Check extension (case-insensitive)
    _, ext = os.path.splitext(filename)
    return ext.lower() in EXCLUDED_EXT


def clean_path(path: str, workspace_path: str = "/workspace") -> str:
    """Clean and normalize a path to be relative to the workspace.

    Args:
        path: The path to clean
        workspace_path: The base workspace path to remove (default: "/workspace")

    Returns:
        The cleaned path, relative to the workspace
    """
    # Remove any leading slash
    path = path.lstrip("/")

    # Strip the configured workspace prefix only at a component boundary, so
    # sibling names like "workspaces/x" or "workspace2/x" are left intact
    # (the old startswith-without-"/" check wrongly truncated them).
    ws_prefix = workspace_path.strip("/")
    if ws_prefix:
        if path == ws_prefix:
            path = ""
        elif path.startswith(ws_prefix + "/"):
            path = path[len(ws_prefix) + 1 :]

    # Remove a literal "workspace/" prefix if still present (legacy behavior
    # for callers using a non-default workspace_path).
    if path.startswith("workspace/"):
        path = path[len("workspace/") :]

    # Remove any remaining leading slash
    return path.lstrip("/")
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/utils/logger.py
app/utils/logger.py
import logging
import os

import structlog

# Deployment mode; anything other than "LOCAL" emits machine-readable JSON logs.
ENV_MODE = os.getenv("ENV_MODE", "LOCAL")

# Default to JSON output; switch to the human-friendly console renderer locally.
renderer = [structlog.processors.JSONRenderer()]
if ENV_MODE.lower() == "local".lower():
    renderer = [structlog.dev.ConsoleRenderer()]

structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        # Render exception tracebacks as structured dicts.
        structlog.processors.dict_tracebacks,
        # Attach caller location (file, function, line) to every event.
        structlog.processors.CallsiteParameterAdder(
            {
                structlog.processors.CallsiteParameter.FILENAME,
                structlog.processors.CallsiteParameter.FUNC_NAME,
                structlog.processors.CallsiteParameter.LINENO,
            }
        ),
        structlog.processors.TimeStamper(fmt="iso"),
        # Merge contextvars-bound context into each event dict.
        structlog.contextvars.merge_contextvars,
        # Final renderer (JSON or console) chosen above.
        *renderer,
    ],
    cache_logger_on_first_use=True,
)

# Module-wide logger instance shared across the app.
logger: structlog.stdlib.BoundLogger = structlog.get_logger(level=logging.DEBUG)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/utils/__init__.py
app/utils/__init__.py
# Utility functions and constants for agent tools
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/toolcall.py
app/prompt/toolcall.py
# System prompt for the generic tool-calling agent.
SYSTEM_PROMPT = "You are an agent that can execute tool calls"

# Appended each turn to remind the agent how to end the interaction.
NEXT_STEP_PROMPT = (
    "If you want to stop interaction, use `terminate` tool/function call."
)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/swe.py
app/prompt/swe.py
SYSTEM_PROMPT = """SETTING: You are an autonomous programmer, and you're working directly in the command line with a special interface. The special interface consists of a file editor that shows you {{WINDOW}} lines of a file at a time. In addition to typical bash commands, you can also use specific commands to help you navigate and edit files. To call a command, you need to invoke it with a function call/tool call. Please note that THE EDIT COMMAND REQUIRES PROPER INDENTATION. If you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run. RESPONSE FORMAT: Your shell prompt is formatted as follows: (Open file: <path>) (Current directory: <cwd>) bash-$ First, you should _always_ include a general thought about what you're going to do next. Then, for every response, you must include exactly _ONE_ tool call/function call. Remember, you should always include a _SINGLE_ tool call/function call and then wait for a response from the shell before continuing with more discussion and commands. Everything you include in the DISCUSSION section will be saved for future reference. If you'd like to issue two commands at once, PLEASE DO NOT DO THAT! Please instead first submit just the first tool call, and then after receiving a response you'll be able to issue the second tool call. Note that the environment does NOT support interactive session commands (e.g. python, vim), so please do not invoke them. """
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/manus.py
app/prompt/manus.py
SYSTEM_PROMPT = ( "You are OpenManus, an all-capable AI assistant, aimed at solving any task presented by the user. You have various tools at your disposal that you can call upon to efficiently complete complex requests. Whether it's programming, information retrieval, file processing, web browsing, or human interaction (only for extreme cases), you can handle it all." "The initial directory is: {directory}" ) NEXT_STEP_PROMPT = """ Based on user needs, proactively select the most appropriate tool or combination of tools. For complex tasks, you can break down the problem and use different tools step by step to solve it. After using each tool, clearly explain the execution results and suggest the next steps. If you want to stop the interaction at any point, use the `terminate` tool/function call. """
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/planning.py
app/prompt/planning.py
PLANNING_SYSTEM_PROMPT = """ You are an expert Planning Agent tasked with solving problems efficiently through structured plans. Your job is: 1. Analyze requests to understand the task scope 2. Create a clear, actionable plan that makes meaningful progress with the `planning` tool 3. Execute steps using available tools as needed 4. Track progress and adapt plans when necessary 5. Use `finish` to conclude immediately when the task is complete Available tools will vary by task but may include: - `planning`: Create, update, and track plans (commands: create, update, mark_step, etc.) - `finish`: End the task when complete Break tasks into logical steps with clear outcomes. Avoid excessive detail or sub-steps. Think about dependencies and verification methods. Know when to conclude - don't continue thinking once objectives are met. """ NEXT_STEP_PROMPT = """ Based on the current state, what's your next action? Choose the most efficient path forward: 1. Is the plan sufficient, or does it need refinement? 2. Can you execute the next step immediately? 3. Is the task complete? If so, use `finish` right away. Be concise in your reasoning, then select the appropriate tool or action. """
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/mcp.py
app/prompt/mcp.py
"""Prompts for the MCP Agent.""" SYSTEM_PROMPT = """You are an AI assistant with access to a Model Context Protocol (MCP) server. You can use the tools provided by the MCP server to complete tasks. The MCP server will dynamically expose tools that you can use - always check the available tools first. When using an MCP tool: 1. Choose the appropriate tool based on your task requirements 2. Provide properly formatted arguments as required by the tool 3. Observe the results and use them to determine next steps 4. Tools may change during operation - new tools might appear or existing ones might disappear Follow these guidelines: - Call tools with valid parameters as documented in their schemas - Handle errors gracefully by understanding what went wrong and trying again with corrected parameters - For multimedia responses (like images), you'll receive a description of the content - Complete user requests step by step, using the most appropriate tools - If multiple tools need to be called in sequence, make one call at a time and wait for results Remember to clearly explain your reasoning and actions to the user. """ NEXT_STEP_PROMPT = """Based on the current state and available tools, what should be done next? Think step by step about the problem and identify which MCP tool would be most helpful for the current stage. If you've already made progress, consider what additional information you need or what actions would move you closer to completing the task. """ # Additional specialized prompts TOOL_ERROR_PROMPT = """You encountered an error with the tool '{tool_name}'. Try to understand what went wrong and correct your approach. Common issues include: - Missing or incorrect parameters - Invalid parameter formats - Using a tool that's no longer available - Attempting an operation that's not supported Please check the tool specifications and try again with corrected parameters. """ MULTIMEDIA_RESPONSE_PROMPT = """You've received a multimedia response (image, audio, etc.) 
from the tool '{tool_name}'. This content has been processed and described for you. Use this information to continue the task or provide insights to the user. """
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/browser.py
app/prompt/browser.py
SYSTEM_PROMPT = """\ You are an AI agent designed to automate browser tasks. Your goal is to accomplish the ultimate task following the rules. # Input Format Task Previous steps Current URL Open Tabs Interactive Elements [index]<type>text</type> - index: Numeric identifier for interaction - type: HTML element type (button, input, etc.) - text: Element description Example: [33]<button>Submit Form</button> - Only elements with numeric indexes in [] are interactive - elements without [] provide only context # Response Rules 1. RESPONSE FORMAT: You must ALWAYS respond with valid JSON in this exact format: {{"current_state": {{"evaluation_previous_goal": "Success|Failed|Unknown - Analyze the current elements and the image to check if the previous goals/actions are successful like intended by the task. Mention if something unexpected happened. Shortly state why/why not", "memory": "Description of what has been done and what you need to remember. Be very specific. Count here ALWAYS how many times you have done something and how many remain. E.g. 0 out of 10 websites analyzed. Continue with abc and xyz", "next_goal": "What needs to be done with the next immediate action"}}, "action":[{{"one_action_name": {{// action-specific parameter}}}}, // ... more actions in sequence]}} 2. ACTIONS: You can specify multiple actions in the list to be executed in sequence. But always specify only one action name per item. Use maximum {{max_actions}} actions per sequence. Common action sequences: - Form filling: [{{"input_text": {{"index": 1, "text": "username"}}}}, {{"input_text": {{"index": 2, "text": "password"}}}}, {{"click_element": {{"index": 3}}}}] - Navigation and extraction: [{{"go_to_url": {{"url": "https://example.com"}}}}, {{"extract_content": {{"goal": "extract the names"}}}}] - Actions are executed in the given order - If the page changes after an action, the sequence is interrupted and you get the new state. 
- Only provide the action sequence until an action which changes the page state significantly. - Try to be efficient, e.g. fill forms at once, or chain actions where nothing changes on the page - only use multiple actions if it makes sense. 3. ELEMENT INTERACTION: - Only use indexes of the interactive elements - Elements marked with "[]Non-interactive text" are non-interactive 4. NAVIGATION & ERROR HANDLING: - If no suitable elements exist, use other functions to complete the task - If stuck, try alternative approaches - like going back to a previous page, new search, new tab etc. - Handle popups/cookies by accepting or closing them - Use scroll to find elements you are looking for - If you want to research something, open a new tab instead of using the current tab - If captcha pops up, try to solve it - else try a different approach - If the page is not fully loaded, use wait action 5. TASK COMPLETION: - Use the done action as the last action as soon as the ultimate task is complete - Dont use "done" before you are done with everything the user asked you, except you reach the last step of max_steps. - If you reach your last step, use the done action even if the task is not fully finished. Provide all the information you have gathered so far. If the ultimate task is completly finished set success to true. If not everything the user asked for is completed set success in done to false! - If you have to do something repeatedly for example the task says for "each", or "for all", or "x times", count always inside "memory" how many times you have done it and how many remain. Don't stop until you have completed like the task asked you. Only call done after the last step. - Don't hallucinate actions - Make sure you include everything you found out for the ultimate task in the done text parameter. Do not just say you are done, but include the requested information of the task. 6. 
VISUAL CONTEXT: - When an image is provided, use it to understand the page layout - Bounding boxes with labels on their top right corner correspond to element indexes 7. Form filling: - If you fill an input field and your action sequence is interrupted, most often something changed e.g. suggestions popped up under the field. 8. Long tasks: - Keep track of the status and subresults in the memory. 9. Extraction: - If your task is to find information - call extract_content on the specific pages to get and store the information. Your responses must be always JSON with the specified format. """ NEXT_STEP_PROMPT = """ What should I do next to achieve my goal? When you see [Current state starts here], focus on the following: - Current URL and page title{url_placeholder} - Available tabs{tabs_placeholder} - Interactive elements and their indices - Content above{content_above_placeholder} or below{content_below_placeholder} the viewport (if indicated) - Any action results or errors{results_placeholder} For browser interactions: - To navigate: browser_use with action="go_to_url", url="..." - To click: browser_use with action="click_element", index=N - To type: browser_use with action="input_text", index=N, text="..." - To extract: browser_use with action="extract_content", goal="..." - To scroll: browser_use with action="scroll_down" or "scroll_up" Consider both what's visible and what might be beyond the current viewport. Be methodical - remember your progress and what you've learned so far. If you want to stop the interaction at any point, use the `terminate` tool/function call. """
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/__init__.py
app/prompt/__init__.py
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/prompt/visualization.py
app/prompt/visualization.py
# Prompt constants for the data-analysis / visualization agent.
# SYSTEM_PROMPT seeds the agent's role; the {directory} placeholder is
# substituted with the sandbox workspace path by the caller.
# NEXT_STEP_PROMPT steers per-step tool selection and error recovery.
# NOTE(review): the original triple-quoted strings contained newlines that
# were collapsed by the dump; the token stream below is kept verbatim.
SYSTEM_PROMPT = """You are an AI agent designed to data analysis / visualization task. You have various tools at your disposal that you can call upon to efficiently complete complex requests. # Note: 1. The workspace directory is: {directory}; Read / write file in workspace 2. Generate analysis conclusion report in the end""" NEXT_STEP_PROMPT = """Based on user needs, break down the problem and use different tools step by step to solve it. # Note 1. Each step select the most appropriate tool proactively (ONLY ONE). 2. After using each tool, clearly explain the execution results and suggest the next steps. 3. When observation with Error, review and fix it."""
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/daytona/sandbox.py
app/daytona/sandbox.py
"""Daytona sandbox lifecycle helpers.

Initializes a module-level Daytona client from ``config.daytona`` and exposes
helpers to retrieve/start, create, and delete sandboxes.  Importing this
module has side effects: it logs the configuration state and constructs the
client.
"""

import time

from daytona import (
    CreateSandboxFromImageParams,
    Daytona,
    DaytonaConfig,
    Resources,
    Sandbox,
    SandboxState,
    SessionExecuteRequest,
)

from app.config import config
from app.utils.logger import logger

# load_dotenv()

daytona_settings = config.daytona
logger.info("Initializing Daytona sandbox configuration")

daytona_config = DaytonaConfig(
    api_key=daytona_settings.daytona_api_key,
    server_url=daytona_settings.daytona_server_url,
    target=daytona_settings.daytona_target,
)

# Surface misconfiguration early: each missing setting is logged once at
# import time instead of failing later with an opaque API error.
if daytona_config.api_key:
    logger.info("Daytona API key configured successfully")
else:
    logger.warning("No Daytona API key found in environment variables")

if daytona_config.server_url:
    logger.info(f"Daytona server URL set to: {daytona_config.server_url}")
else:
    logger.warning("No Daytona server URL found in environment variables")

if daytona_config.target:
    logger.info(f"Daytona target set to: {daytona_config.target}")
else:
    logger.warning("No Daytona target found in environment variables")

daytona = Daytona(daytona_config)
logger.info("Daytona client initialized")


async def get_or_start_sandbox(sandbox_id: str):
    """Retrieve a sandbox by ID, check its state, and start it if needed.

    Args:
        sandbox_id: Identifier of an existing Daytona sandbox.

    Returns:
        The running ``Sandbox`` instance.

    Raises:
        Exception: Propagates any retrieval or startup failure unchanged.
    """
    logger.info(f"Getting or starting sandbox with ID: {sandbox_id}")
    try:
        sandbox = daytona.get(sandbox_id)

        # An archived or stopped sandbox must be started (and supervisord
        # relaunched) before it can serve requests.
        if sandbox.state in (SandboxState.ARCHIVED, SandboxState.STOPPED):
            logger.info(f"Sandbox is in {sandbox.state} state. Starting...")
            try:
                daytona.start(sandbox)
                # Wait a moment for the sandbox to initialize
                # sleep(5)
                # Refresh sandbox state after starting
                sandbox = daytona.get(sandbox_id)
                # Start supervisord in a session when restarting
                start_supervisord_session(sandbox)
            except Exception as e:
                logger.error(f"Error starting sandbox: {e}")
                raise  # bare raise keeps the original traceback intact

        logger.info(f"Sandbox {sandbox_id} is ready")
        return sandbox
    except Exception as e:
        logger.error(f"Error retrieving or starting sandbox: {str(e)}")
        raise


def start_supervisord_session(sandbox: Sandbox):
    """Start supervisord inside the sandbox in a dedicated session.

    Args:
        sandbox: Sandbox in which to launch supervisord.

    Raises:
        Exception: Propagates session-creation or command-execution failures.
    """
    session_id = "supervisord-session"
    try:
        logger.info(f"Creating session {session_id} for supervisord")
        sandbox.process.create_session(session_id)

        # Execute supervisord command; var_async lets it keep running inside
        # the session after this call returns.
        sandbox.process.execute_session_command(
            session_id,
            SessionExecuteRequest(
                command="exec /usr/bin/supervisord -n -c /etc/supervisor/conf.d/supervisord.conf",
                var_async=True,
            ),
        )
        # NOTE(review): fixed 25 s sleep to let supervisord come up; polling
        # the session output would be more robust — confirm before changing.
        time.sleep(25)  # Wait a bit to ensure supervisord starts properly
        logger.info(f"Supervisord started in session {session_id}")
    except Exception as e:
        logger.error(f"Error starting supervisord session: {str(e)}")
        raise


def create_sandbox(password: str, project_id: "str | None" = None):
    """Create a new sandbox with all required services configured and running.

    Args:
        password: VNC password injected into the sandbox environment.
        project_id: Optional project identifier stored as the sandbox label.
            (Annotation fixed: the default of ``None`` makes this Optional.)

    Returns:
        The newly created ``Sandbox`` instance.
    """
    logger.info("Creating new Daytona sandbox environment")
    logger.info("Configuring sandbox with browser-use image and environment variables")

    labels = None
    if project_id:
        logger.info(f"Using sandbox_id as label: {project_id}")
        labels = {"id": project_id}

    params = CreateSandboxFromImageParams(
        image=daytona_settings.sandbox_image_name,
        public=True,
        labels=labels,
        env_vars={
            "CHROME_PERSISTENT_SESSION": "true",
            "RESOLUTION": "1024x768x24",
            "RESOLUTION_WIDTH": "1024",
            "RESOLUTION_HEIGHT": "768",
            "VNC_PASSWORD": password,
            "ANONYMIZED_TELEMETRY": "false",
            "CHROME_PATH": "",
            "CHROME_USER_DATA": "",
            "CHROME_DEBUGGING_PORT": "9222",
            "CHROME_DEBUGGING_HOST": "localhost",
            "CHROME_CDP": "",
        },
        resources=Resources(
            cpu=2,
            memory=4,
            disk=5,
        ),
        auto_stop_interval=15,
        auto_archive_interval=24 * 60,
    )

    # Create the sandbox
    sandbox = daytona.create(params)
    logger.info(f"Sandbox created with ID: {sandbox.id}")

    # Start supervisord in a session for new sandbox
    start_supervisord_session(sandbox)

    logger.info("Sandbox environment successfully initialized")
    return sandbox


async def delete_sandbox(sandbox_id: str):
    """Delete a sandbox by its ID.

    Args:
        sandbox_id: Identifier of the sandbox to remove.

    Returns:
        True on successful deletion.

    Raises:
        Exception: Propagates lookup or deletion failures unchanged.
    """
    logger.info(f"Deleting sandbox with ID: {sandbox_id}")
    try:
        # Get the sandbox
        sandbox = daytona.get(sandbox_id)
        # Delete the sandbox
        daytona.delete(sandbox)
        logger.info(f"Successfully deleted sandbox {sandbox_id}")
        return True
    except Exception as e:
        logger.error(f"Error deleting sandbox {sandbox_id}: {str(e)}")
        raise
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/daytona/tool_base.py
app/daytona/tool_base.py
# NOTE(review): this module builds its own Daytona client from the same
# config as app.daytona.sandbox — two clients exist at runtime; confirm this
# duplication is intentional.
# NOTE(review): `_sessions: dict[str, str] = {}` is a class-level mutable
# default; whether it is shared across instances depends on pydantic's
# `underscore_attrs_are_private` handling — confirm before relying on it.
# NOTE(review): `_sandbox_id` and `_sandbox_pass` are never assigned in
# `_ensure_sandbox`, so the `sandbox_id` property always raises RuntimeError
# as written — verify against subclasses/callers.
from dataclasses import dataclass, field from datetime import datetime from typing import Any, ClassVar, Dict, Optional from daytona import Daytona, DaytonaConfig, Sandbox, SandboxState from pydantic import Field from app.config import config from app.daytona.sandbox import create_sandbox, start_supervisord_session from app.tool.base import BaseTool from app.utils.files_utils import clean_path from app.utils.logger import logger # load_dotenv() daytona_settings = config.daytona daytona_config = DaytonaConfig( api_key=daytona_settings.daytona_api_key, server_url=daytona_settings.daytona_server_url, target=daytona_settings.daytona_target, ) daytona = Daytona(daytona_config) @dataclass class ThreadMessage: """ Represents a message to be added to a thread. """ type: str content: Dict[str, Any] is_llm_message: bool = False metadata: Optional[Dict[str, Any]] = None timestamp: Optional[float] = field( default_factory=lambda: datetime.now().timestamp() ) def to_dict(self) -> Dict[str, Any]: """Convert the message to a dictionary for API calls""" return { "type": self.type, "content": self.content, "is_llm_message": self.is_llm_message, "metadata": self.metadata or {}, "timestamp": self.timestamp, } class SandboxToolsBase(BaseTool): """Base class for all sandbox tools that provides project-based sandbox access.""" # Class variable to track if sandbox URLs have been printed _urls_printed: ClassVar[bool] = False # Required fields project_id: Optional[str] = None # thread_manager: Optional[ThreadManager] = None # Private fields (not part of the model schema) _sandbox: Optional[Sandbox] = None _sandbox_id: Optional[str] = None _sandbox_pass: Optional[str] = None workspace_path: str = Field(default="/workspace", exclude=True) _sessions: dict[str, str] = {} class Config: arbitrary_types_allowed = True # Allow non-pydantic types like ThreadManager underscore_attrs_are_private = True async def _ensure_sandbox(self) -> Sandbox: """Ensure we have a valid sandbox instance, retrieving 
it from the project if needed.""" if self._sandbox is None: # Get or start the sandbox try: self._sandbox = create_sandbox(password=config.daytona.VNC_password) # Log URLs if not already printed if not SandboxToolsBase._urls_printed: vnc_link = self._sandbox.get_preview_link(6080) website_link = self._sandbox.get_preview_link(8080) vnc_url = ( vnc_link.url if hasattr(vnc_link, "url") else str(vnc_link) ) website_url = ( website_link.url if hasattr(website_link, "url") else str(website_link) ) print("\033[95m***") print(f"VNC URL: {vnc_url}") print(f"Website URL: {website_url}") print("***\033[0m") SandboxToolsBase._urls_printed = True except Exception as e: logger.error(f"Error retrieving or starting sandbox: {str(e)}") raise e else: if ( self._sandbox.state == SandboxState.ARCHIVED or self._sandbox.state == SandboxState.STOPPED ): logger.info(f"Sandbox is in {self._sandbox.state} state. Starting...") try: daytona.start(self._sandbox) # Wait a moment for the sandbox to initialize # sleep(5) # Refresh sandbox state after starting # Start supervisord in a session when restarting start_supervisord_session(self._sandbox) except Exception as e: logger.error(f"Error starting sandbox: {e}") raise e return self._sandbox @property def sandbox(self) -> Sandbox: """Get the sandbox instance, ensuring it exists.""" if self._sandbox is None: raise RuntimeError("Sandbox not initialized. Call _ensure_sandbox() first.") return self._sandbox @property def sandbox_id(self) -> str: """Get the sandbox ID, ensuring it exists.""" if self._sandbox_id is None: raise RuntimeError( "Sandbox ID not initialized. Call _ensure_sandbox() first." ) return self._sandbox_id def clean_path(self, path: str) -> str: """Clean and normalize a path to be relative to /workspace.""" cleaned_path = clean_path(path, self.workspace_path) logger.debug(f"Cleaned path: {path} -> {cleaned_path}") return cleaned_path
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/sandbox/client.py
app/sandbox/client.py
"""Sandbox client abstractions and the local Docker-backed implementation.

Defines the structural file-operations protocol, the abstract client
contract, and ``LocalSandboxClient``, which delegates every operation to a
``DockerSandbox`` instance created on demand.
"""

from abc import ABC, abstractmethod
from typing import Dict, Optional, Protocol

from app.config import SandboxSettings
from app.sandbox.core.sandbox import DockerSandbox


class SandboxFileOperations(Protocol):
    """Structural (duck-typed) interface for host/sandbox file transfer."""

    async def copy_from(self, container_path: str, local_path: str) -> None:
        """Copy a file out of the container to ``local_path``."""
        ...

    async def copy_to(self, local_path: str, container_path: str) -> None:
        """Copy a local file into the container at ``container_path``."""
        ...

    async def read_file(self, path: str) -> str:
        """Return the text content of ``path`` inside the container."""
        ...

    async def write_file(self, path: str, content: str) -> None:
        """Write ``content`` to ``path`` inside the container."""
        ...


class BaseSandboxClient(ABC):
    """Abstract contract that every sandbox client must fulfill."""

    @abstractmethod
    async def create(
        self,
        config: Optional[SandboxSettings] = None,
        volume_bindings: Optional[Dict[str, str]] = None,
    ) -> None:
        """Provision the sandbox."""

    @abstractmethod
    async def run_command(self, command: str, timeout: Optional[int] = None) -> str:
        """Run a shell command in the sandbox and return its output."""

    @abstractmethod
    async def copy_from(self, container_path: str, local_path: str) -> None:
        """Copy a file from the sandbox to the host."""

    @abstractmethod
    async def copy_to(self, local_path: str, container_path: str) -> None:
        """Copy a file from the host into the sandbox."""

    @abstractmethod
    async def read_file(self, path: str) -> str:
        """Read a file from the sandbox."""

    @abstractmethod
    async def write_file(self, path: str, content: str) -> None:
        """Write a file into the sandbox."""

    @abstractmethod
    async def cleanup(self) -> None:
        """Release all sandbox resources."""


class LocalSandboxClient(BaseSandboxClient):
    """Sandbox client backed by a locally managed Docker container."""

    def __init__(self):
        """Start with no sandbox; one is created lazily by :meth:`create`."""
        self.sandbox: Optional[DockerSandbox] = None

    def _require_sandbox(self) -> DockerSandbox:
        """Return the active sandbox, failing if :meth:`create` never ran.

        Raises:
            RuntimeError: If the sandbox has not been initialized.
        """
        if not self.sandbox:
            raise RuntimeError("Sandbox not initialized")
        return self.sandbox

    async def create(
        self,
        config: Optional[SandboxSettings] = None,
        volume_bindings: Optional[Dict[str, str]] = None,
    ) -> None:
        """Create and start the underlying Docker sandbox.

        Args:
            config: Sandbox configuration.
            volume_bindings: Volume mappings.

        Raises:
            RuntimeError: If sandbox creation fails.
        """
        self.sandbox = DockerSandbox(config, volume_bindings)
        await self.sandbox.create()

    async def run_command(self, command: str, timeout: Optional[int] = None) -> str:
        """Execute ``command`` in the sandbox, returning its output.

        Raises:
            RuntimeError: If the sandbox is not initialized.
        """
        return await self._require_sandbox().run_command(command, timeout)

    async def copy_from(self, container_path: str, local_path: str) -> None:
        """Copy ``container_path`` out of the sandbox to ``local_path``.

        Raises:
            RuntimeError: If the sandbox is not initialized.
        """
        await self._require_sandbox().copy_from(container_path, local_path)

    async def copy_to(self, local_path: str, container_path: str) -> None:
        """Copy ``local_path`` into the sandbox at ``container_path``.

        Raises:
            RuntimeError: If the sandbox is not initialized.
        """
        await self._require_sandbox().copy_to(local_path, container_path)

    async def read_file(self, path: str) -> str:
        """Return the content of ``path`` inside the sandbox.

        Raises:
            RuntimeError: If the sandbox is not initialized.
        """
        return await self._require_sandbox().read_file(path)

    async def write_file(self, path: str, content: str) -> None:
        """Write ``content`` to ``path`` inside the sandbox.

        Raises:
            RuntimeError: If the sandbox is not initialized.
        """
        await self._require_sandbox().write_file(path, content)

    async def cleanup(self) -> None:
        """Tear down the sandbox (no-op when none was created)."""
        if self.sandbox:
            await self.sandbox.cleanup()
            self.sandbox = None


def create_sandbox_client() -> LocalSandboxClient:
    """Build a fresh local sandbox client.

    Returns:
        LocalSandboxClient: Sandbox client instance.
    """
    return LocalSandboxClient()


# Module-level shared client used throughout the application.
SANDBOX_CLIENT = create_sandbox_client()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/sandbox/__init__.py
app/sandbox/__init__.py
""" Docker Sandbox Module Provides secure containerized execution environment with resource limits and isolation for running untrusted code. """ from app.sandbox.client import ( BaseSandboxClient, LocalSandboxClient, create_sandbox_client, ) from app.sandbox.core.exceptions import ( SandboxError, SandboxResourceError, SandboxTimeoutError, ) from app.sandbox.core.manager import SandboxManager from app.sandbox.core.sandbox import DockerSandbox __all__ = [ "DockerSandbox", "SandboxManager", "BaseSandboxClient", "LocalSandboxClient", "create_sandbox_client", "SandboxError", "SandboxTimeoutError", "SandboxResourceError", ]
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/sandbox/core/terminal.py
app/sandbox/core/terminal.py
""" Asynchronous Docker Terminal This module provides asynchronous terminal functionality for Docker containers, allowing interactive command execution with timeout control. """ import asyncio import re import socket from typing import Dict, Optional, Tuple, Union import docker from docker import APIClient from docker.errors import APIError from docker.models.containers import Container class DockerSession: def __init__(self, container_id: str) -> None: """Initializes a Docker session. Args: container_id: ID of the Docker container. """ self.api = APIClient() self.container_id = container_id self.exec_id = None self.socket = None async def create(self, working_dir: str, env_vars: Dict[str, str]) -> None: """Creates an interactive session with the container. Args: working_dir: Working directory inside the container. env_vars: Environment variables to set. Raises: RuntimeError: If socket connection fails. """ startup_command = [ "bash", "-c", f"cd {working_dir} && " "PROMPT_COMMAND='' " "PS1='$ ' " "exec bash --norc --noprofile", ] exec_data = self.api.exec_create( self.container_id, startup_command, stdin=True, tty=True, stdout=True, stderr=True, privileged=True, user="root", environment={**env_vars, "TERM": "dumb", "PS1": "$ ", "PROMPT_COMMAND": ""}, ) self.exec_id = exec_data["Id"] socket_data = self.api.exec_start( self.exec_id, socket=True, tty=True, stream=True, demux=True ) if hasattr(socket_data, "_sock"): self.socket = socket_data._sock self.socket.setblocking(False) else: raise RuntimeError("Failed to get socket connection") await self._read_until_prompt() async def close(self) -> None: """Cleans up session resources. 1. Sends exit command 2. Closes socket connection 3. 
Checks and cleans up exec instance """ try: if self.socket: # Send exit command to close bash session try: self.socket.sendall(b"exit\n") # Allow time for command execution await asyncio.sleep(0.1) except: pass # Ignore sending errors, continue cleanup # Close socket connection try: self.socket.shutdown(socket.SHUT_RDWR) except: pass # Some platforms may not support shutdown self.socket.close() self.socket = None if self.exec_id: try: # Check exec instance status exec_inspect = self.api.exec_inspect(self.exec_id) if exec_inspect.get("Running", False): # If still running, wait for it to complete await asyncio.sleep(0.5) except: pass # Ignore inspection errors, continue cleanup self.exec_id = None except Exception as e: # Log error but don't raise, ensure cleanup continues print(f"Warning: Error during session cleanup: {e}") async def _read_until_prompt(self) -> str: """Reads output until prompt is found. Returns: String containing output up to the prompt. Raises: socket.error: If socket communication fails. """ buffer = b"" while b"$ " not in buffer: try: chunk = self.socket.recv(4096) if chunk: buffer += chunk except socket.error as e: if e.errno == socket.EWOULDBLOCK: await asyncio.sleep(0.1) continue raise return buffer.decode("utf-8") async def execute(self, command: str, timeout: Optional[int] = None) -> str: """Executes a command and returns cleaned output. Args: command: Shell command to execute. timeout: Maximum execution time in seconds. Returns: Command output as string with prompt markers removed. Raises: RuntimeError: If session not initialized or execution fails. TimeoutError: If command execution exceeds timeout. 
""" if not self.socket: raise RuntimeError("Session not initialized") try: # Sanitize command to prevent shell injection sanitized_command = self._sanitize_command(command) full_command = f"{sanitized_command}\necho $?\n" self.socket.sendall(full_command.encode()) async def read_output() -> str: buffer = b"" result_lines = [] command_sent = False while True: try: chunk = self.socket.recv(4096) if not chunk: break buffer += chunk lines = buffer.split(b"\n") buffer = lines[-1] lines = lines[:-1] for line in lines: line = line.rstrip(b"\r") if not command_sent: command_sent = True continue if line.strip() == b"echo $?" or line.strip().isdigit(): continue if line.strip(): result_lines.append(line) if buffer.endswith(b"$ "): break except socket.error as e: if e.errno == socket.EWOULDBLOCK: await asyncio.sleep(0.1) continue raise output = b"\n".join(result_lines).decode("utf-8") output = re.sub(r"\n\$ echo \$\$?.*$", "", output) return output if timeout: result = await asyncio.wait_for(read_output(), timeout) else: result = await read_output() return result.strip() except asyncio.TimeoutError: raise TimeoutError(f"Command execution timed out after {timeout} seconds") except Exception as e: raise RuntimeError(f"Failed to execute command: {e}") def _sanitize_command(self, command: str) -> str: """Sanitizes the command string to prevent shell injection. Args: command: Raw command string. Returns: Sanitized command string. Raises: ValueError: If command contains potentially dangerous patterns. 
""" # Additional checks for specific risky commands risky_commands = [ "rm -rf /", "rm -rf /*", "mkfs", "dd if=/dev/zero", ":(){:|:&};:", "chmod -R 777 /", "chown -R", ] for risky in risky_commands: if risky in command.lower(): raise ValueError( f"Command contains potentially dangerous operation: {risky}" ) return command class AsyncDockerizedTerminal: def __init__( self, container: Union[str, Container], working_dir: str = "/workspace", env_vars: Optional[Dict[str, str]] = None, default_timeout: int = 60, ) -> None: """Initializes an asynchronous terminal for Docker containers. Args: container: Docker container ID or Container object. working_dir: Working directory inside the container. env_vars: Environment variables to set. default_timeout: Default command execution timeout in seconds. """ self.client = docker.from_env() self.container = ( container if isinstance(container, Container) else self.client.containers.get(container) ) self.working_dir = working_dir self.env_vars = env_vars or {} self.default_timeout = default_timeout self.session = None async def init(self) -> None: """Initializes the terminal environment. Ensures working directory exists and creates an interactive session. Raises: RuntimeError: If initialization fails. """ await self._ensure_workdir() self.session = DockerSession(self.container.id) await self.session.create(self.working_dir, self.env_vars) async def _ensure_workdir(self) -> None: """Ensures working directory exists in container. Raises: RuntimeError: If directory creation fails. """ try: await self._exec_simple(f"mkdir -p {self.working_dir}") except APIError as e: raise RuntimeError(f"Failed to create working directory: {e}") async def _exec_simple(self, cmd: str) -> Tuple[int, str]: """Executes a simple command using Docker's exec_run. Args: cmd: Command to execute. Returns: Tuple of (exit_code, output). 
""" result = await asyncio.to_thread( self.container.exec_run, cmd, environment=self.env_vars ) return result.exit_code, result.output.decode("utf-8") async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str: """Runs a command in the container with timeout. Args: cmd: Shell command to execute. timeout: Maximum execution time in seconds. Returns: Command output as string. Raises: RuntimeError: If terminal not initialized. """ if not self.session: raise RuntimeError("Terminal not initialized") return await self.session.execute(cmd, timeout=timeout or self.default_timeout) async def close(self) -> None: """Closes the terminal session.""" if self.session: await self.session.close() async def __aenter__(self) -> "AsyncDockerizedTerminal": """Async context manager entry.""" await self.init() return self async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: """Async context manager exit.""" await self.close()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/sandbox/core/exceptions.py
app/sandbox/core/exceptions.py
"""Exception classes for the sandbox system. This module defines custom exceptions used throughout the sandbox system to handle various error conditions in a structured way. """ class SandboxError(Exception): """Base exception for sandbox-related errors.""" class SandboxTimeoutError(SandboxError): """Exception raised when a sandbox operation times out.""" class SandboxResourceError(SandboxError): """Exception raised for resource-related errors."""
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/sandbox/core/sandbox.py
app/sandbox/core/sandbox.py
import asyncio import io import os import tarfile import tempfile import uuid from typing import Dict, Optional import docker from docker.errors import NotFound from docker.models.containers import Container from app.config import SandboxSettings from app.sandbox.core.exceptions import SandboxTimeoutError from app.sandbox.core.terminal import AsyncDockerizedTerminal class DockerSandbox: """Docker sandbox environment. Provides a containerized execution environment with resource limits, file operations, and command execution capabilities. Attributes: config: Sandbox configuration. volume_bindings: Volume mapping configuration. client: Docker client. container: Docker container instance. terminal: Container terminal interface. """ def __init__( self, config: Optional[SandboxSettings] = None, volume_bindings: Optional[Dict[str, str]] = None, ): """Initializes a sandbox instance. Args: config: Sandbox configuration. Default configuration used if None. volume_bindings: Volume mappings in {host_path: container_path} format. """ self.config = config or SandboxSettings() self.volume_bindings = volume_bindings or {} self.client = docker.from_env() self.container: Optional[Container] = None self.terminal: Optional[AsyncDockerizedTerminal] = None async def create(self) -> "DockerSandbox": """Creates and starts the sandbox container. Returns: Current sandbox instance. Raises: docker.errors.APIError: If Docker API call fails. RuntimeError: If container creation or startup fails. 
""" try: # Prepare container config host_config = self.client.api.create_host_config( mem_limit=self.config.memory_limit, cpu_period=100000, cpu_quota=int(100000 * self.config.cpu_limit), network_mode="none" if not self.config.network_enabled else "bridge", binds=self._prepare_volume_bindings(), ) # Generate unique container name with sandbox_ prefix container_name = f"sandbox_{uuid.uuid4().hex[:8]}" # Create container container = await asyncio.to_thread( self.client.api.create_container, image=self.config.image, command="tail -f /dev/null", hostname="sandbox", working_dir=self.config.work_dir, host_config=host_config, name=container_name, tty=True, detach=True, ) self.container = self.client.containers.get(container["Id"]) # Start container await asyncio.to_thread(self.container.start) # Initialize terminal self.terminal = AsyncDockerizedTerminal( container["Id"], self.config.work_dir, env_vars={"PYTHONUNBUFFERED": "1"} # Ensure Python output is not buffered ) await self.terminal.init() return self except Exception as e: await self.cleanup() # Ensure resources are cleaned up raise RuntimeError(f"Failed to create sandbox: {e}") from e def _prepare_volume_bindings(self) -> Dict[str, Dict[str, str]]: """Prepares volume binding configuration. Returns: Volume binding configuration dictionary. """ bindings = {} # Create and add working directory mapping work_dir = self._ensure_host_dir(self.config.work_dir) bindings[work_dir] = {"bind": self.config.work_dir, "mode": "rw"} # Add custom volume bindings for host_path, container_path in self.volume_bindings.items(): bindings[host_path] = {"bind": container_path, "mode": "rw"} return bindings @staticmethod def _ensure_host_dir(path: str) -> str: """Ensures directory exists on the host. Args: path: Directory path. Returns: Actual path on the host. 
""" host_path = os.path.join( tempfile.gettempdir(), f"sandbox_{os.path.basename(path)}_{os.urandom(4).hex()}", ) os.makedirs(host_path, exist_ok=True) return host_path async def run_command(self, cmd: str, timeout: Optional[int] = None) -> str: """Runs a command in the sandbox. Args: cmd: Command to execute. timeout: Timeout in seconds. Returns: Command output as string. Raises: RuntimeError: If sandbox not initialized or command execution fails. TimeoutError: If command execution times out. """ if not self.terminal: raise RuntimeError("Sandbox not initialized") try: return await self.terminal.run_command( cmd, timeout=timeout or self.config.timeout ) except TimeoutError: raise SandboxTimeoutError( f"Command execution timed out after {timeout or self.config.timeout} seconds" ) async def read_file(self, path: str) -> str: """Reads a file from the container. Args: path: File path. Returns: File contents as string. Raises: FileNotFoundError: If file does not exist. RuntimeError: If read operation fails. """ if not self.container: raise RuntimeError("Sandbox not initialized") try: # Get file archive resolved_path = self._safe_resolve_path(path) tar_stream, _ = await asyncio.to_thread( self.container.get_archive, resolved_path ) # Read file content from tar stream content = await self._read_from_tar(tar_stream) return content.decode("utf-8") except NotFound: raise FileNotFoundError(f"File not found: {path}") except Exception as e: raise RuntimeError(f"Failed to read file: {e}") async def write_file(self, path: str, content: str) -> None: """Writes content to a file in the container. Args: path: Target path. content: File content. Raises: RuntimeError: If write operation fails. 
""" if not self.container: raise RuntimeError("Sandbox not initialized") try: resolved_path = self._safe_resolve_path(path) parent_dir = os.path.dirname(resolved_path) # Create parent directory if parent_dir: await self.run_command(f"mkdir -p {parent_dir}") # Prepare file data tar_stream = await self._create_tar_stream( os.path.basename(path), content.encode("utf-8") ) # Write file await asyncio.to_thread( self.container.put_archive, parent_dir or "/", tar_stream ) except Exception as e: raise RuntimeError(f"Failed to write file: {e}") def _safe_resolve_path(self, path: str) -> str: """Safely resolves container path, preventing path traversal. Args: path: Original path. Returns: Resolved absolute path. Raises: ValueError: If path contains potentially unsafe patterns. """ # Check for path traversal attempts if ".." in path.split("/"): raise ValueError("Path contains potentially unsafe patterns") resolved = ( os.path.join(self.config.work_dir, path) if not os.path.isabs(path) else path ) return resolved async def copy_from(self, src_path: str, dst_path: str) -> None: """Copies a file from the container. Args: src_path: Source file path (container). dst_path: Destination path (host). Raises: FileNotFoundError: If source file does not exist. RuntimeError: If copy operation fails. 
""" try: # Ensure destination file's parent directory exists parent_dir = os.path.dirname(dst_path) if parent_dir: os.makedirs(parent_dir, exist_ok=True) # Get file stream resolved_src = self._safe_resolve_path(src_path) stream, stat = await asyncio.to_thread( self.container.get_archive, resolved_src ) # Create temporary directory to extract file with tempfile.TemporaryDirectory() as tmp_dir: # Write stream to temporary file tar_path = os.path.join(tmp_dir, "temp.tar") with open(tar_path, "wb") as f: for chunk in stream: f.write(chunk) # Extract file with tarfile.open(tar_path) as tar: members = tar.getmembers() if not members: raise FileNotFoundError(f"Source file is empty: {src_path}") # If destination is a directory, we should preserve relative path structure if os.path.isdir(dst_path): tar.extractall(dst_path) else: # If destination is a file, we only extract the source file's content if len(members) > 1: raise RuntimeError( f"Source path is a directory but destination is a file: {src_path}" ) with open(dst_path, "wb") as dst: src_file = tar.extractfile(members[0]) if src_file is None: raise RuntimeError( f"Failed to extract file: {src_path}" ) dst.write(src_file.read()) except docker.errors.NotFound: raise FileNotFoundError(f"Source file not found: {src_path}") except Exception as e: raise RuntimeError(f"Failed to copy file: {e}") async def copy_to(self, src_path: str, dst_path: str) -> None: """Copies a file to the container. Args: src_path: Source file path (host). dst_path: Destination path (container). Raises: FileNotFoundError: If source file does not exist. RuntimeError: If copy operation fails. 
""" try: if not os.path.exists(src_path): raise FileNotFoundError(f"Source file not found: {src_path}") # Create destination directory in container resolved_dst = self._safe_resolve_path(dst_path) container_dir = os.path.dirname(resolved_dst) if container_dir: await self.run_command(f"mkdir -p {container_dir}") # Create tar file to upload with tempfile.TemporaryDirectory() as tmp_dir: tar_path = os.path.join(tmp_dir, "temp.tar") with tarfile.open(tar_path, "w") as tar: # Handle directory source path if os.path.isdir(src_path): os.path.basename(src_path.rstrip("/")) for root, _, files in os.walk(src_path): for file in files: file_path = os.path.join(root, file) arcname = os.path.join( os.path.basename(dst_path), os.path.relpath(file_path, src_path), ) tar.add(file_path, arcname=arcname) else: # Add single file to tar tar.add(src_path, arcname=os.path.basename(dst_path)) # Read tar file content with open(tar_path, "rb") as f: data = f.read() # Upload to container await asyncio.to_thread( self.container.put_archive, os.path.dirname(resolved_dst) or "/", data, ) # Verify file was created successfully try: await self.run_command(f"test -e {resolved_dst}") except Exception: raise RuntimeError(f"Failed to verify file creation: {dst_path}") except FileNotFoundError: raise except Exception as e: raise RuntimeError(f"Failed to copy file: {e}") @staticmethod async def _create_tar_stream(name: str, content: bytes) -> io.BytesIO: """Creates a tar file stream. Args: name: Filename. content: File content. Returns: Tar file stream. """ tar_stream = io.BytesIO() with tarfile.open(fileobj=tar_stream, mode="w") as tar: tarinfo = tarfile.TarInfo(name=name) tarinfo.size = len(content) tar.addfile(tarinfo, io.BytesIO(content)) tar_stream.seek(0) return tar_stream @staticmethod async def _read_from_tar(tar_stream) -> bytes: """Reads file content from a tar stream. Args: tar_stream: Tar file stream. Returns: File content. Raises: RuntimeError: If read operation fails. 
""" with tempfile.NamedTemporaryFile() as tmp: for chunk in tar_stream: tmp.write(chunk) tmp.seek(0) with tarfile.open(fileobj=tmp) as tar: member = tar.next() if not member: raise RuntimeError("Empty tar archive") file_content = tar.extractfile(member) if not file_content: raise RuntimeError("Failed to extract file content") return file_content.read() async def cleanup(self) -> None: """Cleans up sandbox resources.""" errors = [] try: if self.terminal: try: await self.terminal.close() except Exception as e: errors.append(f"Terminal cleanup error: {e}") finally: self.terminal = None if self.container: try: await asyncio.to_thread(self.container.stop, timeout=5) except Exception as e: errors.append(f"Container stop error: {e}") try: await asyncio.to_thread(self.container.remove, force=True) except Exception as e: errors.append(f"Container remove error: {e}") finally: self.container = None except Exception as e: errors.append(f"General cleanup error: {e}") if errors: print(f"Warning: Errors during cleanup: {', '.join(errors)}") async def __aenter__(self) -> "DockerSandbox": """Async context manager entry.""" return await self.create() async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: """Async context manager exit.""" await self.cleanup()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/sandbox/core/manager.py
app/sandbox/core/manager.py
import asyncio import uuid from contextlib import asynccontextmanager from typing import Dict, Optional, Set import docker from docker.errors import APIError, ImageNotFound from app.config import SandboxSettings from app.logger import logger from app.sandbox.core.sandbox import DockerSandbox class SandboxManager: """Docker sandbox manager. Manages multiple DockerSandbox instances lifecycle including creation, monitoring, and cleanup. Provides concurrent access control and automatic cleanup mechanisms for sandbox resources. Attributes: max_sandboxes: Maximum allowed number of sandboxes. idle_timeout: Sandbox idle timeout in seconds. cleanup_interval: Cleanup check interval in seconds. _sandboxes: Active sandbox instance mapping. _last_used: Last used time record for sandboxes. """ def __init__( self, max_sandboxes: int = 100, idle_timeout: int = 3600, cleanup_interval: int = 300, ): """Initializes sandbox manager. Args: max_sandboxes: Maximum sandbox count limit. idle_timeout: Idle timeout in seconds. cleanup_interval: Cleanup check interval in seconds. """ self.max_sandboxes = max_sandboxes self.idle_timeout = idle_timeout self.cleanup_interval = cleanup_interval # Docker client self._client = docker.from_env() # Resource mappings self._sandboxes: Dict[str, DockerSandbox] = {} self._last_used: Dict[str, float] = {} # Concurrency control self._locks: Dict[str, asyncio.Lock] = {} self._global_lock = asyncio.Lock() self._active_operations: Set[str] = set() # Cleanup task self._cleanup_task: Optional[asyncio.Task] = None self._is_shutting_down = False # Start automatic cleanup self.start_cleanup_task() async def ensure_image(self, image: str) -> bool: """Ensures Docker image is available. Args: image: Image name. Returns: bool: Whether image is available. 
""" try: self._client.images.get(image) return True except ImageNotFound: try: logger.info(f"Pulling image {image}...") await asyncio.get_event_loop().run_in_executor( None, self._client.images.pull, image ) return True except (APIError, Exception) as e: logger.error(f"Failed to pull image {image}: {e}") return False @asynccontextmanager async def sandbox_operation(self, sandbox_id: str): """Context manager for sandbox operations. Provides concurrency control and usage time updates. Args: sandbox_id: Sandbox ID. Raises: KeyError: If sandbox not found. """ if sandbox_id not in self._locks: self._locks[sandbox_id] = asyncio.Lock() async with self._locks[sandbox_id]: if sandbox_id not in self._sandboxes: raise KeyError(f"Sandbox {sandbox_id} not found") self._active_operations.add(sandbox_id) try: self._last_used[sandbox_id] = asyncio.get_event_loop().time() yield self._sandboxes[sandbox_id] finally: self._active_operations.remove(sandbox_id) async def create_sandbox( self, config: Optional[SandboxSettings] = None, volume_bindings: Optional[Dict[str, str]] = None, ) -> str: """Creates a new sandbox instance. Args: config: Sandbox configuration. volume_bindings: Volume mapping configuration. Returns: str: Sandbox ID. Raises: RuntimeError: If max sandbox count reached or creation fails. 
""" async with self._global_lock: if len(self._sandboxes) >= self.max_sandboxes: raise RuntimeError( f"Maximum number of sandboxes ({self.max_sandboxes}) reached" ) config = config or SandboxSettings() if not await self.ensure_image(config.image): raise RuntimeError(f"Failed to ensure Docker image: {config.image}") sandbox_id = str(uuid.uuid4()) try: sandbox = DockerSandbox(config, volume_bindings) await sandbox.create() self._sandboxes[sandbox_id] = sandbox self._last_used[sandbox_id] = asyncio.get_event_loop().time() self._locks[sandbox_id] = asyncio.Lock() logger.info(f"Created sandbox {sandbox_id}") return sandbox_id except Exception as e: logger.error(f"Failed to create sandbox: {e}") if sandbox_id in self._sandboxes: await self.delete_sandbox(sandbox_id) raise RuntimeError(f"Failed to create sandbox: {e}") async def get_sandbox(self, sandbox_id: str) -> DockerSandbox: """Gets a sandbox instance. Args: sandbox_id: Sandbox ID. Returns: DockerSandbox: Sandbox instance. Raises: KeyError: If sandbox does not exist. 
""" async with self.sandbox_operation(sandbox_id) as sandbox: return sandbox def start_cleanup_task(self) -> None: """Starts automatic cleanup task.""" async def cleanup_loop(): while not self._is_shutting_down: try: await self._cleanup_idle_sandboxes() except Exception as e: logger.error(f"Error in cleanup loop: {e}") await asyncio.sleep(self.cleanup_interval) self._cleanup_task = asyncio.create_task(cleanup_loop()) async def _cleanup_idle_sandboxes(self) -> None: """Cleans up idle sandboxes.""" current_time = asyncio.get_event_loop().time() to_cleanup = [] async with self._global_lock: for sandbox_id, last_used in self._last_used.items(): if ( sandbox_id not in self._active_operations and current_time - last_used > self.idle_timeout ): to_cleanup.append(sandbox_id) for sandbox_id in to_cleanup: try: await self.delete_sandbox(sandbox_id) except Exception as e: logger.error(f"Error cleaning up sandbox {sandbox_id}: {e}") async def cleanup(self) -> None: """Cleans up all resources.""" logger.info("Starting manager cleanup...") self._is_shutting_down = True # Cancel cleanup task if self._cleanup_task: self._cleanup_task.cancel() try: await asyncio.wait_for(self._cleanup_task, timeout=1.0) except (asyncio.CancelledError, asyncio.TimeoutError): pass # Get all sandbox IDs to clean up async with self._global_lock: sandbox_ids = list(self._sandboxes.keys()) # Concurrently clean up all sandboxes cleanup_tasks = [] for sandbox_id in sandbox_ids: task = asyncio.create_task(self._safe_delete_sandbox(sandbox_id)) cleanup_tasks.append(task) if cleanup_tasks: # Wait for all cleanup tasks to complete, with timeout to avoid infinite waiting try: await asyncio.wait(cleanup_tasks, timeout=30.0) except asyncio.TimeoutError: logger.error("Sandbox cleanup timed out") # Clean up remaining references self._sandboxes.clear() self._last_used.clear() self._locks.clear() self._active_operations.clear() logger.info("Manager cleanup completed") async def _safe_delete_sandbox(self, sandbox_id: 
str) -> None: """Safely deletes a single sandbox. Args: sandbox_id: Sandbox ID to delete. """ try: if sandbox_id in self._active_operations: logger.warning( f"Sandbox {sandbox_id} has active operations, waiting for completion" ) for _ in range(10): # Wait at most 10 times await asyncio.sleep(0.5) if sandbox_id not in self._active_operations: break else: logger.warning( f"Timeout waiting for sandbox {sandbox_id} operations to complete" ) # Get reference to sandbox object sandbox = self._sandboxes.get(sandbox_id) if sandbox: await sandbox.cleanup() # Remove sandbox record from manager async with self._global_lock: self._sandboxes.pop(sandbox_id, None) self._last_used.pop(sandbox_id, None) self._locks.pop(sandbox_id, None) logger.info(f"Deleted sandbox {sandbox_id}") except Exception as e: logger.error(f"Error during cleanup of sandbox {sandbox_id}: {e}") async def delete_sandbox(self, sandbox_id: str) -> None: """Deletes specified sandbox. Args: sandbox_id: Sandbox ID. """ if sandbox_id not in self._sandboxes: return try: await self._safe_delete_sandbox(sandbox_id) except Exception as e: logger.error(f"Failed to delete sandbox {sandbox_id}: {e}") async def __aenter__(self) -> "SandboxManager": """Async context manager entry.""" return self async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: """Async context manager exit.""" await self.cleanup() def get_stats(self) -> Dict: """Gets manager statistics. Returns: Dict: Statistics information. """ return { "total_sandboxes": len(self._sandboxes), "active_operations": len(self._active_operations), "max_sandboxes": self.max_sandboxes, "idle_timeout": self.idle_timeout, "cleanup_interval": self.cleanup_interval, "is_shutting_down": self._is_shutting_down, }
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/toolcall.py
app/agent/toolcall.py
import asyncio import json from typing import Any, List, Optional, Union from pydantic import Field from app.agent.react import ReActAgent from app.exceptions import TokenLimitExceeded from app.logger import logger from app.prompt.toolcall import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.schema import TOOL_CHOICE_TYPE, AgentState, Message, ToolCall, ToolChoice from app.tool import CreateChatCompletion, Terminate, ToolCollection TOOL_CALL_REQUIRED = "Tool calls required but none provided" class ToolCallAgent(ReActAgent): """Base agent class for handling tool/function calls with enhanced abstraction""" name: str = "toolcall" description: str = "an agent that can execute tool calls." system_prompt: str = SYSTEM_PROMPT next_step_prompt: str = NEXT_STEP_PROMPT available_tools: ToolCollection = ToolCollection( CreateChatCompletion(), Terminate() ) tool_choices: TOOL_CHOICE_TYPE = ToolChoice.AUTO # type: ignore special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name]) tool_calls: List[ToolCall] = Field(default_factory=list) _current_base64_image: Optional[str] = None max_steps: int = 30 max_observe: Optional[Union[int, bool]] = None async def think(self) -> bool: """Process current state and decide next actions using tools""" if self.next_step_prompt: user_msg = Message.user_message(self.next_step_prompt) self.messages += [user_msg] try: # Get response with tool options response = await self.llm.ask_tool( messages=self.messages, system_msgs=( [Message.system_message(self.system_prompt)] if self.system_prompt else None ), tools=self.available_tools.to_params(), tool_choice=self.tool_choices, ) except ValueError: raise except Exception as e: # Check if this is a RetryError containing TokenLimitExceeded if hasattr(e, "__cause__") and isinstance(e.__cause__, TokenLimitExceeded): token_limit_error = e.__cause__ logger.error( f"🚨 Token limit error (from RetryError): {token_limit_error}" ) self.memory.add_message( Message.assistant_message( f"Maximum token 
limit reached, cannot continue execution: {str(token_limit_error)}" ) ) self.state = AgentState.FINISHED return False raise self.tool_calls = tool_calls = ( response.tool_calls if response and response.tool_calls else [] ) content = response.content if response and response.content else "" # Log response info logger.info(f"✨ {self.name}'s thoughts: {content}") logger.info( f"🛠️ {self.name} selected {len(tool_calls) if tool_calls else 0} tools to use" ) if tool_calls: logger.info( f"🧰 Tools being prepared: {[call.function.name for call in tool_calls]}" ) logger.info(f"🔧 Tool arguments: {tool_calls[0].function.arguments}") try: if response is None: raise RuntimeError("No response received from the LLM") # Handle different tool_choices modes if self.tool_choices == ToolChoice.NONE: if tool_calls: logger.warning( f"🤔 Hmm, {self.name} tried to use tools when they weren't available!" ) if content: self.memory.add_message(Message.assistant_message(content)) return True return False # Create and add assistant message assistant_msg = ( Message.from_tool_calls(content=content, tool_calls=self.tool_calls) if self.tool_calls else Message.assistant_message(content) ) self.memory.add_message(assistant_msg) if self.tool_choices == ToolChoice.REQUIRED and not self.tool_calls: return True # Will be handled in act() # For 'auto' mode, continue with content if no commands but content exists if self.tool_choices == ToolChoice.AUTO and not self.tool_calls: return bool(content) return bool(self.tool_calls) except Exception as e: logger.error(f"🚨 Oops! 
The {self.name}'s thinking process hit a snag: {e}") self.memory.add_message( Message.assistant_message( f"Error encountered while processing: {str(e)}" ) ) return False async def act(self) -> str: """Execute tool calls and handle their results""" if not self.tool_calls: if self.tool_choices == ToolChoice.REQUIRED: raise ValueError(TOOL_CALL_REQUIRED) # Return last message content if no tool calls return self.messages[-1].content or "No content or commands to execute" results = [] for command in self.tool_calls: # Reset base64_image for each tool call self._current_base64_image = None result = await self.execute_tool(command) if self.max_observe: result = result[: self.max_observe] logger.info( f"🎯 Tool '{command.function.name}' completed its mission! Result: {result}" ) # Add tool response to memory tool_msg = Message.tool_message( content=result, tool_call_id=command.id, name=command.function.name, base64_image=self._current_base64_image, ) self.memory.add_message(tool_msg) results.append(result) return "\n\n".join(results) async def execute_tool(self, command: ToolCall) -> str: """Execute a single tool call with robust error handling""" if not command or not command.function or not command.function.name: return "Error: Invalid command format" name = command.function.name if name not in self.available_tools.tool_map: return f"Error: Unknown tool '{name}'" try: # Parse arguments args = json.loads(command.function.arguments or "{}") # Execute the tool logger.info(f"🔧 Activating tool: '{name}'...") result = await self.available_tools.execute(name=name, tool_input=args) # Handle special tools await self._handle_special_tool(name=name, result=result) # Check if result is a ToolResult with base64_image if hasattr(result, "base64_image") and result.base64_image: # Store the base64_image for later use in tool_message self._current_base64_image = result.base64_image # Format result for display (standard case) observation = ( f"Observed output of cmd `{name}` 
executed:\n{str(result)}" if result else f"Cmd `{name}` completed with no output" ) return observation except json.JSONDecodeError: error_msg = f"Error parsing arguments for {name}: Invalid JSON format" logger.error( f"📝 Oops! The arguments for '{name}' don't make sense - invalid JSON, arguments:{command.function.arguments}" ) return f"Error: {error_msg}" except Exception as e: error_msg = f"⚠️ Tool '{name}' encountered a problem: {str(e)}" logger.exception(error_msg) return f"Error: {error_msg}" async def _handle_special_tool(self, name: str, result: Any, **kwargs): """Handle special tool execution and state changes""" if not self._is_special_tool(name): return if self._should_finish_execution(name=name, result=result, **kwargs): # Set agent state to finished logger.info(f"🏁 Special tool '{name}' has completed the task!") self.state = AgentState.FINISHED @staticmethod def _should_finish_execution(**kwargs) -> bool: """Determine if tool execution should finish the agent""" return True def _is_special_tool(self, name: str) -> bool: """Check if tool name is in special tools list""" return name.lower() in [n.lower() for n in self.special_tool_names] async def cleanup(self): """Clean up resources used by the agent's tools.""" logger.info(f"🧹 Cleaning up resources for agent '{self.name}'...") for tool_name, tool_instance in self.available_tools.tool_map.items(): if hasattr(tool_instance, "cleanup") and asyncio.iscoroutinefunction( tool_instance.cleanup ): try: logger.debug(f"🧼 Cleaning up tool: {tool_name}") await tool_instance.cleanup() except Exception as e: logger.error( f"🚨 Error cleaning up tool '{tool_name}': {e}", exc_info=True ) logger.info(f"✨ Cleanup complete for agent '{self.name}'.") async def run(self, request: Optional[str] = None) -> str: """Run the agent with cleanup when done.""" try: return await super().run(request) finally: await self.cleanup()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/swe.py
app/agent/swe.py
from typing import List from pydantic import Field from app.agent.toolcall import ToolCallAgent from app.prompt.swe import SYSTEM_PROMPT from app.tool import Bash, StrReplaceEditor, Terminate, ToolCollection class SWEAgent(ToolCallAgent): """An agent that implements the SWEAgent paradigm for executing code and natural conversations.""" name: str = "swe" description: str = "an autonomous AI programmer that interacts directly with the computer to solve tasks." system_prompt: str = SYSTEM_PROMPT next_step_prompt: str = "" available_tools: ToolCollection = ToolCollection( Bash(), StrReplaceEditor(), Terminate() ) special_tool_names: List[str] = Field(default_factory=lambda: [Terminate().name]) max_steps: int = 20
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/data_analysis.py
app/agent/data_analysis.py
from pydantic import Field from app.agent.toolcall import ToolCallAgent from app.config import config from app.prompt.visualization import NEXT_STEP_PROMPT, SYSTEM_PROMPT from app.tool import Terminate, ToolCollection from app.tool.chart_visualization.chart_prepare import VisualizationPrepare from app.tool.chart_visualization.data_visualization import DataVisualization from app.tool.chart_visualization.python_execute import NormalPythonExecute class DataAnalysis(ToolCallAgent): """ A data analysis agent that uses planning to solve various data analysis tasks. This agent extends ToolCallAgent with a comprehensive set of tools and capabilities, including Data Analysis, Chart Visualization, Data Report. """ name: str = "Data_Analysis" description: str = "An analytical agent that utilizes python and data visualization tools to solve diverse data analysis tasks" system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root) next_step_prompt: str = NEXT_STEP_PROMPT max_observe: int = 15000 max_steps: int = 20 # Add general-purpose tools to the tool collection available_tools: ToolCollection = Field( default_factory=lambda: ToolCollection( NormalPythonExecute(), VisualizationPrepare(), DataVisualization(), Terminate(), ) )
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/manus.py
app/agent/manus.py
from typing import Dict, List, Optional

from pydantic import Field, model_validator

from app.agent.browser import BrowserContextHelper
from app.agent.toolcall import ToolCallAgent
from app.config import config
from app.logger import logger
from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT
from app.tool import Terminate, ToolCollection
from app.tool.ask_human import AskHuman
from app.tool.browser_use_tool import BrowserUseTool
from app.tool.mcp import MCPClients, MCPClientTool
from app.tool.python_execute import PythonExecute
from app.tool.str_replace_editor import StrReplaceEditor


class Manus(ToolCallAgent):
    """A versatile general-purpose agent with support for both local and MCP tools."""

    name: str = "Manus"
    description: str = "A versatile agent that can solve various tasks using multiple tools including MCP-based tools"

    system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)
    next_step_prompt: str = NEXT_STEP_PROMPT

    max_observe: int = 10000
    max_steps: int = 20

    # MCP clients for remote tool access
    mcp_clients: MCPClients = Field(default_factory=MCPClients)

    # Add general-purpose tools to the tool collection
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(
            PythonExecute(),
            BrowserUseTool(),
            StrReplaceEditor(),
            AskHuman(),
            Terminate(),
        )
    )

    special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])
    browser_context_helper: Optional[BrowserContextHelper] = None

    # Track connected MCP servers: server_id -> url/command
    connected_servers: Dict[str, str] = Field(default_factory=dict)
    _initialized: bool = False

    @model_validator(mode="after")
    def initialize_helper(self) -> "Manus":
        """Initialize basic components synchronously."""
        self.browser_context_helper = BrowserContextHelper(self)
        return self

    @classmethod
    async def create(cls, **kwargs) -> "Manus":
        """Factory method to create and properly initialize a Manus instance.

        Prefer this over the constructor: MCP server connections are async and
        cannot be established inside __init__/validators.
        """
        instance = cls(**kwargs)
        await instance.initialize_mcp_servers()
        instance._initialized = True
        return instance

    async def initialize_mcp_servers(self) -> None:
        """Initialize connections to configured MCP servers.

        Connection failures are logged and skipped so one bad server does not
        prevent the agent from starting.
        """
        for server_id, server_config in config.mcp_config.servers.items():
            try:
                if server_config.type == "sse":
                    if server_config.url:
                        await self.connect_mcp_server(server_config.url, server_id)
                        logger.info(
                            f"Connected to MCP server {server_id} at {server_config.url}"
                        )
                elif server_config.type == "stdio":
                    if server_config.command:
                        await self.connect_mcp_server(
                            server_config.command,
                            server_id,
                            use_stdio=True,
                            stdio_args=server_config.args,
                        )
                        logger.info(
                            f"Connected to MCP server {server_id} using command {server_config.command}"
                        )
            except Exception as e:
                logger.error(f"Failed to connect to MCP server {server_id}: {e}")

    async def connect_mcp_server(
        self,
        server_url: str,
        server_id: str = "",
        use_stdio: bool = False,
        # Fixed annotation: the default is None, so the type must be Optional
        # (was `List[str] = None`).
        stdio_args: Optional[List[str]] = None,
    ) -> None:
        """Connect to an MCP server and add its tools.

        Args:
            server_url: Server URL (SSE) or command to execute (stdio).
            server_id: Identifier for the server; falls back to server_url.
            use_stdio: Use stdio transport instead of SSE.
            stdio_args: Extra command arguments for stdio transport.
        """
        if use_stdio:
            await self.mcp_clients.connect_stdio(
                server_url, stdio_args or [], server_id
            )
            self.connected_servers[server_id or server_url] = server_url
        else:
            await self.mcp_clients.connect_sse(server_url, server_id)
            self.connected_servers[server_id or server_url] = server_url

        # Update available tools with only the new tools from this server
        new_tools = [
            tool for tool in self.mcp_clients.tools if tool.server_id == server_id
        ]
        self.available_tools.add_tools(*new_tools)

    async def disconnect_mcp_server(self, server_id: str = "") -> None:
        """Disconnect from an MCP server and remove its tools.

        With an empty server_id, disconnects from every connected server.
        """
        await self.mcp_clients.disconnect(server_id)
        if server_id:
            self.connected_servers.pop(server_id, None)
        else:
            self.connected_servers.clear()

        # Rebuild available tools without the disconnected server's tools
        base_tools = [
            tool
            for tool in self.available_tools.tools
            if not isinstance(tool, MCPClientTool)
        ]
        self.available_tools = ToolCollection(*base_tools)
        self.available_tools.add_tools(*self.mcp_clients.tools)

    async def cleanup(self):
        """Clean up Manus agent resources."""
        if self.browser_context_helper:
            await self.browser_context_helper.cleanup_browser()
        # Disconnect from all MCP servers only if we were initialized
        if self._initialized:
            await self.disconnect_mcp_server()
            self._initialized = False

    async def think(self) -> bool:
        """Process current state and decide next actions with appropriate context.

        When a recent message used the browser tool, the next-step prompt is
        temporarily swapped for one enriched with live browser state.
        """
        if not self._initialized:
            await self.initialize_mcp_servers()
            self._initialized = True

        original_prompt = self.next_step_prompt
        recent_messages = self.memory.messages[-3:] if self.memory.messages else []
        browser_in_use = any(
            tc.function.name == BrowserUseTool().name
            for msg in recent_messages
            if msg.tool_calls
            for tc in msg.tool_calls
        )

        if browser_in_use:
            self.next_step_prompt = (
                await self.browser_context_helper.format_next_step_prompt()
            )

        result = await super().think()

        # Restore original prompt
        self.next_step_prompt = original_prompt

        return result
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/react.py
app/agent/react.py
from abc import ABC, abstractmethod
from typing import Optional

from pydantic import Field

from app.agent.base import BaseAgent
from app.llm import LLM
from app.schema import AgentState, Memory


class ReActAgent(BaseAgent, ABC):
    """Abstract think/act (ReAct) agent: each step reasons, then optionally acts."""

    name: str
    description: Optional[str] = None

    system_prompt: Optional[str] = None
    next_step_prompt: Optional[str] = None

    llm: Optional[LLM] = Field(default_factory=LLM)
    memory: Memory = Field(default_factory=Memory)
    state: AgentState = AgentState.IDLE

    max_steps: int = 10
    current_step: int = 0

    @abstractmethod
    async def think(self) -> bool:
        """Process current state and decide next action"""

    @abstractmethod
    async def act(self) -> str:
        """Execute decided actions"""

    async def step(self) -> str:
        """Run one ReAct cycle: think, then act only if thinking asked for it."""
        if not await self.think():
            return "Thinking complete - no action needed"
        return await self.act()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/mcp.py
app/agent/mcp.py
from typing import Any, Dict, List, Optional, Tuple

from pydantic import Field

from app.agent.toolcall import ToolCallAgent
from app.logger import logger
from app.prompt.mcp import MULTIMEDIA_RESPONSE_PROMPT, NEXT_STEP_PROMPT, SYSTEM_PROMPT
from app.schema import AgentState, Message
from app.tool.base import ToolResult
from app.tool.mcp import MCPClients


class MCPAgent(ToolCallAgent):
    """Agent that drives tools exposed by an MCP (Model Context Protocol) server.

    Connects over SSE or stdio transport and surfaces the server's tools
    through the standard agent tool interface, periodically re-syncing the
    tool list with the server.
    """

    name: str = "mcp_agent"
    description: str = "An agent that connects to an MCP server and uses its tools."

    system_prompt: str = SYSTEM_PROMPT
    next_step_prompt: str = NEXT_STEP_PROMPT

    # MCP client collection; it doubles as `available_tools` after initialize()
    mcp_clients: MCPClients = Field(default_factory=MCPClients)
    available_tools: MCPClients = None  # Will be set in initialize()

    max_steps: int = 20
    connection_type: str = "stdio"  # "stdio" or "sse"

    # Last-seen tool input schemas, keyed by tool name — used to diff for
    # added/removed/changed tools on refresh
    tool_schemas: Dict[str, Dict[str, Any]] = Field(default_factory=dict)
    _refresh_tools_interval: int = 5  # Refresh tools every N steps

    # Special tool names that should trigger termination
    special_tool_names: List[str] = Field(default_factory=lambda: ["terminate"])

    async def initialize(
        self,
        connection_type: Optional[str] = None,
        server_url: Optional[str] = None,
        command: Optional[str] = None,
        args: Optional[List[str]] = None,
    ) -> None:
        """Open the MCP connection and register the server's tools.

        Args:
            connection_type: "stdio" or "sse"; overrides the field when given.
            server_url: URL of the MCP server (required for SSE).
            command: Command to run (required for stdio).
            args: Arguments for the stdio command.

        Raises:
            ValueError: On a missing required argument or unknown transport.
        """
        if connection_type:
            self.connection_type = connection_type

        if self.connection_type == "sse":
            if not server_url:
                raise ValueError("Server URL is required for SSE connection")
            await self.mcp_clients.connect_sse(server_url=server_url)
        elif self.connection_type == "stdio":
            if not command:
                raise ValueError("Command is required for stdio connection")
            await self.mcp_clients.connect_stdio(command=command, args=args or [])
        else:
            raise ValueError(f"Unsupported connection type: {self.connection_type}")

        # The client collection is this agent's tool collection
        self.available_tools = self.mcp_clients

        # Snapshot the initial schemas so later refreshes can diff against them
        await self._refresh_tools()

        # Tell the model which tools the server currently offers
        tools_info = ", ".join(list(self.mcp_clients.tool_map.keys()))
        self.memory.add_message(
            Message.system_message(
                f"{self.system_prompt}\n\nAvailable MCP tools: {tools_info}"
            )
        )

    async def _refresh_tools(self) -> Tuple[List[str], List[str]]:
        """Re-fetch tool schemas from the server and diff against the snapshot.

        Returns:
            A tuple of (added_tools, removed_tools) name lists.
        """
        if not self.mcp_clients.sessions:
            return [], []

        # Query the server directly for its current tool schemas
        response = await self.mcp_clients.list_tools()
        latest = {tool.name: tool.inputSchema for tool in response.tools}

        latest_names = set(latest)
        known_names = set(self.tool_schemas)
        added_tools = list(latest_names - known_names)
        removed_tools = list(known_names - latest_names)

        # Tools present in both snapshots whose schema changed
        changed_tools = [
            tool_name
            for tool_name in latest_names & known_names
            if latest[tool_name] != self.tool_schemas.get(tool_name)
        ]

        # Replace the stored snapshot
        self.tool_schemas = latest

        # Log and surface changes to the model via system messages
        if added_tools:
            logger.info(f"Added MCP tools: {added_tools}")
            self.memory.add_message(
                Message.system_message(f"New tools available: {', '.join(added_tools)}")
            )
        if removed_tools:
            logger.info(f"Removed MCP tools: {removed_tools}")
            self.memory.add_message(
                Message.system_message(
                    f"Tools no longer available: {', '.join(removed_tools)}"
                )
            )
        if changed_tools:
            logger.info(f"Changed MCP tools: {changed_tools}")

        return added_tools, removed_tools

    async def think(self) -> bool:
        """Finish when the MCP service vanishes; otherwise defer to the parent."""
        if not self.mcp_clients.sessions or not self.mcp_clients.tool_map:
            logger.info("MCP service is no longer available, ending interaction")
            self.state = AgentState.FINISHED
            return False

        # Periodically re-sync the tool list with the server
        if self.current_step % self._refresh_tools_interval == 0:
            await self._refresh_tools()
            # All tools removed indicates shutdown
            if not self.mcp_clients.tool_map:
                logger.info("MCP service has shut down, ending interaction")
                self.state = AgentState.FINISHED
                return False

        return await super().think()

    async def _handle_special_tool(self, name: str, result: Any, **kwargs) -> None:
        """Handle special tool execution, then surface multimedia responses."""
        await super()._handle_special_tool(name, result, **kwargs)

        # Multimedia tool output: point the model at the attached image
        if isinstance(result, ToolResult) and result.base64_image:
            self.memory.add_message(
                Message.system_message(
                    MULTIMEDIA_RESPONSE_PROMPT.format(tool_name=name)
                )
            )

    def _should_finish_execution(self, name: str, **kwargs) -> bool:
        """Finish only when the 'terminate' tool was invoked."""
        return name.lower() == "terminate"

    async def cleanup(self) -> None:
        """Close the MCP connection if one is open."""
        if self.mcp_clients.sessions:
            await self.mcp_clients.disconnect()
            logger.info("MCP connection closed")

    async def run(self, request: Optional[str] = None) -> str:
        """Run the agent, guaranteeing cleanup even on failure."""
        try:
            return await super().run(request)
        finally:
            # Ensure cleanup happens even if there's an error
            await self.cleanup()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/browser.py
app/agent/browser.py
import json
from typing import TYPE_CHECKING, Optional

from pydantic import Field, model_validator

from app.agent.toolcall import ToolCallAgent
from app.logger import logger
from app.prompt.browser import NEXT_STEP_PROMPT, SYSTEM_PROMPT
from app.schema import Message, ToolChoice
from app.tool import BrowserUseTool, Terminate, ToolCollection
from app.tool.sandbox.sb_browser_tool import SandboxBrowserTool

# Avoid circular import if BrowserAgent needs BrowserContextHelper
if TYPE_CHECKING:
    from app.agent.base import BaseAgent  # Or wherever memory is defined


class BrowserContextHelper:
    """Reads live browser state off an agent's browser tool and builds prompts."""

    def __init__(self, agent: "BaseAgent"):
        self.agent = agent
        # Screenshot captured by the last state fetch, consumed on prompt build
        self._current_base64_image: Optional[str] = None

    async def get_browser_state(self) -> Optional[dict]:
        """Return the browser tool's current state as a dict, or None on any failure."""
        # Prefer the local browser tool; fall back to the sandbox variant
        browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)
        if not browser_tool:
            browser_tool = self.agent.available_tools.get_tool(
                SandboxBrowserTool().name
            )
        if not browser_tool or not hasattr(browser_tool, "get_current_state"):
            logger.warning("BrowserUseTool not found or doesn't have get_current_state")
            return None
        try:
            result = await browser_tool.get_current_state()
            if result.error:
                logger.debug(f"Browser state error: {result.error}")
                return None
            # Keep the screenshot (if any) for the next prompt build
            if hasattr(result, "base64_image") and result.base64_image:
                self._current_base64_image = result.base64_image
            else:
                self._current_base64_image = None
            return json.loads(result.output)
        except Exception as e:
            logger.debug(f"Failed to get browser state: {str(e)}")
            return None

    async def format_next_step_prompt(self) -> str:
        """Gets browser state and formats the browser prompt."""
        browser_state = await self.get_browser_state()
        url_info, tabs_info, content_above_info, content_below_info = "", "", "", ""
        results_info = ""  # Or get from agent if needed elsewhere

        if browser_state and not browser_state.get("error"):
            url_info = f"\n   URL: {browser_state.get('url', 'N/A')}\n   Title: {browser_state.get('title', 'N/A')}"
            tabs = browser_state.get("tabs", [])
            if tabs:
                tabs_info = f"\n   {len(tabs)} tab(s) available"
            pixels_above = browser_state.get("pixels_above", 0)
            pixels_below = browser_state.get("pixels_below", 0)
            if pixels_above > 0:
                content_above_info = f" ({pixels_above} pixels)"
            if pixels_below > 0:
                content_below_info = f" ({pixels_below} pixels)"

        if self._current_base64_image:
            image_message = Message.user_message(
                content="Current browser screenshot:",
                base64_image=self._current_base64_image,
            )
            self.agent.memory.add_message(image_message)
            self._current_base64_image = None  # Consume the image after adding

        return NEXT_STEP_PROMPT.format(
            url_placeholder=url_info,
            tabs_placeholder=tabs_info,
            content_above_placeholder=content_above_info,
            content_below_placeholder=content_below_info,
            results_placeholder=results_info,
        )

    async def cleanup_browser(self):
        """Release browser resources held by the agent's browser tool, if any."""
        browser_tool = self.agent.available_tools.get_tool(BrowserUseTool().name)
        if browser_tool and hasattr(browser_tool, "cleanup"):
            await browser_tool.cleanup()


class BrowserAgent(ToolCallAgent):
    """
    A browser agent that uses the browser_use library to control a browser.

    This agent can navigate web pages, interact with elements, fill forms,
    extract content, and perform other browser-based actions to accomplish tasks.
    """

    name: str = "browser"
    description: str = "A browser agent that can control a browser to accomplish tasks"

    system_prompt: str = SYSTEM_PROMPT
    next_step_prompt: str = NEXT_STEP_PROMPT

    max_observe: int = 10000
    max_steps: int = 20

    # Configure the available tools
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(BrowserUseTool(), Terminate())
    )

    # Use Auto for tool choice to allow both tool usage and free-form responses
    tool_choices: ToolChoice = ToolChoice.AUTO
    special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])

    browser_context_helper: Optional[BrowserContextHelper] = None

    @model_validator(mode="after")
    def initialize_helper(self) -> "BrowserAgent":
        """Attach a BrowserContextHelper once the model is constructed."""
        self.browser_context_helper = BrowserContextHelper(self)
        return self

    async def think(self) -> bool:
        """Inject live browser state into the next-step prompt, then defer to parent."""
        self.next_step_prompt = (
            await self.browser_context_helper.format_next_step_prompt()
        )
        return await super().think()

    async def cleanup(self):
        """Clean up browser agent resources via the context helper."""
        # NOTE(review): this does not chain to super().cleanup(); the helper
        # alone releases the browser tool.
        await self.browser_context_helper.cleanup_browser()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/sandbox_agent.py
app/agent/sandbox_agent.py
from typing import Dict, List, Optional

from pydantic import Field, model_validator

from app.agent.browser import BrowserContextHelper
from app.agent.toolcall import ToolCallAgent
from app.config import config
from app.daytona.sandbox import create_sandbox, delete_sandbox
from app.daytona.tool_base import SandboxToolsBase
from app.logger import logger
from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT
from app.tool import Terminate, ToolCollection
from app.tool.ask_human import AskHuman
from app.tool.mcp import MCPClients, MCPClientTool
from app.tool.sandbox.sb_browser_tool import SandboxBrowserTool
from app.tool.sandbox.sb_files_tool import SandboxFilesTool
from app.tool.sandbox.sb_shell_tool import SandboxShellTool
from app.tool.sandbox.sb_vision_tool import SandboxVisionTool


class SandboxManus(ToolCallAgent):
    """A versatile general-purpose agent with support for both local and MCP tools."""

    name: str = "SandboxManus"
    description: str = "A versatile agent that can solve various tasks using multiple sandbox-tools including MCP-based tools"

    system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)
    next_step_prompt: str = NEXT_STEP_PROMPT

    max_observe: int = 10000
    max_steps: int = 20

    # MCP clients for remote tool access
    mcp_clients: MCPClients = Field(default_factory=MCPClients)

    # Add general-purpose tools to the tool collection
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(
            # PythonExecute(),
            # BrowserUseTool(),
            # StrReplaceEditor(),
            AskHuman(),
            Terminate(),
        )
    )

    special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])
    browser_context_helper: Optional[BrowserContextHelper] = None

    # Track connected MCP servers: server_id -> url/command
    connected_servers: Dict[str, str] = Field(default_factory=dict)
    _initialized: bool = False

    # sandbox_id -> {"vnc": url, "website": url}
    sandbox_link: Optional[dict[str, dict[str, str]]] = Field(default_factory=dict)

    @model_validator(mode="after")
    def initialize_helper(self) -> "SandboxManus":
        """Initialize basic components synchronously."""
        self.browser_context_helper = BrowserContextHelper(self)
        return self

    @classmethod
    async def create(cls, **kwargs) -> "SandboxManus":
        """Factory method to create and properly initialize a Manus instance."""
        instance = cls(**kwargs)
        await instance.initialize_mcp_servers()
        await instance.initialize_sandbox_tools()
        instance._initialized = True
        return instance

    async def initialize_sandbox_tools(
        self,
        # NOTE(review): default is read from config at import time — a config
        # change after import won't be picked up; confirm this is intended.
        password: str = config.daytona.VNC_password,
    ) -> None:
        """Create a Daytona sandbox and register its browser/file/shell/vision tools.

        Args:
            password: VNC password for the sandbox; must be non-empty.

        Raises:
            ValueError: If no password is provided.
            Exception: Re-raised on any sandbox-creation failure.
        """
        try:
            # Create a new sandbox
            if password:
                sandbox = create_sandbox(password=password)
                self.sandbox = sandbox
            else:
                raise ValueError("password must be provided")

            vnc_link = sandbox.get_preview_link(6080)
            website_link = sandbox.get_preview_link(8080)
            vnc_url = vnc_link.url if hasattr(vnc_link, "url") else str(vnc_link)
            website_url = (
                website_link.url if hasattr(website_link, "url") else str(website_link)
            )

            # Get the actual sandbox_id from the created sandbox
            actual_sandbox_id = sandbox.id if hasattr(sandbox, "id") else "new_sandbox"
            if not self.sandbox_link:
                self.sandbox_link = {}
            self.sandbox_link[actual_sandbox_id] = {
                "vnc": vnc_url,
                "website": website_url,
            }

            logger.info(f"VNC URL: {vnc_url}")
            logger.info(f"Website URL: {website_url}")
            SandboxToolsBase._urls_printed = True

            sb_tools = [
                SandboxBrowserTool(sandbox),
                SandboxFilesTool(sandbox),
                SandboxShellTool(sandbox),
                SandboxVisionTool(sandbox),
            ]
            self.available_tools.add_tools(*sb_tools)
        except Exception as e:
            logger.error(f"Error initializing sandbox tools: {e}")
            raise

    async def initialize_mcp_servers(self) -> None:
        """Initialize connections to configured MCP servers.

        Failures are logged per server so one bad server does not block startup.
        """
        for server_id, server_config in config.mcp_config.servers.items():
            try:
                if server_config.type == "sse":
                    if server_config.url:
                        await self.connect_mcp_server(server_config.url, server_id)
                        logger.info(
                            f"Connected to MCP server {server_id} at {server_config.url}"
                        )
                elif server_config.type == "stdio":
                    if server_config.command:
                        await self.connect_mcp_server(
                            server_config.command,
                            server_id,
                            use_stdio=True,
                            stdio_args=server_config.args,
                        )
                        logger.info(
                            f"Connected to MCP server {server_id} using command {server_config.command}"
                        )
            except Exception as e:
                logger.error(f"Failed to connect to MCP server {server_id}: {e}")

    async def connect_mcp_server(
        self,
        server_url: str,
        server_id: str = "",
        use_stdio: bool = False,
        # Fixed annotation: default is None, so the type must be Optional
        # (was `List[str] = None`).
        stdio_args: Optional[List[str]] = None,
    ) -> None:
        """Connect to an MCP server and add its tools."""
        if use_stdio:
            await self.mcp_clients.connect_stdio(
                server_url, stdio_args or [], server_id
            )
            self.connected_servers[server_id or server_url] = server_url
        else:
            await self.mcp_clients.connect_sse(server_url, server_id)
            self.connected_servers[server_id or server_url] = server_url

        # Update available tools with only the new tools from this server
        new_tools = [
            tool for tool in self.mcp_clients.tools if tool.server_id == server_id
        ]
        self.available_tools.add_tools(*new_tools)

    async def disconnect_mcp_server(self, server_id: str = "") -> None:
        """Disconnect from an MCP server (or all servers) and remove its tools."""
        await self.mcp_clients.disconnect(server_id)
        if server_id:
            self.connected_servers.pop(server_id, None)
        else:
            self.connected_servers.clear()

        # Rebuild available tools without the disconnected server's tools
        base_tools = [
            tool
            for tool in self.available_tools.tools
            if not isinstance(tool, MCPClientTool)
        ]
        self.available_tools = ToolCollection(*base_tools)
        self.available_tools.add_tools(*self.mcp_clients.tools)

    async def delete_sandbox(self, sandbox_id: str) -> None:
        """Delete a sandbox by ID and drop its cached preview links."""
        try:
            # Calls the module-level delete_sandbox from app.daytona.sandbox
            # (the method name shadows it only via `self.`)
            await delete_sandbox(sandbox_id)
            logger.info(f"Sandbox {sandbox_id} deleted successfully")
            if sandbox_id in self.sandbox_link:
                del self.sandbox_link[sandbox_id]
        except Exception as e:
            logger.error(f"Error deleting sandbox {sandbox_id}: {e}")
            raise e

    async def cleanup(self):
        """Clean up Manus agent resources."""
        if self.browser_context_helper:
            await self.browser_context_helper.cleanup_browser()
        # Disconnect from all MCP servers only if we were initialized
        if self._initialized:
            await self.disconnect_mcp_server()
            # `self.sandbox` is only assigned once initialize_sandbox_tools()
            # succeeds; getattr prevents an AttributeError when cleanup runs
            # after a failed/partial initialization.
            sandbox = getattr(self, "sandbox", None)
            await self.delete_sandbox(sandbox.id if sandbox else "unknown")
            self._initialized = False

    async def think(self) -> bool:
        """Process current state and decide next actions with appropriate context.

        When a recent message used the sandbox browser tool, the next-step
        prompt is temporarily enriched with live browser state.
        """
        if not self._initialized:
            await self.initialize_mcp_servers()
            self._initialized = True

        original_prompt = self.next_step_prompt
        recent_messages = self.memory.messages[-3:] if self.memory.messages else []
        browser_in_use = any(
            tc.function.name == SandboxBrowserTool().name
            for msg in recent_messages
            if msg.tool_calls
            for tc in msg.tool_calls
        )

        if browser_in_use:
            self.next_step_prompt = (
                await self.browser_context_helper.format_next_step_prompt()
            )

        result = await super().think()

        # Restore original prompt
        self.next_step_prompt = original_prompt

        return result
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/__init__.py
app/agent/__init__.py
"""Public agent classes re-exported by the `app.agent` package."""

from app.agent.base import BaseAgent
from app.agent.browser import BrowserAgent
from app.agent.mcp import MCPAgent
from app.agent.react import ReActAgent
from app.agent.swe import SWEAgent
from app.agent.toolcall import ToolCallAgent

# Explicit public API for `from app.agent import *`
__all__ = [
    "BaseAgent",
    "BrowserAgent",
    "ReActAgent",
    "SWEAgent",
    "ToolCallAgent",
    "MCPAgent",
]
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/agent/base.py
app/agent/base.py
from abc import ABC, abstractmethod
from contextlib import asynccontextmanager
from typing import List, Optional

from pydantic import BaseModel, Field, model_validator

from app.llm import LLM
from app.logger import logger
from app.sandbox.client import SANDBOX_CLIENT
from app.schema import ROLE_TYPE, AgentState, Memory, Message


class BaseAgent(BaseModel, ABC):
    """Abstract base class for managing agent state and execution.

    Provides state transitions, memory management, and a step-based execution
    loop.  Subclasses must implement `step`.
    """

    # Core attributes
    name: str = Field(..., description="Unique name of the agent")
    description: Optional[str] = Field(None, description="Optional agent description")

    # Prompts
    system_prompt: Optional[str] = Field(
        None, description="System-level instruction prompt"
    )
    next_step_prompt: Optional[str] = Field(
        None, description="Prompt for determining next action"
    )

    # Dependencies
    llm: LLM = Field(default_factory=LLM, description="Language model instance")
    memory: Memory = Field(default_factory=Memory, description="Agent's memory store")
    state: AgentState = Field(
        default=AgentState.IDLE, description="Current agent state"
    )

    # Execution control
    max_steps: int = Field(default=10, description="Maximum steps before termination")
    current_step: int = Field(default=0, description="Current step in execution")

    # Number of identical assistant responses that counts as "stuck"
    duplicate_threshold: int = 2

    class Config:
        arbitrary_types_allowed = True
        extra = "allow"  # Allow extra fields for flexibility in subclasses

    @model_validator(mode="after")
    def initialize_agent(self) -> "BaseAgent":
        """Backfill llm/memory with sane defaults when not provided."""
        if self.llm is None or not isinstance(self.llm, LLM):
            self.llm = LLM(config_name=self.name.lower())
        if not isinstance(self.memory, Memory):
            self.memory = Memory()
        return self

    @asynccontextmanager
    async def state_context(self, new_state: AgentState):
        """Temporarily run the agent in `new_state`.

        Args:
            new_state: The state to transition to during the context.

        Yields:
            None: Allows execution within the new state.

        Raises:
            ValueError: If the new_state is invalid.
        """
        if not isinstance(new_state, AgentState):
            raise ValueError(f"Invalid state: {new_state}")

        previous_state = self.state
        self.state = new_state
        try:
            yield
        except Exception as e:
            self.state = AgentState.ERROR  # Transition to ERROR on failure
            raise e
        finally:
            # Always revert, even after an error (overwrites ERROR set above)
            self.state = previous_state

    def update_memory(
        self,
        role: ROLE_TYPE,  # type: ignore
        content: str,
        base64_image: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Append a message to the agent's memory.

        Args:
            role: Sender role (user, system, assistant, tool).
            content: The message content.
            base64_image: Optional base64 encoded image.
            **kwargs: Extra factory arguments (e.g. tool_call_id for tool
                messages); forwarded only for the "tool" role.

        Raises:
            ValueError: If the role is unsupported.
        """
        factories = {
            "user": Message.user_message,
            "system": Message.system_message,
            "assistant": Message.assistant_message,
            "tool": lambda content, **kw: Message.tool_message(content, **kw),
        }
        factory = factories.get(role)
        if factory is None:
            raise ValueError(f"Unsupported message role: {role}")

        # Only tool messages receive the extra kwargs
        extra = kwargs if role == "tool" else {}
        self.memory.add_message(factory(content, base64_image=base64_image, **extra))

    async def run(self, request: Optional[str] = None) -> str:
        """Execute the agent's main loop asynchronously.

        Args:
            request: Optional initial user request to process.

        Returns:
            A string summarizing the execution results.

        Raises:
            RuntimeError: If the agent is not in IDLE state at start.
        """
        if self.state != AgentState.IDLE:
            raise RuntimeError(f"Cannot run agent from state: {self.state}")

        if request:
            self.update_memory("user", request)

        results: List[str] = []
        async with self.state_context(AgentState.RUNNING):
            while (
                self.current_step < self.max_steps
                and self.state != AgentState.FINISHED
            ):
                self.current_step += 1
                logger.info(f"Executing step {self.current_step}/{self.max_steps}")
                step_result = await self.step()

                # Check for stuck state
                if self.is_stuck():
                    self.handle_stuck_state()

                results.append(f"Step {self.current_step}: {step_result}")

            if self.current_step >= self.max_steps:
                # Reset for a future run; state_context restores IDLE on exit
                self.current_step = 0
                self.state = AgentState.IDLE
                results.append(f"Terminated: Reached max steps ({self.max_steps})")
        await SANDBOX_CLIENT.cleanup()
        return "\n".join(results) if results else "No steps executed"

    @abstractmethod
    async def step(self) -> str:
        """Execute a single step in the agent's workflow.

        Must be implemented by subclasses to define specific behavior.
        """

    def handle_stuck_state(self):
        """Prepend a strategy-change hint to the next-step prompt when stuck."""
        stuck_prompt = "Observed duplicate responses. Consider new strategies and avoid repeating ineffective paths already attempted."
        self.next_step_prompt = f"{stuck_prompt}\n{self.next_step_prompt}"
        logger.warning(f"Agent detected stuck state. Added prompt: {stuck_prompt}")

    def is_stuck(self) -> bool:
        """Return True when the last assistant content repeats past the threshold."""
        if len(self.memory.messages) < 2:
            return False

        last_message = self.memory.messages[-1]
        if not last_message.content:
            return False

        # Count identical content occurrences among earlier assistant messages
        duplicate_count = sum(
            1
            for msg in reversed(self.memory.messages[:-1])
            if msg.role == "assistant" and msg.content == last_message.content
        )

        return duplicate_count >= self.duplicate_threshold

    @property
    def messages(self) -> List[Message]:
        """Retrieve a list of messages from the agent's memory."""
        return self.memory.messages

    @messages.setter
    def messages(self, value: List[Message]):
        """Set the list of messages in the agent's memory."""
        self.memory.messages = value
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/mcp/__init__.py
app/mcp/__init__.py
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/mcp/server.py
app/mcp/server.py
import logging
import sys


# Route logs to stderr so stdout stays clean for the MCP stdio transport.
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stderr)])

import argparse
import asyncio
import atexit
import json
from inspect import Parameter, Signature
from typing import Any, Dict, Optional

from mcp.server.fastmcp import FastMCP

from app.logger import logger
from app.tool.base import BaseTool
from app.tool.bash import Bash
from app.tool.browser_use_tool import BrowserUseTool
from app.tool.str_replace_editor import StrReplaceEditor
from app.tool.terminate import Terminate


class MCPServer:
    """MCP Server implementation with tool registration and management."""

    def __init__(self, name: str = "openmanus"):
        self.server = FastMCP(name)
        self.tools: Dict[str, BaseTool] = {}

        # Initialize standard tools
        self.tools["bash"] = Bash()
        self.tools["browser"] = BrowserUseTool()
        self.tools["editor"] = StrReplaceEditor()
        self.tools["terminate"] = Terminate()

    def register_tool(self, tool: BaseTool, method_name: Optional[str] = None) -> None:
        """Register a tool with parameter validation and documentation.

        Args:
            tool: The tool instance whose ``execute`` coroutine is exposed.
            method_name: Optional override for the registered tool name;
                defaults to ``tool.name``.
        """
        tool_name = method_name or tool.name
        tool_param = tool.to_param()
        tool_function = tool_param["function"]

        # Define the async function to be registered
        async def tool_method(**kwargs):
            logger.info(f"Executing {tool_name}: {kwargs}")
            result = await tool.execute(**kwargs)
            logger.info(f"Result of {tool_name}: {result}")

            # Handle different types of results (match original logic)
            if hasattr(result, "model_dump"):
                return json.dumps(result.model_dump())
            elif isinstance(result, dict):
                return json.dumps(result)
            return result

        # Set method metadata so FastMCP sees the real name/doc/signature
        # instead of the generic ``tool_method(**kwargs)`` wrapper.
        tool_method.__name__ = tool_name
        tool_method.__doc__ = self._build_docstring(tool_function)
        tool_method.__signature__ = self._build_signature(tool_function)

        # Store parameter schema (important for tools that access it programmatically)
        param_props = tool_function.get("parameters", {}).get("properties", {})
        required_params = tool_function.get("parameters", {}).get("required", [])
        tool_method._parameter_schema = {
            param_name: {
                "description": param_details.get("description", ""),
                "type": param_details.get("type", "any"),
                "required": param_name in required_params,
            }
            for param_name, param_details in param_props.items()
        }

        # Register with server
        self.server.tool()(tool_method)
        logger.info(f"Registered tool: {tool_name}")

    def _build_docstring(self, tool_function: dict) -> str:
        """Build a formatted docstring from tool function metadata.

        Returns the tool description followed by a ``Parameters:`` section
        listing each parameter with its type and required/optional status.
        """
        description = tool_function.get("description", "")
        param_props = tool_function.get("parameters", {}).get("properties", {})
        required_params = tool_function.get("parameters", {}).get("required", [])

        # Build docstring (match original format)
        docstring = description
        if param_props:
            docstring += "\n\nParameters:\n"
            for param_name, param_details in param_props.items():
                required_str = (
                    "(required)" if param_name in required_params else "(optional)"
                )
                param_type = param_details.get("type", "any")
                param_desc = param_details.get("description", "")
                docstring += (
                    f" {param_name} ({param_type}) {required_str}: {param_desc}\n"
                )

        return docstring

    def _build_signature(self, tool_function: dict) -> Signature:
        """Build a function signature from tool function metadata.

        Required JSON-Schema parameters get no default; optional ones
        default to ``None``. All parameters are keyword-only.
        """
        param_props = tool_function.get("parameters", {}).get("properties", {})
        required_params = tool_function.get("parameters", {}).get("required", [])

        parameters = []

        # Follow original type mapping
        for param_name, param_details in param_props.items():
            param_type = param_details.get("type", "")
            default = Parameter.empty if param_name in required_params else None

            # Map JSON Schema types to Python types (same as original)
            annotation = Any
            if param_type == "string":
                annotation = str
            elif param_type == "integer":
                annotation = int
            elif param_type == "number":
                annotation = float
            elif param_type == "boolean":
                annotation = bool
            elif param_type == "object":
                annotation = dict
            elif param_type == "array":
                annotation = list

            # Create parameter with same structure as original
            param = Parameter(
                name=param_name,
                kind=Parameter.KEYWORD_ONLY,
                default=default,
                annotation=annotation,
            )
            parameters.append(param)

        return Signature(parameters=parameters)

    async def cleanup(self) -> None:
        """Clean up server resources."""
        logger.info("Cleaning up resources")
        # Follow original cleanup logic - only clean browser tool
        if "browser" in self.tools and hasattr(self.tools["browser"], "cleanup"):
            await self.tools["browser"].cleanup()

    def register_all_tools(self) -> None:
        """Register all tools with the server."""
        for tool in self.tools.values():
            self.register_tool(tool)

    def run(self, transport: str = "stdio") -> None:
        """Run the MCP server.

        Args:
            transport: Transport mechanism passed through to FastMCP
                (currently only "stdio" is offered by the CLI).
        """
        # Register all tools
        self.register_all_tools()

        # Register cleanup function (match original behavior)
        atexit.register(lambda: asyncio.run(self.cleanup()))

        # Start server (with same logging as original)
        logger.info(f"Starting OpenManus server ({transport} mode)")
        self.server.run(transport=transport)


def parse_args() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="OpenManus MCP Server")
    parser.add_argument(
        "--transport",
        choices=["stdio"],
        default="stdio",
        # BUGFIX: the help text previously advertised "stdio or http", but
        # "http" is not an accepted choice — only stdio is supported.
        help="Communication method: stdio (default: stdio)",
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    # Create and run server (maintaining original flow)
    server = MCPServer()
    server.run(transport=args.transport)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/crawl4ai.py
app/tool/crawl4ai.py
"""
Crawl4AI Web Crawler Tool for OpenManus

This tool integrates Crawl4AI, a high-performance web crawler designed for LLMs
and AI agents, providing fast, precise, and AI-ready data extraction with clean
Markdown generation.
"""

import asyncio
from typing import List, Union
from urllib.parse import urlparse

from app.logger import logger
from app.tool.base import BaseTool, ToolResult


class Crawl4aiTool(BaseTool):
    """
    Web crawler tool powered by Crawl4AI.
    Provides clean markdown extraction optimized for AI processing.
    """

    name: str = "crawl4ai"
    description: str = """Web crawler that extracts clean, AI-ready content from web pages.

Features:
- Extracts clean markdown content optimized for LLMs
- Handles JavaScript-heavy sites and dynamic content
- Supports multiple URLs in a single request
- Fast and reliable with built-in error handling

Perfect for content analysis, research, and feeding web content to AI models."""

    parameters: dict = {
        "type": "object",
        "properties": {
            "urls": {
                "type": "array",
                "items": {"type": "string"},
                "description": "(required) List of URLs to crawl. Can be a single URL or multiple URLs.",
                "minItems": 1,
            },
            "timeout": {
                "type": "integer",
                "description": "(optional) Timeout in seconds for each URL. Default is 30.",
                "default": 30,
                "minimum": 5,
                "maximum": 120,
            },
            "bypass_cache": {
                "type": "boolean",
                "description": "(optional) Whether to bypass cache and fetch fresh content. Default is false.",
                "default": False,
            },
            "word_count_threshold": {
                "type": "integer",
                "description": "(optional) Minimum word count for content blocks. Default is 10.",
                "default": 10,
                "minimum": 1,
            },
        },
        "required": ["urls"],
    }

    async def execute(
        self,
        urls: Union[str, List[str]],
        timeout: int = 30,
        bypass_cache: bool = False,
        word_count_threshold: int = 10,
    ) -> ToolResult:
        """
        Execute web crawling for the specified URLs.

        Args:
            urls: Single URL string or list of URLs to crawl
            timeout: Timeout in seconds for each URL
            bypass_cache: Whether to bypass cache
            word_count_threshold: Minimum word count for content blocks

        Returns:
            ToolResult with crawl results
        """
        # Normalize URLs to list
        if isinstance(urls, str):
            url_list = [urls]
        else:
            url_list = urls

        # Validate URLs
        valid_urls = []
        for url in url_list:
            if self._is_valid_url(url):
                valid_urls.append(url)
            else:
                logger.warning(f"Invalid URL skipped: {url}")

        if not valid_urls:
            return ToolResult(error="No valid URLs provided")

        try:
            # Import crawl4ai components lazily so the tool degrades
            # gracefully (with a helpful error) when it is not installed.
            from crawl4ai import (
                AsyncWebCrawler,
                BrowserConfig,
                CacheMode,
                CrawlerRunConfig,
            )

            # Configure browser settings
            browser_config = BrowserConfig(
                headless=True,
                verbose=False,
                browser_type="chromium",
                ignore_https_errors=True,
                java_script_enabled=True,
            )

            # Configure crawler settings
            run_config = CrawlerRunConfig(
                cache_mode=CacheMode.BYPASS if bypass_cache else CacheMode.ENABLED,
                word_count_threshold=word_count_threshold,
                process_iframes=True,
                remove_overlay_elements=True,
                excluded_tags=["script", "style"],
                page_timeout=timeout * 1000,  # Convert to milliseconds
                verbose=False,
                wait_until="domcontentloaded",
            )

            results = []
            successful_count = 0
            failed_count = 0

            # Process each URL
            async with AsyncWebCrawler(config=browser_config) as crawler:
                for url in valid_urls:
                    try:
                        logger.info(f"🕷️ Crawling URL: {url}")
                        start_time = asyncio.get_event_loop().time()

                        result = await crawler.arun(url=url, config=run_config)

                        end_time = asyncio.get_event_loop().time()
                        execution_time = end_time - start_time

                        if result.success:
                            # Count words in markdown
                            word_count = 0
                            if hasattr(result, "markdown") and result.markdown:
                                word_count = len(result.markdown.split())

                            # Count links
                            links_count = 0
                            if hasattr(result, "links") and result.links:
                                internal_links = result.links.get("internal", [])
                                external_links = result.links.get("external", [])
                                links_count = len(internal_links) + len(external_links)

                            # Count images
                            images_count = 0
                            if hasattr(result, "media") and result.media:
                                images = result.media.get("images", [])
                                images_count = len(images)

                            results.append(
                                {
                                    "url": url,
                                    "success": True,
                                    "status_code": getattr(result, "status_code", 200),
                                    "title": result.metadata.get("title")
                                    if result.metadata
                                    else None,
                                    "markdown": result.markdown
                                    if hasattr(result, "markdown")
                                    else None,
                                    "word_count": word_count,
                                    "links_count": links_count,
                                    "images_count": images_count,
                                    "execution_time": execution_time,
                                }
                            )
                            successful_count += 1
                            logger.info(
                                f"✅ Successfully crawled {url} in {execution_time:.2f}s"
                            )
                        else:
                            results.append(
                                {
                                    "url": url,
                                    "success": False,
                                    "error_message": getattr(
                                        result, "error_message", "Unknown error"
                                    ),
                                    "execution_time": execution_time,
                                }
                            )
                            failed_count += 1
                            logger.warning(f"❌ Failed to crawl {url}")

                    except Exception as e:
                        error_msg = f"Error crawling {url}: {str(e)}"
                        logger.error(error_msg)
                        results.append(
                            {"url": url, "success": False, "error_message": error_msg}
                        )
                        failed_count += 1

            # Format output
            output_lines = [f"🕷️ Crawl4AI Results Summary:"]
            output_lines.append(f"📊 Total URLs: {len(valid_urls)}")
            output_lines.append(f"✅ Successful: {successful_count}")
            output_lines.append(f"❌ Failed: {failed_count}")
            output_lines.append("")

            for i, result in enumerate(results, 1):
                output_lines.append(f"{i}. {result['url']}")
                if result["success"]:
                    output_lines.append(
                        f" ✅ Status: Success (HTTP {result.get('status_code', 'N/A')})"
                    )
                    if result.get("title"):
                        output_lines.append(f" 📄 Title: {result['title']}")
                    if result.get("markdown"):
                        # Show first 300 characters of markdown content.
                        # BUGFIX: previously the full markdown was emitted and
                        # only "..." appended; it is now actually truncated.
                        content_preview = result["markdown"][:300]
                        if len(result["markdown"]) > 300:
                            content_preview += "..."
                        output_lines.append(f" 📝 Content: {content_preview}")
                    output_lines.append(
                        f" 📊 Stats: {result.get('word_count', 0)} words, {result.get('links_count', 0)} links, {result.get('images_count', 0)} images"
                    )
                    if result.get("execution_time"):
                        output_lines.append(
                            f" ⏱️ Time: {result['execution_time']:.2f}s"
                        )
                else:
                    output_lines.append(f" ❌ Status: Failed")
                    if result.get("error_message"):
                        output_lines.append(f" 🚫 Error: {result['error_message']}")
                output_lines.append("")

            return ToolResult(output="\n".join(output_lines))

        except ImportError:
            error_msg = "Crawl4AI is not installed. Please install it with: pip install crawl4ai"
            logger.error(error_msg)
            return ToolResult(error=error_msg)
        except Exception as e:
            error_msg = f"Crawl4AI execution failed: {str(e)}"
            logger.error(error_msg)
            return ToolResult(error=error_msg)

    def _is_valid_url(self, url: str) -> bool:
        """Validate if a URL is properly formatted (http/https with a host)."""
        try:
            result = urlparse(url)
            return all([result.scheme, result.netloc]) and result.scheme in [
                "http",
                "https",
            ]
        except Exception:
            return False
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/web_search.py
app/tool/web_search.py
import asyncio
from typing import Any, Dict, List, Optional

import requests
from bs4 import BeautifulSoup
from pydantic import BaseModel, ConfigDict, Field, model_validator
from tenacity import retry, stop_after_attempt, wait_exponential

from app.config import config
from app.logger import logger
from app.tool.base import BaseTool, ToolResult
from app.tool.search import (
    BaiduSearchEngine,
    BingSearchEngine,
    DuckDuckGoSearchEngine,
    GoogleSearchEngine,
    WebSearchEngine,
)
from app.tool.search.base import SearchItem


class SearchResult(BaseModel):
    """Represents a single search result returned by a search engine."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    position: int = Field(description="Position in search results")
    url: str = Field(description="URL of the search result")
    title: str = Field(default="", description="Title of the search result")
    description: str = Field(
        default="", description="Description or snippet of the search result"
    )
    source: str = Field(description="The search engine that provided this result")
    raw_content: Optional[str] = Field(
        default=None, description="Raw content from the search result page if available"
    )

    def __str__(self) -> str:
        """String representation of a search result."""
        return f"{self.title} ({self.url})"


class SearchMetadata(BaseModel):
    """Metadata about the search operation."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    total_results: int = Field(description="Total number of results found")
    language: str = Field(description="Language code used for the search")
    country: str = Field(description="Country code used for the search")


class SearchResponse(ToolResult):
    """Structured response from the web search tool, inheriting ToolResult."""

    query: str = Field(description="The search query that was executed")
    results: List[SearchResult] = Field(
        default_factory=list, description="List of search results"
    )
    metadata: Optional[SearchMetadata] = Field(
        default=None, description="Metadata about the search"
    )

    @model_validator(mode="after")
    def populate_output(self) -> "SearchResponse":
        """Populate output or error fields based on search results."""
        if self.error:
            return self

        result_text = [f"Search results for '{self.query}':"]

        for i, result in enumerate(self.results, 1):
            # Add title with position number
            title = result.title.strip() or "No title"
            result_text.append(f"\n{i}. {title}")

            # Add URL with proper indentation
            result_text.append(f" URL: {result.url}")

            # Add description if available
            if result.description.strip():
                result_text.append(f" Description: {result.description}")

            # Add content preview if available
            if result.raw_content:
                content_preview = result.raw_content[:1000].replace("\n", " ").strip()
                if len(result.raw_content) > 1000:
                    content_preview += "..."
                result_text.append(f" Content: {content_preview}")

        # Add metadata at the bottom if available
        if self.metadata:
            result_text.extend(
                [
                    f"\nMetadata:",
                    f"- Total results: {self.metadata.total_results}",
                    f"- Language: {self.metadata.language}",
                    f"- Country: {self.metadata.country}",
                ]
            )

        self.output = "\n".join(result_text)
        return self


class WebContentFetcher:
    """Utility class for fetching web content."""

    @staticmethod
    async def fetch_content(url: str, timeout: int = 10) -> Optional[str]:
        """
        Fetch and extract the main content from a webpage.

        Args:
            url: The URL to fetch content from
            timeout: Request timeout in seconds

        Returns:
            Extracted text content or None if fetching fails
        """
        headers = {
            # BUGFIX: the UA string was previously sent under a bogus
            # "WebSearch" header name; servers only honor "User-Agent".
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }

        try:
            # Use asyncio to run requests in a thread pool
            response = await asyncio.get_event_loop().run_in_executor(
                None, lambda: requests.get(url, headers=headers, timeout=timeout)
            )

            if response.status_code != 200:
                logger.warning(
                    f"Failed to fetch content from {url}: HTTP {response.status_code}"
                )
                return None

            # Parse HTML with BeautifulSoup
            soup = BeautifulSoup(response.text, "html.parser")

            # Remove script and style elements
            for script in soup(["script", "style", "header", "footer", "nav"]):
                script.extract()

            # Get text content
            text = soup.get_text(separator="\n", strip=True)

            # Clean up whitespace and limit size (10,000 chars max)
            text = " ".join(text.split())
            return text[:10000] if text else None

        except Exception as e:
            logger.warning(f"Error fetching content from {url}: {e}")
            return None


class WebSearch(BaseTool):
    """Search the web for information using various search engines."""

    name: str = "web_search"
    description: str = """Search the web for real-time information about any topic.
    This tool returns comprehensive search results with relevant information, URLs, titles, and descriptions.
    If the primary search engine fails, it automatically falls back to alternative engines."""
    parameters: dict = {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "(required) The search query to submit to the search engine.",
            },
            "num_results": {
                "type": "integer",
                "description": "(optional) The number of search results to return. Default is 5.",
                "default": 5,
            },
            "lang": {
                "type": "string",
                "description": "(optional) Language code for search results (default: en).",
                "default": "en",
            },
            "country": {
                "type": "string",
                "description": "(optional) Country code for search results (default: us).",
                "default": "us",
            },
            "fetch_content": {
                "type": "boolean",
                "description": "(optional) Whether to fetch full content from result pages. Default is false.",
                "default": False,
            },
        },
        "required": ["query"],
    }
    _search_engine: dict[str, WebSearchEngine] = {
        "google": GoogleSearchEngine(),
        "baidu": BaiduSearchEngine(),
        "duckduckgo": DuckDuckGoSearchEngine(),
        "bing": BingSearchEngine(),
    }
    content_fetcher: WebContentFetcher = WebContentFetcher()

    async def execute(
        self,
        query: str,
        num_results: int = 5,
        lang: Optional[str] = None,
        country: Optional[str] = None,
        fetch_content: bool = False,
    ) -> SearchResponse:
        """
        Execute a Web search and return detailed search results.

        Args:
            query: The search query to submit to the search engine
            num_results: The number of search results to return (default: 5)
            lang: Language code for search results (default from config)
            country: Country code for search results (default from config)
            fetch_content: Whether to fetch content from result pages (default: False)

        Returns:
            A structured response containing search results and metadata
        """
        # Get settings from config
        retry_delay = (
            getattr(config.search_config, "retry_delay", 60)
            if config.search_config
            else 60
        )
        max_retries = (
            getattr(config.search_config, "max_retries", 3)
            if config.search_config
            else 3
        )

        # Use config values for lang and country if not specified
        if lang is None:
            lang = (
                getattr(config.search_config, "lang", "en")
                if config.search_config
                else "en"
            )
        if country is None:
            country = (
                getattr(config.search_config, "country", "us")
                if config.search_config
                else "us"
            )

        search_params = {"lang": lang, "country": country}

        # Try searching with retries when all engines fail
        for retry_count in range(max_retries + 1):
            results = await self._try_all_engines(query, num_results, search_params)
            if results:
                # Fetch content if requested
                if fetch_content:
                    results = await self._fetch_content_for_results(results)

                # Return a successful structured response
                return SearchResponse(
                    status="success",
                    query=query,
                    results=results,
                    metadata=SearchMetadata(
                        total_results=len(results),
                        language=lang,
                        country=country,
                    ),
                )

            if retry_count < max_retries:
                # All engines failed, wait and retry
                logger.warning(
                    f"All search engines failed. Waiting {retry_delay} seconds before retry {retry_count + 1}/{max_retries}..."
                )
                await asyncio.sleep(retry_delay)
            else:
                logger.error(
                    f"All search engines failed after {max_retries} retries. Giving up."
                )

        # Return an error response
        return SearchResponse(
            query=query,
            error="All search engines failed to return results after multiple retries.",
            results=[],
        )

    async def _try_all_engines(
        self, query: str, num_results: int, search_params: Dict[str, Any]
    ) -> List[SearchResult]:
        """Try all search engines in the configured order."""
        engine_order = self._get_engine_order()
        failed_engines = []

        for engine_name in engine_order:
            engine = self._search_engine[engine_name]
            logger.info(f"🔎 Attempting search with {engine_name.capitalize()}...")
            search_items = await self._perform_search_with_engine(
                engine, query, num_results, search_params
            )

            if not search_items:
                continue

            if failed_engines:
                logger.info(
                    f"Search successful with {engine_name.capitalize()} after trying: {', '.join(failed_engines)}"
                )

            # Transform search items into structured results
            return [
                SearchResult(
                    position=i + 1,
                    url=item.url,
                    title=item.title
                    or f"Result {i+1}",  # Ensure we always have a title
                    description=item.description or "",
                    source=engine_name,
                )
                for i, item in enumerate(search_items)
            ]

        if failed_engines:
            logger.error(f"All search engines failed: {', '.join(failed_engines)}")
        return []

    async def _fetch_content_for_results(
        self, results: List[SearchResult]
    ) -> List[SearchResult]:
        """Fetch and add web content to search results."""
        if not results:
            return []

        # Create tasks for each result
        tasks = [self._fetch_single_result_content(result) for result in results]

        # Type annotation to help type checker
        fetched_results = await asyncio.gather(*tasks)

        # Explicit validation of return type
        return [
            (
                result
                if isinstance(result, SearchResult)
                else SearchResult(**result.dict())
            )
            for result in fetched_results
        ]

    async def _fetch_single_result_content(self, result: SearchResult) -> SearchResult:
        """Fetch content for a single search result."""
        if result.url:
            content = await self.content_fetcher.fetch_content(result.url)
            if content:
                result.raw_content = content
        return result

    def _get_engine_order(self) -> List[str]:
        """Determines the order in which to try search engines."""
        preferred = (
            getattr(config.search_config, "engine", "google").lower()
            if config.search_config
            else "google"
        )
        fallbacks = (
            [engine.lower() for engine in config.search_config.fallback_engines]
            if config.search_config
            and hasattr(config.search_config, "fallback_engines")
            else []
        )

        # Start with preferred engine, then fallbacks, then remaining engines
        engine_order = [preferred] if preferred in self._search_engine else []
        engine_order.extend(
            [
                fb
                for fb in fallbacks
                if fb in self._search_engine and fb not in engine_order
            ]
        )
        engine_order.extend([e for e in self._search_engine if e not in engine_order])
        return engine_order

    @retry(
        stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10)
    )
    async def _perform_search_with_engine(
        self,
        engine: WebSearchEngine,
        query: str,
        num_results: int,
        search_params: Dict[str, Any],
    ) -> List[SearchItem]:
        """Execute search with the given engine and parameters."""
        return await asyncio.get_event_loop().run_in_executor(
            None,
            lambda: list(
                engine.perform_search(
                    query,
                    num_results=num_results,
                    lang=search_params.get("lang"),
                    country=search_params.get("country"),
                )
            ),
        )


if __name__ == "__main__":
    web_search = WebSearch()
    search_response = asyncio.run(
        web_search.execute(query="Python programming", fetch_content=True, num_results=1)
    )
    print(search_response.to_tool_result())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/browser_use_tool.py
app/tool/browser_use_tool.py
import asyncio import base64 import json from typing import Generic, Optional, TypeVar from browser_use import Browser as BrowserUseBrowser from browser_use import BrowserConfig from browser_use.browser.context import BrowserContext, BrowserContextConfig from browser_use.dom.service import DomService from pydantic import Field, field_validator from pydantic_core.core_schema import ValidationInfo from app.config import config from app.llm import LLM from app.tool.base import BaseTool, ToolResult from app.tool.web_search import WebSearch _BROWSER_DESCRIPTION = """\ A powerful browser automation tool that allows interaction with web pages through various actions. * This tool provides commands for controlling a browser session, navigating web pages, and extracting information * It maintains state across calls, keeping the browser session alive until explicitly closed * Use this when you need to browse websites, fill forms, click buttons, extract content, or perform web searches * Each action requires specific parameters as defined in the tool's dependencies Key capabilities include: * Navigation: Go to specific URLs, go back, search the web, or refresh pages * Interaction: Click elements, input text, select from dropdowns, send keyboard commands * Scrolling: Scroll up/down by pixel amount or scroll to specific text * Content extraction: Extract and analyze content from web pages based on specific goals * Tab management: Switch between tabs, open new tabs, or close tabs Note: When using element indices, refer to the numbered elements shown in the current browser state. 
""" Context = TypeVar("Context") class BrowserUseTool(BaseTool, Generic[Context]): name: str = "browser_use" description: str = _BROWSER_DESCRIPTION parameters: dict = { "type": "object", "properties": { "action": { "type": "string", "enum": [ "go_to_url", "click_element", "input_text", "scroll_down", "scroll_up", "scroll_to_text", "send_keys", "get_dropdown_options", "select_dropdown_option", "go_back", "web_search", "wait", "extract_content", "switch_tab", "open_tab", "close_tab", ], "description": "The browser action to perform", }, "url": { "type": "string", "description": "URL for 'go_to_url' or 'open_tab' actions", }, "index": { "type": "integer", "description": "Element index for 'click_element', 'input_text', 'get_dropdown_options', or 'select_dropdown_option' actions", }, "text": { "type": "string", "description": "Text for 'input_text', 'scroll_to_text', or 'select_dropdown_option' actions", }, "scroll_amount": { "type": "integer", "description": "Pixels to scroll (positive for down, negative for up) for 'scroll_down' or 'scroll_up' actions", }, "tab_id": { "type": "integer", "description": "Tab ID for 'switch_tab' action", }, "query": { "type": "string", "description": "Search query for 'web_search' action", }, "goal": { "type": "string", "description": "Extraction goal for 'extract_content' action", }, "keys": { "type": "string", "description": "Keys to send for 'send_keys' action", }, "seconds": { "type": "integer", "description": "Seconds to wait for 'wait' action", }, }, "required": ["action"], "dependencies": { "go_to_url": ["url"], "click_element": ["index"], "input_text": ["index", "text"], "switch_tab": ["tab_id"], "open_tab": ["url"], "scroll_down": ["scroll_amount"], "scroll_up": ["scroll_amount"], "scroll_to_text": ["text"], "send_keys": ["keys"], "get_dropdown_options": ["index"], "select_dropdown_option": ["index", "text"], "go_back": [], "web_search": ["query"], "wait": ["seconds"], "extract_content": ["goal"], }, } lock: asyncio.Lock = 
Field(default_factory=asyncio.Lock) browser: Optional[BrowserUseBrowser] = Field(default=None, exclude=True) context: Optional[BrowserContext] = Field(default=None, exclude=True) dom_service: Optional[DomService] = Field(default=None, exclude=True) web_search_tool: WebSearch = Field(default_factory=WebSearch, exclude=True) # Context for generic functionality tool_context: Optional[Context] = Field(default=None, exclude=True) llm: Optional[LLM] = Field(default_factory=LLM) @field_validator("parameters", mode="before") def validate_parameters(cls, v: dict, info: ValidationInfo) -> dict: if not v: raise ValueError("Parameters cannot be empty") return v async def _ensure_browser_initialized(self) -> BrowserContext: """Ensure browser and context are initialized.""" if self.browser is None: browser_config_kwargs = {"headless": False, "disable_security": True} if config.browser_config: from browser_use.browser.browser import ProxySettings # handle proxy settings. if config.browser_config.proxy and config.browser_config.proxy.server: browser_config_kwargs["proxy"] = ProxySettings( server=config.browser_config.proxy.server, username=config.browser_config.proxy.username, password=config.browser_config.proxy.password, ) browser_attrs = [ "headless", "disable_security", "extra_chromium_args", "chrome_instance_path", "wss_url", "cdp_url", ] for attr in browser_attrs: value = getattr(config.browser_config, attr, None) if value is not None: if not isinstance(value, list) or value: browser_config_kwargs[attr] = value self.browser = BrowserUseBrowser(BrowserConfig(**browser_config_kwargs)) if self.context is None: context_config = BrowserContextConfig() # if there is context config in the config, use it. 
if ( config.browser_config and hasattr(config.browser_config, "new_context_config") and config.browser_config.new_context_config ): context_config = config.browser_config.new_context_config self.context = await self.browser.new_context(context_config) self.dom_service = DomService(await self.context.get_current_page()) return self.context async def execute( self, action: str, url: Optional[str] = None, index: Optional[int] = None, text: Optional[str] = None, scroll_amount: Optional[int] = None, tab_id: Optional[int] = None, query: Optional[str] = None, goal: Optional[str] = None, keys: Optional[str] = None, seconds: Optional[int] = None, **kwargs, ) -> ToolResult: """ Execute a specified browser action. Args: action: The browser action to perform url: URL for navigation or new tab index: Element index for click or input actions text: Text for input action or search query scroll_amount: Pixels to scroll for scroll action tab_id: Tab ID for switch_tab action query: Search query for Google search goal: Extraction goal for content extraction keys: Keys to send for keyboard actions seconds: Seconds to wait **kwargs: Additional arguments Returns: ToolResult with the action's output or error """ async with self.lock: try: context = await self._ensure_browser_initialized() # Get max content length from config max_content_length = getattr( config.browser_config, "max_content_length", 2000 ) # Navigation actions if action == "go_to_url": if not url: return ToolResult( error="URL is required for 'go_to_url' action" ) page = await context.get_current_page() await page.goto(url) await page.wait_for_load_state() return ToolResult(output=f"Navigated to {url}") elif action == "go_back": await context.go_back() return ToolResult(output="Navigated back") elif action == "refresh": await context.refresh_page() return ToolResult(output="Refreshed current page") elif action == "web_search": if not query: return ToolResult( error="Query is required for 'web_search' action" ) # Execute 
the web search and return results directly without browser navigation search_response = await self.web_search_tool.execute( query=query, fetch_content=True, num_results=1 ) # Navigate to the first search result first_search_result = search_response.results[0] url_to_navigate = first_search_result.url page = await context.get_current_page() await page.goto(url_to_navigate) await page.wait_for_load_state() return search_response # Element interaction actions elif action == "click_element": if index is None: return ToolResult( error="Index is required for 'click_element' action" ) element = await context.get_dom_element_by_index(index) if not element: return ToolResult(error=f"Element with index {index} not found") download_path = await context._click_element_node(element) output = f"Clicked element at index {index}" if download_path: output += f" - Downloaded file to {download_path}" return ToolResult(output=output) elif action == "input_text": if index is None or not text: return ToolResult( error="Index and text are required for 'input_text' action" ) element = await context.get_dom_element_by_index(index) if not element: return ToolResult(error=f"Element with index {index} not found") await context._input_text_element_node(element, text) return ToolResult( output=f"Input '{text}' into element at index {index}" ) elif action == "scroll_down" or action == "scroll_up": direction = 1 if action == "scroll_down" else -1 amount = ( scroll_amount if scroll_amount is not None else context.config.browser_window_size["height"] ) await context.execute_javascript( f"window.scrollBy(0, {direction * amount});" ) return ToolResult( output=f"Scrolled {'down' if direction > 0 else 'up'} by {amount} pixels" ) elif action == "scroll_to_text": if not text: return ToolResult( error="Text is required for 'scroll_to_text' action" ) page = await context.get_current_page() try: locator = page.get_by_text(text, exact=False) await locator.scroll_into_view_if_needed() return 
ToolResult(output=f"Scrolled to text: '{text}'") except Exception as e: return ToolResult(error=f"Failed to scroll to text: {str(e)}") elif action == "send_keys": if not keys: return ToolResult( error="Keys are required for 'send_keys' action" ) page = await context.get_current_page() await page.keyboard.press(keys) return ToolResult(output=f"Sent keys: {keys}") elif action == "get_dropdown_options": if index is None: return ToolResult( error="Index is required for 'get_dropdown_options' action" ) element = await context.get_dom_element_by_index(index) if not element: return ToolResult(error=f"Element with index {index} not found") page = await context.get_current_page() options = await page.evaluate( """ (xpath) => { const select = document.evaluate(xpath, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; if (!select) return null; return Array.from(select.options).map(opt => ({ text: opt.text, value: opt.value, index: opt.index })); } """, element.xpath, ) return ToolResult(output=f"Dropdown options: {options}") elif action == "select_dropdown_option": if index is None or not text: return ToolResult( error="Index and text are required for 'select_dropdown_option' action" ) element = await context.get_dom_element_by_index(index) if not element: return ToolResult(error=f"Element with index {index} not found") page = await context.get_current_page() await page.select_option(element.xpath, label=text) return ToolResult( output=f"Selected option '{text}' from dropdown at index {index}" ) # Content extraction actions elif action == "extract_content": if not goal: return ToolResult( error="Goal is required for 'extract_content' action" ) page = await context.get_current_page() import markdownify content = markdownify.markdownify(await page.content()) prompt = f"""\ Your task is to extract the content of the page. You will be given a page and a goal, and you should extract all relevant information around this goal from the page. 
If the goal is vague, summarize the page. Respond in json format. Extraction goal: {goal} Page content: {content[:max_content_length]} """ messages = [{"role": "system", "content": prompt}] # Define extraction function schema extraction_function = { "type": "function", "function": { "name": "extract_content", "description": "Extract specific information from a webpage based on a goal", "parameters": { "type": "object", "properties": { "extracted_content": { "type": "object", "description": "The content extracted from the page according to the goal", "properties": { "text": { "type": "string", "description": "Text content extracted from the page", }, "metadata": { "type": "object", "description": "Additional metadata about the extracted content", "properties": { "source": { "type": "string", "description": "Source of the extracted content", } }, }, }, } }, "required": ["extracted_content"], }, }, } # Use LLM to extract content with required function calling response = await self.llm.ask_tool( messages, tools=[extraction_function], tool_choice="required", ) if response and response.tool_calls: args = json.loads(response.tool_calls[0].function.arguments) extracted_content = args.get("extracted_content", {}) return ToolResult( output=f"Extracted from page:\n{extracted_content}\n" ) return ToolResult(output="No content was extracted from the page.") # Tab management actions elif action == "switch_tab": if tab_id is None: return ToolResult( error="Tab ID is required for 'switch_tab' action" ) await context.switch_to_tab(tab_id) page = await context.get_current_page() await page.wait_for_load_state() return ToolResult(output=f"Switched to tab {tab_id}") elif action == "open_tab": if not url: return ToolResult(error="URL is required for 'open_tab' action") await context.create_new_tab(url) return ToolResult(output=f"Opened new tab with {url}") elif action == "close_tab": await context.close_current_tab() return ToolResult(output="Closed current tab") # Utility actions elif 
action == "wait": seconds_to_wait = seconds if seconds is not None else 3 await asyncio.sleep(seconds_to_wait) return ToolResult(output=f"Waited for {seconds_to_wait} seconds") else: return ToolResult(error=f"Unknown action: {action}") except Exception as e: return ToolResult(error=f"Browser action '{action}' failed: {str(e)}") async def get_current_state( self, context: Optional[BrowserContext] = None ) -> ToolResult: """ Get the current browser state as a ToolResult. If context is not provided, uses self.context. """ try: # Use provided context or fall back to self.context ctx = context or self.context if not ctx: return ToolResult(error="Browser context not initialized") state = await ctx.get_state() # Create a viewport_info dictionary if it doesn't exist viewport_height = 0 if hasattr(state, "viewport_info") and state.viewport_info: viewport_height = state.viewport_info.height elif hasattr(ctx, "config") and hasattr(ctx.config, "browser_window_size"): viewport_height = ctx.config.browser_window_size.get("height", 0) # Take a screenshot for the state page = await ctx.get_current_page() await page.bring_to_front() await page.wait_for_load_state() screenshot = await page.screenshot( full_page=True, animations="disabled", type="jpeg", quality=100 ) screenshot = base64.b64encode(screenshot).decode("utf-8") # Build the state info with all required fields state_info = { "url": state.url, "title": state.title, "tabs": [tab.model_dump() for tab in state.tabs], "help": "[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. 
Clicking on these indices will navigate to or interact with the respective content behind them.", "interactive_elements": ( state.element_tree.clickable_elements_to_string() if state.element_tree else "" ), "scroll_info": { "pixels_above": getattr(state, "pixels_above", 0), "pixels_below": getattr(state, "pixels_below", 0), "total_height": getattr(state, "pixels_above", 0) + getattr(state, "pixels_below", 0) + viewport_height, }, "viewport_height": viewport_height, } return ToolResult( output=json.dumps(state_info, indent=4, ensure_ascii=False), base64_image=screenshot, ) except Exception as e: return ToolResult(error=f"Failed to get browser state: {str(e)}") async def cleanup(self): """Clean up browser resources.""" async with self.lock: if self.context is not None: await self.context.close() self.context = None self.dom_service = None if self.browser is not None: await self.browser.close() self.browser = None def __del__(self): """Ensure cleanup when object is destroyed.""" if self.browser is not None or self.context is not None: try: asyncio.run(self.cleanup()) except RuntimeError: loop = asyncio.new_event_loop() loop.run_until_complete(self.cleanup()) loop.close() @classmethod def create_with_context(cls, context: Context) -> "BrowserUseTool[Context]": """Factory method to create a BrowserUseTool with a specific context.""" tool = cls() tool.tool_context = context return tool
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/planning.py
app/tool/planning.py
# tool/planning.py from typing import Dict, List, Literal, Optional from app.exceptions import ToolError from app.tool.base import BaseTool, ToolResult _PLANNING_TOOL_DESCRIPTION = """ A planning tool that allows the agent to create and manage plans for solving complex tasks. The tool provides functionality for creating plans, updating plan steps, and tracking progress. """ class PlanningTool(BaseTool): """ A planning tool that allows the agent to create and manage plans for solving complex tasks. The tool provides functionality for creating plans, updating plan steps, and tracking progress. """ name: str = "planning" description: str = _PLANNING_TOOL_DESCRIPTION parameters: dict = { "type": "object", "properties": { "command": { "description": "The command to execute. Available commands: create, update, list, get, set_active, mark_step, delete.", "enum": [ "create", "update", "list", "get", "set_active", "mark_step", "delete", ], "type": "string", }, "plan_id": { "description": "Unique identifier for the plan. Required for create, update, set_active, and delete commands. Optional for get and mark_step (uses active plan if not specified).", "type": "string", }, "title": { "description": "Title for the plan. Required for create command, optional for update command.", "type": "string", }, "steps": { "description": "List of plan steps. Required for create command, optional for update command.", "type": "array", "items": {"type": "string"}, }, "step_index": { "description": "Index of the step to update (0-based). Required for mark_step command.", "type": "integer", }, "step_status": { "description": "Status to set for a step. Used with mark_step command.", "enum": ["not_started", "in_progress", "completed", "blocked"], "type": "string", }, "step_notes": { "description": "Additional notes for a step. 
Optional for mark_step command.", "type": "string", }, }, "required": ["command"], "additionalProperties": False, } plans: dict = {} # Dictionary to store plans by plan_id _current_plan_id: Optional[str] = None # Track the current active plan async def execute( self, *, command: Literal[ "create", "update", "list", "get", "set_active", "mark_step", "delete" ], plan_id: Optional[str] = None, title: Optional[str] = None, steps: Optional[List[str]] = None, step_index: Optional[int] = None, step_status: Optional[ Literal["not_started", "in_progress", "completed", "blocked"] ] = None, step_notes: Optional[str] = None, **kwargs, ): """ Execute the planning tool with the given command and parameters. Parameters: - command: The operation to perform - plan_id: Unique identifier for the plan - title: Title for the plan (used with create command) - steps: List of steps for the plan (used with create command) - step_index: Index of the step to update (used with mark_step command) - step_status: Status to set for a step (used with mark_step command) - step_notes: Additional notes for a step (used with mark_step command) """ if command == "create": return self._create_plan(plan_id, title, steps) elif command == "update": return self._update_plan(plan_id, title, steps) elif command == "list": return self._list_plans() elif command == "get": return self._get_plan(plan_id) elif command == "set_active": return self._set_active_plan(plan_id) elif command == "mark_step": return self._mark_step(plan_id, step_index, step_status, step_notes) elif command == "delete": return self._delete_plan(plan_id) else: raise ToolError( f"Unrecognized command: {command}. 
Allowed commands are: create, update, list, get, set_active, mark_step, delete" ) def _create_plan( self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]] ) -> ToolResult: """Create a new plan with the given ID, title, and steps.""" if not plan_id: raise ToolError("Parameter `plan_id` is required for command: create") if plan_id in self.plans: raise ToolError( f"A plan with ID '{plan_id}' already exists. Use 'update' to modify existing plans." ) if not title: raise ToolError("Parameter `title` is required for command: create") if ( not steps or not isinstance(steps, list) or not all(isinstance(step, str) for step in steps) ): raise ToolError( "Parameter `steps` must be a non-empty list of strings for command: create" ) # Create a new plan with initialized step statuses plan = { "plan_id": plan_id, "title": title, "steps": steps, "step_statuses": ["not_started"] * len(steps), "step_notes": [""] * len(steps), } self.plans[plan_id] = plan self._current_plan_id = plan_id # Set as active plan return ToolResult( output=f"Plan created successfully with ID: {plan_id}\n\n{self._format_plan(plan)}" ) def _update_plan( self, plan_id: Optional[str], title: Optional[str], steps: Optional[List[str]] ) -> ToolResult: """Update an existing plan with new title or steps.""" if not plan_id: raise ToolError("Parameter `plan_id` is required for command: update") if plan_id not in self.plans: raise ToolError(f"No plan found with ID: {plan_id}") plan = self.plans[plan_id] if title: plan["title"] = title if steps: if not isinstance(steps, list) or not all( isinstance(step, str) for step in steps ): raise ToolError( "Parameter `steps` must be a list of strings for command: update" ) # Preserve existing step statuses for unchanged steps old_steps = plan["steps"] old_statuses = plan["step_statuses"] old_notes = plan["step_notes"] # Create new step statuses and notes new_statuses = [] new_notes = [] for i, step in enumerate(steps): # If the step exists at the same 
position in old steps, preserve status and notes if i < len(old_steps) and step == old_steps[i]: new_statuses.append(old_statuses[i]) new_notes.append(old_notes[i]) else: new_statuses.append("not_started") new_notes.append("") plan["steps"] = steps plan["step_statuses"] = new_statuses plan["step_notes"] = new_notes return ToolResult( output=f"Plan updated successfully: {plan_id}\n\n{self._format_plan(plan)}" ) def _list_plans(self) -> ToolResult: """List all available plans.""" if not self.plans: return ToolResult( output="No plans available. Create a plan with the 'create' command." ) output = "Available plans:\n" for plan_id, plan in self.plans.items(): current_marker = " (active)" if plan_id == self._current_plan_id else "" completed = sum( 1 for status in plan["step_statuses"] if status == "completed" ) total = len(plan["steps"]) progress = f"{completed}/{total} steps completed" output += f"• {plan_id}{current_marker}: {plan['title']} - {progress}\n" return ToolResult(output=output) def _get_plan(self, plan_id: Optional[str]) -> ToolResult: """Get details of a specific plan.""" if not plan_id: # If no plan_id is provided, use the current active plan if not self._current_plan_id: raise ToolError( "No active plan. Please specify a plan_id or set an active plan." 
) plan_id = self._current_plan_id if plan_id not in self.plans: raise ToolError(f"No plan found with ID: {plan_id}") plan = self.plans[plan_id] return ToolResult(output=self._format_plan(plan)) def _set_active_plan(self, plan_id: Optional[str]) -> ToolResult: """Set a plan as the active plan.""" if not plan_id: raise ToolError("Parameter `plan_id` is required for command: set_active") if plan_id not in self.plans: raise ToolError(f"No plan found with ID: {plan_id}") self._current_plan_id = plan_id return ToolResult( output=f"Plan '{plan_id}' is now the active plan.\n\n{self._format_plan(self.plans[plan_id])}" ) def _mark_step( self, plan_id: Optional[str], step_index: Optional[int], step_status: Optional[str], step_notes: Optional[str], ) -> ToolResult: """Mark a step with a specific status and optional notes.""" if not plan_id: # If no plan_id is provided, use the current active plan if not self._current_plan_id: raise ToolError( "No active plan. Please specify a plan_id or set an active plan." ) plan_id = self._current_plan_id if plan_id not in self.plans: raise ToolError(f"No plan found with ID: {plan_id}") if step_index is None: raise ToolError("Parameter `step_index` is required for command: mark_step") plan = self.plans[plan_id] if step_index < 0 or step_index >= len(plan["steps"]): raise ToolError( f"Invalid step_index: {step_index}. Valid indices range from 0 to {len(plan['steps'])-1}." ) if step_status and step_status not in [ "not_started", "in_progress", "completed", "blocked", ]: raise ToolError( f"Invalid step_status: {step_status}. 
Valid statuses are: not_started, in_progress, completed, blocked" ) if step_status: plan["step_statuses"][step_index] = step_status if step_notes: plan["step_notes"][step_index] = step_notes return ToolResult( output=f"Step {step_index} updated in plan '{plan_id}'.\n\n{self._format_plan(plan)}" ) def _delete_plan(self, plan_id: Optional[str]) -> ToolResult: """Delete a plan.""" if not plan_id: raise ToolError("Parameter `plan_id` is required for command: delete") if plan_id not in self.plans: raise ToolError(f"No plan found with ID: {plan_id}") del self.plans[plan_id] # If the deleted plan was the active plan, clear the active plan if self._current_plan_id == plan_id: self._current_plan_id = None return ToolResult(output=f"Plan '{plan_id}' has been deleted.") def _format_plan(self, plan: Dict) -> str: """Format a plan for display.""" output = f"Plan: {plan['title']} (ID: {plan['plan_id']})\n" output += "=" * len(output) + "\n\n" # Calculate progress statistics total_steps = len(plan["steps"]) completed = sum(1 for status in plan["step_statuses"] if status == "completed") in_progress = sum( 1 for status in plan["step_statuses"] if status == "in_progress" ) blocked = sum(1 for status in plan["step_statuses"] if status == "blocked") not_started = sum( 1 for status in plan["step_statuses"] if status == "not_started" ) output += f"Progress: {completed}/{total_steps} steps completed " if total_steps > 0: percentage = (completed / total_steps) * 100 output += f"({percentage:.1f}%)\n" else: output += "(0%)\n" output += f"Status: {completed} completed, {in_progress} in progress, {blocked} blocked, {not_started} not started\n\n" output += "Steps:\n" # Add each step with its status and notes for i, (step, status, notes) in enumerate( zip(plan["steps"], plan["step_statuses"], plan["step_notes"]) ): status_symbol = { "not_started": "[ ]", "in_progress": "[→]", "completed": "[✓]", "blocked": "[!]", }.get(status, "[ ]") output += f"{i}. 
{status_symbol} {step}\n" if notes: output += f" Notes: {notes}\n" return output
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/mcp.py
app/tool/mcp.py
from contextlib import AsyncExitStack from typing import Dict, List, Optional from mcp import ClientSession, StdioServerParameters from mcp.client.sse import sse_client from mcp.client.stdio import stdio_client from mcp.types import ListToolsResult, TextContent from app.logger import logger from app.tool.base import BaseTool, ToolResult from app.tool.tool_collection import ToolCollection class MCPClientTool(BaseTool): """Represents a tool proxy that can be called on the MCP server from the client side.""" session: Optional[ClientSession] = None server_id: str = "" # Add server identifier original_name: str = "" async def execute(self, **kwargs) -> ToolResult: """Execute the tool by making a remote call to the MCP server.""" if not self.session: return ToolResult(error="Not connected to MCP server") try: logger.info(f"Executing tool: {self.original_name}") result = await self.session.call_tool(self.original_name, kwargs) content_str = ", ".join( item.text for item in result.content if isinstance(item, TextContent) ) return ToolResult(output=content_str or "No output returned.") except Exception as e: return ToolResult(error=f"Error executing tool: {str(e)}") class MCPClients(ToolCollection): """ A collection of tools that connects to multiple MCP servers and manages available tools through the Model Context Protocol. 
""" sessions: Dict[str, ClientSession] = {} exit_stacks: Dict[str, AsyncExitStack] = {} description: str = "MCP client tools for server interaction" def __init__(self): super().__init__() # Initialize with empty tools list self.name = "mcp" # Keep name for backward compatibility async def connect_sse(self, server_url: str, server_id: str = "") -> None: """Connect to an MCP server using SSE transport.""" if not server_url: raise ValueError("Server URL is required.") server_id = server_id or server_url # Always ensure clean disconnection before new connection if server_id in self.sessions: await self.disconnect(server_id) exit_stack = AsyncExitStack() self.exit_stacks[server_id] = exit_stack streams_context = sse_client(url=server_url) streams = await exit_stack.enter_async_context(streams_context) session = await exit_stack.enter_async_context(ClientSession(*streams)) self.sessions[server_id] = session await self._initialize_and_list_tools(server_id) async def connect_stdio( self, command: str, args: List[str], server_id: str = "" ) -> None: """Connect to an MCP server using stdio transport.""" if not command: raise ValueError("Server command is required.") server_id = server_id or command # Always ensure clean disconnection before new connection if server_id in self.sessions: await self.disconnect(server_id) exit_stack = AsyncExitStack() self.exit_stacks[server_id] = exit_stack server_params = StdioServerParameters(command=command, args=args) stdio_transport = await exit_stack.enter_async_context( stdio_client(server_params) ) read, write = stdio_transport session = await exit_stack.enter_async_context(ClientSession(read, write)) self.sessions[server_id] = session await self._initialize_and_list_tools(server_id) async def _initialize_and_list_tools(self, server_id: str) -> None: """Initialize session and populate tool map.""" session = self.sessions.get(server_id) if not session: raise RuntimeError(f"Session not initialized for server {server_id}") await 
session.initialize() response = await session.list_tools() # Create proper tool objects for each server tool for tool in response.tools: original_name = tool.name tool_name = f"mcp_{server_id}_{original_name}" tool_name = self._sanitize_tool_name(tool_name) server_tool = MCPClientTool( name=tool_name, description=tool.description, parameters=tool.inputSchema, session=session, server_id=server_id, original_name=original_name, ) self.tool_map[tool_name] = server_tool # Update tools tuple self.tools = tuple(self.tool_map.values()) logger.info( f"Connected to server {server_id} with tools: {[tool.name for tool in response.tools]}" ) def _sanitize_tool_name(self, name: str) -> str: """Sanitize tool name to match MCPClientTool requirements.""" import re # Replace invalid characters with underscores sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", name) # Remove consecutive underscores sanitized = re.sub(r"_+", "_", sanitized) # Remove leading/trailing underscores sanitized = sanitized.strip("_") # Truncate to 64 characters if needed if len(sanitized) > 64: sanitized = sanitized[:64] return sanitized async def list_tools(self) -> ListToolsResult: """List all available tools.""" tools_result = ListToolsResult(tools=[]) for session in self.sessions.values(): response = await session.list_tools() tools_result.tools += response.tools return tools_result async def disconnect(self, server_id: str = "") -> None: """Disconnect from a specific MCP server or all servers if no server_id provided.""" if server_id: if server_id in self.sessions: try: exit_stack = self.exit_stacks.get(server_id) # Close the exit stack which will handle session cleanup if exit_stack: try: await exit_stack.aclose() except RuntimeError as e: if "cancel scope" in str(e).lower(): logger.warning( f"Cancel scope error during disconnect from {server_id}, continuing with cleanup: {e}" ) else: raise # Clean up references self.sessions.pop(server_id, None) self.exit_stacks.pop(server_id, None) # Remove tools associated 
with this server self.tool_map = { k: v for k, v in self.tool_map.items() if v.server_id != server_id } self.tools = tuple(self.tool_map.values()) logger.info(f"Disconnected from MCP server {server_id}") except Exception as e: logger.error(f"Error disconnecting from server {server_id}: {e}") else: # Disconnect from all servers in a deterministic order for sid in sorted(list(self.sessions.keys())): await self.disconnect(sid) self.tool_map = {} self.tools = tuple() logger.info("Disconnected from all MCP servers")
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/terminate.py
app/tool/terminate.py
from app.tool.base import BaseTool _TERMINATE_DESCRIPTION = """Terminate the interaction when the request is met OR if the assistant cannot proceed further with the task. When you have finished all the tasks, call this tool to end the work.""" class Terminate(BaseTool): name: str = "terminate" description: str = _TERMINATE_DESCRIPTION parameters: dict = { "type": "object", "properties": { "status": { "type": "string", "description": "The finish status of the interaction.", "enum": ["success", "failure"], } }, "required": ["status"], } async def execute(self, status: str) -> str: """Finish the current execution""" return f"The interaction has been completed with status: {status}"
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/computer_use_tool.py
app/tool/computer_use_tool.py
import asyncio import base64 import logging import os import time from typing import Dict, Literal, Optional import aiohttp from pydantic import Field from app.daytona.tool_base import Sandbox, SandboxToolsBase from app.tool.base import ToolResult KEYBOARD_KEYS = [ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "enter", "esc", "backspace", "tab", "space", "delete", "ctrl", "alt", "shift", "win", "up", "down", "left", "right", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "ctrl+c", "ctrl+v", "ctrl+x", "ctrl+z", "ctrl+a", "ctrl+s", "alt+tab", "alt+f4", "ctrl+alt+delete", ] MOUSE_BUTTONS = ["left", "right", "middle"] _COMPUTER_USE_DESCRIPTION = """\ A comprehensive computer automation tool that allows interaction with the desktop environment. * This tool provides commands for controlling mouse, keyboard, and taking screenshots * It maintains state including current mouse position * Use this when you need to automate desktop applications, fill forms, or perform GUI interactions Key capabilities include: * Mouse Control: Move, click, drag, scroll * Keyboard Input: Type text, press keys or key combinations * Screenshots: Capture and save screen images * Waiting: Pause execution for specified duration """ class ComputerUseTool(SandboxToolsBase): """Computer automation tool for controlling the desktop environment.""" name: str = "computer_use" description: str = _COMPUTER_USE_DESCRIPTION parameters: dict = { "type": "object", "properties": { "action": { "type": "string", "enum": [ "move_to", "click", "scroll", "typing", "press", "wait", "mouse_down", "mouse_up", "drag_to", "hotkey", "screenshot", ], "description": "The computer action to perform", }, "x": {"type": "number", "description": "X coordinate for mouse actions"}, "y": {"type": "number", "description": "Y coordinate for mouse actions"}, "button": { 
"type": "string", "enum": MOUSE_BUTTONS, "description": "Mouse button for click/drag actions", "default": "left", }, "num_clicks": { "type": "integer", "description": "Number of clicks", "enum": [1, 2, 3], "default": 1, }, "amount": { "type": "integer", "description": "Scroll amount (positive for up, negative for down)", "minimum": -10, "maximum": 10, }, "text": {"type": "string", "description": "Text to type"}, "key": { "type": "string", "enum": KEYBOARD_KEYS, "description": "Key to press", }, "keys": { "type": "string", "enum": KEYBOARD_KEYS, "description": "Key combination to press", }, "duration": { "type": "number", "description": "Duration in seconds to wait", "default": 0.5, }, }, "required": ["action"], "dependencies": { "move_to": ["x", "y"], "click": [], "scroll": ["amount"], "typing": ["text"], "press": ["key"], "wait": [], "mouse_down": [], "mouse_up": [], "drag_to": ["x", "y"], "hotkey": ["keys"], "screenshot": [], }, } session: Optional[aiohttp.ClientSession] = Field(default=None, exclude=True) mouse_x: int = Field(default=0, exclude=True) mouse_y: int = Field(default=0, exclude=True) api_base_url: Optional[str] = Field(default=None, exclude=True) def __init__(self, sandbox: Optional[Sandbox] = None, **data): """Initialize with optional sandbox.""" super().__init__(**data) if sandbox is not None: self._sandbox = sandbox # 直接操作基类的私有属性 self.api_base_url = sandbox.get_preview_link(8000).url logging.info( f"Initialized ComputerUseTool with API URL: {self.api_base_url}" ) @classmethod def create_with_sandbox(cls, sandbox: Sandbox) -> "ComputerUseTool": """Factory method to create a tool with sandbox.""" return cls(sandbox=sandbox) # 通过构造函数初始化 async def _get_session(self) -> aiohttp.ClientSession: """Get or create aiohttp session for API requests.""" if self.session is None or self.session.closed: self.session = aiohttp.ClientSession() return self.session async def _api_request( self, method: str, endpoint: str, data: Optional[Dict] = None ) -> Dict: 
"""Send request to automation service API.""" try: session = await self._get_session() url = f"{self.api_base_url}/api{endpoint}" logging.debug(f"API request: {method} {url} {data}") if method.upper() == "GET": async with session.get(url) as response: result = await response.json() else: # POST async with session.post(url, json=data) as response: result = await response.json() logging.debug(f"API response: {result}") return result except Exception as e: logging.error(f"API request failed: {str(e)}") return {"success": False, "error": str(e)} async def execute( self, action: Literal[ "move_to", "click", "scroll", "typing", "press", "wait", "mouse_down", "mouse_up", "drag_to", "hotkey", "screenshot", ], x: Optional[float] = None, y: Optional[float] = None, button: str = "left", num_clicks: int = 1, amount: Optional[int] = None, text: Optional[str] = None, key: Optional[str] = None, keys: Optional[str] = None, duration: float = 0.5, **kwargs, ) -> ToolResult: """ Execute a specified computer automation action. 
Args: action: The action to perform x: X coordinate for mouse actions y: Y coordinate for mouse actions button: Mouse button for click/drag actions num_clicks: Number of clicks to perform amount: Scroll amount (positive for up, negative for down) text: Text to type key: Key to press keys: Key combination to press duration: Duration in seconds to wait **kwargs: Additional arguments Returns: ToolResult with the action's output or error """ try: if action == "move_to": if x is None or y is None: return ToolResult(error="x and y coordinates are required") x_int = int(round(float(x))) y_int = int(round(float(y))) result = await self._api_request( "POST", "/automation/mouse/move", {"x": x_int, "y": y_int} ) if result.get("success", False): self.mouse_x = x_int self.mouse_y = y_int return ToolResult(output=f"Moved to ({x_int}, {y_int})") else: return ToolResult( error=f"Failed to move: {result.get('error', 'Unknown error')}" ) elif action == "click": x_val = x if x is not None else self.mouse_x y_val = y if y is not None else self.mouse_y x_int = int(round(float(x_val))) y_int = int(round(float(y_val))) num_clicks = int(num_clicks) result = await self._api_request( "POST", "/automation/mouse/click", { "x": x_int, "y": y_int, "clicks": num_clicks, "button": button.lower(), }, ) if result.get("success", False): self.mouse_x = x_int self.mouse_y = y_int return ToolResult( output=f"{num_clicks} {button} click(s) performed at ({x_int}, {y_int})" ) else: return ToolResult( error=f"Failed to click: {result.get('error', 'Unknown error')}" ) elif action == "scroll": if amount is None: return ToolResult(error="Scroll amount is required") amount = int(float(amount)) amount = max(-10, min(10, amount)) result = await self._api_request( "POST", "/automation/mouse/scroll", {"clicks": amount, "x": self.mouse_x, "y": self.mouse_y}, ) if result.get("success", False): direction = "up" if amount > 0 else "down" steps = abs(amount) return ToolResult( output=f"Scrolled {direction} {steps} 
step(s) at position ({self.mouse_x}, {self.mouse_y})" ) else: return ToolResult( error=f"Failed to scroll: {result.get('error', 'Unknown error')}" ) elif action == "typing": if text is None: return ToolResult(error="Text is required for typing") text = str(text) result = await self._api_request( "POST", "/automation/keyboard/write", {"message": text, "interval": 0.01}, ) if result.get("success", False): return ToolResult(output=f"Typed: {text}") else: return ToolResult( error=f"Failed to type: {result.get('error', 'Unknown error')}" ) elif action == "press": if key is None: return ToolResult(error="Key is required for press action") key = str(key).lower() result = await self._api_request( "POST", "/automation/keyboard/press", {"keys": key, "presses": 1} ) if result.get("success", False): return ToolResult(output=f"Pressed key: {key}") else: return ToolResult( error=f"Failed to press key: {result.get('error', 'Unknown error')}" ) elif action == "wait": duration = float(duration) duration = max(0, min(10, duration)) await asyncio.sleep(duration) return ToolResult(output=f"Waited {duration} seconds") elif action == "mouse_down": x_val = x if x is not None else self.mouse_x y_val = y if y is not None else self.mouse_y x_int = int(round(float(x_val))) y_int = int(round(float(y_val))) result = await self._api_request( "POST", "/automation/mouse/down", {"x": x_int, "y": y_int, "button": button.lower()}, ) if result.get("success", False): self.mouse_x = x_int self.mouse_y = y_int return ToolResult( output=f"{button} button pressed at ({x_int}, {y_int})" ) else: return ToolResult( error=f"Failed to press button: {result.get('error', 'Unknown error')}" ) elif action == "mouse_up": x_val = x if x is not None else self.mouse_x y_val = y if y is not None else self.mouse_y x_int = int(round(float(x_val))) y_int = int(round(float(y_val))) result = await self._api_request( "POST", "/automation/mouse/up", {"x": x_int, "y": y_int, "button": button.lower()}, ) if 
result.get("success", False): self.mouse_x = x_int self.mouse_y = y_int return ToolResult( output=f"{button} button released at ({x_int}, {y_int})" ) else: return ToolResult( error=f"Failed to release button: {result.get('error', 'Unknown error')}" ) elif action == "drag_to": if x is None or y is None: return ToolResult(error="x and y coordinates are required") target_x = int(round(float(x))) target_y = int(round(float(y))) start_x = self.mouse_x start_y = self.mouse_y result = await self._api_request( "POST", "/automation/mouse/drag", {"x": target_x, "y": target_y, "duration": 0.3, "button": "left"}, ) if result.get("success", False): self.mouse_x = target_x self.mouse_y = target_y return ToolResult( output=f"Dragged from ({start_x}, {start_y}) to ({target_x}, {target_y})" ) else: return ToolResult( error=f"Failed to drag: {result.get('error', 'Unknown error')}" ) elif action == "hotkey": if keys is None: return ToolResult(error="Keys are required for hotkey action") keys = str(keys).lower().strip() key_sequence = keys.split("+") result = await self._api_request( "POST", "/automation/keyboard/hotkey", {"keys": key_sequence, "interval": 0.01}, ) if result.get("success", False): return ToolResult(output=f"Pressed key combination: {keys}") else: return ToolResult( error=f"Failed to press keys: {result.get('error', 'Unknown error')}" ) elif action == "screenshot": result = await self._api_request("POST", "/automation/screenshot") if "image" in result: base64_str = result["image"] timestamp = time.strftime("%Y%m%d_%H%M%S") # Save screenshot to file screenshots_dir = "screenshots" if not os.path.exists(screenshots_dir): os.makedirs(screenshots_dir) timestamped_filename = os.path.join( screenshots_dir, f"screenshot_{timestamp}.png" ) latest_filename = "latest_screenshot.png" # Decode base64 string and save to file img_data = base64.b64decode(base64_str) with open(timestamped_filename, "wb") as f: f.write(img_data) # Save a copy as the latest screenshot with 
open(latest_filename, "wb") as f: f.write(img_data) return ToolResult( output=f"Screenshot saved as {timestamped_filename}", base64_image=base64_str, ) else: return ToolResult(error="Failed to capture screenshot") else: return ToolResult(error=f"Unknown action: {action}") except Exception as e: return ToolResult(error=f"Computer action failed: {str(e)}") async def cleanup(self): """Clean up resources.""" if self.session and not self.session.closed: await self.session.close() self.session = None def __del__(self): """Ensure cleanup on destruction.""" if hasattr(self, "session") and self.session is not None: try: asyncio.run(self.cleanup()) except RuntimeError: loop = asyncio.new_event_loop() loop.run_until_complete(self.cleanup()) loop.close()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/python_execute.py
app/tool/python_execute.py
import multiprocessing import sys from io import StringIO from typing import Dict from app.tool.base import BaseTool class PythonExecute(BaseTool): """A tool for executing Python code with timeout and safety restrictions.""" name: str = "python_execute" description: str = "Executes Python code string. Note: Only print outputs are visible, function return values are not captured. Use print statements to see results." parameters: dict = { "type": "object", "properties": { "code": { "type": "string", "description": "The Python code to execute.", }, }, "required": ["code"], } def _run_code(self, code: str, result_dict: dict, safe_globals: dict) -> None: original_stdout = sys.stdout try: output_buffer = StringIO() sys.stdout = output_buffer exec(code, safe_globals, safe_globals) result_dict["observation"] = output_buffer.getvalue() result_dict["success"] = True except Exception as e: result_dict["observation"] = str(e) result_dict["success"] = False finally: sys.stdout = original_stdout async def execute( self, code: str, timeout: int = 5, ) -> Dict: """ Executes the provided Python code with a timeout. Args: code (str): The Python code to execute. timeout (int): Execution timeout in seconds. Returns: Dict: Contains 'output' with execution output or error message and 'success' status. """ with multiprocessing.Manager() as manager: result = manager.dict({"observation": "", "success": False}) if isinstance(__builtins__, dict): safe_globals = {"__builtins__": __builtins__} else: safe_globals = {"__builtins__": __builtins__.__dict__.copy()} proc = multiprocessing.Process( target=self._run_code, args=(code, result, safe_globals) ) proc.start() proc.join(timeout) # timeout process if proc.is_alive(): proc.terminate() proc.join(1) return { "observation": f"Execution timeout after {timeout} seconds", "success": False, } return dict(result)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/file_operators.py
app/tool/file_operators.py
"""File operation interfaces and implementations for local and sandbox environments.""" import asyncio from pathlib import Path from typing import Optional, Protocol, Tuple, Union, runtime_checkable from app.config import SandboxSettings from app.exceptions import ToolError from app.sandbox.client import SANDBOX_CLIENT PathLike = Union[str, Path] @runtime_checkable class FileOperator(Protocol): """Interface for file operations in different environments.""" async def read_file(self, path: PathLike) -> str: """Read content from a file.""" ... async def write_file(self, path: PathLike, content: str) -> None: """Write content to a file.""" ... async def is_directory(self, path: PathLike) -> bool: """Check if path points to a directory.""" ... async def exists(self, path: PathLike) -> bool: """Check if path exists.""" ... async def run_command( self, cmd: str, timeout: Optional[float] = 120.0 ) -> Tuple[int, str, str]: """Run a shell command and return (return_code, stdout, stderr).""" ... class LocalFileOperator(FileOperator): """File operations implementation for local filesystem.""" encoding: str = "utf-8" async def read_file(self, path: PathLike) -> str: """Read content from a local file.""" try: return Path(path).read_text(encoding=self.encoding) except Exception as e: raise ToolError(f"Failed to read {path}: {str(e)}") from None async def write_file(self, path: PathLike, content: str) -> None: """Write content to a local file.""" try: Path(path).write_text(content, encoding=self.encoding) except Exception as e: raise ToolError(f"Failed to write to {path}: {str(e)}") from None async def is_directory(self, path: PathLike) -> bool: """Check if path points to a directory.""" return Path(path).is_dir() async def exists(self, path: PathLike) -> bool: """Check if path exists.""" return Path(path).exists() async def run_command( self, cmd: str, timeout: Optional[float] = 120.0 ) -> Tuple[int, str, str]: """Run a shell command locally.""" process = await 
asyncio.create_subprocess_shell( cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE ) try: stdout, stderr = await asyncio.wait_for( process.communicate(), timeout=timeout ) return ( process.returncode or 0, stdout.decode(), stderr.decode(), ) except asyncio.TimeoutError as exc: try: process.kill() except ProcessLookupError: pass raise TimeoutError( f"Command '{cmd}' timed out after {timeout} seconds" ) from exc class SandboxFileOperator(FileOperator): """File operations implementation for sandbox environment.""" def __init__(self): self.sandbox_client = SANDBOX_CLIENT async def _ensure_sandbox_initialized(self): """Ensure sandbox is initialized.""" if not self.sandbox_client.sandbox: await self.sandbox_client.create(config=SandboxSettings()) async def read_file(self, path: PathLike) -> str: """Read content from a file in sandbox.""" await self._ensure_sandbox_initialized() try: return await self.sandbox_client.read_file(str(path)) except Exception as e: raise ToolError(f"Failed to read {path} in sandbox: {str(e)}") from None async def write_file(self, path: PathLike, content: str) -> None: """Write content to a file in sandbox.""" await self._ensure_sandbox_initialized() try: await self.sandbox_client.write_file(str(path), content) except Exception as e: raise ToolError(f"Failed to write to {path} in sandbox: {str(e)}") from None async def is_directory(self, path: PathLike) -> bool: """Check if path points to a directory in sandbox.""" await self._ensure_sandbox_initialized() result = await self.sandbox_client.run_command( f"test -d {path} && echo 'true' || echo 'false'" ) return result.strip() == "true" async def exists(self, path: PathLike) -> bool: """Check if path exists in sandbox.""" await self._ensure_sandbox_initialized() result = await self.sandbox_client.run_command( f"test -e {path} && echo 'true' || echo 'false'" ) return result.strip() == "true" async def run_command( self, cmd: str, timeout: Optional[float] = 120.0 ) -> Tuple[int, 
str, str]: """Run a command in sandbox environment.""" await self._ensure_sandbox_initialized() try: stdout = await self.sandbox_client.run_command( cmd, timeout=int(timeout) if timeout else None ) return ( 0, # Always return 0 since we don't have explicit return code from sandbox stdout, "", # No stderr capture in the current sandbox implementation ) except TimeoutError as exc: raise TimeoutError( f"Command '{cmd}' timed out after {timeout} seconds in sandbox" ) from exc except Exception as exc: return 1, "", f"Error executing command in sandbox: {str(exc)}"
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/__init__.py
app/tool/__init__.py
from app.tool.base import BaseTool from app.tool.bash import Bash from app.tool.browser_use_tool import BrowserUseTool from app.tool.crawl4ai import Crawl4aiTool from app.tool.create_chat_completion import CreateChatCompletion from app.tool.planning import PlanningTool from app.tool.str_replace_editor import StrReplaceEditor from app.tool.terminate import Terminate from app.tool.tool_collection import ToolCollection from app.tool.web_search import WebSearch __all__ = [ "BaseTool", "Bash", "BrowserUseTool", "Terminate", "StrReplaceEditor", "WebSearch", "ToolCollection", "CreateChatCompletion", "PlanningTool", "Crawl4aiTool", ]
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/str_replace_editor.py
app/tool/str_replace_editor.py
"""File and directory manipulation tool with sandbox support.""" from collections import defaultdict from pathlib import Path from typing import Any, DefaultDict, List, Literal, Optional, get_args from app.config import config from app.exceptions import ToolError from app.tool import BaseTool from app.tool.base import CLIResult, ToolResult from app.tool.file_operators import ( FileOperator, LocalFileOperator, PathLike, SandboxFileOperator, ) Command = Literal[ "view", "create", "str_replace", "insert", "undo_edit", ] # Constants SNIPPET_LINES: int = 4 MAX_RESPONSE_LEN: int = 16000 TRUNCATED_MESSAGE: str = ( "<response clipped><NOTE>To save on context only part of this file has been shown to you. " "You should retry this tool after you have searched inside the file with `grep -n` " "in order to find the line numbers of what you are looking for.</NOTE>" ) # Tool description _STR_REPLACE_EDITOR_DESCRIPTION = """Custom editing tool for viewing, creating and editing files * State is persistent across command calls and discussions with the user * If `path` is a file, `view` displays the result of applying `cat -n`. If `path` is a directory, `view` lists non-hidden files and directories up to 2 levels deep * The `create` command cannot be used if the specified `path` already exists as a file * If a `command` generates a long output, it will be truncated and marked with `<response clipped>` * The `undo_edit` command will revert the last edit made to the file at `path` Notes for using the `str_replace` command: * The `old_str` parameter should match EXACTLY one or more consecutive lines from the original file. Be mindful of whitespaces! * If the `old_str` parameter is not unique in the file, the replacement will not be performed. 
Make sure to include enough context in `old_str` to make it unique * The `new_str` parameter should contain the edited lines that should replace the `old_str` """ def maybe_truncate( content: str, truncate_after: Optional[int] = MAX_RESPONSE_LEN ) -> str: """Truncate content and append a notice if content exceeds the specified length.""" if not truncate_after or len(content) <= truncate_after: return content return content[:truncate_after] + TRUNCATED_MESSAGE class StrReplaceEditor(BaseTool): """A tool for viewing, creating, and editing files with sandbox support.""" name: str = "str_replace_editor" description: str = _STR_REPLACE_EDITOR_DESCRIPTION parameters: dict = { "type": "object", "properties": { "command": { "description": "The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.", "enum": ["view", "create", "str_replace", "insert", "undo_edit"], "type": "string", }, "path": { "description": "Absolute path to file or directory.", "type": "string", }, "file_text": { "description": "Required parameter of `create` command, with the content of the file to be created.", "type": "string", }, "old_str": { "description": "Required parameter of `str_replace` command containing the string in `path` to replace.", "type": "string", }, "new_str": { "description": "Optional parameter of `str_replace` command containing the new string (if not given, no string will be added). Required parameter of `insert` command containing the string to insert.", "type": "string", }, "insert_line": { "description": "Required parameter of `insert` command. The `new_str` will be inserted AFTER the line `insert_line` of `path`.", "type": "integer", }, "view_range": { "description": "Optional parameter of `view` command when `path` points to a file. If none is given, the full file is shown. If provided, the file will be shown in the indicated line number range, e.g. [11, 12] will show lines 11 and 12. Indexing at 1 to start. 
Setting `[start_line, -1]` shows all lines from `start_line` to the end of the file.", "items": {"type": "integer"}, "type": "array", }, }, "required": ["command", "path"], } _file_history: DefaultDict[PathLike, List[str]] = defaultdict(list) _local_operator: LocalFileOperator = LocalFileOperator() _sandbox_operator: SandboxFileOperator = SandboxFileOperator() # def _get_operator(self, use_sandbox: bool) -> FileOperator: def _get_operator(self) -> FileOperator: """Get the appropriate file operator based on execution mode.""" return ( self._sandbox_operator if config.sandbox.use_sandbox else self._local_operator ) async def execute( self, *, command: Command, path: str, file_text: str | None = None, view_range: list[int] | None = None, old_str: str | None = None, new_str: str | None = None, insert_line: int | None = None, **kwargs: Any, ) -> str: """Execute a file operation command.""" # Get the appropriate file operator operator = self._get_operator() # Validate path and command combination await self.validate_path(command, Path(path), operator) # Execute the appropriate command if command == "view": result = await self.view(path, view_range, operator) elif command == "create": if file_text is None: raise ToolError("Parameter `file_text` is required for command: create") await operator.write_file(path, file_text) self._file_history[path].append(file_text) result = ToolResult(output=f"File created successfully at: {path}") elif command == "str_replace": if old_str is None: raise ToolError( "Parameter `old_str` is required for command: str_replace" ) result = await self.str_replace(path, old_str, new_str, operator) elif command == "insert": if insert_line is None: raise ToolError( "Parameter `insert_line` is required for command: insert" ) if new_str is None: raise ToolError("Parameter `new_str` is required for command: insert") result = await self.insert(path, insert_line, new_str, operator) elif command == "undo_edit": result = await self.undo_edit(path, operator) 
else: # This should be caught by type checking, but we include it for safety raise ToolError( f'Unrecognized command {command}. The allowed commands for the {self.name} tool are: {", ".join(get_args(Command))}' ) return str(result) async def validate_path( self, command: str, path: Path, operator: FileOperator ) -> None: """Validate path and command combination based on execution environment.""" # Check if path is absolute if not path.is_absolute(): raise ToolError(f"The path {path} is not an absolute path") # Only check if path exists for non-create commands if command != "create": if not await operator.exists(path): raise ToolError( f"The path {path} does not exist. Please provide a valid path." ) # Check if path is a directory is_dir = await operator.is_directory(path) if is_dir and command != "view": raise ToolError( f"The path {path} is a directory and only the `view` command can be used on directories" ) # Check if file exists for create command elif command == "create": exists = await operator.exists(path) if exists: raise ToolError( f"File already exists at: {path}. Cannot overwrite files using command `create`." ) async def view( self, path: PathLike, view_range: Optional[List[int]] = None, operator: FileOperator = None, ) -> CLIResult: """Display file or directory content.""" # Determine if path is a directory is_dir = await operator.is_directory(path) if is_dir: # Directory handling if view_range: raise ToolError( "The `view_range` parameter is not allowed when `path` points to a directory." 
) return await self._view_directory(path, operator) else: # File handling return await self._view_file(path, operator, view_range) @staticmethod async def _view_directory(path: PathLike, operator: FileOperator) -> CLIResult: """Display directory contents.""" find_cmd = f"find {path} -maxdepth 2 -not -path '*/\\.*'" # Execute command using the operator returncode, stdout, stderr = await operator.run_command(find_cmd) if not stderr: stdout = ( f"Here's the files and directories up to 2 levels deep in {path}, " f"excluding hidden items:\n{stdout}\n" ) return CLIResult(output=stdout, error=stderr) async def _view_file( self, path: PathLike, operator: FileOperator, view_range: Optional[List[int]] = None, ) -> CLIResult: """Display file content, optionally within a specified line range.""" # Read file content file_content = await operator.read_file(path) init_line = 1 # Apply view range if specified if view_range: if len(view_range) != 2 or not all(isinstance(i, int) for i in view_range): raise ToolError( "Invalid `view_range`. It should be a list of two integers." ) file_lines = file_content.split("\n") n_lines_file = len(file_lines) init_line, final_line = view_range # Validate view range if init_line < 1 or init_line > n_lines_file: raise ToolError( f"Invalid `view_range`: {view_range}. Its first element `{init_line}` should be " f"within the range of lines of the file: {[1, n_lines_file]}" ) if final_line > n_lines_file: raise ToolError( f"Invalid `view_range`: {view_range}. Its second element `{final_line}` should be " f"smaller than the number of lines in the file: `{n_lines_file}`" ) if final_line != -1 and final_line < init_line: raise ToolError( f"Invalid `view_range`: {view_range}. 
Its second element `{final_line}` should be " f"larger or equal than its first `{init_line}`" ) # Apply range if final_line == -1: file_content = "\n".join(file_lines[init_line - 1 :]) else: file_content = "\n".join(file_lines[init_line - 1 : final_line]) # Format and return result return CLIResult( output=self._make_output(file_content, str(path), init_line=init_line) ) async def str_replace( self, path: PathLike, old_str: str, new_str: Optional[str] = None, operator: FileOperator = None, ) -> CLIResult: """Replace a unique string in a file with a new string.""" # Read file content and expand tabs file_content = (await operator.read_file(path)).expandtabs() old_str = old_str.expandtabs() new_str = new_str.expandtabs() if new_str is not None else "" # Check if old_str is unique in the file occurrences = file_content.count(old_str) if occurrences == 0: raise ToolError( f"No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}." ) elif occurrences > 1: # Find line numbers of occurrences file_content_lines = file_content.split("\n") lines = [ idx + 1 for idx, line in enumerate(file_content_lines) if old_str in line ] raise ToolError( f"No replacement was performed. Multiple occurrences of old_str `{old_str}` " f"in lines {lines}. Please ensure it is unique" ) # Replace old_str with new_str new_file_content = file_content.replace(old_str, new_str) # Write the new content to the file await operator.write_file(path, new_file_content) # Save the original content to history self._file_history[path].append(file_content) # Create a snippet of the edited section replacement_line = file_content.split(old_str)[0].count("\n") start_line = max(0, replacement_line - SNIPPET_LINES) end_line = replacement_line + SNIPPET_LINES + new_str.count("\n") snippet = "\n".join(new_file_content.split("\n")[start_line : end_line + 1]) # Prepare the success message success_msg = f"The file {path} has been edited. 
" success_msg += self._make_output( snippet, f"a snippet of {path}", start_line + 1 ) success_msg += "Review the changes and make sure they are as expected. Edit the file again if necessary." return CLIResult(output=success_msg) async def insert( self, path: PathLike, insert_line: int, new_str: str, operator: FileOperator = None, ) -> CLIResult: """Insert text at a specific line in a file.""" # Read and prepare content file_text = (await operator.read_file(path)).expandtabs() new_str = new_str.expandtabs() file_text_lines = file_text.split("\n") n_lines_file = len(file_text_lines) # Validate insert_line if insert_line < 0 or insert_line > n_lines_file: raise ToolError( f"Invalid `insert_line` parameter: {insert_line}. It should be within " f"the range of lines of the file: {[0, n_lines_file]}" ) # Perform insertion new_str_lines = new_str.split("\n") new_file_text_lines = ( file_text_lines[:insert_line] + new_str_lines + file_text_lines[insert_line:] ) # Create a snippet for preview snippet_lines = ( file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line] + new_str_lines + file_text_lines[insert_line : insert_line + SNIPPET_LINES] ) # Join lines and write to file new_file_text = "\n".join(new_file_text_lines) snippet = "\n".join(snippet_lines) await operator.write_file(path, new_file_text) self._file_history[path].append(file_text) # Prepare success message success_msg = f"The file {path} has been edited. " success_msg += self._make_output( snippet, "a snippet of the edited file", max(1, insert_line - SNIPPET_LINES + 1), ) success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary." 
return CLIResult(output=success_msg) async def undo_edit( self, path: PathLike, operator: FileOperator = None ) -> CLIResult: """Revert the last edit made to a file.""" if not self._file_history[path]: raise ToolError(f"No edit history found for {path}.") old_text = self._file_history[path].pop() await operator.write_file(path, old_text) return CLIResult( output=f"Last edit to {path} undone successfully. {self._make_output(old_text, str(path))}" ) def _make_output( self, file_content: str, file_descriptor: str, init_line: int = 1, expand_tabs: bool = True, ) -> str: """Format file content for display with line numbers.""" file_content = maybe_truncate(file_content) if expand_tabs: file_content = file_content.expandtabs() # Add line numbers to each line file_content = "\n".join( [ f"{i + init_line:6}\t{line}" for i, line in enumerate(file_content.split("\n")) ] ) return ( f"Here's the result of running `cat -n` on {file_descriptor}:\n" + file_content + "\n" )
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/base.py
app/tool/base.py
import json from abc import ABC, abstractmethod from typing import Any, Dict, Optional, Union from pydantic import BaseModel, Field from app.utils.logger import logger # class BaseTool(ABC, BaseModel): # name: str # description: str # parameters: Optional[dict] = None # class Config: # arbitrary_types_allowed = True # async def __call__(self, **kwargs) -> Any: # """Execute the tool with given parameters.""" # return await self.execute(**kwargs) # @abstractmethod # async def execute(self, **kwargs) -> Any: # """Execute the tool with given parameters.""" # def to_param(self) -> Dict: # """Convert tool to function call format.""" # return { # "type": "function", # "function": { # "name": self.name, # "description": self.description, # "parameters": self.parameters, # }, # } class ToolResult(BaseModel): """Represents the result of a tool execution.""" output: Any = Field(default=None) error: Optional[str] = Field(default=None) base64_image: Optional[str] = Field(default=None) system: Optional[str] = Field(default=None) class Config: arbitrary_types_allowed = True def __bool__(self): return any(getattr(self, field) for field in self.__fields__) def __add__(self, other: "ToolResult"): def combine_fields( field: Optional[str], other_field: Optional[str], concatenate: bool = True ): if field and other_field: if concatenate: return field + other_field raise ValueError("Cannot combine tool results") return field or other_field return ToolResult( output=combine_fields(self.output, other.output), error=combine_fields(self.error, other.error), base64_image=combine_fields(self.base64_image, other.base64_image, False), system=combine_fields(self.system, other.system), ) def __str__(self): return f"Error: {self.error}" if self.error else self.output def replace(self, **kwargs): """Returns a new ToolResult with the given fields replaced.""" # return self.copy(update=kwargs) return type(self)(**{**self.dict(), **kwargs}) class BaseTool(ABC, BaseModel): """Consolidated base class for 
all tools combining BaseModel and Tool functionality. Provides: - Pydantic model validation - Schema registration - Standardized result handling - Abstract execution interface Attributes: name (str): Tool name description (str): Tool description parameters (dict): Tool parameters schema _schemas (Dict[str, List[ToolSchema]]): Registered method schemas """ name: str description: str parameters: Optional[dict] = None # _schemas: Dict[str, List[ToolSchema]] = {} class Config: arbitrary_types_allowed = True underscore_attrs_are_private = False # def __init__(self, **data): # """Initialize tool with model validation and schema registration.""" # super().__init__(**data) # logger.debug(f"Initializing tool class: {self.__class__.__name__}") # self._register_schemas() # def _register_schemas(self): # """Register schemas from all decorated methods.""" # for name, method in inspect.getmembers(self, predicate=inspect.ismethod): # if hasattr(method, 'tool_schemas'): # self._schemas[name] = method.tool_schemas # logger.debug(f"Registered schemas for method '{name}' in {self.__class__.__name__}") async def __call__(self, **kwargs) -> Any: """Execute the tool with given parameters.""" return await self.execute(**kwargs) @abstractmethod async def execute(self, **kwargs) -> Any: """Execute the tool with given parameters.""" def to_param(self) -> Dict: """Convert tool to function call format. Returns: Dictionary with tool metadata in OpenAI function calling format """ return { "type": "function", "function": { "name": self.name, "description": self.description, "parameters": self.parameters, }, } # def get_schemas(self) -> Dict[str, List[ToolSchema]]: # """Get all registered tool schemas. # Returns: # Dict mapping method names to their schema definitions # """ # return self._schemas def success_response(self, data: Union[Dict[str, Any], str]) -> ToolResult: """Create a successful tool result. 
Args: data: Result data (dictionary or string) Returns: ToolResult with success=True and formatted output """ if isinstance(data, str): text = data else: text = json.dumps(data, indent=2) logger.debug(f"Created success response for {self.__class__.__name__}") return ToolResult(output=text) def fail_response(self, msg: str) -> ToolResult: """Create a failed tool result. Args: msg: Error message describing the failure Returns: ToolResult with success=False and error message """ logger.debug(f"Tool {self.__class__.__name__} returned failed result: {msg}") return ToolResult(error=msg) class CLIResult(ToolResult): """A ToolResult that can be rendered as a CLI output.""" class ToolFailure(ToolResult): """A ToolResult that represents a failure."""
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/tool_collection.py
app/tool/tool_collection.py
"""Collection classes for managing multiple tools.""" from typing import Any, Dict, List from app.exceptions import ToolError from app.logger import logger from app.tool.base import BaseTool, ToolFailure, ToolResult class ToolCollection: """A collection of defined tools.""" class Config: arbitrary_types_allowed = True def __init__(self, *tools: BaseTool): self.tools = tools self.tool_map = {tool.name: tool for tool in tools} def __iter__(self): return iter(self.tools) def to_params(self) -> List[Dict[str, Any]]: return [tool.to_param() for tool in self.tools] async def execute( self, *, name: str, tool_input: Dict[str, Any] = None ) -> ToolResult: tool = self.tool_map.get(name) if not tool: return ToolFailure(error=f"Tool {name} is invalid") try: result = await tool(**tool_input) return result except ToolError as e: return ToolFailure(error=e.message) async def execute_all(self) -> List[ToolResult]: """Execute all tools in the collection sequentially.""" results = [] for tool in self.tools: try: result = await tool() results.append(result) except ToolError as e: results.append(ToolFailure(error=e.message)) return results def get_tool(self, name: str) -> BaseTool: return self.tool_map.get(name) def add_tool(self, tool: BaseTool): """Add a single tool to the collection. If a tool with the same name already exists, it will be skipped and a warning will be logged. """ if tool.name in self.tool_map: logger.warning(f"Tool {tool.name} already exists in collection, skipping") return self self.tools += (tool,) self.tool_map[tool.name] = tool return self def add_tools(self, *tools: BaseTool): """Add multiple tools to the collection. If any tool has a name conflict with an existing tool, it will be skipped and a warning will be logged. """ for tool in tools: self.add_tool(tool) return self
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/ask_human.py
app/tool/ask_human.py
from app.tool import BaseTool class AskHuman(BaseTool): """Add a tool to ask human for help.""" name: str = "ask_human" description: str = "Use this tool to ask human for help." parameters: str = { "type": "object", "properties": { "inquire": { "type": "string", "description": "The question you want to ask human.", } }, "required": ["inquire"], } async def execute(self, inquire: str) -> str: return input(f"""Bot: {inquire}\n\nYou: """).strip()
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/bash.py
app/tool/bash.py
import asyncio import os from typing import Optional from app.exceptions import ToolError from app.tool.base import BaseTool, CLIResult _BASH_DESCRIPTION = """Execute a bash command in the terminal. * Long running commands: For commands that may run indefinitely, it should be run in the background and the output should be redirected to a file, e.g. command = `python3 app.py > server.log 2>&1 &`. * Interactive: If a bash command returns exit code `-1`, this means the process is not yet finished. The assistant must then send a second call to terminal with an empty `command` (which will retrieve any additional logs), or it can send additional text (set `command` to the text) to STDIN of the running process, or it can send command=`ctrl+c` to interrupt the process. * Timeout: If a command execution result says "Command timed out. Sending SIGINT to the process", the assistant should retry running the command in the background. """ class _BashSession: """A session of a bash shell.""" _started: bool _process: asyncio.subprocess.Process command: str = "/bin/bash" _output_delay: float = 0.2 # seconds _timeout: float = 120.0 # seconds _sentinel: str = "<<exit>>" def __init__(self): self._started = False self._timed_out = False async def start(self): if self._started: return self._process = await asyncio.create_subprocess_shell( self.command, preexec_fn=os.setsid, shell=True, bufsize=0, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, ) self._started = True def stop(self): """Terminate the bash shell.""" if not self._started: raise ToolError("Session has not started.") if self._process.returncode is not None: return self._process.terminate() async def run(self, command: str): """Execute a command in the bash shell.""" if not self._started: raise ToolError("Session has not started.") if self._process.returncode is not None: return CLIResult( system="tool must be restarted", error=f"bash has exited with returncode 
{self._process.returncode}", ) if self._timed_out: raise ToolError( f"timed out: bash has not returned in {self._timeout} seconds and must be restarted", ) # we know these are not None because we created the process with PIPEs assert self._process.stdin assert self._process.stdout assert self._process.stderr # send command to the process self._process.stdin.write( command.encode() + f"; echo '{self._sentinel}'\n".encode() ) await self._process.stdin.drain() # read output from the process, until the sentinel is found try: async with asyncio.timeout(self._timeout): while True: await asyncio.sleep(self._output_delay) # if we read directly from stdout/stderr, it will wait forever for # EOF. use the StreamReader buffer directly instead. output = ( self._process.stdout._buffer.decode() ) # pyright: ignore[reportAttributeAccessIssue] if self._sentinel in output: # strip the sentinel and break output = output[: output.index(self._sentinel)] break except asyncio.TimeoutError: self._timed_out = True raise ToolError( f"timed out: bash has not returned in {self._timeout} seconds and must be restarted", ) from None if output.endswith("\n"): output = output[:-1] error = ( self._process.stderr._buffer.decode() ) # pyright: ignore[reportAttributeAccessIssue] if error.endswith("\n"): error = error[:-1] # clear the buffers so that the next output can be read correctly self._process.stdout._buffer.clear() # pyright: ignore[reportAttributeAccessIssue] self._process.stderr._buffer.clear() # pyright: ignore[reportAttributeAccessIssue] return CLIResult(output=output, error=error) class Bash(BaseTool): """A tool for executing bash commands""" name: str = "bash" description: str = _BASH_DESCRIPTION parameters: dict = { "type": "object", "properties": { "command": { "type": "string", "description": "The bash command to execute. Can be empty to view additional logs when previous exit code is `-1`. 
Can be `ctrl+c` to interrupt the currently running process.", }, }, "required": ["command"], } _session: Optional[_BashSession] = None async def execute( self, command: str | None = None, restart: bool = False, **kwargs ) -> CLIResult: if restart: if self._session: self._session.stop() self._session = _BashSession() await self._session.start() return CLIResult(system="tool has been restarted.") if self._session is None: self._session = _BashSession() await self._session.start() if command is not None: return await self._session.run(command) raise ToolError("no command provided.") if __name__ == "__main__": bash = Bash() rst = asyncio.run(bash.execute("ls -l")) print(rst)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/create_chat_completion.py
app/tool/create_chat_completion.py
from typing import Any, List, Optional, Type, Union, get_args, get_origin from pydantic import BaseModel, Field from app.tool import BaseTool class CreateChatCompletion(BaseTool): name: str = "create_chat_completion" description: str = ( "Creates a structured completion with specified output formatting." ) # Type mapping for JSON schema type_mapping: dict = { str: "string", int: "integer", float: "number", bool: "boolean", dict: "object", list: "array", } response_type: Optional[Type] = None required: List[str] = Field(default_factory=lambda: ["response"]) def __init__(self, response_type: Optional[Type] = str): """Initialize with a specific response type.""" super().__init__() self.response_type = response_type self.parameters = self._build_parameters() def _build_parameters(self) -> dict: """Build parameters schema based on response type.""" if self.response_type == str: return { "type": "object", "properties": { "response": { "type": "string", "description": "The response text that should be delivered to the user.", }, }, "required": self.required, } if isinstance(self.response_type, type) and issubclass( self.response_type, BaseModel ): schema = self.response_type.model_json_schema() return { "type": "object", "properties": schema["properties"], "required": schema.get("required", self.required), } return self._create_type_schema(self.response_type) def _create_type_schema(self, type_hint: Type) -> dict: """Create a JSON schema for the given type.""" origin = get_origin(type_hint) args = get_args(type_hint) # Handle primitive types if origin is None: return { "type": "object", "properties": { "response": { "type": self.type_mapping.get(type_hint, "string"), "description": f"Response of type {type_hint.__name__}", } }, "required": self.required, } # Handle List type if origin is list: item_type = args[0] if args else Any return { "type": "object", "properties": { "response": { "type": "array", "items": self._get_type_info(item_type), } }, "required": 
self.required, } # Handle Dict type if origin is dict: value_type = args[1] if len(args) > 1 else Any return { "type": "object", "properties": { "response": { "type": "object", "additionalProperties": self._get_type_info(value_type), } }, "required": self.required, } # Handle Union type if origin is Union: return self._create_union_schema(args) return self._build_parameters() def _get_type_info(self, type_hint: Type) -> dict: """Get type information for a single type.""" if isinstance(type_hint, type) and issubclass(type_hint, BaseModel): return type_hint.model_json_schema() return { "type": self.type_mapping.get(type_hint, "string"), "description": f"Value of type {getattr(type_hint, '__name__', 'any')}", } def _create_union_schema(self, types: tuple) -> dict: """Create schema for Union types.""" return { "type": "object", "properties": { "response": {"anyOf": [self._get_type_info(t) for t in types]} }, "required": self.required, } async def execute(self, required: list | None = None, **kwargs) -> Any: """Execute the chat completion with type conversion. Args: required: List of required field names or None **kwargs: Response data Returns: Converted response based on response_type """ required = required or self.required # Handle case when required is a list if isinstance(required, list) and len(required) > 0: if len(required) == 1: required_field = required[0] result = kwargs.get(required_field, "") else: # Return multiple fields as a dictionary return {field: kwargs.get(field, "") for field in required} else: required_field = "response" result = kwargs.get(required_field, "") # Type conversion logic if self.response_type == str: return result if isinstance(self.response_type, type) and issubclass( self.response_type, BaseModel ): return self.response_type(**kwargs) if get_origin(self.response_type) in (list, dict): return result # Assuming result is already in correct format try: return self.response_type(result) except (ValueError, TypeError): return result
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/chart_visualization/data_visualization.py
app/tool/chart_visualization/data_visualization.py
import asyncio
import json
import os
from typing import Any, Hashable

import pandas as pd
from pydantic import Field, model_validator

from app.config import config
from app.llm import LLM
from app.logger import logger
from app.tool.base import BaseTool


class DataVisualization(BaseTool):
    """Render statistical charts (png/html) and optionally attach insights,
    driven by the JSON metadata produced by the ``visualization_preparation``
    tool. Rendering itself is delegated to a Node.js VMind subprocess."""

    name: str = "data_visualization"
    description: str = """Visualize statistical chart or Add insights in chart with JSON info from visualization_preparation tool. You can do steps as follows: 1. Visualize statistical chart 2. Choose insights into chart based on step 1 (Optional) Outputs: 1. Charts (png/html) 2. Charts Insights (.md)(Optional)"""
    parameters: dict = {
        "type": "object",
        "properties": {
            "json_path": {
                "type": "string",
                "description": """file path of json info with ".json" in the end""",
            },
            "output_type": {
                "description": "Rendering format (html=interactive)",
                "type": "string",
                "default": "html",
                "enum": ["png", "html"],
            },
            "tool_type": {
                "description": "visualize chart or add insights",
                "type": "string",
                "default": "visualization",
                "enum": ["visualization", "insight"],
            },
            "language": {
                "description": "english(en) / chinese(zh)",
                "type": "string",
                "default": "en",
                "enum": ["zh", "en"],
            },
        },
        # Fix: the schema previously required a "code" property that does not
        # exist in `properties`; "json_path" is the only mandatory argument.
        "required": ["json_path"],
    }
    llm: LLM = Field(default_factory=LLM, description="Language model instance")

    @model_validator(mode="after")
    def initialize_llm(self):
        """Initialize llm with default settings if not provided."""
        if self.llm is None or not isinstance(self.llm, LLM):
            self.llm = LLM(config_name=self.name.lower())
        return self

    def get_file_path(
        self,
        json_info: list[dict[str, str]],
        path_str: str,
        directory: str | None = None,
    ) -> list[str]:
        """Resolve the path stored under ``path_str`` in every ``json_info``
        entry: try it as-is first, then relative to ``directory`` (default:
        the workspace root).

        Raises:
            Exception: when an entry's path cannot be resolved either way.
        """
        base_dir = f"{directory or config.workspace_root}"
        res = []
        for item in json_info:
            candidate = item[path_str]
            if os.path.exists(candidate):
                res.append(candidate)
            elif os.path.exists(os.path.join(base_dir, candidate)):
                res.append(os.path.join(base_dir, candidate))
            else:
                raise Exception(f"No such file or directory: {candidate}")
        return res

    def success_output_template(self, result: list[dict[str, str]]) -> str:
        """Format successfully generated charts (and optional insight
        markdown) into a human-readable summary string."""
        if len(result) == 0:
            return "Is EMPTY!"
        content = ""
        for item in result:
            content += f"""## {item['title']}\nChart saved in: {item['chart_path']}"""
            if "insight_path" in item and item["insight_path"] and "insight_md" in item:
                content += "\n" + item["insight_md"]
            else:
                content += "\n"
        return f"Chart Generated Successful!\n{content}"

    async def data_visualization(
        self, json_info: list[dict[str, str]], output_type: str, language: str
    ) -> dict:
        """Generate one chart per ``json_info`` entry via the VMind subprocess.

        Args:
            json_info: entries shaped {"csvFilePath": str, "chartTitle": str}.
            output_type: "png" or "html".
            language: "en" or "zh".

        Returns:
            {"observation": str} plus {"success": False} when any chart failed.
        """
        data_list = []
        csv_file_path = self.get_file_path(json_info, "csvFilePath")
        for index, item in enumerate(json_info):
            df = pd.read_csv(csv_file_path[index], encoding="utf-8")
            # Cast to object and replace NaN with None so the JSON payload
            # handed to the Node.js side carries proper nulls.
            df = df.astype(object)
            df = df.where(pd.notnull(df), None)
            data_dict_list = df.to_json(orient="records", force_ascii=False)
            data_list.append(
                {
                    "file_name": os.path.basename(csv_file_path[index]).replace(
                        ".csv", ""
                    ),
                    "dict_data": data_dict_list,
                    "chartTitle": item["chartTitle"],
                }
            )
        tasks = [
            self.invoke_vmind(
                dict_data=item["dict_data"],
                chart_description=item["chartTitle"],
                file_name=item["file_name"],
                output_type=output_type,
                task_type="visualization",
                language=language,
            )
            for item in data_list
        ]
        results = await asyncio.gather(*tasks)
        error_list = []
        success_list = []
        for index, result in enumerate(results):
            csv_path = csv_file_path[index]
            if "error" in result and "chart_path" not in result:
                error_list.append(f"Error in {csv_path}: {result['error']}")
            else:
                success_list.append(
                    {
                        **result,
                        "title": json_info[index]["chartTitle"],
                    }
                )
        if len(error_list) > 0:
            # Fix: join outside the f-string — backslashes are not allowed in
            # f-string expressions before Python 3.12 — and separate the
            # header from the first error with a newline.
            errors = "\n".join(error_list)
            return {
                "observation": f"# Error chart generated\n{errors}\n{self.success_output_template(success_list)}",
                "success": False,
            }
        return {"observation": f"{self.success_output_template(success_list)}"}

    async def add_insighs(
        self, json_info: list[dict[str, str]], output_type: str
    ) -> dict:
        """Re-render existing charts with the selected insights attached.

        NOTE: the method name keeps its historical typo ("insighs") so the
        public interface stays unchanged for callers.

        Args:
            json_info: entries shaped {"chartPath": str, "insights_id": [int]}.
            output_type: extension of the existing charts ("png" or "html").
        """
        chart_file_path = self.get_file_path(
            json_info, "chartPath", os.path.join(config.workspace_root, "visualization")
        )
        data_list = []
        for index, item in enumerate(json_info):
            if "insights_id" in item:
                data_list.append(
                    {
                        "file_name": os.path.basename(chart_file_path[index]).replace(
                            f".{output_type}", ""
                        ),
                        "insights_id": item["insights_id"],
                        # Fix: carry the resolved path with the task payload so
                        # results stay aligned with their charts even when some
                        # json_info entries have no "insights_id" (previously
                        # results were indexed against the unfiltered path list).
                        "chart_path": chart_file_path[index],
                    }
                )
        tasks = [
            self.invoke_vmind(
                insights_id=item["insights_id"],
                file_name=item["file_name"],
                output_type=output_type,
                task_type="insight",
            )
            for item in data_list
        ]
        results = await asyncio.gather(*tasks)
        error_list = []
        success_list = []
        for index, result in enumerate(results):
            chart_path = data_list[index]["chart_path"]
            if "error" in result and "chart_path" not in result:
                error_list.append(f"Error in {chart_path}: {result['error']}")
            else:
                success_list.append(chart_path)
        success_template = (
            f"# Charts Update with Insights\n{','.join(success_list)}"
            if len(success_list) > 0
            else ""
        )
        if len(error_list) > 0:
            # Fix: join outside the f-string (pre-3.12 backslash restriction).
            errors = "\n".join(error_list)
            return {
                "observation": f"# Error in chart insights:\n{errors}\n{success_template}",
                "success": False,
            }
        return {"observation": f"{success_template}"}

    async def execute(
        self,
        json_path: str,
        output_type: str | None = "html",
        tool_type: str | None = "visualization",
        language: str | None = "en",
    ) -> dict:
        """Tool entry point: load the metadata JSON and dispatch to either
        chart generation or insight attachment.

        Returns:
            {"observation": str}, plus {"success": False} on any failure.
        """
        try:
            logger.info(f"📈 data_visualization with {json_path} in: {tool_type} ")
            with open(json_path, "r", encoding="utf-8") as file:
                json_info = json.load(file)
            if tool_type == "visualization":
                return await self.data_visualization(json_info, output_type, language)
            return await self.add_insighs(json_info, output_type)
        except Exception as e:
            return {
                "observation": f"Error: {e}",
                "success": False,
            }

    async def invoke_vmind(
        self,
        file_name: str,
        output_type: str,
        task_type: str,
        insights_id: list[str] | None = None,
        dict_data: list[dict[Hashable, Any]] | None = None,
        chart_description: str | None = None,
        language: str = "en",
    ) -> dict:
        """Run the Node.js VMind renderer (src/chartVisualize.ts) as an async
        subprocess, passing the request as JSON on stdin.

        Returns:
            The parsed JSON result on success, otherwise {"error": str}.
        """
        llm_config = {
            "base_url": self.llm.base_url,
            "model": self.llm.model,
            "api_key": self.llm.api_key,
        }
        vmind_params = {
            "llm_config": llm_config,
            "user_prompt": chart_description,
            "dataset": dict_data,
            "file_name": file_name,
            "output_type": output_type,
            "insights_id": insights_id,
            "task_type": task_type,
            "directory": str(config.workspace_root),
            "language": language,
        }
        # Build the async subprocess with cwd next to this file so the TS
        # entry point and its relative paths resolve correctly.
        process = await asyncio.create_subprocess_exec(
            "npx",
            "ts-node",
            "src/chartVisualize.ts",
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=os.path.dirname(__file__),
        )
        input_json = json.dumps(vmind_params, ensure_ascii=False).encode("utf-8")
        try:
            stdout, stderr = await process.communicate(input_json)
            stdout_str = stdout.decode("utf-8")
            stderr_str = stderr.decode("utf-8")
            if process.returncode == 0:
                return json.loads(stdout_str)
            return {"error": f"Node.js Error: {stderr_str}"}
        except Exception as e:
            return {"error": f"Subprocess Error: {str(e)}"}
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/chart_visualization/python_execute.py
app/tool/chart_visualization/python_execute.py
from app.config import config
from app.tool.python_execute import PythonExecute


class NormalPythonExecute(PythonExecute):
    """A tool for executing Python code with timeout and safety restrictions."""

    name: str = "python_execute"
    description: str = """Execute Python code for in-depth data analysis / data report(task conclusion) / other normal task without direct visualization."""
    # JSON schema shown to the LLM. Fixes two typos in the prompt text:
    # "worksapce" -> "workspace", "invode" -> "invoke".
    parameters: dict = {
        "type": "object",
        "properties": {
            "code_type": {
                "description": "code type, data process / data report / others",
                "type": "string",
                "default": "process",
                "enum": ["process", "report", "others"],
            },
            "code": {
                "type": "string",
                "description": """Python code to execute. # Note 1. The code should generate a comprehensive text-based report containing dataset overview, column details, basic statistics, derived metrics, timeseries comparisons, outliers, and key insights. 2. Use print() for all outputs so the analysis (including sections like 'Dataset Overview' or 'Preprocessing Results') is clearly visible and save it also 3. Save any report / processed files / each analysis result in workspace directory: {directory} 4. Data reports need to be content-rich, including your overall analysis process and corresponding data visualization. 5. You can invoke this tool step-by-step to do data analysis from summary to in-depth with data report saved also""".format(
                    directory=config.workspace_root
                ),
            },
        },
        "required": ["code"],
    }

    async def execute(self, code: str, code_type: str | None = None, timeout=5):
        # code_type only steers the schema/prompt; execution is identical to
        # the parent tool, which receives the code and the timeout.
        return await super().execute(code, timeout)
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/chart_visualization/__init__.py
app/tool/chart_visualization/__init__.py
"""Public exports of the chart_visualization tool package."""

from app.tool.chart_visualization.chart_prepare import VisualizationPrepare
from app.tool.chart_visualization.data_visualization import DataVisualization
from app.tool.chart_visualization.python_execute import NormalPythonExecute

__all__ = ["DataVisualization", "VisualizationPrepare", "NormalPythonExecute"]
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/chart_visualization/chart_prepare.py
app/tool/chart_visualization/chart_prepare.py
from app.tool.chart_visualization.python_execute import NormalPythonExecute


class VisualizationPrepare(NormalPythonExecute):
    """A tool for Chart Generation Preparation"""

    name: str = "visualization_preparation"
    # Fix: "to generates" -> "to generate" in the tool description.
    description: str = "Using Python code to generate metadata of data_visualization tool. Outputs: 1) JSON Information. 2) Cleaned CSV data files (Optional)."
    # JSON schema shown to the LLM. Fixes prompt typos:
    # "visulazation" -> "visualize", "esay" -> "easy".
    parameters: dict = {
        "type": "object",
        "properties": {
            "code_type": {
                "description": "code type, visualization: csv -> chart; insight: choose insight into chart",
                "type": "string",
                "default": "visualization",
                "enum": ["visualization", "insight"],
            },
            "code": {
                "type": "string",
                "description": """Python code for data_visualization prepare. ## Visualization Type 1. Data loading logic 2. Csv Data and chart description generate 2.1 Csv data (The data you want to visualize, cleaning / transform from origin data, saved in .csv) 2.2 Chart description of csv data (The chart title or description should be concise and clear. Examples: 'Product sales distribution', 'Monthly revenue trend'.) 3. Save information in json file.( format: {"csvFilePath": string, "chartTitle": string}[]) ## Insight Type 1. Select the insights from the data_visualization results that you want to add to the chart. 2. Save information in json file.( format: {"chartPath": string, "insights_id": number[]}[]) # Note 1. You can generate one or multiple csv data with different visualization needs. 2. Make each chart data easy, clean and different. 3. Json file saving in utf-8 with path print: print(json_path) """,
            },
        },
        "required": ["code", "code_type"],
    }
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/chart_visualization/test/report_demo.py
app/tool/chart_visualization/test/report_demo.py
import asyncio

from app.agent.data_analysis import DataAnalysis

# from app.agent.manus import Manus


# Fixed demo request: produce an HTML data report from the inline table.
REPORT_REQUEST = """Requirement: 1. Analyze the following data and generate a graphical data report in HTML format. The final product should be a data report. Data: Month | Team A | Team B | Team C January | 1200 hours | 1350 hours | 1100 hours February | 1250 hours | 1400 hours | 1150 hours March | 1180 hours | 1300 hours | 1300 hours April | 1220 hours | 1280 hours | 1400 hours May | 1230 hours | 1320 hours | 1450 hours June | 1200 hours | 1250 hours | 1500 hours """


async def main():
    """Run the data-analysis agent once on the demo reporting request."""
    analysis_agent = DataAnalysis()
    # analysis_agent = Manus()  # alternative agent, kept for manual testing
    await analysis_agent.run(REPORT_REQUEST)


if __name__ == "__main__":
    asyncio.run(main())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/chart_visualization/test/chart_demo.py
app/tool/chart_visualization/test/chart_demo.py
"""Demo script: drive the DataAnalysis agent through a battery of chart tasks,
one per common chart type (bar, pie, line, word cloud, scatter, funnel,
grouped bar, radar, sankey)."""

import asyncio

from app.agent.data_analysis import DataAnalysis
from app.logger import logger

# Instruction prepended to every task prompt below.
prefix = "Help me generate charts and save them locally, specifically:"

# Each task pairs a natural-language chart request with an inline dataset.
# NOTE(review): the datasets read here as space-separated CSV rows; presumably
# they were newline-separated in the original file — confirm upstream.
tasks = [
    {
        "prompt": "Help me show the sales of different products in different regions",
        "data": """Product Name,Region,Sales Coke,South,2350 Coke,East,1027 Coke,West,1027 Coke,North,1027 Sprite,South,215 Sprite,East,654 Sprite,West,159 Sprite,North,28 Fanta,South,345 Fanta,East,654 Fanta,West,2100 Fanta,North,1679 Xingmu,South,1476 Xingmu,East,830 Xingmu,West,532 Xingmu,North,498 """,
    },
    {
        "prompt": "Show market share of each brand",
        "data": """Brand Name,Market Share,Average Price,Net Profit Apple,0.5,7068,314531 Samsung,0.2,6059,362345 Vivo,0.05,3406,234512 Nokia,0.01,1064,-1345 Xiaomi,0.1,4087,131345""",
    },
    {
        "prompt": "Please help me show the sales trend of each product",
        "data": """Date,Type,Value 2023-01-01,Product A,52.9 2023-01-01,Product B,63.6 2023-01-01,Product C,11.2 2023-01-02,Product A,45.7 2023-01-02,Product B,89.1 2023-01-02,Product C,21.4 2023-01-03,Product A,67.2 2023-01-03,Product B,82.4 2023-01-03,Product C,31.7 2023-01-04,Product A,80.7 2023-01-04,Product B,55.1 2023-01-04,Product C,21.1 2023-01-05,Product A,65.6 2023-01-05,Product B,78 2023-01-05,Product C,31.3 2023-01-06,Product A,75.6 2023-01-06,Product B,89.1 2023-01-06,Product C,63.5 2023-01-07,Product A,67.3 2023-01-07,Product B,77.2 2023-01-07,Product C,43.7 2023-01-08,Product A,96.1 2023-01-08,Product B,97.6 2023-01-08,Product C,59.9 2023-01-09,Product A,96.1 2023-01-09,Product B,100.6 2023-01-09,Product C,66.8 2023-01-10,Product A,101.6 2023-01-10,Product B,108.3 2023-01-10,Product C,56.9""",
    },
    {
        "prompt": "Show the popularity of search keywords",
        "data": """Keyword,Popularity Hot Word,1000 Zao Le Wo Men,800 Rao Jian Huo,400 My Wish is World Peace,400 Xiu Xiu Xiu,400 Shenzhou 11,400 Hundred Birds Facing the Wind,400 China Women's Volleyball Team,400 My Guan Na,400 Leg Dong,400 Hot Pot Hero,400 Baby's Heart is Bitter,400 Olympics,400 Awesome My Brother,400 Poetry and Distance,400 Song Joong-ki,400 PPAP,400 Blue Thin Mushroom,400 Rain Dew Evenly,400 Friendship's Little Boat Says It Flips,400 Beijing Slump,400 Dedication,200 Apple,200 Dog Belt,200 Old Driver,200 Melon-Eating Crowd,200 Zootopia,200 City Will Play,200 Routine,200 Water Reverse,200 Why Don't You Go to Heaven,200 Snake Spirit Man,200 Why Don't You Go to Heaven,200 Samsung Explosion Gate,200 Little Li Oscar,200 Ugly People Need to Read More,200 Boyfriend Power,200 A Face of Confusion,200 Descendants of the Sun,200""",
    },
    {
        "prompt": "Help me compare the performance of different electric vehicle brands using a scatter plot",
        "data": """Range,Charging Time,Brand Name,Average Price 2904,46,Brand1,2350 1231,146,Brand2,1027 5675,324,Brand3,1242 543,57,Brand4,6754 326,234,Brand5,215 1124,67,Brand6,654 3426,81,Brand7,159 2134,24,Brand8,28 1234,52,Brand9,345 2345,27,Brand10,654 526,145,Brand11,2100 234,93,Brand12,1679 567,94,Brand13,1476 789,45,Brand14,830 469,75,Brand15,532 5689,54,Brand16,498 """,
    },
    {
        "prompt": "Show conversion rates for each process",
        "data": """Process,Conversion Rate,Month Step1,100,1 Step2,80,1 Step3,60,1 Step4,40,1""",
    },
    {
        "prompt": "Show the difference in breakfast consumption between men and women",
        "data": """Day,Men-Breakfast,Women-Breakfast Monday,15,22 Tuesday,12,10 Wednesday,15,20 Thursday,10,12 Friday,13,15 Saturday,10,15 Sunday,12,14""",
    },
    {
        "prompt": "Help me show this person's performance in different aspects, is he a hexagonal warrior",
        "data": """dimension,performance Strength,5 Speed,5 Shooting,3 Endurance,5 Precision,5 Growth,5""",
    },
    {
        "prompt": "Show data flow",
        "data": """Origin,Destination,value Node A,Node 1,10 Node A,Node 2,5 Node B,Node 2,8 Node B,Node 3,2 Node C,Node 2,4 Node A,Node C,2 Node C,Node 1,2""",
    },
]


async def main():
    """Run every demo task sequentially, with a fresh agent per task."""
    for index, item in enumerate(tasks):
        logger.info(f"Begin task {index} / {len(tasks)}!")
        # A new agent per task keeps runs independent of each other.
        agent = DataAnalysis()
        await agent.run(
            f"{prefix},chart_description:{item['prompt']},Data:{item['data']}"
        )
        logger.info(f"Finish with {item['prompt']}")


if __name__ == "__main__":
    asyncio.run(main())
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false
FoundationAgents/OpenManus
https://github.com/FoundationAgents/OpenManus/blob/52a13f2a57d8c7f6737eefb02ccf569594d44273/app/tool/search/duckduckgo_search.py
app/tool/search/duckduckgo_search.py
from typing import List

from duckduckgo_search import DDGS

from app.tool.search.base import SearchItem, WebSearchEngine


class DuckDuckGoSearchEngine(WebSearchEngine):
    def perform_search(
        self, query: str, num_results: int = 10, *args, **kwargs
    ) -> List[SearchItem]:
        """
        DuckDuckGo search engine.

        Returns results formatted according to SearchItem model.
        """
        raw_results = DDGS().text(query, max_results=num_results)
        return [
            self._normalize_entry(entry, position)
            for position, entry in enumerate(raw_results)
        ]

    @staticmethod
    def _normalize_entry(entry, position) -> SearchItem:
        # Convert one raw DDG entry (str URL, dict, or arbitrary object)
        # into a SearchItem, falling back to a generic title when absent.
        fallback_title = f"DuckDuckGo Result {position + 1}"
        if isinstance(entry, str):
            # Bare URL result.
            return SearchItem(title=fallback_title, url=entry, description=None)
        if isinstance(entry, dict):
            # Dictionary result: pull the standard DDG fields.
            return SearchItem(
                title=entry.get("title", fallback_title),
                url=entry.get("href", ""),
                description=entry.get("body", None),
            )
        try:
            # Unknown object: try attribute access.
            return SearchItem(
                title=getattr(entry, "title", fallback_title),
                url=getattr(entry, "href", ""),
                description=getattr(entry, "body", None),
            )
        except Exception:
            # Last-resort fallback: stringify whatever we got.
            return SearchItem(
                title=fallback_title,
                url=str(entry),
                description=None,
            )
python
MIT
52a13f2a57d8c7f6737eefb02ccf569594d44273
2026-01-04T14:39:27.873507Z
false