repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/boto.py | scrapy/utils/boto.py | """Boto/botocore helpers"""
def is_botocore_available() -> bool:
    """Return True if the ``botocore`` package can be imported."""
    try:
        import botocore  # noqa: F401,PLC0415
    except ImportError:
        return False
    return True
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/misc.py | scrapy/utils/misc.py | """Helper functions which don't fit anywhere else"""
from __future__ import annotations
import ast
import hashlib
import inspect
import os
import re
import warnings
from collections import deque
from contextlib import contextmanager
from functools import partial
from importlib import import_module
from pkgutil import iter_modules
from typing import IO, TYPE_CHECKING, Any, TypeVar, cast
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.item import Item
from scrapy.utils.datatypes import LocalWeakReferencedCache
if TYPE_CHECKING:
from collections.abc import Callable, Iterable, Iterator
from types import ModuleType
from scrapy import Spider
from scrapy.crawler import Crawler
_ITERABLE_SINGLE_VALUES = dict, Item, str, bytes
T = TypeVar("T")
def arg_to_iter(arg: Any) -> Iterable[Any]:
    """Convert an argument to an iterable. The argument can be a None, single
    value, or an iterable.

    Exception: if arg is a dict, [arg] will be returned
    """
    if arg is None:
        return []
    treat_as_single = isinstance(arg, _ITERABLE_SINGLE_VALUES) or not hasattr(
        arg, "__iter__"
    )
    return [arg] if treat_as_single else cast("Iterable[Any]", arg)
def load_object(path: str | Callable[..., Any]) -> Any:
    """Load an object given its absolute object path, and return it.

    The object can be the import path of a class, function, variable or an
    instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'.

    If ``path`` is not a string, but is a callable object, such as a class or
    a function, then return it as is.

    Raises:
        TypeError: if *path* is neither a string nor a callable.
        ValueError: if *path* is a string without a dot (not a full path).
        NameError: if the module does not define an object with that name.
    """
    if not isinstance(path, str):
        if callable(path):
            return path
        raise TypeError(
            f"Unexpected argument type, expected string or object, got: {type(path)}"
        )
    try:
        dot = path.rindex(".")
    except ValueError:
        # The ValueError raised by rindex() adds no information; suppress
        # exception chaining so only the clearer message is shown.
        raise ValueError(f"Error loading object '{path}': not a full path") from None
    module, name = path[:dot], path[dot + 1 :]
    mod = import_module(module)
    try:
        obj = getattr(mod, name)
    except AttributeError:
        # Same rationale: the NameError message already states exactly what
        # went wrong, so the AttributeError context is just noise.
        raise NameError(
            f"Module '{module}' doesn't define any object named '{name}'"
        ) from None
    return obj
def walk_modules(path: str) -> list[ModuleType]:
    """Loads a module and all its submodules from the given module path and
    returns them. If *any* module throws an exception while importing, that
    exception is thrown back.

    For example: walk_modules('scrapy.utils')
    """
    root = import_module(path)
    collected: list[ModuleType] = [root]
    # Only packages expose __path__; plain modules have no submodules.
    if hasattr(root, "__path__"):
        for _, subname, is_pkg in iter_modules(root.__path__):
            qualified = f"{path}.{subname}"
            if is_pkg:
                collected.extend(walk_modules(qualified))
            else:
                collected.append(import_module(qualified))
    return collected
def md5sum(file: IO[bytes]) -> str:
    """Calculate the md5 checksum of a file-like object without reading its
    whole content in memory.

    >>> from io import BytesIO
    >>> md5sum(BytesIO(b'file content to hash'))
    '784406af91dd5a54fbb9c84c2236595a'
    """
    warnings.warn(
        (
            "The scrapy.utils.misc.md5sum function is deprecated and will be "
            "removed in a future version of Scrapy."
        ),
        ScrapyDeprecationWarning,
        stacklevel=2,
    )
    digest = hashlib.md5()  # noqa: S324
    # Stream the file in fixed-size chunks to keep memory usage flat.
    while chunk := file.read(8096):
        digest.update(chunk)
    return digest.hexdigest()
def rel_has_nofollow(rel: str | None) -> bool:
    """Return True if link rel attribute has nofollow type"""
    if rel is None:
        return False
    # rel values may be separated by commas and/or whitespace.
    return "nofollow" in rel.replace(",", " ").split()
def build_from_crawler(
    objcls: type[T], crawler: Crawler, /, *args: Any, **kwargs: Any
) -> T:
    """Construct a class instance using its ``from_crawler()`` or ``__init__()`` constructor.

    .. versionadded:: 2.12

    ``*args`` and ``**kwargs`` are forwarded to the constructor.

    Raises ``TypeError`` if the resulting instance is ``None``.
    """
    if hasattr(objcls, "from_crawler"):
        method_name = "from_crawler"
        instance = objcls.from_crawler(crawler, *args, **kwargs)  # type: ignore[attr-defined]
    else:
        method_name = "__new__"
        instance = objcls(*args, **kwargs)
    if instance is None:
        raise TypeError(f"{objcls.__qualname__}.{method_name} returned None")
    return cast("T", instance)
@contextmanager
def set_environ(**kwargs: str) -> Iterator[None]:
    """Temporarily set environment variables inside the context manager and
    fully restore previous environment afterwards
    """
    # Remember the previous value (None means the variable was absent).
    saved = {name: os.environ.get(name) for name in kwargs}
    os.environ.update(kwargs)
    try:
        yield
    finally:
        for name, previous in saved.items():
            if previous is None:
                del os.environ[name]
            else:
                os.environ[name] = previous
def walk_callable(node: ast.AST) -> Iterable[ast.AST]:
    """Similar to ``ast.walk``, but walks only function body and skips nested
    functions defined within the node.
    """
    pending: deque[ast.AST] = deque((node,))
    seen_func_def = False
    while pending:
        current = pending.popleft()
        if isinstance(current, ast.FunctionDef):
            if seen_func_def:
                # Nested function definition: neither yielded nor descended into.
                continue
            seen_func_def = True
        pending.extend(ast.iter_child_nodes(current))
        yield current
_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
def is_generator_with_return_value(callable: Callable[..., Any]) -> bool:  # noqa: A002
    """
    Returns True if a callable is a generator function which includes a
    'return' statement with a value different than None, False otherwise
    """
    # Memoized per callable; the cache holds weak references so it does not
    # keep the callables alive.
    if callable in _generator_callbacks_cache:
        return bool(_generator_callbacks_cache[callable])

    def returns_none(return_node: ast.Return) -> bool:
        # A bare ``return`` has value None; ``return None`` is a Constant
        # node whose value is None. Both count as "no return value".
        value = return_node.value
        return value is None or (
            isinstance(value, ast.Constant) and value.value is None
        )

    if inspect.isgeneratorfunction(callable):
        # Unwrap functools.partial chains to reach the function whose
        # source code can actually be retrieved.
        func = callable
        while isinstance(func, partial):
            func = func.func
        src = inspect.getsource(func)
        # Method sources come back indented; strip the first line's indent
        # and the shared indent of subsequent lines so ast.parse() accepts it.
        pattern = re.compile(r"(^[\t ]+)")
        code = pattern.sub("", src)
        match = pattern.match(src)  # finds indentation
        if match:
            code = re.sub(f"\n{match.group(0)}", "\n", code)  # remove indentation
        tree = ast.parse(code)
        # Only the outermost function body is walked: returns inside nested
        # functions do not affect the generator itself.
        for node in walk_callable(tree):
            if isinstance(node, ast.Return) and not returns_none(node):
                _generator_callbacks_cache[callable] = True
                return bool(_generator_callbacks_cache[callable])
    # Either not a generator function, or no non-None return was found.
    _generator_callbacks_cache[callable] = False
    return bool(_generator_callbacks_cache[callable])
def warn_on_generator_with_return_value(
    spider: Spider,
    callable: Callable[..., Any],  # noqa: A002
) -> None:
    """
    Logs a warning if a callable is a generator function and includes
    a 'return' statement with a value different than None
    """
    if not spider.settings.getbool("WARN_ON_GENERATOR_RETURN_VALUE"):
        return
    try:
        has_return_value = is_generator_with_return_value(callable)
    except IndentationError:
        # The source of the callable could not be parsed (e.g. oddly
        # indented snippets); tell the user the check was skipped.
        callable_name = spider.__class__.__name__ + "." + callable.__name__
        warnings.warn(
            f'Unable to determine whether or not "{callable_name}" is a generator with a return value. '
            "This will not prevent your code from working, but it prevents Scrapy from detecting "
            f'potential issues in your implementation of "{callable_name}". Please, report this in the '
            "Scrapy issue tracker (https://github.com/scrapy/scrapy/issues), "
            f'including the code of "{callable_name}"',
            stacklevel=2,
        )
        return
    if has_return_value:
        warnings.warn(
            f'The "{spider.__class__.__name__}.{callable.__name__}" method is '
            'a generator and includes a "return" statement with a value '
            "different than None. This could lead to unexpected behaviour. Please see "
            "https://docs.python.org/3/reference/simple_stmts.html#the-return-statement "
            'for details about the semantics of the "return" statement within generators',
            stacklevel=2,
        )
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/display.py | scrapy/utils/display.py | """
pprint and pformat wrappers with colorization support
"""
import ctypes
import platform
import sys
from pprint import pformat as pformat_
from typing import Any
from packaging.version import Version as parse_version
def _enable_windows_terminal_processing() -> bool:
    """Enable ANSI escape sequence handling on the Windows console.

    https://stackoverflow.com/a/36760881
    """
    kernel32 = ctypes.windll.kernel32  # type: ignore[attr-defined]
    stdout_handle = kernel32.GetStdHandle(-11)
    return bool(kernel32.SetConsoleMode(stdout_handle, 7))
def _tty_supports_color() -> bool:
    """Return True if the attached terminal can render ANSI colors."""
    if sys.platform != "win32":
        return True
    if parse_version(platform.version()) >= parse_version("10.0.14393"):
        # Windows >= 10.0.14393 interprets ANSI escape sequences providing
        # terminal processing is enabled.
        return _enable_windows_terminal_processing()
    return True
def _colorize(text: str, colorize: bool = True) -> str:
# pylint: disable=no-name-in-module
if not colorize or not sys.stdout.isatty() or not _tty_supports_color():
return text
try:
from pygments import highlight # noqa: PLC0415
except ImportError:
return text
from pygments.formatters import TerminalFormatter # noqa: PLC0415
from pygments.lexers import PythonLexer # noqa: PLC0415
return highlight(text, PythonLexer(), TerminalFormatter())
def pformat(obj: Any, *args: Any, **kwargs: Any) -> str:
    """Pretty-format *obj*, colorized unless ``colorize=False`` is passed."""
    colorize = kwargs.pop("colorize", True)
    return _colorize(pformat_(obj), colorize)
def pprint(obj: Any, *args: Any, **kwargs: Any) -> None:
    """Print the (optionally colorized) pretty-format of *obj* to stdout."""
    formatted = pformat(obj, *args, **kwargs)
    print(formatted)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/testsite.py | scrapy/utils/testsite.py | import warnings
from urllib.parse import urljoin
from twisted.web import resource, server, static, util
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn(
"The scrapy.utils.testsite module is deprecated.",
ScrapyDeprecationWarning,
)
class SiteTest:
    """Mixin that serves :func:`test_site` on a local port for each test."""

    def setUp(self):
        from twisted.internet import reactor

        super().setUp()
        # Port 0 lets the OS pick a free port; read it back for the base URL.
        self.site = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
        port = self.site.getHost().port
        self.baseurl = f"http://localhost:{port}/"

    def tearDown(self):
        super().tearDown()
        self.site.stopListening()

    def url(self, path: str) -> str:
        """Resolve *path* against the test site's base URL."""
        return urljoin(self.baseurl, path)
class NoMetaRefreshRedirect(util.Redirect):
    """Redirect resource whose HTML body cannot trigger a meta refresh."""

    def render(self, request: server.Request) -> bytes:
        body = util.Redirect.render(self, request)
        # Neutralize the http-equiv attribute so only the HTTP-level
        # redirect (not the HTML meta refresh) can be followed.
        return body.replace(
            b'http-equiv="refresh"', b'http-no-equiv="do-not-refresh-me"'
        )
def test_site():
    """Build a Twisted Site exposing the small static pages used by tests."""
    root = resource.Resource()
    children = {
        b"text": static.Data(b"Works", "text/plain"),
        b"html": static.Data(
            b"<body><p class='one'>Works</p><p class='two'>World</p></body>",
            "text/html",
        ),
        b"enc-gb18030": static.Data(
            b"<p>gb18030 encoding</p>", "text/html; charset=gb18030"
        ),
        b"redirect": util.Redirect(b"/redirected"),
        b"redirect-no-meta-refresh": NoMetaRefreshRedirect(b"/redirected"),
        b"redirected": static.Data(b"Redirected here", "text/plain"),
    }
    for name, child in children.items():
        root.putChild(name, child)
    return server.Site(root)
if __name__ == "__main__":
    # Run the test site standalone and print its URL for manual inspection.
    from twisted.internet import reactor  # pylint: disable=ungrouped-imports

    port = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
    print(f"http://localhost:{port.getHost().port}/")
    reactor.run()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/__init__.py | scrapy/utils/__init__.py | python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false | |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/project.py | scrapy/utils/project.py | from __future__ import annotations
import os
import warnings
from importlib import import_module
from pathlib import Path
from scrapy.exceptions import NotConfigured
from scrapy.settings import Settings
from scrapy.utils.conf import closest_scrapy_cfg, get_config, init_env
ENVVAR = "SCRAPY_SETTINGS_MODULE"
DATADIR_CFG_SECTION = "datadir"
def inside_project() -> bool:
    """Return True when running inside a Scrapy project.

    Either the module named by SCRAPY_SETTINGS_MODULE is importable, or a
    scrapy.cfg file is found in the current directory or one of its parents.
    """
    module_name = os.environ.get(ENVVAR)
    if not module_name:
        return bool(closest_scrapy_cfg())
    try:
        import_module(module_name)
    except ImportError as exc:
        warnings.warn(f"Cannot import scrapy settings module {module_name}: {exc}")
        return bool(closest_scrapy_cfg())
    return True
def project_data_dir(project: str = "default") -> str:
    """Return the current project data dir, creating it if it doesn't exist"""
    if not inside_project():
        raise NotConfigured("Not inside a project")
    cfg = get_config()
    if cfg.has_option(DATADIR_CFG_SECTION, project):
        # An explicit [datadir] entry in the config wins.
        data_dir = Path(cfg.get(DATADIR_CFG_SECTION, project))
    else:
        # Otherwise infer <project root>/.scrapy from the scrapy.cfg location.
        scrapy_cfg = closest_scrapy_cfg()
        if not scrapy_cfg:
            raise NotConfigured(
                "Unable to find scrapy.cfg file to infer project data dir"
            )
        data_dir = (Path(scrapy_cfg).parent / ".scrapy").resolve()
    if not data_dir.exists():
        data_dir.mkdir(parents=True)
    return str(data_dir)
def data_path(path: str | os.PathLike[str], createdir: bool = False) -> str:
    """
    Return the given path joined with the .scrapy data directory.
    If given an absolute path, return it unmodified.
    """
    resolved = Path(path)
    if not resolved.is_absolute():
        # Inside a project the data dir is the project's; otherwise a local
        # ".scrapy" directory is used.
        base = project_data_dir() if inside_project() else ".scrapy"
        resolved = Path(base, path)
    if createdir and not resolved.exists():
        resolved.mkdir(parents=True)
    return str(resolved)
def get_project_settings() -> Settings:
    """Return the project :class:`~scrapy.settings.Settings`.

    Initializes the environment from scrapy.cfg when SCRAPY_SETTINGS_MODULE
    is not already set, loads that settings module at "project" priority, and
    finally applies the supported ``SCRAPY_``-prefixed environment variable
    overrides.
    """
    if ENVVAR not in os.environ:
        project = os.environ.get("SCRAPY_PROJECT", "default")
        init_env(project)
    settings = Settings()
    settings_module_path = os.environ.get(ENVVAR)
    if settings_module_path:
        settings.setmodule(settings_module_path, priority="project")
    valid_envvars = {
        "CHECK",
        "PROJECT",
        "PYTHON_SHELL",
        "SETTINGS_MODULE",
    }
    prefix = "SCRAPY_"
    # Strip only the leading "SCRAPY_". The previous implementation used
    # str.replace(), which removes *every* occurrence of "SCRAPY_" in the
    # name, so the validated name could differ from the key actually stored
    # (e.g. "SCRAPY_SETTINGS_SCRAPY_MODULE" validated as "SETTINGS_MODULE").
    scrapy_envvars = {
        k[len(prefix) :]: v
        for k, v in os.environ.items()
        if k.startswith(prefix) and k[len(prefix) :] in valid_envvars
    }
    settings.setdict(scrapy_envvars, priority="project")
    return settings
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/benchserver.py | scrapy/utils/benchserver.py | import random
from typing import Any
from urllib.parse import urlencode
from twisted.web.resource import Resource
from twisted.web.server import Request, Site
class Root(Resource):
    """Leaf resource generating pages of random follow links for benchmarks."""

    isLeaf = True

    def getChild(self, name: str, request: Request) -> Resource:
        # Leaf resource: every path resolves to this same resource.
        return self

    def render(self, request: Request) -> bytes:
        # "total" bounds the random link targets, "show" is how many to emit.
        total = _getarg(request, b"total", 100, int)
        show = _getarg(request, b"show", 10, int)
        targets = [random.randint(1, total) for _ in range(show)]  # noqa: S311
        request.write(b"<html><head></head><body>")
        assert request.args is not None
        args = request.args.copy()
        for target in targets:
            args["n"] = target
            query = urlencode(args, doseq=True)
            request.write(f"<a href='/follow?{query}'>follow {target}</a><br>".encode())
        request.write(b"</body></html>")
        return b""
def _getarg(request, name: bytes, default: Any = None, type_=str):
return type_(request.args[name][0]) if name in request.args else default
if __name__ == "__main__":
    from twisted.internet import reactor

    root = Root()
    factory = Site(root)
    # Reuse the factory built above; previously `factory` went unused and a
    # second, redundant Site(root) was constructed just for listenTCP().
    httpPort = reactor.listenTCP(8998, factory)

    def _print_listening() -> None:
        # Report the actual bound address once the reactor is running.
        httpHost = httpPort.getHost()
        print(f"Bench server at http://{httpHost.host}:{httpHost.port}")

    reactor.callWhenRunning(_print_listening)
    reactor.run()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/test.py | scrapy/utils/test.py | """
This module contains some assorted functions used in tests
"""
from __future__ import annotations
import asyncio
import os
import warnings
from ftplib import FTP
from importlib import import_module
from pathlib import Path
from posixpath import split
from typing import TYPE_CHECKING, Any, TypeVar, cast
from unittest import mock
from twisted.trial.unittest import SkipTest
from twisted.web.client import Agent
from scrapy.crawler import CrawlerRunner
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.boto import is_botocore_available
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.utils.reactor import is_asyncio_reactor_installed, is_reactor_installed
from scrapy.utils.spider import DefaultSpider
if TYPE_CHECKING:
from collections.abc import Awaitable
from twisted.internet.defer import Deferred
from twisted.web.client import Response as TxResponse
from scrapy import Spider
from scrapy.crawler import Crawler
_T = TypeVar("_T")
def assert_gcs_environ() -> None:
    """Deprecated test helper: skip the current test unless GCS_PROJECT_ID is set."""
    warnings.warn(
        "The assert_gcs_environ() function is deprecated and will be removed in a future version of Scrapy."
        " Check GCS_PROJECT_ID directly.",
        category=ScrapyDeprecationWarning,
        stacklevel=2,
    )
    if os.environ.get("GCS_PROJECT_ID") is None:
        raise SkipTest("GCS_PROJECT_ID not found")
def skip_if_no_boto() -> None:
    """Deprecated test helper: skip the current test when botocore is missing."""
    warnings.warn(
        "The skip_if_no_boto() function is deprecated and will be removed in a future version of Scrapy."
        " Check scrapy.utils.boto.is_botocore_available() directly.",
        category=ScrapyDeprecationWarning,
        stacklevel=2,
    )
    if is_botocore_available():
        return
    raise SkipTest("missing botocore library")
def get_gcs_content_and_delete(
    bucket: Any, path: str
) -> tuple[bytes, list[dict[str, str]], Any]:
    """Deprecated: download a GCS blob's content and ACL, then delete the blob."""
    from google.cloud import storage  # noqa: PLC0415

    warnings.warn(
        "The get_gcs_content_and_delete() function is deprecated and will be removed in a future version of Scrapy.",
        category=ScrapyDeprecationWarning,
        stacklevel=2,
    )
    client = storage.Client(project=os.environ.get("GCS_PROJECT_ID"))
    bucket_obj = client.get_bucket(bucket)
    blob = bucket_obj.get_blob(path)
    content = blob.download_as_string()
    acl = list(blob.acl)  # loads acl before it will be deleted
    bucket_obj.delete_blob(path)
    return content, acl, blob
def get_ftp_content_and_delete(
    path: str,
    host: str,
    port: int,
    username: str,
    password: str,
    use_active_mode: bool = False,
) -> bytes:
    """Deprecated: fetch a file from an FTP server, delete it and return its bytes."""
    warnings.warn(
        "The get_ftp_content_and_delete() function is deprecated and will be removed in a future version of Scrapy.",
        category=ScrapyDeprecationWarning,
        stacklevel=2,
    )
    ftp = FTP()
    ftp.connect(host, port)
    ftp.login(username, password)
    if use_active_mode:
        ftp.set_pasv(False)
    chunks: list[bytes] = []
    # retrbinary() calls the callback once per received chunk.
    ftp.retrbinary(f"RETR {path}", chunks.append)
    dirname, filename = split(path)
    ftp.cwd(dirname)
    ftp.delete(filename)
    return b"".join(chunks)
TestSpider = create_deprecated_class("TestSpider", DefaultSpider)
def get_reactor_settings() -> dict[str, Any]:
    """Return a settings dict that works with the installed reactor.

    ``Crawler._apply_settings()`` checks that the installed reactor matches the
    settings, so tests that run the crawler in the current process may need to
    pass a correct ``"TWISTED_REACTOR"`` setting value when creating it.
    """
    if not is_reactor_installed():
        raise RuntimeError(
            "get_reactor_settings() called without an installed reactor,"
            " you may need to install a reactor explicitly when running your tests."
        )
    if is_asyncio_reactor_installed():
        # The default TWISTED_REACTOR setting already matches.
        return {}
    return {"TWISTED_REACTOR": None}
def get_crawler(
    spidercls: type[Spider] | None = None,
    settings_dict: dict[str, Any] | None = None,
    prevent_warnings: bool = True,
) -> Crawler:
    """Return an unconfigured Crawler object. If settings_dict is given, it
    will be used to populate the crawler settings with a project level
    priority.
    """
    # When needed, useful settings can be added here, e.g. ones that prevent
    # deprecation warnings.
    merged_settings: dict[str, Any] = dict(get_reactor_settings())
    merged_settings.update(settings_dict or {})
    runner = CrawlerRunner(merged_settings)
    crawler = runner.create_crawler(spidercls or DefaultSpider)
    crawler._apply_settings()
    return crawler
def get_pythonpath() -> str:
    """Return a PYTHONPATH suitable to use in processes so that they find this
    installation of Scrapy"""
    scrapy_parent = Path(import_module("scrapy").__path__[0]).parent
    existing = os.environ.get("PYTHONPATH", "")
    return f"{scrapy_parent}{os.pathsep}{existing}"
def get_testenv() -> dict[str, str]:
    """Return a OS environment dict suitable to fork processes that need to import
    this installation of Scrapy, instead of a system installed one.
    """
    env = dict(os.environ)
    env["PYTHONPATH"] = get_pythonpath()
    return env
def get_from_asyncio_queue(value: _T) -> Awaitable[_T]:
    """Put *value* on a fresh asyncio queue and return the pending get() awaitable."""
    queue: asyncio.Queue[_T] = asyncio.Queue()
    getter = queue.get()
    queue.put_nowait(value)
    return getter
def mock_google_cloud_storage() -> tuple[Any, Any, Any]:
    """Creates autospec mocks for google-cloud-storage Client, Bucket and Blob
    classes and set their proper return values.
    """
    from google.cloud.storage import Blob, Bucket, Client  # noqa: PLC0415

    warnings.warn(
        "The mock_google_cloud_storage() function is deprecated and will be removed in a future version of Scrapy.",
        category=ScrapyDeprecationWarning,
        stacklevel=2,
    )
    blob_mock = mock.create_autospec(Blob)
    bucket_mock = mock.create_autospec(Bucket)
    bucket_mock.blob.return_value = blob_mock
    client_mock = mock.create_autospec(Client)
    client_mock.get_bucket.return_value = bucket_mock
    return client_mock, bucket_mock, blob_mock
def get_web_client_agent_req(url: str) -> Deferred[TxResponse]:
    """Issue a GET request for *url* through a ``twisted.web.client.Agent``."""
    from twisted.internet import reactor

    response_d = Agent(reactor).request(b"GET", url.encode("utf-8"))
    return cast("Deferred[TxResponse]", response_d)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/conf.py | scrapy/utils/conf.py | from __future__ import annotations
import numbers
import os
import sys
from configparser import ConfigParser
from operator import itemgetter
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from scrapy.exceptions import UsageError
from scrapy.settings import BaseSettings
from scrapy.utils.deprecate import update_classpath
from scrapy.utils.python import without_none_values
if TYPE_CHECKING:
from collections.abc import Callable, Collection, Iterable, Mapping, MutableMapping
def build_component_list(
    compdict: MutableMapping[Any, Any],
    *,
    convert: Callable[[Any], Any] = update_classpath,
) -> list[Any]:
    """Compose a component list from a :ref:`component priority dictionary
    <component-priority-dictionaries>`."""

    def _check_components(complist: Collection[Any]) -> None:
        # Two different paths that `convert` maps to the same object would
        # silently shadow each other; fail loudly instead.
        if len({convert(c) for c in complist}) != len(complist):
            raise ValueError(
                f"Some paths in {complist!r} convert to the same object, "
                "please update your settings"
            )

    def _map_keys(compdict: Mapping[Any, Any]) -> BaseSettings | dict[Any, Any]:
        # Normalize every key through `convert`, preserving per-key setting
        # priority when the input is a BaseSettings instance.
        if isinstance(compdict, BaseSettings):
            compbs = BaseSettings()
            for k, v in compdict.items():
                prio = compdict.getpriority(k)
                assert prio is not None
                # A converted key already present at the same priority means
                # two original keys collided — ambiguous, so reject it.
                if compbs.getpriority(convert(k)) == prio:
                    raise ValueError(
                        f"Some paths in {list(compdict.keys())!r} "
                        "convert to the same "
                        "object, please update your settings"
                    )
                compbs.set(convert(k), v, priority=prio)
            return compbs
        _check_components(compdict)
        return {convert(k): v for k, v in compdict.items()}

    def _validate_values(compdict: Mapping[Any, Any]) -> None:
        """Fail if a value in the components dict is not a real number or None."""
        for name, value in compdict.items():
            if value is not None and not isinstance(value, numbers.Real):
                raise ValueError(
                    f"Invalid value {value} for component {name}, "
                    "please provide a real number or None instead"
                )

    _validate_values(compdict)
    # None values mean "disabled": drop them, then order components by their
    # numeric priority value (ascending).
    compdict = without_none_values(_map_keys(compdict))
    return [k for k, v in sorted(compdict.items(), key=itemgetter(1))]
def arglist_to_dict(arglist: list[str]) -> dict[str, str]:
    """Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a
    dict
    """
    # Split on the first "=" only, so values may themselves contain "=".
    return {key: value for key, value in (item.split("=", 1) for item in arglist)}
def closest_scrapy_cfg(
    path: str | os.PathLike = ".",
    prevpath: str | os.PathLike | None = None,
) -> str:
    """Return the path to the closest scrapy.cfg file by traversing the current
    directory and its parents
    """
    previous: str | os.PathLike | None = prevpath
    current: str | os.PathLike = path
    # At the filesystem root, parent == self, which ends the walk.
    while previous is None or str(current) != str(previous):
        resolved = Path(current).resolve()
        candidate = resolved / "scrapy.cfg"
        if candidate.exists():
            return str(candidate)
        previous, current = resolved, resolved.parent
    return ""
def init_env(project: str = "default", set_syspath: bool = True) -> None:
    """Initialize environment to use command-line tool from inside a project
    dir. This sets the Scrapy settings module and modifies the Python path to
    be able to locate the project module.
    """
    cfg = get_config()
    if cfg.has_option("settings", project):
        os.environ["SCRAPY_SETTINGS_MODULE"] = cfg.get("settings", project)
    closest = closest_scrapy_cfg()
    if closest and set_syspath:
        projdir = str(Path(closest).parent)
        if projdir not in sys.path:
            sys.path.append(projdir)
def get_config(use_closest: bool = True) -> ConfigParser:
    """Get Scrapy config file as a ConfigParser"""
    parser = ConfigParser()
    # Missing files in the list are silently ignored by read().
    parser.read(get_sources(use_closest))
    return parser
def get_sources(use_closest: bool = True) -> list[str]:
    """Return candidate scrapy.cfg locations, lowest to highest precedence."""
    xdg_config_home = (
        os.environ.get("XDG_CONFIG_HOME") or Path("~/.config").expanduser()
    )
    sources = [
        "/etc/scrapy.cfg",
        r"c:\scrapy\scrapy.cfg",
        str(Path(xdg_config_home) / "scrapy.cfg"),
        str(Path("~/.scrapy.cfg").expanduser()),
    ]
    # The project-local scrapy.cfg (closest parent) wins over global ones.
    return [*sources, closest_scrapy_cfg()] if use_closest else sources
def feed_complete_default_values_from_settings(
    feed: dict[str, Any], settings: BaseSettings
) -> dict[str, Any]:
    """Return a copy of *feed* with any missing option filled in from the
    corresponding FEED_* setting."""
    out = dict(feed)
    indent = (
        None
        if settings["FEED_EXPORT_INDENT"] is None
        else settings.getint("FEED_EXPORT_INDENT")
    )
    defaults: dict[str, Any] = {
        "batch_item_count": settings.getint("FEED_EXPORT_BATCH_ITEM_COUNT"),
        "encoding": settings["FEED_EXPORT_ENCODING"],
        "fields": settings.getdictorlist("FEED_EXPORT_FIELDS") or None,
        "store_empty": settings.getbool("FEED_STORE_EMPTY"),
        "uri_params": settings["FEED_URI_PARAMS"],
        "item_export_kwargs": {},
        "indent": indent,
    }
    for option, value in defaults.items():
        out.setdefault(option, value)
    return out
def feed_process_params_from_cli(
    settings: BaseSettings,
    output: list[str],
    *,
    overwrite_output: list[str] | None = None,
) -> dict[str, dict[str, Any]]:
    """
    Receives feed export params (from the 'crawl' or 'runspider' commands),
    checks for inconsistencies in their quantities and returns a dictionary
    suitable to be used as the FEEDS setting.
    """
    # Formats whose exporter is None are disabled; only the remaining keys
    # are acceptable as a "<URI>:<FORMAT>" suffix or file extension.
    valid_output_formats: Iterable[str] = without_none_values(
        cast("dict[str, str]", settings.getwithbase("FEED_EXPORTERS"))
    ).keys()

    def check_valid_format(output_format: str) -> None:
        # Raise UsageError for formats with no registered exporter.
        if output_format not in valid_output_formats:
            raise UsageError(
                f"Unrecognized output format '{output_format}'. "
                f"Set a supported one ({tuple(valid_output_formats)}) "
                "after a colon at the end of the output URI (i.e. -o/-O "
                "<URI>:<FORMAT>) or as a file extension."
            )

    overwrite = False
    if overwrite_output:
        # -o/--output and -O/--overwrite-output are mutually exclusive;
        # -O behaves like -o plus overwrite=True on every feed.
        if output:
            raise UsageError(
                "Please use only one of -o/--output and -O/--overwrite-output"
            )
        output = overwrite_output
        overwrite = True
    result: dict[str, dict[str, Any]] = {}
    for element in output:
        try:
            # Prefer an explicit "<URI>:<FORMAT>" suffix ...
            feed_uri, feed_format = element.rsplit(":", 1)
            check_valid_format(feed_format)
        except (ValueError, UsageError):
            # ... falling back to the file extension when there is no colon
            # or the suffix is not a recognized format (e.g. "items.json").
            feed_uri = element
            feed_format = Path(element).suffix.replace(".", "")
        else:
            # "-:<FORMAT>" is shorthand for writing to standard output.
            if feed_uri == "-":
                feed_uri = "stdout:"
        check_valid_format(feed_format)
        result[feed_uri] = {"format": feed_format}
        if overwrite:
            result[feed_uri]["overwrite"] = True

    # FEEDS setting should take precedence over the matching CLI options
    result.update(settings.getdict("FEEDS"))
    return result
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/job.py | scrapy/utils/job.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from scrapy.settings import BaseSettings
def job_dir(settings: BaseSettings) -> str | None:
    """Return the JOBDIR setting, creating the directory if needed.

    Returns None when JOBDIR is unset or empty.
    """
    path: str | None = settings["JOBDIR"]
    if not path:
        return None
    job_path = Path(path)
    if not job_path.exists():
        job_path.mkdir(parents=True)
    return path
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/engine.py | scrapy/utils/engine.py | """Some debugging functions for working with the Scrapy engine"""
from __future__ import annotations
# used in global tests code
from time import time # noqa: F401
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from scrapy.core.engine import ExecutionEngine
def get_engine_status(engine: ExecutionEngine) -> list[tuple[str, Any]]:
    """Return a report of the current engine status"""
    # Each entry is a Python expression evaluated against the local name
    # ``engine`` (and ``time`` from this module's namespace).
    tests = [
        "time()-engine.start_time",
        "len(engine.downloader.active)",
        "engine.scraper.is_idle()",
        "engine.spider.name",
        "engine.spider_is_idle()",
        "engine._slot.closing",
        "len(engine._slot.inprogress)",
        "len(engine._slot.scheduler.dqs or [])",
        "len(engine._slot.scheduler.mqs)",
        "len(engine.scraper.slot.queue)",
        "len(engine.scraper.slot.active)",
        "engine.scraper.slot.active_size",
        "engine.scraper.slot.itemproc_size",
        "engine.scraper.slot.needs_backout()",
    ]
    report: list[tuple[str, Any]] = []
    for expression in tests:
        try:
            value: Any = eval(expression)  # noqa: S307 # pylint: disable=eval-used
        except Exception as exc:
            # Report the failure inline instead of aborting the whole status.
            value = f"{type(exc).__name__} (exception)"
        report.append((expression, value))
    return report
def format_engine_status(engine: ExecutionEngine) -> str:
    """Render the engine status report as an aligned, human-readable string."""
    lines = ["Execution engine status", ""]
    for expression, value in get_engine_status(engine):
        lines.append(f"{expression:<47} : {value}")
    lines.append("")
    return "\n".join(lines) + "\n"
def print_engine_status(engine: ExecutionEngine) -> None:
    """Print the formatted engine status report to standard output."""
    report = format_engine_status(engine)
    print(report)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/datatypes.py | scrapy/utils/datatypes.py | """
This module contains data types used by Scrapy which are not included in the
Python Standard Library.
This module must not depend on any module outside the Standard Library.
"""
from __future__ import annotations
import collections
import contextlib
import warnings
import weakref
from collections import OrderedDict
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, AnyStr, TypeVar
from scrapy.exceptions import ScrapyDeprecationWarning
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
# typing.Self requires Python 3.11
from typing_extensions import Self
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
class CaselessDict(dict):
    """Deprecated dict subclass whose keys are normalized (lowercased) on
    every access.

    Instantiating any subclass other than ``scrapy.http.headers.Headers``
    emits a deprecation warning; new code should use ``CaseInsensitiveDict``.
    """

    __slots__ = ()

    def __new__(cls, *args: Any, **kwargs: Any) -> Self:
        # circular import
        from scrapy.http.headers import Headers  # noqa: PLC0415

        # Headers is the one remaining supported user of this class, so it
        # is exempt from the deprecation warning.
        if issubclass(cls, CaselessDict) and not issubclass(cls, Headers):
            warnings.warn(
                "scrapy.utils.datatypes.CaselessDict is deprecated,"
                " please use scrapy.utils.datatypes.CaseInsensitiveDict instead",
                category=ScrapyDeprecationWarning,
                stacklevel=2,
            )
        return super().__new__(cls, *args, **kwargs)

    def __init__(
        self,
        seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
    ):
        super().__init__()
        # Route initial items through update() so keys/values get normalized.
        if seq:
            self.update(seq)

    # All dunder accessors normalize the key with normkey() first, so lookups
    # are effectively case-insensitive.
    def __getitem__(self, key: AnyStr) -> Any:
        return dict.__getitem__(self, self.normkey(key))

    def __setitem__(self, key: AnyStr, value: Any) -> None:
        dict.__setitem__(self, self.normkey(key), self.normvalue(value))

    def __delitem__(self, key: AnyStr) -> None:
        dict.__delitem__(self, self.normkey(key))

    def __contains__(self, key: AnyStr) -> bool:  # type: ignore[override]
        return dict.__contains__(self, self.normkey(key))

    has_key = __contains__

    def __copy__(self) -> Self:
        return self.__class__(self)

    copy = __copy__

    def normkey(self, key: AnyStr) -> AnyStr:
        """Method to normalize dictionary key access"""
        return key.lower()

    def normvalue(self, value: Any) -> Any:
        """Method to normalize values prior to be set"""
        return value

    def get(self, key: AnyStr, def_val: Any = None) -> Any:
        # NOTE: the default is also passed through normvalue().
        return dict.get(self, self.normkey(key), self.normvalue(def_val))

    def setdefault(self, key: AnyStr, def_val: Any = None) -> Any:
        return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))  # type: ignore[arg-type]

    # doesn't fully implement MutableMapping.update()
    def update(self, seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]]) -> None:  # type: ignore[override]
        seq = seq.items() if isinstance(seq, Mapping) else seq
        # Normalize both keys and values before storing.
        iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)
        super().update(iseq)

    @classmethod
    def fromkeys(cls, keys: Iterable[AnyStr], value: Any = None) -> Self:  # type: ignore[override]
        return cls((k, value) for k in keys)  # type: ignore[misc]

    def pop(self, key: AnyStr, *args: Any) -> Any:
        return dict.pop(self, self.normkey(key), *args)
class CaseInsensitiveDict(collections.UserDict):
"""A dict-like structure that accepts strings or bytes
as keys and allows case-insensitive lookups.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._keys: dict = {}
super().__init__(*args, **kwargs)
def __getitem__(self, key: AnyStr) -> Any:
normalized_key = self._normkey(key)
return super().__getitem__(self._keys[normalized_key.lower()])
def __setitem__(self, key: AnyStr, value: Any) -> None:
normalized_key = self._normkey(key)
try:
lower_key = self._keys[normalized_key.lower()]
del self[lower_key]
except KeyError:
pass
super().__setitem__(normalized_key, self._normvalue(value))
self._keys[normalized_key.lower()] = normalized_key
def __delitem__(self, key: AnyStr) -> None:
normalized_key = self._normkey(key)
stored_key = self._keys.pop(normalized_key.lower())
super().__delitem__(stored_key)
def __contains__(self, key: AnyStr) -> bool: # type: ignore[override]
normalized_key = self._normkey(key)
return normalized_key.lower() in self._keys
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {super().__repr__()}>"
def _normkey(self, key: AnyStr) -> AnyStr:
return key
def _normvalue(self, value: Any) -> Any:
return value
class LocalCache(OrderedDict[_KT, _VT]):
"""Dictionary with a finite number of keys.
Older items expires first.
"""
def __init__(self, limit: int | None = None):
super().__init__()
self.limit: int | None = limit
def __setitem__(self, key: _KT, value: _VT) -> None:
if self.limit:
while len(self) >= self.limit:
self.popitem(last=False)
super().__setitem__(key, value)
class LocalWeakReferencedCache(weakref.WeakKeyDictionary):
"""
A weakref.WeakKeyDictionary implementation that uses LocalCache as its
underlying data structure, making it ordered and capable of being size-limited.
Useful for memoization, while avoiding keeping received
arguments in memory only because of the cached references.
Note: like LocalCache and unlike weakref.WeakKeyDictionary,
it cannot be instantiated with an initial dictionary.
"""
def __init__(self, limit: int | None = None):
super().__init__()
self.data: LocalCache = LocalCache(limit=limit)
def __setitem__(self, key: _KT, value: _VT) -> None:
# if raised, key is not weak-referenceable, skip caching
with contextlib.suppress(TypeError):
super().__setitem__(key, value)
def __getitem__(self, key: _KT) -> _VT | None: # type: ignore[override]
try:
return super().__getitem__(key)
except (TypeError, KeyError):
return None # key is either not weak-referenceable or not cached
class SequenceExclude:
"""Object to test if an item is NOT within some sequence."""
def __init__(self, seq: Sequence[Any]):
self.seq: Sequence[Any] = seq
def __contains__(self, item: Any) -> bool:
return item not in self.seq
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/reactor.py | scrapy/utils/reactor.py | from __future__ import annotations
import asyncio
import sys
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Generic, ParamSpec, TypeVar
from warnings import catch_warnings, filterwarnings
from twisted.internet import asyncioreactor, error
from twisted.internet.defer import Deferred
from scrapy.utils.misc import load_object
from scrapy.utils.python import global_object_name
if TYPE_CHECKING:
from asyncio import AbstractEventLoop
from collections.abc import Callable
from twisted.internet.protocol import ServerFactory
from twisted.internet.tcp import Port
from scrapy.utils.asyncio import CallLaterResult
_T = TypeVar("_T")
_P = ParamSpec("_P")
def listen_tcp(portrange: list[int], host: str, factory: ServerFactory) -> Port: # type: ignore[return] # pylint: disable=inconsistent-return-statements # noqa: RET503
"""Like reactor.listenTCP but tries different ports in a range."""
from twisted.internet import reactor
if len(portrange) > 2:
raise ValueError(f"invalid portrange: {portrange}")
if not portrange:
return reactor.listenTCP(0, factory, interface=host)
if len(portrange) == 1:
return reactor.listenTCP(portrange[0], factory, interface=host)
for x in range(portrange[0], portrange[1] + 1):
try:
return reactor.listenTCP(x, factory, interface=host)
except error.CannotListenError:
if x == portrange[1]:
raise
class CallLaterOnce(Generic[_T]):
"""Schedule a function to be called in the next reactor loop, but only if
it hasn't been already scheduled since the last time it ran.
"""
def __init__(self, func: Callable[_P, _T], *a: _P.args, **kw: _P.kwargs):
self._func: Callable[_P, _T] = func
self._a: tuple[Any, ...] = a
self._kw: dict[str, Any] = kw
self._call: CallLaterResult | None = None
self._deferreds: list[Deferred] = []
def schedule(self, delay: float = 0) -> None:
# circular import
from scrapy.utils.asyncio import call_later # noqa: PLC0415
if self._call is None:
self._call = call_later(delay, self)
def cancel(self) -> None:
if self._call:
self._call.cancel()
def __call__(self) -> _T:
# circular import
from scrapy.utils.asyncio import call_later # noqa: PLC0415
self._call = None
result = self._func(*self._a, **self._kw)
for d in self._deferreds:
call_later(0, d.callback, None)
self._deferreds = []
return result
async def wait(self):
# circular import
from scrapy.utils.defer import maybe_deferred_to_future # noqa: PLC0415
d = Deferred()
self._deferreds.append(d)
await maybe_deferred_to_future(d)
_asyncio_reactor_path = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
def set_asyncio_event_loop_policy() -> None:
"""The policy functions from asyncio often behave unexpectedly,
so we restrict their use to the absolutely essential case.
This should only be used to install the reactor.
"""
policy = asyncio.get_event_loop_policy()
if sys.platform == "win32" and not isinstance(
policy, asyncio.WindowsSelectorEventLoopPolicy
):
policy = asyncio.WindowsSelectorEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
def install_reactor(reactor_path: str, event_loop_path: str | None = None) -> None:
"""Installs the :mod:`~twisted.internet.reactor` with the specified
import path. Also installs the asyncio event loop with the specified import
path if the asyncio reactor is enabled"""
reactor_class = load_object(reactor_path)
if reactor_class is asyncioreactor.AsyncioSelectorReactor:
set_asyncio_event_loop_policy()
with suppress(error.ReactorAlreadyInstalledError):
event_loop = set_asyncio_event_loop(event_loop_path)
asyncioreactor.install(eventloop=event_loop)
else:
*module, _ = reactor_path.split(".")
installer_path = [*module, "install"]
installer = load_object(".".join(installer_path))
with suppress(error.ReactorAlreadyInstalledError):
installer()
def _get_asyncio_event_loop() -> AbstractEventLoop:
return set_asyncio_event_loop(None)
def set_asyncio_event_loop(event_loop_path: str | None) -> AbstractEventLoop:
"""Sets and returns the event loop with specified import path."""
if event_loop_path is not None:
event_loop_class: type[AbstractEventLoop] = load_object(event_loop_path)
event_loop = _get_asyncio_event_loop()
if not isinstance(event_loop, event_loop_class):
event_loop = event_loop_class()
asyncio.set_event_loop(event_loop)
else:
try:
with catch_warnings():
# In Python 3.10.9, 3.11.1, 3.12 and 3.13, a DeprecationWarning
# is emitted about the lack of a current event loop, because in
# Python 3.14 and later `get_event_loop` will raise a
# RuntimeError in that event. Because our code is already
# prepared for that future behavior, we ignore the deprecation
# warning.
filterwarnings(
"ignore",
message="There is no current event loop",
category=DeprecationWarning,
)
event_loop = asyncio.get_event_loop()
except RuntimeError:
# `get_event_loop` raises RuntimeError when called with no asyncio
# event loop yet installed in the following scenarios:
# - Previsibly on Python 3.14 and later.
# https://github.com/python/cpython/issues/100160#issuecomment-1345581902
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
return event_loop
def verify_installed_reactor(reactor_path: str) -> None:
"""Raise :exc:`RuntimeError` if the installed
:mod:`~twisted.internet.reactor` does not match the specified import
path or if no reactor is installed."""
if not is_reactor_installed():
raise RuntimeError(
"verify_installed_reactor() called without an installed reactor."
)
from twisted.internet import reactor
expected_reactor_type = load_object(reactor_path)
reactor_type = type(reactor)
if not reactor_type == expected_reactor_type:
raise RuntimeError(
f"The installed reactor ({global_object_name(reactor_type)}) "
f"does not match the requested one ({reactor_path})"
)
def verify_installed_asyncio_event_loop(loop_path: str) -> None:
"""Raise :exc:`RuntimeError` if the even loop of the installed
:class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`
does not match the specified import path or if no reactor is installed."""
if not is_reactor_installed():
raise RuntimeError(
"verify_installed_asyncio_event_loop() called without an installed reactor."
)
from twisted.internet import reactor
loop_class = load_object(loop_path)
if isinstance(reactor._asyncioEventloop, loop_class):
return
installed = (
f"{reactor._asyncioEventloop.__class__.__module__}"
f".{reactor._asyncioEventloop.__class__.__qualname__}"
)
raise RuntimeError(
"Scrapy found an asyncio Twisted reactor already "
f"installed, and its event loop class ({installed}) does "
"not match the one specified in the ASYNCIO_EVENT_LOOP "
f"setting ({global_object_name(loop_class)})"
)
def is_reactor_installed() -> bool:
"""Check whether a :mod:`~twisted.internet.reactor` is installed."""
return "twisted.internet.reactor" in sys.modules
def is_asyncio_reactor_installed() -> bool:
"""Check whether the installed reactor is :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`.
Raise a :exc:`RuntimeError` if no reactor is installed.
In a future Scrapy version, when Scrapy supports running without a Twisted
reactor, this function won't be useful for checking if it's possible to use
asyncio features, so the code that that doesn't directly require a Twisted
reactor should use :func:`scrapy.utils.asyncio.is_asyncio_available`
instead of this function.
.. versionchanged:: 2.13
In earlier Scrapy versions this function silently installed the default
reactor if there was no reactor installed. Now it raises an exception to
prevent silent problems in this case.
"""
if not is_reactor_installed():
raise RuntimeError(
"is_asyncio_reactor_installed() called without an installed reactor."
)
from twisted.internet import reactor
return isinstance(reactor, asyncioreactor.AsyncioSelectorReactor)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/utils/asyncgen.py | scrapy/utils/asyncgen.py | from __future__ import annotations
from collections.abc import AsyncGenerator, AsyncIterator, Iterable
from typing import TypeVar
_T = TypeVar("_T")
async def collect_asyncgen(result: AsyncIterator[_T]) -> list[_T]:
return [x async for x in result]
async def as_async_generator(
it: Iterable[_T] | AsyncIterator[_T],
) -> AsyncGenerator[_T]:
"""Wraps an iterable (sync or async) into an async generator."""
if isinstance(it, AsyncIterator):
async for r in it:
yield r
else:
for r in it:
yield r
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/selector/unified.py | scrapy/selector/unified.py | """
XPath selectors based on lxml
"""
from __future__ import annotations
from typing import Any
from parsel import Selector as _ParselSelector
from scrapy.http import HtmlResponse, TextResponse, XmlResponse
from scrapy.utils.python import to_bytes
from scrapy.utils.response import get_base_url
from scrapy.utils.trackref import object_ref
__all__ = ["Selector", "SelectorList"]
_NOT_SET = object()
def _st(response: TextResponse | None, st: str | None) -> str:
if st is None:
return "xml" if isinstance(response, XmlResponse) else "html"
return st
def _response_from_text(text: str | bytes, st: str | None) -> TextResponse:
rt: type[TextResponse] = XmlResponse if st == "xml" else HtmlResponse
return rt(url="about:blank", encoding="utf-8", body=to_bytes(text, "utf-8"))
class SelectorList(_ParselSelector.selectorlist_cls, object_ref):
"""
The :class:`SelectorList` class is a subclass of the builtin ``list``
class, which provides a few additional methods.
"""
class Selector(_ParselSelector, object_ref):
"""
An instance of :class:`Selector` is a wrapper over response to select
certain parts of its content.
``response`` is an :class:`~scrapy.http.HtmlResponse` or an
:class:`~scrapy.http.XmlResponse` object that will be used for selecting
and extracting data.
``text`` is a unicode string or utf-8 encoded text for cases when a
``response`` isn't available. Using ``text`` and ``response`` together is
undefined behavior.
``type`` defines the selector type, it can be ``"html"``, ``"xml"``, ``"json"``
or ``None`` (default).
If ``type`` is ``None``, the selector automatically chooses the best type
based on ``response`` type (see below), or defaults to ``"html"`` in case it
is used together with ``text``.
If ``type`` is ``None`` and a ``response`` is passed, the selector type is
inferred from the response type as follows:
* ``"html"`` for :class:`~scrapy.http.HtmlResponse` type
* ``"xml"`` for :class:`~scrapy.http.XmlResponse` type
* ``"json"`` for :class:`~scrapy.http.TextResponse` type
* ``"html"`` for anything else
Otherwise, if ``type`` is set, the selector type will be forced and no
detection will occur.
"""
__slots__ = ["response"]
selectorlist_cls = SelectorList
def __init__(
self,
response: TextResponse | None = None,
text: str | None = None,
type: str | None = None, # noqa: A002
root: Any | None = _NOT_SET,
**kwargs: Any,
):
if response is not None and text is not None:
raise ValueError(
f"{self.__class__.__name__}.__init__() received both response and text"
)
st = _st(response, type)
if text is not None:
response = _response_from_text(text, st)
if response is not None:
text = response.text
kwargs.setdefault("base_url", get_base_url(response))
self.response = response
if root is not _NOT_SET:
kwargs["root"] = root
super().__init__(text=text, type=st, **kwargs)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/selector/__init__.py | scrapy/selector/__init__.py | """
Selectors
"""
# top-level imports
from scrapy.selector.unified import Selector, SelectorList
__all__ = [
"Selector",
"SelectorList",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/templates/project/module/__init__.py | scrapy/templates/project/module/__init__.py | python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false | |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/templates/project/module/spiders/__init__.py | scrapy/templates/project/module/spiders/__init__.py | # This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/linkextractors/lxmlhtml.py | scrapy/linkextractors/lxmlhtml.py | """
Link extractor based on lxml.html
"""
from __future__ import annotations
import logging
import operator
import re
from collections.abc import Callable, Iterable
from functools import partial
from typing import TYPE_CHECKING, Any, TypeAlias, cast
from urllib.parse import urljoin, urlparse
from lxml import etree
from parsel.csstranslator import HTMLTranslator
from w3lib.html import strip_html5_whitespace
from w3lib.url import canonicalize_url, safe_url_string
from scrapy.link import Link
from scrapy.linkextractors import IGNORED_EXTENSIONS, _is_valid_url, _matches
from scrapy.utils.misc import arg_to_iter, rel_has_nofollow
from scrapy.utils.python import unique as unique_list
from scrapy.utils.response import get_base_url
from scrapy.utils.url import url_has_any_extension, url_is_from_any_domain
if TYPE_CHECKING:
from lxml.html import HtmlElement
from scrapy import Selector
from scrapy.http import TextResponse
logger = logging.getLogger(__name__)
# from lxml/src/lxml/html/__init__.py
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
_collect_string_content = etree.XPath("string()")
def _nons(tag: Any) -> Any:
if (
isinstance(tag, str)
and tag[0] == "{"
and tag[1 : len(XHTML_NAMESPACE) + 1] == XHTML_NAMESPACE
):
return tag.split("}")[-1]
return tag
def _identity(x: Any) -> Any:
return x
def _canonicalize_link_url(link: Link) -> str:
return canonicalize_url(link.url, keep_fragments=True)
class LxmlParserLinkExtractor:
def __init__(
self,
tag: str | Callable[[str], bool] = "a",
attr: str | Callable[[str], bool] = "href",
process: Callable[[Any], Any] | None = None,
unique: bool = False,
strip: bool = True,
canonicalized: bool = False,
):
# mypy doesn't infer types for operator.* and also for partial()
self.scan_tag: Callable[[str], bool] = (
tag
if callable(tag)
else cast("Callable[[str], bool]", partial(operator.eq, tag))
)
self.scan_attr: Callable[[str], bool] = (
attr
if callable(attr)
else cast("Callable[[str], bool]", partial(operator.eq, attr))
)
self.process_attr: Callable[[Any], Any] = (
process if callable(process) else _identity
)
self.unique: bool = unique
self.strip: bool = strip
self.link_key: Callable[[Link], str] = (
cast("Callable[[Link], str]", operator.attrgetter("url"))
if canonicalized
else _canonicalize_link_url
)
def _iter_links(
self, document: HtmlElement
) -> Iterable[tuple[HtmlElement, str, str]]:
for el in document.iter(etree.Element):
if not self.scan_tag(_nons(el.tag)):
continue
attribs = el.attrib
for attrib in attribs:
if not self.scan_attr(attrib):
continue
yield el, attrib, attribs[attrib]
def _extract_links(
self,
selector: Selector,
response_url: str,
response_encoding: str,
base_url: str,
) -> list[Link]:
links: list[Link] = []
# hacky way to get the underlying lxml parsed document
for el, attr, attr_val in self._iter_links(selector.root):
# pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
try:
if self.strip:
attr_val = strip_html5_whitespace(attr_val)
attr_val = urljoin(base_url, attr_val)
except ValueError:
continue # skipping bogus links
else:
url = self.process_attr(attr_val)
if url is None:
continue
try:
url = safe_url_string(url, encoding=response_encoding)
except ValueError:
logger.debug(f"Skipping extraction of link with bad URL {url!r}")
continue
# to fix relative links after process_value
url = urljoin(response_url, url)
link = Link(
url,
_collect_string_content(el) or "",
nofollow=rel_has_nofollow(el.get("rel")),
)
links.append(link)
return self._deduplicate_if_needed(links)
def extract_links(self, response: TextResponse) -> list[Link]:
base_url = get_base_url(response)
return self._extract_links(
response.selector, response.url, response.encoding, base_url
)
def _process_links(self, links: list[Link]) -> list[Link]:
"""Normalize and filter extracted links
The subclass should override it if necessary
"""
return self._deduplicate_if_needed(links)
def _deduplicate_if_needed(self, links: list[Link]) -> list[Link]:
if self.unique:
return unique_list(links, key=self.link_key)
return links
_Regex: TypeAlias = str | re.Pattern[str]
_RegexOrSeveral: TypeAlias = _Regex | Iterable[_Regex]
class LxmlLinkExtractor:
_csstranslator = HTMLTranslator()
def __init__(
self,
allow: _RegexOrSeveral = (),
deny: _RegexOrSeveral = (),
allow_domains: str | Iterable[str] = (),
deny_domains: str | Iterable[str] = (),
restrict_xpaths: str | Iterable[str] = (),
tags: str | Iterable[str] = ("a", "area"),
attrs: str | Iterable[str] = ("href",),
canonicalize: bool = False,
unique: bool = True,
process_value: Callable[[Any], Any] | None = None,
deny_extensions: str | Iterable[str] | None = None,
restrict_css: str | Iterable[str] = (),
strip: bool = True,
restrict_text: _RegexOrSeveral | None = None,
):
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
self.link_extractor = LxmlParserLinkExtractor(
tag=partial(operator.contains, tags),
attr=partial(operator.contains, attrs),
unique=unique,
process=process_value,
strip=strip,
canonicalized=not canonicalize,
)
self.allow_res: list[re.Pattern[str]] = self._compile_regexes(allow)
self.deny_res: list[re.Pattern[str]] = self._compile_regexes(deny)
self.allow_domains: set[str] = set(arg_to_iter(allow_domains))
self.deny_domains: set[str] = set(arg_to_iter(deny_domains))
self.restrict_xpaths: tuple[str, ...] = tuple(arg_to_iter(restrict_xpaths))
self.restrict_xpaths += tuple(
map(self._csstranslator.css_to_xpath, arg_to_iter(restrict_css))
)
if deny_extensions is None:
deny_extensions = IGNORED_EXTENSIONS
self.canonicalize: bool = canonicalize
self.deny_extensions: set[str] = {"." + e for e in arg_to_iter(deny_extensions)}
self.restrict_text: list[re.Pattern[str]] = self._compile_regexes(restrict_text)
@staticmethod
def _compile_regexes(value: _RegexOrSeveral | None) -> list[re.Pattern[str]]:
return [
x if isinstance(x, re.Pattern) else re.compile(x)
for x in arg_to_iter(value)
]
def _link_allowed(self, link: Link) -> bool:
if not _is_valid_url(link.url):
return False
if self.allow_res and not _matches(link.url, self.allow_res):
return False
if self.deny_res and _matches(link.url, self.deny_res):
return False
parsed_url = urlparse(link.url)
if self.allow_domains and not url_is_from_any_domain(
parsed_url, self.allow_domains
):
return False
if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
return False
if self.deny_extensions and url_has_any_extension(
parsed_url, self.deny_extensions
):
return False
return not self.restrict_text or _matches(link.text, self.restrict_text)
def matches(self, url: str) -> bool:
if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
return False
allowed = (
(regex.search(url) for regex in self.allow_res)
if self.allow_res
else [True]
)
denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []
return any(allowed) and not any(denied)
def _process_links(self, links: list[Link]) -> list[Link]:
links = [x for x in links if self._link_allowed(x)]
if self.canonicalize:
for link in links:
link.url = canonicalize_url(link.url)
return self.link_extractor._process_links(links)
def _extract_links(self, *args: Any, **kwargs: Any) -> list[Link]:
return self.link_extractor._extract_links(*args, **kwargs)
def extract_links(self, response: TextResponse) -> list[Link]:
"""Returns a list of :class:`~scrapy.link.Link` objects from the
specified :class:`response <scrapy.http.Response>`.
Only links that match the settings passed to the ``__init__`` method of
the link extractor are returned.
Duplicate links are omitted if the ``unique`` attribute is set to ``True``,
otherwise they are returned.
"""
base_url = get_base_url(response)
if self.restrict_xpaths:
docs = [
subdoc for x in self.restrict_xpaths for subdoc in response.xpath(x)
]
else:
docs = [response.selector]
all_links = []
for doc in docs:
links = self._extract_links(doc, response.url, response.encoding, base_url)
all_links.extend(self._process_links(links))
if self.link_extractor.unique:
return unique_list(all_links, key=self.link_extractor.link_key)
return all_links
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/linkextractors/__init__.py | scrapy/linkextractors/__init__.py | """
scrapy.linkextractors
This package contains a collection of Link Extractors.
For more info see docs/topics/link-extractors.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Iterable
from re import Pattern
# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
# archives
"7z",
"7zip",
"bz2",
"rar",
"tar",
"tar.gz",
"xz",
"zip",
# images
"mng",
"pct",
"bmp",
"gif",
"jpg",
"jpeg",
"png",
"pst",
"psp",
"tif",
"tiff",
"ai",
"drw",
"dxf",
"eps",
"ps",
"svg",
"cdr",
"ico",
"webp",
# audio
"mp3",
"wma",
"ogg",
"wav",
"ra",
"aac",
"mid",
"au",
"aiff",
# video
"3gp",
"asf",
"asx",
"avi",
"mov",
"mp4",
"mpg",
"qt",
"rm",
"swf",
"wmv",
"m4a",
"m4v",
"flv",
"webm",
# office suites
"xls",
"xlsm",
"xlsx",
"xltm",
"xltx",
"potm",
"potx",
"ppt",
"pptm",
"pptx",
"pps",
"doc",
"docb",
"docm",
"docx",
"dotm",
"dotx",
"odt",
"ods",
"odg",
"odp",
# other
"css",
"pdf",
"exe",
"bin",
"rss",
"dmg",
"iso",
"apk",
"jar",
"sh",
"rb",
"js",
"hta",
"bat",
"cpl",
"msi",
"msp",
"py",
]
def _matches(url: str, regexs: Iterable[Pattern[str]]) -> bool:
return any(r.search(url) for r in regexs)
def _is_valid_url(url: str) -> bool:
return url.split("://", 1)[0] in {"http", "https", "file", "ftp"}
# Top-level imports
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor as LinkExtractor
__all__ = [
"IGNORED_EXTENSIONS",
"LinkExtractor",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/contracts/default.py | scrapy/contracts/default.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any
from itemadapter import ItemAdapter, is_item
from scrapy.contracts import Contract
from scrapy.exceptions import ContractFail
from scrapy.http import Request
if TYPE_CHECKING:
from collections.abc import Callable
# contracts
class UrlContract(Contract):
"""Contract to set the url of the request (mandatory)
@url http://scrapy.org
"""
name = "url"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["url"] = self.args[0]
return args
class CallbackKeywordArgumentsContract(Contract):
"""Contract to set the keyword arguments for the request.
The value should be a JSON-encoded dictionary, e.g.:
@cb_kwargs {"arg1": "some value"}
"""
name = "cb_kwargs"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["cb_kwargs"] = json.loads(" ".join(self.args))
return args
class MetadataContract(Contract):
"""Contract to set metadata arguments for the request.
The value should be JSON-encoded dictionary, e.g.:
@meta {"arg1": "some value"}
"""
name = "meta"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["meta"] = json.loads(" ".join(self.args))
return args
class ReturnsContract(Contract):
"""Contract to check the output of a callback
general form:
@returns request(s)/item(s) [min=1 [max]]
e.g.:
@returns request
@returns request 2
@returns request 2 10
@returns request 0 10
"""
name = "returns"
object_type_verifiers: dict[str | None, Callable[[Any], bool]] = {
"request": lambda x: isinstance(x, Request),
"requests": lambda x: isinstance(x, Request),
"item": is_item,
"items": is_item,
}
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
if len(self.args) not in [1, 2, 3]:
raise ValueError(
f"Incorrect argument quantity: expected 1, 2 or 3, got {len(self.args)}"
)
self.obj_name = self.args[0] or None
self.obj_type_verifier = self.object_type_verifiers[self.obj_name]
try:
self.min_bound: float = int(self.args[1])
except IndexError:
self.min_bound = 1
try:
self.max_bound: float = int(self.args[2])
except IndexError:
self.max_bound = float("inf")
def post_process(self, output: list[Any]) -> None:
occurrences = 0
for x in output:
if self.obj_type_verifier(x):
occurrences += 1
assertion = self.min_bound <= occurrences <= self.max_bound
if not assertion:
if self.min_bound == self.max_bound:
expected = str(self.min_bound)
else:
expected = f"{self.min_bound}..{self.max_bound}"
raise ContractFail(
f"Returned {occurrences} {self.obj_name}, expected {expected}"
)
class ScrapesContract(Contract):
"""Contract to check presence of fields in scraped items
@scrapes page_name page_body
"""
name = "scrapes"
def post_process(self, output: list[Any]) -> None:
for x in output:
if is_item(x):
missing = [arg for arg in self.args if arg not in ItemAdapter(x)]
if missing:
missing_fields = ", ".join(missing)
raise ContractFail(f"Missing fields: {missing_fields}")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/contracts/__init__.py | scrapy/contracts/__init__.py | from __future__ import annotations
import re
import sys
from collections.abc import AsyncGenerator, Iterable
from functools import wraps
from inspect import getmembers
from types import CoroutineType
from typing import TYPE_CHECKING, Any, cast
from unittest import TestCase, TestResult
from scrapy.http import Request, Response
from scrapy.utils.python import get_spec
from scrapy.utils.spider import iterate_spider_output
if TYPE_CHECKING:
from collections.abc import Callable
from twisted.python.failure import Failure
from scrapy import Spider
class Contract:
    """Base class for spider contracts.

    A contract wraps a spider callback so that optional ``pre_process`` /
    ``post_process`` checks run around it, recording their outcomes on a
    :class:`~unittest.TestResult` via per-hook synthetic test cases.
    """

    request_cls: type[Request] | None = None
    name: str

    def __init__(self, method: Callable, *args: Any):
        # One synthetic test case per hook so pre/post failures are reported
        # separately against the same spider method.
        self.testcase_pre = _create_testcase(method, f"@{self.name} pre-hook")
        self.testcase_post = _create_testcase(method, f"@{self.name} post-hook")
        self.args: tuple[Any, ...] = args

    def add_pre_hook(self, request: Request, results: TestResult) -> Request:
        """Wrap the request callback so ``pre_process`` runs on the response
        before the original callback; no-op if the subclass defines no
        ``pre_process``."""
        if not hasattr(self, "pre_process"):
            return request
        original_cb = request.callback
        assert original_cb is not None

        @wraps(original_cb)
        def pre_hook(response: Response, **cb_kwargs: Any) -> list[Any]:
            # Record the pre-hook outcome: AssertionError -> failure,
            # any other exception -> error, otherwise success.
            try:
                results.startTest(self.testcase_pre)
                self.pre_process(response)
                results.stopTest(self.testcase_pre)
            except AssertionError:
                results.addFailure(self.testcase_pre, sys.exc_info())
            except Exception:
                results.addError(self.testcase_pre, sys.exc_info())
            else:
                results.addSuccess(self.testcase_pre)
            cb_result = original_cb(response, **cb_kwargs)
            if isinstance(cb_result, (AsyncGenerator, CoroutineType)):
                raise TypeError("Contracts don't support async callbacks")
            return list(cast("Iterable[Any]", iterate_spider_output(cb_result)))

        request.callback = pre_hook
        return request

    def add_post_hook(self, request: Request, results: TestResult) -> Request:
        """Wrap the request callback so ``post_process`` runs on the collected
        callback output; no-op if the subclass defines no ``post_process``."""
        if not hasattr(self, "post_process"):
            return request
        original_cb = request.callback
        assert original_cb is not None

        @wraps(original_cb)
        def post_hook(response: Response, **cb_kwargs: Any) -> list[Any]:
            cb_result = original_cb(response, **cb_kwargs)
            if isinstance(cb_result, (AsyncGenerator, CoroutineType)):
                raise TypeError("Contracts don't support async callbacks")
            output = list(cast("Iterable[Any]", iterate_spider_output(cb_result)))
            # Record the post-hook outcome exactly like the pre-hook does.
            try:
                results.startTest(self.testcase_post)
                self.post_process(output)
                results.stopTest(self.testcase_post)
            except AssertionError:
                results.addFailure(self.testcase_post, sys.exc_info())
            except Exception:
                results.addError(self.testcase_post, sys.exc_info())
            else:
                results.addSuccess(self.testcase_post)
            return output

        request.callback = post_hook
        return request

    def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
        """Hook for subclasses to tweak the kwargs used to build the request."""
        return args
class ContractsManager:
    """Registry of :class:`Contract` classes and driver for running them.

    Contracts are declared as ``@name arg1 arg2 ...`` lines in spider
    callback docstrings; running them builds requests whose callbacks record
    outcomes on a :class:`~unittest.TestResult`.
    """

    # Class-level default registry. Instances copy it in __init__ so that
    # registrations on one manager do not leak into other instances.
    contracts: dict[str, type[Contract]] = {}

    def __init__(self, contracts: Iterable[type[Contract]]):
        # Work on an instance-level copy instead of mutating the shared
        # class attribute (the original wrote into the class dict).
        self.contracts = dict(self.contracts)
        for contract in contracts:
            self.contracts[contract.name] = contract

    def tested_methods_from_spidercls(self, spidercls: type[Spider]) -> list[str]:
        """Return names of *spidercls* callables whose docstring contains at
        least one ``@...`` contract line."""
        is_method = re.compile(r"^\s*@", re.MULTILINE).search
        return [
            key
            for key, value in getmembers(spidercls)
            if callable(value) and value.__doc__ and is_method(value.__doc__)
        ]

    def extract_contracts(self, method: Callable) -> list[Contract]:
        """Parse ``@name args`` lines from *method*'s docstring into
        instantiated contracts, in declaration order.

        Raises ``KeyError`` if a line names an unregistered contract.
        """
        contracts: list[Contract] = []
        assert method.__doc__ is not None
        for line in method.__doc__.split("\n"):
            line = line.strip()
            if line.startswith("@"):
                m = re.match(r"@(\w+)\s*(.*)", line)
                if m is None:
                    continue
                name, args = m.groups()
                # re.split(r"\s+", "") returns [""], which would pass one
                # spurious empty-string argument for argument-less contract
                # lines such as a bare "@name"; pass no arguments instead.
                split_args = re.split(r"\s+", args) if args else []
                contracts.append(self.contracts[name](method, *split_args))
        return contracts

    def from_spider(self, spider: Spider, results: TestResult) -> list[Request | None]:
        """Build one contract-checking request per tested method of *spider*.

        Errors raised while building a request are recorded on *results*
        instead of propagating.
        """
        requests: list[Request | None] = []
        for method in self.tested_methods_from_spidercls(type(spider)):
            bound_method = spider.__getattribute__(method)
            try:
                requests.append(self.from_method(bound_method, results))
            except Exception:
                case = _create_testcase(bound_method, "contract")
                results.addError(case, sys.exc_info())
        return requests

    def from_method(self, method: Callable, results: TestResult) -> Request | None:
        """Build the contract-checking request for a single bound callback,
        or return ``None`` if it has no contracts or required request
        arguments are missing."""
        contracts = self.extract_contracts(method)
        if contracts:
            request_cls = Request
            # The last contract declaring a request_cls wins.
            for contract in contracts:
                if contract.request_cls is not None:
                    request_cls = contract.request_cls
            # calculate request args
            args, kwargs = get_spec(request_cls.__init__)

            # Don't filter requests to allow
            # testing different callbacks on the same URL.
            kwargs["dont_filter"] = True
            kwargs["callback"] = method

            for contract in contracts:
                kwargs = contract.adjust_request_args(kwargs)

            args.remove("self")

            # check if all positional arguments are defined in kwargs
            if set(args).issubset(set(kwargs)):
                request = request_cls(**kwargs)

                # execute pre and post hooks in order
                for contract in reversed(contracts):
                    request = contract.add_pre_hook(request, results)
                for contract in contracts:
                    request = contract.add_post_hook(request, results)

                self._clean_req(request, method, results)
                return request
        return None

    def _clean_req(
        self, request: Request, method: Callable, results: TestResult
    ) -> None:
        """stop the request from returning objects and records any errors"""

        cb = request.callback
        assert cb is not None

        @wraps(cb)
        def cb_wrapper(response: Response, **cb_kwargs: Any) -> None:
            try:
                output = cb(response, **cb_kwargs)
                output = list(cast("Iterable[Any]", iterate_spider_output(output)))
            except Exception:
                case = _create_testcase(method, "callback")
                results.addError(case, sys.exc_info())

        def eb_wrapper(failure: Failure) -> None:
            case = _create_testcase(method, "errback")
            exc_info = failure.type, failure.value, failure.getTracebackObject()
            results.addError(case, exc_info)

        request.callback = cb_wrapper
        request.errback = eb_wrapper
def _create_testcase(method: Callable, desc: str) -> TestCase:
    """Build a synthetic :class:`TestCase` whose string form identifies
    *method* (a bound spider callback) and *desc*."""
    spider_name = method.__self__.name  # type: ignore[attr-defined]
    label = f"[{spider_name}] {method.__name__} ({desc})"
    case_name = f"{spider_name}_{method.__name__}"

    class ContractTestCase(TestCase):
        def __str__(_self) -> str:  # pylint: disable=no-self-argument
            return label

    # TestCase(name) requires an attribute of that name to exist; stub it.
    setattr(ContractTestCase, case_name, lambda x: x)
    return ContractTestCase(case_name)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/scraper.py | scrapy/core/scraper.py | """This module implements the Scraper component which parses responses and
extracts information from them"""
from __future__ import annotations
import logging
import warnings
from collections import deque
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, Any, TypeAlias, TypeVar
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python.failure import Failure
from scrapy import Spider, signals
from scrapy.core.spidermw import SpiderMiddlewareManager
from scrapy.exceptions import (
CloseSpider,
DropItem,
IgnoreRequest,
ScrapyDeprecationWarning,
)
from scrapy.http import Request, Response
from scrapy.pipelines import ItemPipelineManager
from scrapy.utils.asyncio import _parallel_asyncio, is_asyncio_available
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.defer import (
_defer_sleep_async,
_schedule_coro,
aiter_errback,
deferred_from_coro,
ensure_awaitable,
iter_errback,
maybe_deferred_to_future,
parallel,
parallel_async,
)
from scrapy.utils.deprecate import method_is_overridden
from scrapy.utils.log import failure_to_exc_info, logformatter_adapter
from scrapy.utils.misc import load_object, warn_on_generator_with_return_value
from scrapy.utils.python import global_object_name
from scrapy.utils.spider import iterate_spider_output
if TYPE_CHECKING:
from collections.abc import Generator, Iterable
from scrapy.crawler import Crawler
from scrapy.logformatter import LogFormatter
from scrapy.signalmanager import SignalManager
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
QueueTuple: TypeAlias = tuple[Response | Failure, Request, Deferred[None]]
class Slot:
    """Scraper slot (one per running spider).

    Tracks queued and in-progress responses plus an approximate byte count of
    held response data, so the caller can apply backpressure via
    :meth:`needs_backout`.
    """

    # Minimum accounted size per queued result, even for tiny responses
    # and for failures (which carry no body).
    MIN_RESPONSE_SIZE = 1024

    def __init__(self, max_active_size: int = 5000000):
        self.max_active_size: int = max_active_size
        self.queue: deque[QueueTuple] = deque()
        self.active: set[Request] = set()
        self.active_size: int = 0
        self.itemproc_size: int = 0
        self.closing: Deferred[Spider] | None = None

    def _result_size(self, result: Response | Failure) -> int:
        # Responses count as at least MIN_RESPONSE_SIZE; failures count as
        # exactly MIN_RESPONSE_SIZE.
        if isinstance(result, Response):
            return max(len(result.body), self.MIN_RESPONSE_SIZE)
        return self.MIN_RESPONSE_SIZE

    def add_response_request(
        self, result: Response | Failure, request: Request
    ) -> Deferred[None]:
        # this Deferred will be awaited in enqueue_scrape()
        deferred: Deferred[None] = Deferred()
        self.queue.append((result, request, deferred))
        self.active_size += self._result_size(result)
        return deferred

    def next_response_request_deferred(self) -> QueueTuple:
        """Pop the next queued tuple and mark its request as active."""
        entry = self.queue.popleft()
        self.active.add(entry[1])
        return entry

    def finish_response(self, result: Response | Failure, request: Request) -> None:
        """Release accounting for a request whose processing finished."""
        self.active.remove(request)
        self.active_size -= self._result_size(result)

    def is_idle(self) -> bool:
        return not self.queue and not self.active

    def needs_backout(self) -> bool:
        return self.active_size > self.max_active_size
class Scraper:
    """Run downloader results through spider callbacks (via the spider
    middleware chain) and route the produced items/requests to the item
    pipelines and the engine.

    Holds one :class:`Slot` per running spider for backpressure accounting.
    """

    def __init__(self, crawler: Crawler) -> None:
        # Per-spider state; assigned in open_spider_async().
        self.slot: Slot | None = None
        self.spidermw: SpiderMiddlewareManager = SpiderMiddlewareManager.from_crawler(
            crawler
        )
        itemproc_cls: type[ItemPipelineManager] = load_object(
            crawler.settings["ITEM_PROCESSOR"]
        )
        self.itemproc: ItemPipelineManager = itemproc_cls.from_crawler(crawler)
        # method name -> whether the item processor's *_async variant can be
        # used; filled by _check_deprecated_itemproc_method() below.
        self._itemproc_has_async: dict[str, bool] = {}
        for method in [
            "open_spider",
            "close_spider",
            "process_item",
        ]:
            self._check_deprecated_itemproc_method(method)
        self.concurrent_items: int = crawler.settings.getint("CONCURRENT_ITEMS")
        self.crawler: Crawler = crawler
        self.signals: SignalManager = crawler.signals
        assert crawler.logformatter
        self.logformatter: LogFormatter = crawler.logformatter

    def _check_deprecated_itemproc_method(self, method: str) -> None:
        """Decide whether the ``{method}_async`` variant of the item
        processor can be used, warning about deprecated setups, and record
        the decision in ``self._itemproc_has_async[method]``."""
        itemproc_cls = type(self.itemproc)
        # NOTE(review): this checks "process_item_async" for every *method*,
        # presumably as a proxy for "implements the async API at all", yet
        # the warning text names {method}_async — confirm this is intended.
        if not hasattr(self.itemproc, "process_item_async"):
            warnings.warn(
                f"{global_object_name(itemproc_cls)} doesn't define a {method}_async() method,"
                f" this is deprecated and the method will be required in future Scrapy versions.",
                ScrapyDeprecationWarning,
                stacklevel=2,
            )
            self._itemproc_has_async[method] = False
        elif (
            issubclass(itemproc_cls, ItemPipelineManager)
            and method_is_overridden(itemproc_cls, ItemPipelineManager, method)
            and not method_is_overridden(
                itemproc_cls, ItemPipelineManager, f"{method}_async"
            )
        ):
            # Subclass customized the sync method only: keep calling the sync
            # one so the customization is not bypassed.
            warnings.warn(
                f"{global_object_name(itemproc_cls)} overrides {method}() but doesn't override {method}_async()."
                f" This is deprecated. {method}() will be used, but in future Scrapy versions {method}_async() will be used instead.",
                ScrapyDeprecationWarning,
                stacklevel=2,
            )
            self._itemproc_has_async[method] = False
        else:
            self._itemproc_has_async[method] = True

    def open_spider(self, spider: Spider | None = None) -> Deferred[None]:
        """Deprecated sync wrapper; the *spider* argument is ignored."""
        warnings.warn(
            "Scraper.open_spider() is deprecated, use open_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.open_spider_async())

    async def open_spider_async(self) -> None:
        """Open the spider for scraping and allocate resources for it.

        Creates the scraper :class:`Slot` and opens the item processor.
        Raises :exc:`RuntimeError` if ``crawler.spider`` is not set yet.

        .. versionadded:: VERSION
        """
        self.slot = Slot(self.crawler.settings.getint("SCRAPER_SLOT_MAX_ACTIVE_SIZE"))
        if not self.crawler.spider:
            raise RuntimeError(
                "Scraper.open_spider() called before Crawler.spider is set."
            )
        if self._itemproc_has_async["open_spider"]:
            await self.itemproc.open_spider_async()
        else:
            await maybe_deferred_to_future(
                self.itemproc.open_spider(self.crawler.spider)
            )

    def close_spider(self, spider: Spider | None = None) -> Deferred[None]:
        """Deprecated sync wrapper; the *spider* argument is ignored."""
        warnings.warn(
            "Scraper.close_spider() is deprecated, use close_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.close_spider_async())

    async def close_spider_async(self) -> None:
        """Close the spider being scraped and release its resources.

        Waits until the slot is idle before closing the item processor.

        .. versionadded:: VERSION
        """
        if self.slot is None:
            raise RuntimeError("Scraper slot not assigned")
        self.slot.closing = Deferred()
        # Fires slot.closing immediately if nothing is queued or active.
        self._check_if_closing()
        await maybe_deferred_to_future(self.slot.closing)
        if self._itemproc_has_async["close_spider"]:
            await self.itemproc.close_spider_async()
        else:
            assert self.crawler.spider
            await maybe_deferred_to_future(
                self.itemproc.close_spider(self.crawler.spider)
            )

    def is_idle(self) -> bool:
        """Return True if there isn't any more spiders to process"""
        return not self.slot

    def _check_if_closing(self) -> None:
        # Fire the pending close Deferred once the slot has fully drained.
        assert self.slot is not None  # typing
        if self.slot.closing and self.slot.is_idle():
            assert self.crawler.spider
            self.slot.closing.callback(self.crawler.spider)

    @inlineCallbacks
    @_warn_spider_arg
    def enqueue_scrape(
        self, result: Response | Failure, request: Request, spider: Spider | None = None
    ) -> Generator[Deferred[Any], Any, None]:
        """Queue a downloader *result* for scraping and wait until its
        processing finishes; errors from the scrape machinery itself are
        logged as scraper bugs."""
        if self.slot is None:
            raise RuntimeError("Scraper slot not assigned")
        dfd = self.slot.add_response_request(result, request)
        self._scrape_next()
        try:
            yield dfd  # fired in _wait_for_processing()
        except Exception:
            logger.error(
                "Scraper bug processing %(request)s",
                {"request": request},
                exc_info=True,
                extra={"spider": self.crawler.spider},
            )
        finally:
            # Always release accounting, re-check close, and keep draining.
            self.slot.finish_response(result, request)
            self._check_if_closing()
            self._scrape_next()

    def _scrape_next(self) -> None:
        # Drain the queue, scheduling one processing coroutine per entry.
        assert self.slot is not None  # typing
        while self.slot.queue:
            result, request, queue_dfd = self.slot.next_response_request_deferred()
            _schedule_coro(self._wait_for_processing(result, request, queue_dfd))

    async def _scrape(self, result: Response | Failure, request: Request) -> None:
        """Handle the downloaded response or failure through the spider callback/errback."""
        if not isinstance(result, (Response, Failure)):
            raise TypeError(
                f"Incorrect type: expected Response or Failure, got {type(result)}: {result!r}"
            )
        output: Iterable[Any] | AsyncIterator[Any]
        if isinstance(result, Response):
            try:
                # call the spider middlewares and the request callback with the response
                output = await self.spidermw.scrape_response_async(
                    self.call_spider_async, result, request
                )
            except Exception:
                self.handle_spider_error(Failure(), request, result)
            else:
                await self.handle_spider_output_async(output, request, result)
            return
        try:
            # call the request errback with the downloader error
            output = await self.call_spider_async(result, request)
        except Exception as spider_exc:
            # the errback didn't silence the exception
            assert self.crawler.spider
            if not result.check(IgnoreRequest):
                logkws = self.logformatter.download_error(
                    result, request, self.crawler.spider
                )
                logger.log(
                    *logformatter_adapter(logkws),
                    extra={"spider": self.crawler.spider},
                    exc_info=failure_to_exc_info(result),
                )
            if spider_exc is not result.value:
                # the errback raised a different exception, handle it
                self.handle_spider_error(Failure(), request, result)
        else:
            await self.handle_spider_output_async(output, request, result)

    async def _wait_for_processing(
        self, result: Response | Failure, request: Request, queue_dfd: Deferred[None]
    ) -> None:
        # Bridge _scrape() completion to the Deferred returned by
        # Slot.add_response_request().
        try:
            await self._scrape(result, request)
        except Exception:
            queue_dfd.errback(Failure())
        else:
            queue_dfd.callback(None)  # awaited in enqueue_scrape()

    def call_spider(
        self, result: Response | Failure, request: Request, spider: Spider | None = None
    ) -> Deferred[Iterable[Any] | AsyncIterator[Any]]:
        """Deprecated sync wrapper; the *spider* argument is ignored."""
        warnings.warn(
            "Scraper.call_spider() is deprecated, use call_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.call_spider_async(result, request))

    async def call_spider_async(
        self, result: Response | Failure, request: Request
    ) -> Iterable[Any] | AsyncIterator[Any]:
        """Call the request callback or errback with the response or failure.

        Returns the callback/errback output normalized into an iterable.
        Re-raises the original failure when there is no errback, or when the
        errback returns a Failure.

        .. versionadded:: 2.13
        """
        # Yield to the reactor/event loop before touching spider code.
        await _defer_sleep_async()
        assert self.crawler.spider
        if isinstance(result, Response):
            if getattr(result, "request", None) is None:
                result.request = request
            assert result.request
            callback = result.request.callback or self.crawler.spider._parse
            warn_on_generator_with_return_value(self.crawler.spider, callback)
            output = callback(result, **result.request.cb_kwargs)
            if isinstance(output, Deferred):
                warnings.warn(
                    f"{callback} returned a Deferred."
                    f" Returning Deferreds from spider callbacks is deprecated.",
                    ScrapyDeprecationWarning,
                    stacklevel=2,
                )
        else:  # result is a Failure
            # TODO: properly type adding this attribute to a Failure
            result.request = request  # type: ignore[attr-defined]
            if not request.errback:
                result.raiseException()
            warn_on_generator_with_return_value(self.crawler.spider, request.errback)
            output = request.errback(result)
            if isinstance(output, Failure):
                output.raiseException()
            # else the errback returned actual output (like a callback),
            # which needs to be passed to iterate_spider_output()
            if isinstance(output, Deferred):
                warnings.warn(
                    f"{request.errback} returned a Deferred."
                    f" Returning Deferreds from spider errbacks is deprecated.",
                    ScrapyDeprecationWarning,
                    stacklevel=2,
                )
        return await ensure_awaitable(iterate_spider_output(output))

    @_warn_spider_arg
    def handle_spider_error(
        self,
        _failure: Failure,
        request: Request,
        response: Response | Failure,
        spider: Spider | None = None,
    ) -> None:
        """Handle an exception raised by a spider callback or errback."""
        assert self.crawler.spider
        exc = _failure.value
        if isinstance(exc, CloseSpider):
            # CloseSpider is a control-flow exception, not an error: schedule
            # an engine shutdown instead of logging/stats.
            assert self.crawler.engine is not None  # typing
            _schedule_coro(
                self.crawler.engine.close_spider_async(reason=exc.reason or "cancelled")
            )
            return
        logkws = self.logformatter.spider_error(
            _failure, request, response, self.crawler.spider
        )
        logger.log(
            *logformatter_adapter(logkws),
            exc_info=failure_to_exc_info(_failure),
            extra={"spider": self.crawler.spider},
        )
        self.signals.send_catch_log(
            signal=signals.spider_error,
            failure=_failure,
            response=response,
            spider=self.crawler.spider,
        )
        assert self.crawler.stats
        self.crawler.stats.inc_value("spider_exceptions/count")
        self.crawler.stats.inc_value(
            f"spider_exceptions/{_failure.value.__class__.__name__}"
        )

    def handle_spider_output(
        self,
        result: Iterable[_T] | AsyncIterator[_T],
        request: Request,
        response: Response | Failure,
        spider: Spider | None = None,
    ) -> Deferred[None]:
        """Pass items/requests produced by a callback to ``_process_spidermw_output()`` in parallel."""
        warnings.warn(
            "Scraper.handle_spider_output() is deprecated, use handle_spider_output_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(
            self.handle_spider_output_async(result, request, response)
        )

    async def handle_spider_output_async(
        self,
        result: Iterable[_T] | AsyncIterator[_T],
        request: Request,
        response: Response | Failure,
    ) -> None:
        """Pass items/requests produced by a callback to ``_process_spidermw_output()`` in parallel.

        Picks the parallelization primitive based on whether asyncio is
        available and whether *result* is sync or async iterable; iteration
        errors are routed to :meth:`handle_spider_error`.

        .. versionadded:: 2.13
        """
        it: Iterable[_T] | AsyncIterator[_T]
        if is_asyncio_available():
            if isinstance(result, AsyncIterator):
                it = aiter_errback(result, self.handle_spider_error, request, response)
            else:
                it = iter_errback(result, self.handle_spider_error, request, response)
            await _parallel_asyncio(
                it, self.concurrent_items, self._process_spidermw_output_async, response
            )
            return
        if isinstance(result, AsyncIterator):
            it = aiter_errback(result, self.handle_spider_error, request, response)
            await maybe_deferred_to_future(
                parallel_async(
                    it,
                    self.concurrent_items,
                    self._process_spidermw_output,
                    response,
                )
            )
            return
        it = iter_errback(result, self.handle_spider_error, request, response)
        await maybe_deferred_to_future(
            parallel(
                it,
                self.concurrent_items,
                self._process_spidermw_output,
                response,
            )
        )

    def _process_spidermw_output(
        self, output: Any, response: Response | Failure
    ) -> Deferred[None]:
        """Process each Request/Item (given in the output parameter) returned
        from the given spider.

        Items are sent to the item pipelines, requests are scheduled.
        """
        return deferred_from_coro(self._process_spidermw_output_async(output, response))

    async def _process_spidermw_output_async(
        self, output: Any, response: Response | Failure
    ) -> None:
        """Process each Request/Item (given in the output parameter) returned
        from the given spider.

        Items are sent to the item pipelines, requests are scheduled.
        """
        if isinstance(output, Request):
            assert self.crawler.engine is not None  # typing
            self.crawler.engine.crawl(request=output)
            return
        # None outputs are silently dropped; anything else is treated as an item.
        if output is not None:
            await self.start_itemproc_async(output, response=response)

    def start_itemproc(
        self, item: Any, *, response: Response | Failure | None
    ) -> Deferred[None]:
        """Send *item* to the item pipelines for processing.

        *response* is the source of the item data. If the item does not come
        from response data, e.g. it was hard-coded, set it to ``None``.
        """
        warnings.warn(
            "Scraper.start_itemproc() is deprecated, use start_itemproc_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.start_itemproc_async(item, response=response))

    async def start_itemproc_async(
        self, item: Any, *, response: Response | Failure | None
    ) -> None:
        """Send *item* to the item pipelines for processing.

        *response* is the source of the item data. If the item does not come
        from response data, e.g. it was hard-coded, set it to ``None``.

        Emits ``item_scraped`` on success, ``item_dropped`` on
        :exc:`DropItem`, and ``item_error`` on any other pipeline exception.

        .. versionadded:: VERSION
        """
        assert self.slot is not None  # typing
        assert self.crawler.spider is not None  # typing
        self.slot.itemproc_size += 1
        try:
            if self._itemproc_has_async["process_item"]:
                output = await self.itemproc.process_item_async(item)
            else:
                output = await maybe_deferred_to_future(
                    self.itemproc.process_item(item, self.crawler.spider)
                )
        except DropItem as ex:
            # Expected control flow: pipelines drop items via DropItem.
            logkws = self.logformatter.dropped(item, ex, response, self.crawler.spider)
            if logkws is not None:
                logger.log(
                    *logformatter_adapter(logkws), extra={"spider": self.crawler.spider}
                )
            await self.signals.send_catch_log_async(
                signal=signals.item_dropped,
                item=item,
                response=response,
                spider=self.crawler.spider,
                exception=ex,
            )
        except Exception as ex:
            logkws = self.logformatter.item_error(
                item, ex, response, self.crawler.spider
            )
            logger.log(
                *logformatter_adapter(logkws),
                extra={"spider": self.crawler.spider},
                exc_info=True,
            )
            await self.signals.send_catch_log_async(
                signal=signals.item_error,
                item=item,
                response=response,
                spider=self.crawler.spider,
                failure=Failure(),
            )
        else:
            logkws = self.logformatter.scraped(output, response, self.crawler.spider)
            if logkws is not None:
                logger.log(
                    *logformatter_adapter(logkws), extra={"spider": self.crawler.spider}
                )
            await self.signals.send_catch_log_async(
                signal=signals.item_scraped,
                item=output,
                response=response,
                spider=self.crawler.spider,
            )
        finally:
            self.slot.itemproc_size -= 1
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/spidermw.py | scrapy/core/spidermw.py | """
Spider Middleware manager
See documentation in docs/topics/spider-middleware.rst
"""
from __future__ import annotations
import logging
from collections.abc import AsyncIterator, Callable, Coroutine, Iterable
from functools import wraps
from inspect import isasyncgenfunction, iscoroutine
from itertools import islice
from typing import TYPE_CHECKING, Any, TypeAlias, TypeVar, cast
from warnings import warn
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python.failure import Failure
from scrapy import Request, Spider
from scrapy.exceptions import ScrapyDeprecationWarning, _InvalidOutput
from scrapy.http import Response
from scrapy.middleware import MiddlewareManager
from scrapy.utils.asyncgen import as_async_generator, collect_asyncgen
from scrapy.utils.conf import build_component_list
from scrapy.utils.defer import (
_defer_sleep_async,
deferred_from_coro,
maybe_deferred_to_future,
)
from scrapy.utils.python import MutableAsyncChain, MutableChain, global_object_name
if TYPE_CHECKING:
from collections.abc import Generator
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
ScrapeFunc: TypeAlias = Callable[
[Response | Failure, Request],
Coroutine[Any, Any, Iterable[_T] | AsyncIterator[_T]],
]
def _isiterable(o: Any) -> bool:
    """Return ``True`` if *o* is a synchronous or asynchronous iterable."""
    return isinstance(o, Iterable) or isinstance(o, AsyncIterator)
class SpiderMiddlewareManager(MiddlewareManager):
component_name = "spider middleware"
@classmethod
def _get_mwlist_from_settings(cls, settings: BaseSettings) -> list[Any]:
return build_component_list(settings.getwithbase("SPIDER_MIDDLEWARES"))
def __init__(self, *middlewares: Any, crawler: Crawler | None = None) -> None:
self._check_deprecated_process_start_requests_use(middlewares)
super().__init__(*middlewares, crawler=crawler)
def _check_deprecated_process_start_requests_use(
self, middlewares: tuple[Any, ...]
) -> None:
deprecated_middlewares = [
middleware
for middleware in middlewares
if hasattr(middleware, "process_start_requests")
and not hasattr(middleware, "process_start")
]
modern_middlewares = [
middleware
for middleware in middlewares
if not hasattr(middleware, "process_start_requests")
and hasattr(middleware, "process_start")
]
if deprecated_middlewares and modern_middlewares:
raise ValueError(
"You are trying to combine spider middlewares that only "
"define the deprecated process_start_requests() method () "
"with spider middlewares that only define the "
"process_start() method (). This is not possible. You must "
"either disable or make universal 1 of those 2 sets of "
"spider middlewares. Making a spider middleware universal "
"means having it define both methods. See the release notes "
"of Scrapy 2.13 for details: "
"https://docs.scrapy.org/en/2.13/news.html"
)
self._use_start_requests = bool(deprecated_middlewares)
if self._use_start_requests:
deprecated_middleware_list = ", ".join(
global_object_name(middleware.__class__)
for middleware in deprecated_middlewares
)
warn(
f"The following enabled spider middlewares, directly or "
f"through their parent classes, define the deprecated "
f"process_start_requests() method: "
f"{deprecated_middleware_list}. process_start_requests() has "
f"been deprecated in favor of a new method, process_start(), "
f"to support asynchronous code execution. "
f"process_start_requests() will stop being called in a future "
f"version of Scrapy. If you use Scrapy 2.13 or higher "
f"only, replace process_start_requests() with "
f"process_start(); note that process_start() is a coroutine "
f"(async def). If you need to maintain compatibility with "
f"lower Scrapy versions, when defining "
f"process_start_requests() in a spider middleware class, "
f"define process_start() as well. See the release notes of "
f"Scrapy 2.13 for details: "
f"https://docs.scrapy.org/en/2.13/news.html",
ScrapyDeprecationWarning,
)
def _add_middleware(self, mw: Any) -> None:
if hasattr(mw, "process_spider_input"):
self.methods["process_spider_input"].append(mw.process_spider_input)
self._check_mw_method_spider_arg(mw.process_spider_input)
if self._use_start_requests:
if hasattr(mw, "process_start_requests"):
self.methods["process_start_requests"].appendleft(
mw.process_start_requests
)
elif hasattr(mw, "process_start"):
self.methods["process_start"].appendleft(mw.process_start)
process_spider_output = self._get_async_method_pair(mw, "process_spider_output")
self.methods["process_spider_output"].appendleft(process_spider_output)
if callable(process_spider_output):
self._check_mw_method_spider_arg(process_spider_output)
elif isinstance(process_spider_output, tuple):
for m in process_spider_output:
self._check_mw_method_spider_arg(m)
process_spider_exception = getattr(mw, "process_spider_exception", None)
self.methods["process_spider_exception"].appendleft(process_spider_exception)
if process_spider_exception is not None:
self._check_mw_method_spider_arg(process_spider_exception)
async def _process_spider_input(
self,
scrape_func: ScrapeFunc[_T],
response: Response,
request: Request,
) -> Iterable[_T] | AsyncIterator[_T]:
for method in self.methods["process_spider_input"]:
method = cast("Callable", method)
try:
if method in self._mw_methods_requiring_spider:
result = method(response=response, spider=self._spider)
else:
result = method(response=response)
if result is not None:
msg = (
f"{global_object_name(method)} must return None "
f"or raise an exception, got {type(result)}"
)
raise _InvalidOutput(msg)
except _InvalidOutput:
raise
except Exception:
return await scrape_func(Failure(), request)
return await scrape_func(response, request)
def _evaluate_iterable(
self,
response: Response,
iterable: Iterable[_T] | AsyncIterator[_T],
exception_processor_index: int,
recover_to: MutableChain[_T] | MutableAsyncChain[_T],
) -> Iterable[_T] | AsyncIterator[_T]:
def process_sync(iterable: Iterable[_T]) -> Iterable[_T]:
try:
yield from iterable
except Exception as ex:
exception_result = cast(
"Failure | MutableChain[_T]",
self._process_spider_exception(
response, ex, exception_processor_index
),
)
if isinstance(exception_result, Failure):
raise
assert isinstance(recover_to, MutableChain)
recover_to.extend(exception_result)
async def process_async(iterable: AsyncIterator[_T]) -> AsyncIterator[_T]:
try:
async for r in iterable:
yield r
except Exception as ex:
exception_result = cast(
"Failure | MutableAsyncChain[_T]",
self._process_spider_exception(
response, ex, exception_processor_index
),
)
if isinstance(exception_result, Failure):
raise
assert isinstance(recover_to, MutableAsyncChain)
recover_to.extend(exception_result)
if isinstance(iterable, AsyncIterator):
return process_async(iterable)
return process_sync(iterable)
def _process_spider_exception(
self,
response: Response,
exception: Exception,
start_index: int = 0,
) -> MutableChain[_T] | MutableAsyncChain[_T]:
# don't handle _InvalidOutput exception
if isinstance(exception, _InvalidOutput):
raise exception
method_list = islice(
self.methods["process_spider_exception"], start_index, None
)
for method_index, method in enumerate(method_list, start=start_index):
if method is None:
continue
method = cast("Callable", method)
if method in self._mw_methods_requiring_spider:
result = method(
response=response, exception=exception, spider=self._spider
)
else:
result = method(response=response, exception=exception)
if _isiterable(result):
# stop exception handling by handing control over to the
# process_spider_output chain if an iterable has been returned
dfd: Deferred[MutableChain[_T] | MutableAsyncChain[_T]] = (
self._process_spider_output(response, result, method_index + 1)
)
# _process_spider_output() returns a Deferred only because of downgrading so this can be
# simplified when downgrading is removed.
if dfd.called:
# the result is available immediately if _process_spider_output didn't do downgrading
return cast("MutableChain[_T] | MutableAsyncChain[_T]", dfd.result)
# we forbid waiting here because otherwise we would need to return a deferred from
# _process_spider_exception too, which complicates the architecture
msg = f"Async iterable returned from {global_object_name(method)} cannot be downgraded"
raise _InvalidOutput(msg)
if result is None:
continue
msg = (
f"{global_object_name(method)} must return None "
f"or an iterable, got {type(result)}"
)
raise _InvalidOutput(msg)
raise exception
# This method cannot be made async def, as _process_spider_exception relies on the Deferred result
# being available immediately which doesn't work when it's a wrapped coroutine.
# It also needs @inlineCallbacks only because of downgrading so it can be removed when downgrading is removed.
@inlineCallbacks
def _process_spider_output(
self,
response: Response,
result: Iterable[_T] | AsyncIterator[_T],
start_index: int = 0,
) -> Generator[Deferred[Any], Any, MutableChain[_T] | MutableAsyncChain[_T]]:
# items in this iterable do not need to go through the process_spider_output
# chain, they went through it already from the process_spider_exception method
recovered: MutableChain[_T] | MutableAsyncChain[_T]
last_result_is_async = isinstance(result, AsyncIterator)
recovered = MutableAsyncChain() if last_result_is_async else MutableChain()
# There are three cases for the middleware: def foo, async def foo, def foo + async def foo_async.
# 1. def foo. Sync iterables are passed as is, async ones are downgraded.
# 2. async def foo. Sync iterables are upgraded, async ones are passed as is.
# 3. def foo + async def foo_async. Iterables are passed to the respective method.
# Storing methods and method tuples in the same list is weird but we should be able to roll this back
# when we drop this compatibility feature.
method_list = islice(self.methods["process_spider_output"], start_index, None)
for method_index, method_pair in enumerate(method_list, start=start_index):
if method_pair is None:
continue
need_upgrade = need_downgrade = False
if isinstance(method_pair, tuple):
# This tuple handling is only needed until _async compatibility methods are removed.
method_sync, method_async = method_pair
method = method_async if last_result_is_async else method_sync
else:
method = method_pair
if not last_result_is_async and isasyncgenfunction(method):
need_upgrade = True
elif last_result_is_async and not isasyncgenfunction(method):
need_downgrade = True
try:
if need_upgrade:
# Iterable -> AsyncIterator
result = as_async_generator(result)
elif need_downgrade:
logger.warning(
f"Async iterable passed to {global_object_name(method)} was"
f" downgraded to a non-async one. This is deprecated and will"
f" stop working in a future version of Scrapy. Please see"
f" https://docs.scrapy.org/en/latest/topics/coroutines.html#for-middleware-users"
f" for more information."
)
assert isinstance(result, AsyncIterator)
# AsyncIterator -> Iterable
result = yield deferred_from_coro(collect_asyncgen(result))
if isinstance(recovered, AsyncIterator):
recovered_collected = yield deferred_from_coro(
collect_asyncgen(recovered)
)
recovered = MutableChain(recovered_collected)
# might fail directly if the output value is not a generator
if method in self._mw_methods_requiring_spider:
result = method(
response=response, result=result, spider=self._spider
)
else:
result = method(response=response, result=result)
except Exception as ex:
exception_result: Failure | MutableChain[_T] | MutableAsyncChain[_T] = (
self._process_spider_exception(response, ex, method_index + 1)
)
if isinstance(exception_result, Failure):
raise
return exception_result
if _isiterable(result):
result = self._evaluate_iterable(
response, result, method_index + 1, recovered
)
else:
if iscoroutine(result):
result.close() # Silence warning about not awaiting
msg = (
f"{global_object_name(method)} must be an asynchronous "
f"generator (i.e. use yield)"
)
else:
msg = (
f"{global_object_name(method)} must return an iterable, got "
f"{type(result)}"
)
raise _InvalidOutput(msg)
last_result_is_async = isinstance(result, AsyncIterator)
if last_result_is_async:
return MutableAsyncChain(result, recovered)
return MutableChain(result, recovered) # type: ignore[arg-type]
    async def _process_callback_output(
        self,
        response: Response,
        result: Iterable[_T] | AsyncIterator[_T],
    ) -> MutableChain[_T] | MutableAsyncChain[_T]:
        """Run callback output through the ``process_spider_output`` chain.

        Wraps *result* in an exception-recovery chain whose (a)sync-ness
        mirrors the callback output, evaluates it through the spider
        middleware output methods, and returns a chain combining the
        middleware output with anything placed into the recovery chain.
        """
        recovered: MutableChain[_T] | MutableAsyncChain[_T]
        if isinstance(result, AsyncIterator):
            recovered = MutableAsyncChain()
        else:
            recovered = MutableChain()
        result = self._evaluate_iterable(response, result, 0, recovered)
        result = await maybe_deferred_to_future(
            cast(
                "Deferred[Iterable[_T] | AsyncIterator[_T]]",
                self._process_spider_output(response, result),
            )
        )
        if isinstance(result, AsyncIterator):
            return MutableAsyncChain(result, recovered)
        if isinstance(recovered, AsyncIterator):
            # The middleware chain produced a sync result while the recovery
            # chain is async; collect the latter so both can be combined
            # synchronously.
            recovered_collected = await collect_asyncgen(recovered)
            recovered = MutableChain(recovered_collected)
        return MutableChain(result, recovered)
    def scrape_response(
        self,
        scrape_func: Callable[
            [Response | Failure, Request],
            Deferred[Iterable[_T] | AsyncIterator[_T]],
        ],
        response: Response,
        request: Request,
        spider: Spider,
    ) -> Deferred[MutableChain[_T] | MutableAsyncChain[_T]]:
        """Deprecated Deferred-based wrapper around :meth:`scrape_response_async`."""
        warn(
            "SpiderMiddlewareManager.scrape_response() is deprecated, use scrape_response_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )

        # Adapt the Deferred-returning scrape_func to the coroutine-based API
        # expected by scrape_response_async().
        @wraps(scrape_func)
        async def scrape_func_wrapped(
            response: Response | Failure, request: Request
        ) -> Iterable[_T] | AsyncIterator[_T]:
            return await maybe_deferred_to_future(scrape_func(response, request))

        self._set_compat_spider(spider)
        return deferred_from_coro(
            self.scrape_response_async(scrape_func_wrapped, response, request)
        )
    async def scrape_response_async(
        self,
        scrape_func: ScrapeFunc[_T],
        response: Response,
        request: Request,
    ) -> MutableChain[_T] | MutableAsyncChain[_T]:
        """Process *response* through the spider middleware chain.

        Runs the ``process_spider_input`` chain followed by *scrape_func*,
        then passes the output through the ``process_spider_output`` chain;
        any exception raised along the way is routed through
        ``process_spider_exception``.
        """
        if not self.crawler:
            raise RuntimeError(
                "scrape_response_async() called on a SpiderMiddlewareManager"
                " instance created without a crawler."
            )

        async def process_callback_output(
            result: Iterable[_T] | AsyncIterator[_T],
        ) -> MutableChain[_T] | MutableAsyncChain[_T]:
            return await self._process_callback_output(response, result)

        def process_spider_exception(
            exception: Exception,
        ) -> MutableChain[_T] | MutableAsyncChain[_T]:
            return self._process_spider_exception(response, exception)

        try:
            it: Iterable[_T] | AsyncIterator[_T] = await self._process_spider_input(
                scrape_func, response, request
            )
            return await process_callback_output(it)
        except Exception as ex:
            # Yield control to the event loop before running the exception
            # chain (see _defer_sleep_async).
            await _defer_sleep_async()
            return process_spider_exception(ex)
    async def process_start(
        self, spider: Spider | None = None
    ) -> AsyncIterator[Any] | None:
        """Return the spider's start requests/items, processed by middlewares.

        Uses the deprecated ``start_requests()``/``process_start_requests``
        path when required for backward compatibility, upgrading its output
        to an async iterator; otherwise uses ``start()``/``process_start``.

        :param spider: deprecated; ignored when a crawler is set.
        """
        if spider:
            if self.crawler:
                msg = (
                    "Passing a spider argument to SpiderMiddlewareManager.process_start() is deprecated"
                    " and the passed value is ignored."
                )
            else:
                msg = (
                    "Passing a spider argument to SpiderMiddlewareManager.process_start() is deprecated,"
                    " SpiderMiddlewareManager should be instantiated with a Crawler instance instead."
                )
            warn(msg, category=ScrapyDeprecationWarning, stacklevel=2)
            self._set_compat_spider(spider)
        self._check_deprecated_start_requests_use()
        if self._use_start_requests:
            # Deprecated sync path: run process_start_requests middlewares,
            # then upgrade the result to an async iterator.
            sync_start = iter(self._spider.start_requests())
            sync_start = await self._process_chain(
                "process_start_requests", sync_start, always_add_spider=True
            )
            start: AsyncIterator[Any] = as_async_generator(sync_start)
        else:
            start = self._spider.start()
            start = await self._process_chain("process_start", start)
        return start
    def _check_deprecated_start_requests_use(self) -> None:
        """Warn or raise about deprecated ``start_requests()`` usage.

        Walks the spider's MRO to find the most derived classes defining
        ``start_requests()`` and ``start()``, warns when the spider relies on
        the deprecated ``start_requests()``, and raises when deprecated-only
        spider middlewares are combined with a spider that only overrides
        ``start()``.
        """
        start_requests_cls = None
        start_cls = None
        spidercls = self._spider.__class__
        mro = spidercls.__mro__
        # Find the most derived class defining each of the two methods.
        for cls in mro:
            cls_dict = cls.__dict__
            if start_requests_cls is None and "start_requests" in cls_dict:
                start_requests_cls = cls
            if start_cls is None and "start" in cls_dict:
                start_cls = cls
            if start_requests_cls is not None and start_cls is not None:
                break
        # Spider defines both, start_requests and start.
        assert start_requests_cls is not None
        assert start_cls is not None
        # start_requests() is overridden more specifically than start():
        # the spider relies on the deprecated method.
        if (
            start_requests_cls is not Spider
            and start_cls is not start_requests_cls
            and mro.index(start_requests_cls) < mro.index(start_cls)
        ):
            src = global_object_name(start_requests_cls)
            if start_requests_cls is not spidercls:
                src += f" (inherited by {global_object_name(spidercls)})"
            warn(
                f"{src} defines the deprecated start_requests() method. "
                f"start_requests() has been deprecated in favor of a new "
                f"method, start(), to support asynchronous code "
                f"execution. start_requests() will stop being called in a "
                f"future version of Scrapy. If you use Scrapy 2.13 or "
                f"higher only, replace start_requests() with start(); "
                f"note that start() is a coroutine (async def). If you "
                f"need to maintain compatibility with lower Scrapy versions, "
                f"when overriding start_requests() in a spider class, "
                f"override start() as well; you can use super() to "
                f"reuse the inherited start() implementation without "
                f"copy-pasting. See the release notes of Scrapy 2.13 for "
                f"details: https://docs.scrapy.org/en/2.13/news.html",
                ScrapyDeprecationWarning,
            )
        # Deprecated-only middlewares require start_requests(), but the
        # spider only overrides start(): this cannot work, so raise.
        if (
            self._use_start_requests
            and start_cls is not Spider
            and start_requests_cls is not start_cls
            and mro.index(start_cls) < mro.index(start_requests_cls)
        ):
            src = global_object_name(start_cls)
            if start_cls is not spidercls:
                src += f" (inherited by {global_object_name(spidercls)})"
            raise ValueError(
                f"{src} does not define the deprecated start_requests() "
                f"method. However, one or more of your enabled spider "
                f"middlewares (reported in an earlier deprecation warning) "
                f"define the process_start_requests() method, and not the "
                f"process_start() method, making them only compatible with "
                f"(deprecated) spiders that define the start_requests() "
                f"method. To solve this issue, disable the offending spider "
                f"middlewares, upgrade them as described in that earlier "
                f"deprecation warning, or make your spider compatible with "
                f"deprecated spider middlewares (and earlier Scrapy versions) "
                f"by defining a sync start_requests() method that works "
                f"similarly to its existing start() method. See the "
                f"release notes of Scrapy 2.13 for details: "
                f"https://docs.scrapy.org/en/2.13/news.html"
            )
# This method is only needed until _async compatibility methods are removed.
@staticmethod
def _get_async_method_pair(
mw: Any, methodname: str
) -> Callable | tuple[Callable, Callable] | None:
normal_method: Callable | None = getattr(mw, methodname, None)
methodname_async = methodname + "_async"
async_method: Callable | None = getattr(mw, methodname_async, None)
if not async_method:
if normal_method and not isasyncgenfunction(normal_method):
logger.warning(
f"Middleware {global_object_name(mw.__class__)} doesn't support"
f" asynchronous spider output, this is deprecated and will stop"
f" working in a future version of Scrapy. The middleware should"
f" be updated to support it. Please see"
f" https://docs.scrapy.org/en/latest/topics/coroutines.html#for-middleware-users"
f" for more information."
)
return normal_method
if not normal_method:
logger.error(
f"Middleware {global_object_name(mw.__class__)} has {methodname_async} "
f"without {methodname}, skipping this method."
)
return None
if not isasyncgenfunction(async_method):
logger.error(
f"{global_object_name(async_method)} is not "
f"an async generator function, skipping this method."
)
return normal_method
if isasyncgenfunction(normal_method):
logger.error(
f"{global_object_name(normal_method)} is an async "
f"generator function while {methodname_async} exists, "
f"skipping both methods."
)
return None
return normal_method, async_method
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/__init__.py | scrapy/core/__init__.py | """
Scrapy core library classes and functions.
"""
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/scheduler.py | scrapy/core/scheduler.py | from __future__ import annotations
import json
import logging
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from warnings import warn
# working around https://github.com/sphinx-doc/sphinx/issues/10400
from twisted.internet.defer import Deferred # noqa: TC002
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.spiders import Spider # noqa: TC001
from scrapy.utils.job import job_dir
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import global_object_name
if TYPE_CHECKING:
# requires queuelib >= 1.6.2
from queuelib.queue import BaseQueue
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.dupefilters import BaseDupeFilter
from scrapy.http.request import Request
from scrapy.pqueues import ScrapyPriorityQueue
from scrapy.statscollectors import StatsCollector
logger = logging.getLogger(__name__)
class BaseSchedulerMeta(type):
    """Metaclass making ``isinstance``/``issubclass`` checks structural.

    Any class exposing callable ``has_pending_requests``, ``enqueue_request``
    and ``next_request`` attributes counts as a scheduler.
    """

    def __instancecheck__(cls, instance: Any) -> bool:
        # An object is a scheduler iff its type passes the structural check.
        return cls.__subclasscheck__(type(instance))

    def __subclasscheck__(cls, subclass: type) -> bool:
        required = ("has_pending_requests", "enqueue_request", "next_request")
        return all(callable(getattr(subclass, name, None)) for name in required)
class BaseScheduler(metaclass=BaseSchedulerMeta):
    """The scheduler component is responsible for storing requests received
    from the engine, and feeding them back upon request (also to the engine).

    The original sources of said requests are:

    * Spider: ``start`` method, requests created for URLs in the ``start_urls`` attribute, request callbacks
    * Spider middleware: ``process_spider_output`` and ``process_spider_exception`` methods
    * Downloader middleware: ``process_request``, ``process_response`` and ``process_exception`` methods

    The order in which the scheduler returns its stored requests (via the ``next_request`` method)
    plays a great part in determining the order in which those requests are downloaded. See :ref:`request-order`.

    The methods defined in this class constitute the minimal interface that the Scrapy engine will interact with.
    """

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        """
        Factory method which receives the current :class:`~scrapy.crawler.Crawler` object as argument.
        """
        # The base implementation ignores the crawler; subclasses may use its
        # settings, stats, etc.
        return cls()

    def open(self, spider: Spider) -> Deferred[None] | None:
        """
        Called when the spider is opened by the engine. It receives the spider
        instance as argument and it's useful to execute initialization code.

        :param spider: the spider object for the current crawl
        :type spider: :class:`~scrapy.spiders.Spider`
        """

    def close(self, reason: str) -> Deferred[None] | None:
        """
        Called when the spider is closed by the engine. It receives the reason why the crawl
        finished as argument and it's useful to execute cleaning code.

        :param reason: a string which describes the reason why the spider was closed
        :type reason: :class:`str`
        """

    @abstractmethod
    def has_pending_requests(self) -> bool:
        """
        ``True`` if the scheduler has enqueued requests, ``False`` otherwise
        """
        raise NotImplementedError

    @abstractmethod
    def enqueue_request(self, request: Request) -> bool:
        """
        Process a request received by the engine.

        Return ``True`` if the request is stored correctly, ``False`` otherwise.

        If ``False``, the engine will fire a ``request_dropped`` signal, and
        will not make further attempts to schedule the request at a later time.
        For reference, the default Scrapy scheduler returns ``False`` when the
        request is rejected by the dupefilter.
        """
        raise NotImplementedError

    @abstractmethod
    def next_request(self) -> Request | None:
        """
        Return the next :class:`~scrapy.Request` to be processed, or ``None``
        to indicate that there are no requests to be considered ready at the moment.

        Returning ``None`` implies that no request from the scheduler will be sent
        to the downloader in the current reactor cycle. The engine will continue
        calling ``next_request`` until ``has_pending_requests`` is ``False``.
        """
        raise NotImplementedError
class Scheduler(BaseScheduler):
    """Default scheduler.

    Requests are stored into priority queues
    (:setting:`SCHEDULER_PRIORITY_QUEUE`) that sort requests by
    :attr:`~scrapy.http.Request.priority`.

    By default, a single, memory-based priority queue is used for all requests.
    When using :setting:`JOBDIR`, a disk-based priority queue is also created,
    and only unserializable requests are stored in the memory-based priority
    queue. For a given priority value, requests in memory take precedence over
    requests in disk.

    Each priority queue stores requests in separate internal queues, one per
    priority value. The memory priority queue uses
    :setting:`SCHEDULER_MEMORY_QUEUE` queues, while the disk priority queue
    uses :setting:`SCHEDULER_DISK_QUEUE` queues. The internal queues determine
    :ref:`request order <request-order>` when requests have the same priority.

    :ref:`Start requests <start-requests>` are stored into separate internal
    queues by default, and :ref:`ordered differently <start-request-order>`.

    Duplicate requests are filtered out with an instance of
    :setting:`DUPEFILTER_CLASS`.

    .. _request-order:

    Request order
    =============

    With default settings, pending requests are stored in a LIFO_ queue
    (:ref:`except for start requests <start-request-order>`). As a result,
    crawling happens in `DFO order`_, which is usually the most convenient
    crawl order. However, you can enforce :ref:`BFO <bfo>` or :ref:`a custom
    order <custom-request-order>` (:ref:`except for the first few requests
    <concurrency-v-order>`).

    .. _LIFO: https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
    .. _DFO order: https://en.wikipedia.org/wiki/Depth-first_search

    .. _start-request-order:

    Start request order
    -------------------

    :ref:`Start requests <start-requests>` are sent in the order they are
    yielded from :meth:`~scrapy.Spider.start`, and given the same
    :attr:`~scrapy.http.Request.priority`, other requests take precedence over
    start requests.

    You can set :setting:`SCHEDULER_START_MEMORY_QUEUE` and
    :setting:`SCHEDULER_START_DISK_QUEUE` to ``None`` to handle start requests
    the same as other requests when it comes to order and priority.

    .. _bfo:

    Crawling in BFO order
    ---------------------

    If you do want to crawl in `BFO order`_, you can do it by setting the
    following :ref:`settings <topics-settings>`:

    | :setting:`DEPTH_PRIORITY` = ``1``
    | :setting:`SCHEDULER_DISK_QUEUE` = ``"scrapy.squeues.PickleFifoDiskQueue"``
    | :setting:`SCHEDULER_MEMORY_QUEUE` = ``"scrapy.squeues.FifoMemoryQueue"``

    .. _BFO order: https://en.wikipedia.org/wiki/Breadth-first_search

    .. _custom-request-order:

    Crawling in a custom order
    --------------------------

    You can manually set :attr:`~scrapy.http.Request.priority` on requests to
    force a specific request order.

    .. _concurrency-v-order:

    Concurrency affects order
    -------------------------

    While pending requests are below the configured values of
    :setting:`CONCURRENT_REQUESTS`, :setting:`CONCURRENT_REQUESTS_PER_DOMAIN`
    or :setting:`CONCURRENT_REQUESTS_PER_IP`, those requests are sent
    concurrently.

    As a result, the first few requests of a crawl may not follow the desired
    order. Lowering those settings to ``1`` enforces the desired order except
    for the very first request, but it significantly slows down the crawl as a
    whole.
    """

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        """Build a scheduler instance from the crawler's settings."""
        dupefilter_cls = load_object(crawler.settings["DUPEFILTER_CLASS"])
        return cls(
            dupefilter=build_from_crawler(dupefilter_cls, crawler),
            jobdir=job_dir(crawler.settings),
            dqclass=load_object(crawler.settings["SCHEDULER_DISK_QUEUE"]),
            mqclass=load_object(crawler.settings["SCHEDULER_MEMORY_QUEUE"]),
            logunser=crawler.settings.getbool("SCHEDULER_DEBUG"),
            stats=crawler.stats,
            pqclass=load_object(crawler.settings["SCHEDULER_PRIORITY_QUEUE"]),
            crawler=crawler,
        )

    def __init__(
        self,
        dupefilter: BaseDupeFilter,
        jobdir: str | None = None,
        dqclass: type[BaseQueue] | None = None,
        mqclass: type[BaseQueue] | None = None,
        logunser: bool = False,
        stats: StatsCollector | None = None,
        pqclass: type[ScrapyPriorityQueue] | None = None,
        crawler: Crawler | None = None,
    ):
        """Initialize the scheduler.

        :param dupefilter: An object responsible for checking and filtering duplicate requests.
            The value for the :setting:`DUPEFILTER_CLASS` setting is used by default.
        :type dupefilter: :class:`scrapy.dupefilters.BaseDupeFilter` instance or similar:
            any class that implements the `BaseDupeFilter` interface

        :param jobdir: The path of a directory to be used for persisting the crawl's state.
            The value for the :setting:`JOBDIR` setting is used by default.
            See :ref:`topics-jobs`.
        :type jobdir: :class:`str` or ``None``

        :param dqclass: A class to be used as persistent request queue.
            The value for the :setting:`SCHEDULER_DISK_QUEUE` setting is used by default.
        :type dqclass: class

        :param mqclass: A class to be used as non-persistent request queue.
            The value for the :setting:`SCHEDULER_MEMORY_QUEUE` setting is used by default.
        :type mqclass: class

        :param logunser: A boolean that indicates whether or not unserializable requests should be logged.
            The value for the :setting:`SCHEDULER_DEBUG` setting is used by default.
        :type logunser: bool

        :param stats: A stats collector object to record stats about the request scheduling process.
            The value for the :setting:`STATS_CLASS` setting is used by default.
        :type stats: :class:`scrapy.statscollectors.StatsCollector` instance or similar:
            any class that implements the `StatsCollector` interface

        :param pqclass: A class to be used as priority queue for requests.
            The value for the :setting:`SCHEDULER_PRIORITY_QUEUE` setting is used by default.
        :type pqclass: class

        :param crawler: The crawler object corresponding to the current crawl.
        :type crawler: :class:`scrapy.crawler.Crawler`
        """
        self.df: BaseDupeFilter = dupefilter
        self.dqdir: str | None = self._dqdir(jobdir)
        self.pqclass: type[ScrapyPriorityQueue] | None = pqclass
        self.dqclass: type[BaseQueue] | None = dqclass
        self.mqclass: type[BaseQueue] | None = mqclass
        self.logunser: bool = logunser
        self.stats: StatsCollector | None = stats
        self.crawler: Crawler | None = crawler
        # Optional, separate queue classes for start requests; None when no
        # crawler is available or the corresponding setting is falsy.
        self._sdqclass: type[BaseQueue] | None = self._get_start_queue_cls(
            crawler, "DISK"
        )
        self._smqclass: type[BaseQueue] | None = self._get_start_queue_cls(
            crawler, "MEMORY"
        )

    def _get_start_queue_cls(
        self, crawler: Crawler | None, queue: str
    ) -> type[BaseQueue] | None:
        """Load the SCHEDULER_START_{DISK,MEMORY}_QUEUE class, or None."""
        if crawler is None:
            return None
        cls = crawler.settings[f"SCHEDULER_START_{queue}_QUEUE"]
        if not cls:
            return None
        return load_object(cls)

    def has_pending_requests(self) -> bool:
        return len(self) > 0

    def open(self, spider: Spider) -> Deferred[None] | None:
        """
        (1) initialize the memory queue
        (2) initialize the disk queue if the ``jobdir`` attribute is a valid directory
        (3) return the result of the dupefilter's ``open`` method
        """
        self.spider: Spider = spider
        self.mqs: ScrapyPriorityQueue = self._mq()
        self.dqs: ScrapyPriorityQueue | None = self._dq() if self.dqdir else None
        return self.df.open()

    def close(self, reason: str) -> Deferred[None] | None:
        """
        (1) dump pending requests to disk if there is a disk queue
        (2) return the result of the dupefilter's ``close`` method
        """
        if self.dqs is not None:
            state = self.dqs.close()
            assert isinstance(self.dqdir, str)
            self._write_dqs_state(self.dqdir, state)
        return self.df.close(reason)

    def enqueue_request(self, request: Request) -> bool:
        """
        Unless the received request is filtered out by the Dupefilter, attempt to push
        it into the disk queue, falling back to pushing it into the memory queue.

        Increment the appropriate stats, such as: ``scheduler/enqueued``,
        ``scheduler/enqueued/disk``, ``scheduler/enqueued/memory``.

        Return ``True`` if the request was stored successfully, ``False`` otherwise.
        """
        # Drop duplicates unless the request opts out with dont_filter.
        if not request.dont_filter and self.df.request_seen(request):
            self.df.log(request, self.spider)
            return False
        dqok = self._dqpush(request)
        assert self.stats is not None
        if dqok:
            self.stats.inc_value("scheduler/enqueued/disk")
        else:
            self._mqpush(request)
            self.stats.inc_value("scheduler/enqueued/memory")
        self.stats.inc_value("scheduler/enqueued")
        return True

    def next_request(self) -> Request | None:
        """
        Return a :class:`~scrapy.Request` object from the memory queue,
        falling back to the disk queue if the memory queue is empty.
        Return ``None`` if there are no more enqueued requests.

        Increment the appropriate stats, such as: ``scheduler/dequeued``,
        ``scheduler/dequeued/disk``, ``scheduler/dequeued/memory``.
        """
        request: Request | None = self.mqs.pop()
        assert self.stats is not None
        if request is not None:
            self.stats.inc_value("scheduler/dequeued/memory")
        else:
            request = self._dqpop()
            if request is not None:
                self.stats.inc_value("scheduler/dequeued/disk")
        if request is not None:
            self.stats.inc_value("scheduler/dequeued")
        return request

    def __len__(self) -> int:
        """
        Return the total amount of enqueued requests
        """
        return len(self.dqs) + len(self.mqs) if self.dqs is not None else len(self.mqs)

    def _dqpush(self, request: Request) -> bool:
        # Returns True only when the request was stored in the disk queue.
        if self.dqs is None:
            return False
        try:
            self.dqs.push(request)
        except ValueError as e:  # non serializable request
            if self.logunser:
                msg = (
                    "Unable to serialize request: %(request)s - reason:"
                    " %(reason)s - no more unserializable requests will be"
                    " logged (stats being collected)"
                )
                logger.warning(
                    msg,
                    {"request": request, "reason": e},
                    exc_info=True,
                    extra={"spider": self.spider},
                )
                # Only log the first unserializable request per crawl.
                self.logunser = False
            assert self.stats is not None
            self.stats.inc_value("scheduler/unserializable")
            return False
        return True

    def _mqpush(self, request: Request) -> None:
        self.mqs.push(request)

    def _dqpop(self) -> Request | None:
        if self.dqs is not None:
            return self.dqs.pop()
        return None

    def _mq(self) -> ScrapyPriorityQueue:
        """Create a new priority queue instance, with in-memory storage"""
        assert self.crawler
        assert self.pqclass
        try:
            return build_from_crawler(
                self.pqclass,
                self.crawler,
                downstream_queue_cls=self.mqclass,
                key="",
                start_queue_cls=self._smqclass,
            )
        except TypeError:
            # Fall back for priority queue classes that predate the
            # start_queue_cls parameter.
            warn(
                f"The __init__ method of {global_object_name(self.pqclass)} "
                f"does not support a `start_queue_cls` keyword-only "
                f"parameter.",
                ScrapyDeprecationWarning,
            )
            return build_from_crawler(
                self.pqclass,
                self.crawler,
                downstream_queue_cls=self.mqclass,
                key="",
            )

    def _dq(self) -> ScrapyPriorityQueue:
        """Create a new priority queue instance, with disk storage"""
        assert self.crawler
        assert self.dqdir
        assert self.pqclass
        state = self._read_dqs_state(self.dqdir)
        try:
            q = build_from_crawler(
                self.pqclass,
                self.crawler,
                downstream_queue_cls=self.dqclass,
                key=self.dqdir,
                startprios=state,
                start_queue_cls=self._sdqclass,
            )
        except TypeError:
            # Fall back for priority queue classes that predate the
            # start_queue_cls parameter.
            warn(
                f"The __init__ method of {global_object_name(self.pqclass)} "
                f"does not support a `start_queue_cls` keyword-only "
                f"parameter.",
                ScrapyDeprecationWarning,
            )
            q = build_from_crawler(
                self.pqclass,
                self.crawler,
                downstream_queue_cls=self.dqclass,
                key=self.dqdir,
                startprios=state,
            )
        if q:
            logger.info(
                "Resuming crawl (%(queuesize)d requests scheduled)",
                {"queuesize": len(q)},
                extra={"spider": self.spider},
            )
        return q

    def _dqdir(self, jobdir: str | None) -> str | None:
        """Return a folder name to keep disk queue state at"""
        if jobdir:
            dqdir = Path(jobdir, "requests.queue")
            if not dqdir.exists():
                dqdir.mkdir(parents=True)
            return str(dqdir)
        return None

    def _read_dqs_state(self, dqdir: str) -> list[int]:
        # State previously persisted by _write_dqs_state() on close().
        path = Path(dqdir, "active.json")
        if not path.exists():
            return []
        with path.open(encoding="utf-8") as f:
            return cast("list[int]", json.load(f))

    def _write_dqs_state(self, dqdir: str, state: list[int]) -> None:
        with Path(dqdir, "active.json").open("w", encoding="utf-8") as f:
            json.dump(state, f)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/engine.py | scrapy/core/engine.py | """
This is the Scrapy engine which controls the Scheduler, Downloader and Spider.
For more information see docs/topics/architecture.rst
"""
from __future__ import annotations
import asyncio
import logging
import warnings
from time import time
from traceback import format_exc
from typing import TYPE_CHECKING, Any
from twisted.internet.defer import CancelledError, Deferred, inlineCallbacks
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scheduler import BaseScheduler
from scrapy.core.scraper import Scraper
from scrapy.exceptions import (
CloseSpider,
DontCloseSpider,
IgnoreRequest,
ScrapyDeprecationWarning,
)
from scrapy.http import Request, Response
from scrapy.utils.asyncio import (
AsyncioLoopingCall,
create_looping_call,
is_asyncio_available,
)
from scrapy.utils.defer import (
_schedule_coro,
deferred_from_coro,
ensure_awaitable,
maybe_deferred_to_future,
)
from scrapy.utils.deprecate import argument_is_required
from scrapy.utils.log import failure_to_exc_info, logformatter_adapter
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import global_object_name
from scrapy.utils.reactor import CallLaterOnce
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Callable, Coroutine, Generator
from twisted.internet.task import LoopingCall
from scrapy.core.downloader import Downloader
from scrapy.crawler import Crawler
from scrapy.logformatter import LogFormatter
from scrapy.settings import BaseSettings, Settings
from scrapy.signalmanager import SignalManager
from scrapy.spiders import Spider
logger = logging.getLogger(__name__)
class _Slot:
    """Per-crawl engine state: in-progress requests, scheduler and callbacks."""

    def __init__(
        self,
        close_if_idle: bool,
        nextcall: CallLaterOnce[None],
        scheduler: BaseScheduler,
    ) -> None:
        # Fired once close() has been called and no request is in progress.
        self.closing: Deferred[None] | None = None
        # Requests currently being processed.
        self.inprogress: set[Request] = set()
        self.close_if_idle: bool = close_if_idle
        self.nextcall: CallLaterOnce[None] = nextcall
        self.scheduler: BaseScheduler = scheduler
        # Periodically re-schedules nextcall (started elsewhere with
        # _SLOT_HEARTBEAT_INTERVAL).
        self.heartbeat: AsyncioLoopingCall | LoopingCall = create_looping_call(
            nextcall.schedule
        )

    def add_request(self, request: Request) -> None:
        self.inprogress.add(request)

    def remove_request(self, request: Request) -> None:
        self.inprogress.remove(request)
        # A pending close() may now be able to complete.
        self._maybe_fire_closing()

    async def close(self) -> None:
        """Wait until all in-progress requests are done, then finish closing."""
        self.closing = Deferred()
        self._maybe_fire_closing()
        await maybe_deferred_to_future(self.closing)

    def _maybe_fire_closing(self) -> None:
        # Complete the close only when requested and nothing is in progress.
        if self.closing is not None and not self.inprogress:
            if self.nextcall:
                self.nextcall.cancel()
            if self.heartbeat.running:
                self.heartbeat.stop()
            self.closing.callback(None)
class ExecutionEngine:
_SLOT_HEARTBEAT_INTERVAL: float = 5.0
    def __init__(
        self,
        crawler: Crawler,
        spider_closed_callback: Callable[
            [Spider], Coroutine[Any, Any, None] | Deferred[None] | None
        ],
    ) -> None:
        """Create the engine, its downloader, scheduler class and scraper.

        :param crawler: the crawler this engine belongs to
        :param spider_closed_callback: stored callback to be invoked for a
            spider being closed; may return a coroutine or Deferred
        """
        self.crawler: Crawler = crawler
        self.settings: Settings = crawler.settings
        self.signals: SignalManager = crawler.signals
        assert crawler.logformatter
        self.logformatter: LogFormatter = crawler.logformatter
        self._slot: _Slot | None = None
        self.spider: Spider | None = None
        self.running: bool = False
        self._starting: bool = False
        self._stopping: bool = False
        self.paused: bool = False
        self._spider_closed_callback: Callable[
            [Spider], Coroutine[Any, Any, None] | Deferred[None] | None
        ] = spider_closed_callback
        self.start_time: float | None = None
        # Async iterator over the spider's start requests/items.
        self._start: AsyncIterator[Any] | None = None
        # Resolved by stop_async() to unblock start_async().
        self._closewait: Deferred[None] | None = None
        self._start_request_processing_awaitable: (
            asyncio.Future[None] | Deferred[None] | None
        ) = None
        downloader_cls: type[Downloader] = load_object(self.settings["DOWNLOADER"])
        try:
            self.scheduler_cls: type[BaseScheduler] = self._get_scheduler_class(
                crawler.settings
            )
            self.downloader: Downloader = downloader_cls(crawler)
            self._downloader_fetch_needs_spider: bool = argument_is_required(
                self.downloader.fetch, "spider"
            )
            if self._downloader_fetch_needs_spider:
                warnings.warn(
                    f"The fetch() method of {global_object_name(downloader_cls)} requires a spider argument,"
                    f" this is deprecated and the argument will not be passed in future Scrapy versions.",
                    ScrapyDeprecationWarning,
                    stacklevel=2,
                )
            self.scraper: Scraper = Scraper(crawler)
        except Exception:
            # Close the downloader if a later construction step failed.
            if hasattr(self, "downloader"):
                self.downloader.close()
            raise
def _get_scheduler_class(self, settings: BaseSettings) -> type[BaseScheduler]:
scheduler_cls: type[BaseScheduler] = load_object(settings["SCHEDULER"])
if not issubclass(scheduler_cls, BaseScheduler):
raise TypeError(
f"The provided scheduler class ({settings['SCHEDULER']})"
" does not fully implement the scheduler interface"
)
return scheduler_cls
def start(self, _start_request_processing=True) -> Deferred[None]:
warnings.warn(
"ExecutionEngine.start() is deprecated, use start_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(
self.start_async(_start_request_processing=_start_request_processing)
)
    async def start_async(self, *, _start_request_processing: bool = True) -> None:
        """Start the execution engine.

        Fires the ``engine_started`` signal, kicks off processing of the
        spider's start requests (unless ``_start_request_processing`` is
        false) and then waits until the engine is stopped.

        .. versionadded:: VERSION
        """
        if self._starting:
            raise RuntimeError("Engine already running")
        self.start_time = time()
        self._starting = True
        await self.signals.send_catch_log_async(signal=signals.engine_started)
        if self._stopping:
            # band-aid until https://github.com/scrapy/scrapy/issues/6916
            return
        if _start_request_processing and self.spider is None:
            # require an opened spider when not run in scrapy shell
            return
        self.running = True
        self._closewait = Deferred()
        if _start_request_processing:
            coro = self._start_request_processing()
            if is_asyncio_available():
                # not wrapping in a Deferred here to avoid https://github.com/twisted/twisted/issues/12470
                # (can happen when this is cancelled, e.g. in test_close_during_start_iteration())
                self._start_request_processing_awaitable = asyncio.ensure_future(coro)
            else:
                self._start_request_processing_awaitable = Deferred.fromCoroutine(coro)
        # _closewait is resolved by stop_async(); this keeps start_async()
        # pending for the lifetime of the crawl.
        await maybe_deferred_to_future(self._closewait)
def stop(self) -> Deferred[None]:
warnings.warn(
"ExecutionEngine.stop() is deprecated, use stop_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.stop_async())
    async def stop_async(self) -> None:
        """Gracefully stop the execution engine.

        Cancels start-request processing, closes the open spider (if any),
        fires the ``engine_stopped`` signal and unblocks :meth:`start_async`.

        .. versionadded:: VERSION
        """
        if not self._starting:
            raise RuntimeError("Engine not running")
        self.running = self._starting = False
        self._stopping = True
        if self._start_request_processing_awaitable is not None:
            if (
                not is_asyncio_available()
                or self._start_request_processing_awaitable
                is not asyncio.current_task()
            ):
                # If using the asyncio loop and stop_async() was called from
                # start() itself, we can't cancel it, and _start_request_processing()
                # will exit via the self.running check.
                self._start_request_processing_awaitable.cancel()
            self._start_request_processing_awaitable = None
        if self.spider is not None:
            await self.close_spider_async(reason="shutdown")
        await self.signals.send_catch_log_async(signal=signals.engine_stopped)
        if self._closewait:
            # Unblock the start_async() call awaiting _closewait.
            self._closewait.callback(None)
def close(self) -> Deferred[None]:
warnings.warn(
"ExecutionEngine.close() is deprecated, use close_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.close_async())
async def close_async(self) -> None:
"""
Gracefully close the execution engine.
If it has already been started, stop it. In all cases, close the spider and the downloader.
"""
if self.running:
await self.stop_async() # will also close spider and downloader
elif self.spider is not None:
await self.close_spider_async(
reason="shutdown"
) # will also close downloader
elif hasattr(self, "downloader"):
self.downloader.close()
    def pause(self) -> None:
        """Pause dispatching of scheduled requests until :meth:`unpause`."""
        self.paused = True
    def unpause(self) -> None:
        """Resume dispatching of scheduled requests after a :meth:`pause`."""
        self.paused = False
    async def _process_start_next(self):
        """Processes the next item or request from Spider.start().

        If a request, it is scheduled. If an item, it is sent to item
        pipelines.
        """
        try:
            item_or_request = await self._start.__anext__()
        except StopAsyncIteration:
            # start() is exhausted; clearing _start lets spider_is_idle() pass.
            self._start = None
        except Exception as exception:
            self._start = None
            exception_traceback = format_exc()
            logger.error(
                f"Error while reading start items and requests: {exception}.\n{exception_traceback}",
                exc_info=True,
            )
        else:
            if not self.spider:
                return  # spider already closed
            if isinstance(item_or_request, Request):
                self.crawl(item_or_request)
            else:
                # Anything that is not a Request is treated as an item and
                # sent to the item pipelines with no associated response.
                _schedule_coro(
                    self.scraper.start_itemproc_async(item_or_request, response=None)
                )
                self._slot.nextcall.schedule()
    async def _start_request_processing(self) -> None:
        """Starts consuming Spider.start() output and sending scheduled
        requests."""
        # Starts the processing of scheduled requests, as well as a periodic
        # call to that processing method for scenarios where the scheduler
        # reports having pending requests but returns none.
        try:
            assert self._slot is not None  # typing
            self._slot.nextcall.schedule()
            self._slot.heartbeat.start(self._SLOT_HEARTBEAT_INTERVAL)
            while self._start and self.spider and self.running:
                await self._process_start_next()
                if not self.needs_backout():
                    # Give room for the outcome of self._process_start_next() to be
                    # processed before continuing with the next iteration.
                    self._slot.nextcall.schedule()
                    await self._slot.nextcall.wait()
        except (asyncio.exceptions.CancelledError, CancelledError):
            # self.stop_async() has cancelled us, nothing to do
            return
        except Exception:
            # an error happened, log it and stop the engine
            self._start_request_processing_awaitable = None
            logger.error(
                "Error while processing requests from start()",
                exc_info=True,
                extra={"spider": self.spider},
            )
            await self.stop_async()
    def _start_scheduled_requests(self) -> None:
        """Feed the downloader with scheduled requests until backout is
        needed, then trigger idle handling if applicable."""
        if self._slot is None or self._slot.closing is not None or self.paused:
            return
        while not self.needs_backout():
            if not self._start_scheduled_request():
                break  # scheduler had no request to offer
        if self.spider_is_idle() and self._slot.close_if_idle:
            self._spider_idle()
def needs_backout(self) -> bool:
"""Returns ``True`` if no more requests can be sent at the moment, or
``False`` otherwise.
See :ref:`start-requests-lazy` for an example.
"""
assert self.scraper.slot is not None # typing
return (
not self.running
or not self._slot
or bool(self._slot.closing)
or self.downloader.needs_backout()
or self.scraper.slot.needs_backout()
)
    def _start_scheduled_request(self) -> bool:
        """Pop one request from the scheduler and start downloading it.

        Returns ``True`` if a request was started, ``False`` if the
        scheduler had none (in which case ``scheduler_empty`` is sent).
        """
        assert self._slot is not None  # typing
        assert self.spider is not None  # typing
        request = self._slot.scheduler.next_request()
        if request is None:
            self.signals.send_catch_log(signals.scheduler_empty)
            return False
        d: Deferred[Response | Request] = self._download(request)
        d.addBoth(self._handle_downloader_output, request)
        d.addErrback(
            lambda f: logger.info(
                "Error while handling downloader output",
                exc_info=failure_to_exc_info(f),
                extra={"spider": self.spider},
            )
        )

        def _remove_request(_: Any) -> None:
            # Drop the request from the slot regardless of download outcome.
            assert self._slot
            self._slot.remove_request(request)

        d2: Deferred[None] = d.addBoth(_remove_request)
        d2.addErrback(
            lambda f: logger.info(
                "Error while removing request from slot",
                exc_info=failure_to_exc_info(f),
                extra={"spider": self.spider},
            )
        )
        # Capture the slot now: self._slot may be reset before the Deferred fires.
        slot = self._slot
        d2.addBoth(lambda _: slot.nextcall.schedule())
        d2.addErrback(
            lambda f: logger.info(
                "Error while scheduling new request",
                exc_info=failure_to_exc_info(f),
                extra={"spider": self.spider},
            )
        )
        return True
    @inlineCallbacks
    def _handle_downloader_output(
        self, result: Request | Response | Failure, request: Request
    ) -> Generator[Deferred[Any], Any, None]:
        """Route the downloader outcome: re-crawl returned requests, hand
        responses/failures to the scraper.

        :raises TypeError: if *result* is not a Request, Response or Failure.
        """
        if not isinstance(result, (Request, Response, Failure)):
            raise TypeError(
                f"Incorrect type: expected Request, Response or Failure, got {type(result)}: {result!r}"
            )
        # downloader middleware can return requests (for example, redirects)
        if isinstance(result, Request):
            self.crawl(result)
            return
        try:
            yield self.scraper.enqueue_scrape(result, request)
        except Exception:
            assert self.spider is not None
            logger.error(
                "Error while enqueuing scrape",
                exc_info=True,
                extra={"spider": self.spider},
            )
def spider_is_idle(self) -> bool:
if self._slot is None:
raise RuntimeError("Engine slot not assigned")
if not self.scraper.slot.is_idle(): # type: ignore[union-attr]
return False
if self.downloader.active: # downloader has pending requests
return False
if self._start is not None: # not all start requests are handled
return False
return not self._slot.scheduler.has_pending_requests()
    def crawl(self, request: Request) -> None:
        """Inject the request into the spider <-> downloader pipeline

        :raises RuntimeError: if no spider is open.
        """
        if self.spider is None:
            raise RuntimeError(f"No open spider to crawl: {request}")
        self._schedule_request(request)
        # Wake the request-sending loop so the new request is picked up.
        self._slot.nextcall.schedule()  # type: ignore[union-attr]
def _schedule_request(self, request: Request) -> None:
request_scheduled_result = self.signals.send_catch_log(
signals.request_scheduled,
request=request,
spider=self.spider,
dont_log=IgnoreRequest,
)
for handler, result in request_scheduled_result:
if isinstance(result, Failure) and isinstance(result.value, IgnoreRequest):
return
if not self._slot.scheduler.enqueue_request(request): # type: ignore[union-attr]
self.signals.send_catch_log(
signals.request_dropped, request=request, spider=self.spider
)
    def download(self, request: Request) -> Deferred[Response]:
        """Return a Deferred which fires with a Response as result, only downloader middlewares are applied"""
        # Deprecated synchronous wrapper around download_async().
        warnings.warn(
            "ExecutionEngine.download() is deprecated, use download_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.download_async(request))
    async def download_async(self, request: Request) -> Response:
        """Return a coroutine which fires with a Response as result.

        Only downloader middlewares are applied.

        :raises RuntimeError: if no spider is open.

        .. versionadded:: VERSION
        """
        if self.spider is None:
            raise RuntimeError(f"No open spider to crawl: {request}")
        try:
            response_or_request = await maybe_deferred_to_future(
                self._download(request)
            )
        finally:
            # Always release the slot entry, even if the download failed.
            assert self._slot is not None
            self._slot.remove_request(request)
        if isinstance(response_or_request, Request):
            # Downloader middleware replaced the request (e.g. a redirect):
            # download the replacement instead, recursively.
            return await self.download_async(response_or_request)
        return response_or_request
    @inlineCallbacks
    def _download(
        self, request: Request
    ) -> Generator[Deferred[Any], Any, Response | Request]:
        """Fetch *request* through the downloader, returning the resulting
        Response (or a replacement Request produced by middleware).

        Tracks the request in the slot for its duration and fires
        ``response_received`` for responses.

        :raises TypeError: if the downloader returns anything else.
        """
        assert self._slot is not None  # typing
        assert self.spider is not None
        self._slot.add_request(request)
        try:
            result: Response | Request
            if self._downloader_fetch_needs_spider:
                # Older downloader interface that still takes the spider.
                result = yield self.downloader.fetch(request, self.spider)
            else:
                result = yield self.downloader.fetch(request)
            if not isinstance(result, (Response, Request)):
                raise TypeError(
                    f"Incorrect type: expected Response or Request, got {type(result)}: {result!r}"
                )
            if isinstance(result, Response):
                if result.request is None:
                    result.request = request
                logkws = self.logformatter.crawled(result.request, result, self.spider)
                if logkws is not None:
                    logger.log(
                        *logformatter_adapter(logkws), extra={"spider": self.spider}
                    )
                self.signals.send_catch_log(
                    signal=signals.response_received,
                    response=result,
                    request=result.request,
                    spider=self.spider,
                )
            return result
        finally:
            # Keep the request-sending loop going whatever the outcome.
            self._slot.nextcall.schedule()
    def open_spider(self, spider: Spider, close_if_idle: bool = True) -> Deferred[None]:
        """Deprecated wrapper around :meth:`open_spider_async`.

        The *spider* argument is not used; the crawler's spider is opened.
        """
        warnings.warn(
            "ExecutionEngine.open_spider() is deprecated, use open_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.open_spider_async(close_if_idle=close_if_idle))
    async def open_spider_async(self, *, close_if_idle: bool = True) -> None:
        """Open the crawler's spider: create the slot and scheduler, start
        consuming Spider.start(), open the scraper and stats, and send the
        ``spider_opened`` signal.

        :param close_if_idle: close the spider automatically when it goes idle.
        :raises RuntimeError: if a spider slot is already in use.
        """
        assert self.crawler.spider
        if self._slot is not None:
            raise RuntimeError(
                f"No free spider slot when opening {self.crawler.spider.name!r}"
            )
        logger.info("Spider opened", extra={"spider": self.crawler.spider})
        self.spider = self.crawler.spider
        nextcall = CallLaterOnce(self._start_scheduled_requests)
        scheduler = build_from_crawler(self.scheduler_cls, self.crawler)
        self._slot = _Slot(close_if_idle, nextcall, scheduler)
        self._start = await self.scraper.spidermw.process_start()
        # Scheduler.open() is optional and may return a Deferred to wait on.
        if hasattr(scheduler, "open") and (d := scheduler.open(self.crawler.spider)):
            await maybe_deferred_to_future(d)
        await self.scraper.open_spider_async()
        assert self.crawler.stats
        self.crawler.stats.open_spider()
        await self.signals.send_catch_log_async(
            signals.spider_opened, spider=self.crawler.spider
        )
    def _spider_idle(self) -> None:
        """
        Called when a spider gets idle, i.e. when there are no remaining requests to download or schedule.
        It can be called multiple times. If a handler for the spider_idle signal raises a DontCloseSpider
        exception, the spider is not closed until the next loop and this function is guaranteed to be called
        (at least) once again. A handler can raise CloseSpider to provide a custom closing reason.
        """
        assert self.spider is not None  # typing
        expected_ex = (DontCloseSpider, CloseSpider)
        res = self.signals.send_catch_log(
            signals.spider_idle, spider=self.spider, dont_log=expected_ex
        )
        # Map each expected exception class to the instance raised by a
        # handler (captured as a Failure), if any handler raised it.
        detected_ex = {
            ex: x.value
            for _, x in res
            for ex in expected_ex
            if isinstance(x, Failure) and isinstance(x.value, ex)
        }
        if DontCloseSpider in detected_ex:
            return
        if self.spider_is_idle():
            # Use the handler-provided CloseSpider reason, or "finished".
            ex = detected_ex.get(CloseSpider, CloseSpider(reason="finished"))
            assert isinstance(ex, CloseSpider)  # typing
            _schedule_coro(self.close_spider_async(reason=ex.reason))
    def close_spider(self, spider: Spider, reason: str = "cancelled") -> Deferred[None]:
        """Deprecated wrapper around :meth:`close_spider_async`.

        The *spider* argument is not used; the open spider is closed.
        """
        warnings.warn(
            "ExecutionEngine.close_spider() is deprecated, use close_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.close_spider_async(reason=reason))
    async def close_spider_async(self, *, reason: str = "cancelled") -> None:
        """Close (cancel) spider and clear all its outstanding requests.

        Each teardown step below is isolated in its own try/except so that
        one failing component does not prevent the others from closing.

        :raises RuntimeError: if no spider is open or no slot is assigned.

        .. versionadded:: VERSION
        """
        if self.spider is None:
            raise RuntimeError("Spider not opened")
        if self._slot is None:
            raise RuntimeError("Engine slot not assigned")
        if self._slot.closing is not None:
            # A close is already in progress: just wait for it to finish.
            await maybe_deferred_to_future(self._slot.closing)
            return
        spider = self.spider
        logger.info(
            "Closing spider (%(reason)s)", {"reason": reason}, extra={"spider": spider}
        )

        def log_failure(msg: str) -> None:
            logger.error(msg, exc_info=True, extra={"spider": spider})  # noqa: LOG014

        try:
            await self._slot.close()
        except Exception:
            log_failure("Slot close failure")
        try:
            self.downloader.close()
        except Exception:
            log_failure("Downloader close failure")
        try:
            await self.scraper.close_spider_async()
        except Exception:
            log_failure("Scraper close failure")
        # Scheduler.close() is optional and may return a Deferred.
        if hasattr(self._slot.scheduler, "close"):
            try:
                if (d := self._slot.scheduler.close(reason)) is not None:
                    await maybe_deferred_to_future(d)
            except Exception:
                log_failure("Scheduler close failure")
        try:
            await self.signals.send_catch_log_async(
                signal=signals.spider_closed,
                spider=spider,
                reason=reason,
            )
        except Exception:
            log_failure("Error while sending spider_close signal")
        assert self.crawler.stats
        try:
            self.crawler.stats.close_spider(reason=reason)
        except Exception:
            log_failure("Stats close failure")
        logger.info(
            "Spider closed (%(reason)s)",
            {"reason": reason},
            extra={"spider": spider},
        )
        self._slot = None
        self.spider = None
        try:
            await ensure_awaitable(self._spider_closed_callback(spider))
        except Exception:
            log_failure("Error running spider_closed_callback")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/http2/stream.py | scrapy/core/http2/stream.py | from __future__ import annotations
import logging
from enum import Enum
from io import BytesIO
from typing import TYPE_CHECKING, Any
from h2.errors import ErrorCodes
from h2.exceptions import H2Error, ProtocolError, StreamClosedError
from twisted.internet.defer import CancelledError, Deferred
from twisted.internet.error import ConnectionClosed
from twisted.python.failure import Failure
from twisted.web.client import ResponseFailed
from scrapy.http.headers import Headers
from scrapy.responsetypes import responsetypes
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
from hpack import HeaderTuple
from scrapy.core.http2.protocol import H2ClientProtocol
from scrapy.http import Request, Response
logger = logging.getLogger(__name__)
class InactiveStreamClosed(ConnectionClosed):
    """Raised when the connection is lost before this stream ever sent its
    request headers, i.e. while it was still queued behind other streams."""

    def __init__(self, request: Request) -> None:
        self.request = request

    def __str__(self) -> str:
        return (
            "InactiveStreamClosed: Connection was closed without sending "
            f"the request {self.request!r}"
        )
class InvalidHostname(H2Error):
    """Raised when a request's hostname matches neither the host nor the
    netloc of the established HTTP/2 connection."""

    def __init__(
        self, request: Request, expected_hostname: str, expected_netloc: str
    ) -> None:
        self.request = request
        self.expected_hostname = expected_hostname
        self.expected_netloc = expected_netloc

    def __str__(self) -> str:
        return (
            f"InvalidHostname: Expected {self.expected_hostname} or "
            f"{self.expected_netloc} in {self.request}"
        )
class StreamCloseReason(Enum):
    """Why a :class:`Stream` was closed; selects the callback/errback
    behavior in :meth:`Stream.close`."""

    # Received a StreamEnded event from the remote
    ENDED = 1

    # Received a StreamReset event -- ended abruptly
    RESET = 2

    # Transport connection was lost
    CONNECTION_LOST = 3

    # Expected response body size is more than allowed limit
    MAXSIZE_EXCEEDED = 4

    # Response deferred is cancelled by the client
    # (happens when client called response_deferred.cancel())
    CANCELLED = 5

    # Connection lost and the stream was not initiated
    INACTIVE = 6

    # The hostname of the request is not same as of connected peer hostname
    # As a result sending this request will the end the connection
    INVALID_HOSTNAME = 7
class Stream:
    """Represents a single HTTP/2 Stream.

    Stream is a bidirectional flow of bytes within an established connection,
    which may carry one or more messages. Handles the transfer of HTTP Headers
    and Data frames.

    Role of this class is to
    1. Combine all the data frames
    2. Fire the response Deferred once the stream is closed
    """
    def __init__(
        self,
        stream_id: int,
        request: Request,
        protocol: H2ClientProtocol,
        download_maxsize: int = 0,
        download_warnsize: int = 0,
    ) -> None:
        """
        Arguments:
            stream_id -- Unique identifier for the stream within a single HTTP/2 connection
            request -- The HTTP request associated to the stream
            protocol -- Parent H2ClientProtocol instance
            download_maxsize -- default hard limit on response size (0 = off)
            download_warnsize -- default warn threshold on response size (0 = off)
        """
        self.stream_id: int = stream_id
        self._request: Request = request
        self._protocol: H2ClientProtocol = protocol

        # Per-request meta overrides take precedence over the defaults.
        self._download_maxsize = self._request.meta.get(
            "download_maxsize", download_maxsize
        )
        self._download_warnsize = self._request.meta.get(
            "download_warnsize", download_warnsize
        )

        # Metadata of an HTTP/2 connection stream
        # initialized when stream is instantiated
        self.metadata: dict[str, Any] = {
            "request_content_length": (
                0 if self._request.body is None else len(self._request.body)
            ),
            # Flag to keep track whether the stream has initiated the request
            "request_sent": False,
            # Flag to track whether we have logged about exceeding download warnsize
            "reached_warnsize": False,
            # Each time we send a data frame, we will decrease value by the amount send.
            "remaining_content_length": (
                0 if self._request.body is None else len(self._request.body)
            ),
            # Flag to keep track whether client (self) have closed this stream
            "stream_closed_local": False,
            # Flag to keep track whether the server has closed the stream
            "stream_closed_server": False,
        }

        # Private variable used to build the response
        # this response is then converted to appropriate Response class
        # passed to the response deferred callback
        self._response: dict[str, Any] = {
            # Data received frame by frame from the server is appended
            # and passed to the response Deferred when completely received.
            "body": BytesIO(),
            # The amount of data received that counts against the
            # flow control window
            "flow_controlled_size": 0,
            # Headers received after sending the request
            "headers": Headers({}),
        }

        def _cancel(_: Any) -> None:
            # Close this stream as gracefully as possible
            # If the associated request is initiated we reset this stream
            # else we directly call close() method
            if self.metadata["request_sent"]:
                self.reset_stream(StreamCloseReason.CANCELLED)
            else:
                self.close(StreamCloseReason.CANCELLED)

        # Fires with the final Response; cancelling it triggers _cancel above.
        self._deferred_response: Deferred[Response] = Deferred(_cancel)
def __repr__(self) -> str:
return f"Stream(id={self.stream_id!r})"
@property
def _log_warnsize(self) -> bool:
"""Checks if we have received data which exceeds the download warnsize
and whether we have not already logged about it.
Returns:
True if both the above conditions hold true
False if any of the conditions is false
"""
content_length_header = int(
self._response["headers"].get(b"Content-Length", -1)
)
return (
self._download_warnsize
and (
self._response["flow_controlled_size"] > self._download_warnsize
or content_length_header > self._download_warnsize
)
and not self.metadata["reached_warnsize"]
)
def get_response(self) -> Deferred[Response]:
"""Simply return a Deferred which fires when response
from the asynchronous request is available
"""
return self._deferred_response
    def check_request_url(self) -> bool:
        """Return ``True`` if the request netloc matches the connected peer:
        the peer hostname, its full netloc, or the literal ``ip:port`` of
        the connection."""
        # Make sure that we are sending the request to the correct URL
        url = urlparse_cached(self._request)
        return (
            url.netloc == str(self._protocol.metadata["uri"].host, "utf-8")
            or url.netloc == str(self._protocol.metadata["uri"].netloc, "utf-8")
            or url.netloc
            == f"{self._protocol.metadata['ip_address']}:{self._protocol.metadata['uri'].port}"
        )
    def _get_request_headers(self) -> list[tuple[str, str]]:
        """Build the HTTP/2 header list for this request: pseudo-headers
        first, then Content-Length, then the request's own headers (with any
        mismatched Content-Length dropped in favor of the computed one)."""
        url = urlparse_cached(self._request)

        path = url.path
        if url.query:
            path += "?" + url.query

        # This pseudo-header field MUST NOT be empty for "http" or "https"
        # URIs; "http" or "https" URIs that do not contain a path component
        # MUST include a value of '/'. The exception to this rule is an
        # OPTIONS request for an "http" or "https" URI that does not include
        # a path component; these MUST include a ":path" pseudo-header field
        # with a value of '*' (refer RFC 7540 - Section 8.1.2.3)
        if not path:
            path = "*" if self._request.method == "OPTIONS" else "/"

        # Make sure pseudo-headers comes before all the other headers
        headers = [
            (":method", self._request.method),
            (":authority", url.netloc),
        ]

        # The ":scheme" and ":path" pseudo-header fields MUST
        # be omitted for CONNECT method (refer RFC 7540 - Section 8.3)
        if self._request.method != "CONNECT":
            headers += [
                (":scheme", self._protocol.metadata["uri"].scheme),
                (":path", path),
            ]

        content_length = str(len(self._request.body))
        headers.append(("Content-Length", content_length))

        content_length_name = self._request.headers.normkey(b"Content-Length")
        for name, values in self._request.headers.items():
            for value_bytes in values:
                value = str(value_bytes, "utf-8")
                if name == content_length_name:
                    # A user-supplied Content-Length that disagrees with the
                    # actual body length is ignored (and logged).
                    if value != content_length:
                        logger.warning(
                            "Ignoring bad Content-Length header %r of request %r, "
                            "sending %r instead",
                            value,
                            self._request,
                            content_length,
                        )
                    continue
                headers.append((str(name, "utf-8"), value))

        return headers
def initiate_request(self) -> None:
if self.check_request_url():
headers = self._get_request_headers()
self._protocol.conn.send_headers(self.stream_id, headers, end_stream=False)
self.metadata["request_sent"] = True
self.send_data()
else:
# Close this stream calling the response errback
# Note that we have not sent any headers
self.close(StreamCloseReason.INVALID_HOSTNAME)
    def send_data(self) -> None:
        """Called immediately after the headers are sent. Here we send all the
        data as part of the request.

        If the content length is 0 initially then we end the stream immediately and
        wait for response data.

        Warning: Only call this method when stream not closed from client side
        and has initiated request already by sending HEADER frame. If not then
        stream will raise ProtocolError (raise by h2 state machine).

        :raises StreamClosedError: if the stream was already closed locally.
        """
        if self.metadata["stream_closed_local"]:
            raise StreamClosedError(self.stream_id)

        # Firstly, check what the flow control window is for current stream.
        window_size = self._protocol.conn.local_flow_control_window(
            stream_id=self.stream_id
        )

        # Next, check what the maximum frame size is.
        max_frame_size = self._protocol.conn.max_outbound_frame_size

        # We will send no more than the window size or the remaining file size
        # of data in this call, whichever is smaller.
        bytes_to_send_size = min(window_size, self.metadata["remaining_content_length"])

        # We now need to send a number of data frames.
        while bytes_to_send_size > 0:
            chunk_size = min(bytes_to_send_size, max_frame_size)

            # Offset into the body: how much was already sent.
            data_chunk_start_id = (
                self.metadata["request_content_length"]
                - self.metadata["remaining_content_length"]
            )
            data_chunk = self._request.body[
                data_chunk_start_id : data_chunk_start_id + chunk_size
            ]

            self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream=False)

            bytes_to_send_size -= chunk_size
            self.metadata["remaining_content_length"] -= chunk_size

        self.metadata["remaining_content_length"] = max(
            0, self.metadata["remaining_content_length"]
        )

        # End the stream if no more data needs to be send
        if self.metadata["remaining_content_length"] == 0:
            self._protocol.conn.end_stream(self.stream_id)

        # Q. What about the rest of the data?
        # Ans: Remaining Data frames will be sent when we get a WindowUpdate frame
def receive_window_update(self) -> None:
"""Flow control window size was changed.
Send data that earlier could not be sent as we were
blocked behind the flow control.
"""
if (
self.metadata["remaining_content_length"]
and not self.metadata["stream_closed_server"]
and self.metadata["request_sent"]
):
self.send_data()
    def receive_data(self, data: bytes, flow_controlled_length: int) -> None:
        """Handle a DATA frame: buffer the payload, enforce maxsize/warnsize
        limits and acknowledge the data for flow control."""
        self._response["body"].write(data)
        self._response["flow_controlled_size"] += flow_controlled_length

        # We check maxsize here in case the Content-Length header was not received
        if (
            self._download_maxsize
            and self._response["flow_controlled_size"] > self._download_maxsize
        ):
            self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
            return

        if self._log_warnsize:
            self.metadata["reached_warnsize"] = True
            warning_msg = (
                f"Received more ({self._response['flow_controlled_size']}) bytes than download "
                f"warn size ({self._download_warnsize}) in request {self._request}"
            )
            logger.warning(warning_msg)

        # Acknowledge the data received
        # NOTE(review): this acknowledges the cumulative flow_controlled_size
        # on every DATA frame rather than only flow_controlled_length —
        # presumably over-acknowledging; confirm against h2's
        # acknowledge_received_data() semantics.
        self._protocol.conn.acknowledge_received_data(
            self._response["flow_controlled_size"], self.stream_id
        )
    def receive_headers(self, headers: list[HeaderTuple]) -> None:
        """Handle the response HEADERS frame: store the headers and enforce
        maxsize/warnsize limits based on the declared Content-Length."""
        for name, value in headers:
            self._response["headers"].appendlist(name, value)

        # Check if we exceed the allowed max data size which can be received
        expected_size = int(self._response["headers"].get(b"Content-Length", -1))
        if self._download_maxsize and expected_size > self._download_maxsize:
            self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
            return

        if self._log_warnsize:
            self.metadata["reached_warnsize"] = True
            warning_msg = (
                f"Expected response size ({expected_size}) larger than "
                f"download warn size ({self._download_warnsize}) in request {self._request}"
            )
            logger.warning(warning_msg)
    def reset_stream(self, reason: StreamCloseReason = StreamCloseReason.RESET) -> None:
        """Close this stream by sending a RST_FRAME to the remote peer

        :raises StreamClosedError: if the stream was already closed locally.
        """
        if self.metadata["stream_closed_local"]:
            raise StreamClosedError(self.stream_id)

        # Clear buffer earlier to avoid keeping data in memory for a long time
        self._response["body"].truncate(0)

        self.metadata["stream_closed_local"] = True
        self._protocol.conn.reset_stream(self.stream_id, ErrorCodes.REFUSED_STREAM)
        self.close(reason)
    def close(
        self,
        reason: StreamCloseReason,
        errors: list[BaseException] | None = None,
        from_protocol: bool = False,
    ) -> None:
        """Based on the reason sent we will handle each case.

        Fires the response Deferred's callback or errback depending on
        *reason*. Called at most once per stream.

        :param errors: underlying exceptions for connection-loss reasons.
        :param from_protocol: True when the protocol already removed this
            stream from its registry (so we must not pop it again).
        :raises StreamClosedError: if the server side was already closed.
        :raises TypeError: if *reason* is not a StreamCloseReason.
        """
        if self.metadata["stream_closed_server"]:
            raise StreamClosedError(self.stream_id)

        if not isinstance(reason, StreamCloseReason):
            raise TypeError(
                f"Expected StreamCloseReason, received {reason.__class__.__qualname__}"
            )

        # Have default value of errors as an empty list as
        # some cases can add a list of exceptions
        errors = errors or []

        if not from_protocol:
            self._protocol.pop_stream(self.stream_id)

        self.metadata["stream_closed_server"] = True

        # We do not check for Content-Length or Transfer-Encoding in response headers
        # and add `partial` flag as in HTTP/1.1 as 'A request or response that includes
        # a payload body can include a content-length header field' (RFC 7540 - Section 8.1.2.6)

        # NOTE: Order of handling the events is important here
        # As we immediately cancel the request when maxsize is exceeded while
        # receiving DATA_FRAME's when we have received the headers (not
        # having Content-Length)
        if reason is StreamCloseReason.MAXSIZE_EXCEEDED:
            expected_size = int(
                self._response["headers"].get(
                    b"Content-Length", self._response["flow_controlled_size"]
                )
            )
            error_msg = (
                f"Cancelling download of {self._request.url}: received response "
                f"size ({expected_size}) larger than download max size ({self._download_maxsize})"
            )
            logger.error(error_msg)
            self._deferred_response.errback(CancelledError(error_msg))

        elif reason is StreamCloseReason.ENDED:
            self._fire_response_deferred()

        # Stream was abruptly ended here
        elif reason is StreamCloseReason.CANCELLED:
            # Client has cancelled the request. Remove all the data
            # received and fire the response deferred with no flags set

            # NOTE: The data is already flushed in Stream.reset_stream() called
            # immediately when the stream needs to be cancelled

            # There maybe no :status in headers, we make
            # HTTP Status Code: 499 - Client Closed Request
            self._response["headers"][":status"] = "499"
            self._fire_response_deferred()

        elif reason is StreamCloseReason.RESET:
            self._deferred_response.errback(
                ResponseFailed(
                    [
                        Failure(
                            f"Remote peer {self._protocol.metadata['ip_address']} sent RST_STREAM",
                            ProtocolError,
                        )
                    ]
                )
            )

        elif reason is StreamCloseReason.CONNECTION_LOST:
            self._deferred_response.errback(ResponseFailed(errors))

        elif reason is StreamCloseReason.INACTIVE:
            errors.insert(0, InactiveStreamClosed(self._request))
            self._deferred_response.errback(ResponseFailed(errors))

        else:
            assert reason is StreamCloseReason.INVALID_HOSTNAME
            self._deferred_response.errback(
                InvalidHostname(
                    self._request,
                    str(self._protocol.metadata["uri"].host, "utf-8"),
                    f"{self._protocol.metadata['ip_address']}:{self._protocol.metadata['uri'].port}",
                )
            )
    def _fire_response_deferred(self) -> None:
        """Builds response from the self._response dict
        and fires the response deferred callback with the
        generated response instance"""
        body = self._response["body"].getvalue()
        # Pick the most specific Response subclass from headers/URL/body.
        response_cls = responsetypes.from_args(
            headers=self._response["headers"],
            url=self._request.url,
            body=body,
        )
        response = response_cls(
            url=self._request.url,
            status=int(self._response["headers"][":status"]),
            headers=self._response["headers"],
            body=body,
            request=self._request,
            certificate=self._protocol.metadata["certificate"],
            ip_address=self._protocol.metadata["ip_address"],
            protocol="h2",
        )
        self._deferred_response.callback(response)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/http2/__init__.py | scrapy/core/http2/__init__.py | python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false | |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/http2/protocol.py | scrapy/core/http2/protocol.py | from __future__ import annotations
import ipaddress
import itertools
import logging
from collections import deque
from typing import TYPE_CHECKING, Any
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.errors import ErrorCodes
from h2.events import (
ConnectionTerminated,
DataReceived,
Event,
ResponseReceived,
SettingsAcknowledged,
StreamEnded,
StreamReset,
UnknownFrameReceived,
WindowUpdated,
)
from h2.exceptions import FrameTooLargeError, H2Error
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.internet.interfaces import (
IAddress,
IHandshakeListener,
IProtocolNegotiationFactory,
)
from twisted.internet.protocol import Factory, Protocol, connectionDone
from twisted.internet.ssl import Certificate
from twisted.protocols.policies import TimeoutMixin
from zope.interface import implementer
from scrapy.core.http2.stream import Stream, StreamCloseReason
from scrapy.http import Request, Response
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
if TYPE_CHECKING:
from ipaddress import IPv4Address, IPv6Address
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.web.client import URI
from scrapy.settings import Settings
from scrapy.spiders import Spider
logger = logging.getLogger(__name__)
# ALPN protocol identifier for HTTP/2 (RFC 7540, Section 3.1).
PROTOCOL_NAME = b"h2"
class InvalidNegotiatedProtocol(H2Error):
    """Raised when ALPN negotiates a protocol other than HTTP/2."""

    def __init__(self, negotiated_protocol: bytes) -> None:
        self.negotiated_protocol = negotiated_protocol

    def __str__(self) -> str:
        return (
            f"Expected {PROTOCOL_NAME!r}, received {self.negotiated_protocol!r}"
        )
class RemoteTerminatedConnection(H2Error):
    """Raised when the remote peer terminates the HTTP/2 connection by
    sending a GOAWAY frame."""

    def __init__(
        self,
        remote_ip_address: IPv4Address | IPv6Address | None,
        event: ConnectionTerminated,
    ) -> None:
        self.remote_ip_address = remote_ip_address
        self.terminate_event = event

    def __str__(self) -> str:
        return f"Received GOAWAY frame from {self.remote_ip_address!r}"
class MethodNotAllowed405(H2Error):
    """Raised to signal an ``HTTP/2.0 405 Method Not Allowed`` response from
    the remote peer."""

    def __init__(self, remote_ip_address: IPv4Address | IPv6Address | None) -> None:
        self.remote_ip_address = remote_ip_address

    def __str__(self) -> str:
        return f"Received 'HTTP/2.0 405 Method Not Allowed' from {self.remote_ip_address!r}"
@implementer(IHandshakeListener)
class H2ClientProtocol(Protocol, TimeoutMixin):
IDLE_TIMEOUT = 240
def __init__(
self,
uri: URI,
settings: Settings,
conn_lost_deferred: Deferred[list[BaseException]],
) -> None:
"""
Arguments:
uri -- URI of the base url to which HTTP/2 Connection will be made.
uri is used to verify that incoming client requests have correct
base URL.
settings -- Scrapy project settings
conn_lost_deferred -- Deferred fires with the reason: Failure to notify
that connection was lost
"""
self._conn_lost_deferred: Deferred[list[BaseException]] = conn_lost_deferred
config = H2Configuration(client_side=True, header_encoding="utf-8")
self.conn = H2Connection(config=config)
# ID of the next request stream
# Following the convention - 'Streams initiated by a client MUST
# use odd-numbered stream identifiers' (RFC 7540 - Section 5.1.1)
self._stream_id_generator = itertools.count(start=1, step=2)
# Streams are stored in a dictionary keyed off their stream IDs
self.streams: dict[int, Stream] = {}
# If requests are received before connection is made we keep
# all requests in a pool and send them as the connection is made
self._pending_request_stream_pool: deque[Stream] = deque()
# Save an instance of errors raised which lead to losing the connection
# We pass these instances to the streams ResponseFailed() failure
self._conn_lost_errors: list[BaseException] = []
# Some meta data of this connection
# initialized when connection is successfully made
self.metadata: dict[str, Any] = {
# Peer certificate instance
"certificate": None,
# Address of the server we are connected to which
# is updated when HTTP/2 connection is made successfully
"ip_address": None,
# URI of the peer HTTP/2 connection is made
"uri": uri,
# Both ip_address and uri are used by the Stream before
# initiating the request to verify that the base address
# Variables taken from Project Settings
"default_download_maxsize": settings.getint("DOWNLOAD_MAXSIZE"),
"default_download_warnsize": settings.getint("DOWNLOAD_WARNSIZE"),
# Counter to keep track of opened streams. This counter
# is used to make sure that not more than MAX_CONCURRENT_STREAMS
# streams are opened which leads to ProtocolError
# We use simple FIFO policy to handle pending requests
"active_streams": 0,
# Flag to keep track if settings were acknowledged by the remote
# This ensures that we have established a HTTP/2 connection
"settings_acknowledged": False,
}
@property
def h2_connected(self) -> bool:
"""Boolean to keep track of the connection status.
This is used while initiating pending streams to make sure
that we initiate stream only during active HTTP/2 Connection
"""
assert self.transport is not None # typing
return bool(self.transport.connected) and self.metadata["settings_acknowledged"]
@property
def allowed_max_concurrent_streams(self) -> int:
"""We keep total two streams for client (sending data) and
server side (receiving data) for a single request. To be safe
we choose the minimum. Since this value can change in event
RemoteSettingsChanged we make variable a property.
"""
return min(
self.conn.local_settings.max_concurrent_streams,
self.conn.remote_settings.max_concurrent_streams,
)
    def _send_pending_requests(self) -> None:
        """Initiate all pending requests from the deque following FIFO
        We make sure that at any time {allowed_max_concurrent_streams}
        streams are active.
        """
        # Drain the FIFO pool only while the HTTP/2 session is fully
        # established and the concurrency budget allows another stream.
        while (
            self._pending_request_stream_pool
            and self.metadata["active_streams"] < self.allowed_max_concurrent_streams
            and self.h2_connected
        ):
            # Count the stream as active *before* initiating it so the loop
            # condition above reflects the stream opened below.
            self.metadata["active_streams"] += 1
            stream = self._pending_request_stream_pool.popleft()
            stream.initiate_request()
            self._write_to_transport()
def pop_stream(self, stream_id: int) -> Stream:
"""Perform cleanup when a stream is closed"""
stream = self.streams.pop(stream_id)
self.metadata["active_streams"] -= 1
self._send_pending_requests()
return stream
    def _new_stream(self, request: Request, spider: Spider) -> Stream:
        """Instantiates a new Stream object"""
        # Spider-level size attributes are deprecated in favour of the
        # DOWNLOAD_MAXSIZE / DOWNLOAD_WARNSIZE settings; warn when present.
        if hasattr(spider, "download_maxsize"):  # pragma: no cover
            warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
        if hasattr(spider, "download_warnsize"):  # pragma: no cover
            warn_on_deprecated_spider_attribute(
                "download_warnsize", "DOWNLOAD_WARNSIZE"
            )
        stream = Stream(
            # Client-initiated streams use odd ids (RFC 7540, section 5.1.1);
            # the generator yields 1, 3, 5, ...
            stream_id=next(self._stream_id_generator),
            request=request,
            protocol=self,
            download_maxsize=getattr(
                spider, "download_maxsize", self.metadata["default_download_maxsize"]
            ),
            download_warnsize=getattr(
                spider, "download_warnsize", self.metadata["default_download_warnsize"]
            ),
        )
        # Register the stream so incoming events can be routed to it.
        self.streams[stream.stream_id] = stream
        return stream
def _write_to_transport(self) -> None:
"""Write data to the underlying transport connection
from the HTTP2 connection instance if any
"""
assert self.transport is not None # typing
# Reset the idle timeout as connection is still actively sending data
self.resetTimeout()
data = self.conn.data_to_send()
self.transport.write(data)
def request(self, request: Request, spider: Spider) -> Deferred[Response]:
if not isinstance(request, Request):
raise TypeError(
f"Expected scrapy.http.Request, received {request.__class__.__qualname__}"
)
stream = self._new_stream(request, spider)
d: Deferred[Response] = stream.get_response()
# Add the stream to the request pool
self._pending_request_stream_pool.append(stream)
# If we receive a request when connection is idle
# We need to initiate pending requests
self._send_pending_requests()
return d
    def connectionMade(self) -> None:
        """Called by Twisted when the connection is established. We can start
        sending some data now: we should open with the connection preamble.
        """
        # Initialize the timeout
        self.setTimeout(self.IDLE_TIMEOUT)
        assert self.transport is not None  # typing
        # Record the resolved peer IP; it is used by the streams for address
        # verification and in error reporting.
        destination = self.transport.getPeer()
        self.metadata["ip_address"] = ipaddress.ip_address(destination.host)
        # Initiate H2 Connection
        self.conn.initiate_connection()
        # Flush the client preamble/SETTINGS queued by initiate_connection().
        self._write_to_transport()
def _lose_connection_with_error(self, errors: list[BaseException]) -> None:
"""Helper function to lose the connection with the error sent as a
reason"""
self._conn_lost_errors += errors
assert self.transport is not None # typing
self.transport.loseConnection()
def handshakeCompleted(self) -> None:
"""
Close the connection if it's not made via the expected protocol
"""
assert self.transport is not None # typing
if (
self.transport.negotiatedProtocol is not None
and self.transport.negotiatedProtocol != PROTOCOL_NAME
):
# we have not initiated the connection yet, no need to send a GOAWAY frame to the remote peer
self._lose_connection_with_error(
[InvalidNegotiatedProtocol(self.transport.negotiatedProtocol)]
)
def _check_received_data(self, data: bytes) -> None:
"""Checks for edge cases where the connection to remote fails
without raising an appropriate H2Error
Arguments:
data -- Data received from the remote
"""
if data.startswith(b"HTTP/2.0 405 Method Not Allowed"):
raise MethodNotAllowed405(self.metadata["ip_address"])
    def dataReceived(self, data: bytes) -> None:
        """Feed bytes from the transport into the HTTP/2 state machine and
        dispatch the resulting events."""
        # Reset the idle timeout as connection is still actively receiving data
        self.resetTimeout()
        try:
            # Detect non-H2 failure responses (e.g. a textual 405) first.
            self._check_received_data(data)
            events = self.conn.receive_data(data)
            self._handle_events(events)
        except H2Error as e:
            if isinstance(e, FrameTooLargeError):
                # hyper-h2 does not drop the connection in this scenario, we
                # need to abort the connection manually.
                self._conn_lost_errors += [e]
                assert self.transport is not None  # typing
                self.transport.abortConnection()
                return
            # Save this error as ultimately the connection will be dropped
            # internally by hyper-h2. Saved error will be passed to all the streams
            # closed with the connection.
            self._lose_connection_with_error([e])
        finally:
            # Always flush whatever the H2 state machine queued in response
            # (ACKs, GOAWAY, etc.), even on the error paths above.
            self._write_to_transport()
def timeoutConnection(self) -> None:
"""Called when the connection times out.
We lose the connection with TimeoutError"""
# Check whether there are open streams. If there are, we're going to
# want to use the error code PROTOCOL_ERROR. If there aren't, use
# NO_ERROR.
if (
self.conn.open_outbound_streams > 0
or self.conn.open_inbound_streams > 0
or self.metadata["active_streams"] > 0
):
error_code = ErrorCodes.PROTOCOL_ERROR
else:
error_code = ErrorCodes.NO_ERROR
self.conn.close_connection(error_code=error_code)
self._write_to_transport()
self._lose_connection_with_error(
[TxTimeoutError(f"Connection was IDLE for more than {self.IDLE_TIMEOUT}s")]
)
    def connectionLost(self, reason: Failure = connectionDone) -> None:
        """Called by Twisted when the transport connection is lost.
        No need to write anything to transport here.
        """
        # Cancel the timeout if not done yet
        self.setTimeout(None)
        # Notify the connection pool instance such that no new requests are
        # sent over current connection
        if not reason.check(connectionDone):
            self._conn_lost_errors.append(reason)
        self._conn_lost_deferred.callback(self._conn_lost_errors)
        for stream in self.streams.values():
            if stream.metadata["request_sent"]:
                # Request already reached the wire before the drop.
                close_reason = StreamCloseReason.CONNECTION_LOST
            else:
                # Request never left this client; the stream was merely queued.
                close_reason = StreamCloseReason.INACTIVE
            stream.close(close_reason, self._conn_lost_errors, from_protocol=True)
        # All tracked streams are gone; reset the bookkeeping accordingly.
        self.metadata["active_streams"] -= len(self.streams)
        self.streams.clear()
        self._pending_request_stream_pool.clear()
        self.conn.close_connection()
    def _handle_events(self, events: list[Event]) -> None:
        """Private method which acts as a bridge between the events
        received from the HTTP/2 data and IH2EventsHandler

        Arguments:
        events -- A list of events that the remote peer triggered by sending data
        """
        # Dispatch on concrete event type; isinstance (rather than a type
        # dict) keeps subclasses of these events handled too.
        for event in events:
            if isinstance(event, ConnectionTerminated):
                self.connection_terminated(event)
            elif isinstance(event, DataReceived):
                self.data_received(event)
            elif isinstance(event, ResponseReceived):
                self.response_received(event)
            elif isinstance(event, StreamEnded):
                self.stream_ended(event)
            elif isinstance(event, StreamReset):
                self.stream_reset(event)
            elif isinstance(event, WindowUpdated):
                self.window_updated(event)
            elif isinstance(event, SettingsAcknowledged):
                self.settings_acknowledged(event)
            elif isinstance(event, UnknownFrameReceived):
                logger.warning("Unknown frame received: %s", event.frame)
            # Any other event types are intentionally ignored.
# Event handler functions starts here
def connection_terminated(self, event: ConnectionTerminated) -> None:
self._lose_connection_with_error(
[RemoteTerminatedConnection(self.metadata["ip_address"], event)]
)
def data_received(self, event: DataReceived) -> None:
try:
stream = self.streams[event.stream_id]
except KeyError:
pass # We ignore server-initiated events
else:
stream.receive_data(event.data, event.flow_controlled_length)
def response_received(self, event: ResponseReceived) -> None:
try:
stream = self.streams[event.stream_id]
except KeyError:
pass # We ignore server-initiated events
else:
stream.receive_headers(event.headers)
    def settings_acknowledged(self, event: SettingsAcknowledged) -> None:
        """Remote peer ACKed our SETTINGS frame: from this point on the
        HTTP/2 session counts as fully established (see ``h2_connected``)."""
        self.metadata["settings_acknowledged"] = True
        # Send off all the pending requests as now we have
        # established a proper HTTP/2 connection
        self._send_pending_requests()
        # Update certificate when our HTTP/2 connection is established
        assert self.transport is not None  # typing
        self.metadata["certificate"] = Certificate(self.transport.getPeerCertificate())
def stream_ended(self, event: StreamEnded) -> None:
try:
stream = self.pop_stream(event.stream_id)
except KeyError:
pass # We ignore server-initiated events
else:
stream.close(StreamCloseReason.ENDED, from_protocol=True)
def stream_reset(self, event: StreamReset) -> None:
try:
stream = self.pop_stream(event.stream_id)
except KeyError:
pass # We ignore server-initiated events
else:
stream.close(StreamCloseReason.RESET, from_protocol=True)
def window_updated(self, event: WindowUpdated) -> None:
if event.stream_id != 0:
self.streams[event.stream_id].receive_window_update()
else:
# Send leftover data for all the streams
for stream in self.streams.values():
stream.receive_window_update()
@implementer(IProtocolNegotiationFactory)
class H2ClientFactory(Factory):
    """Twisted protocol factory that builds one H2ClientProtocol per
    connection and restricts protocol negotiation (ALPN/NPN) to HTTP/2."""

    def __init__(
        self,
        uri: URI,
        settings: Settings,
        conn_lost_deferred: Deferred[list[BaseException]],
    ) -> None:
        # Stored verbatim; each built protocol receives this same trio.
        # conn_lost_deferred fires with the accumulated errors once the
        # protocol loses its connection.
        self.uri = uri
        self.settings = settings
        self.conn_lost_deferred = conn_lost_deferred

    def buildProtocol(self, addr: IAddress) -> H2ClientProtocol:
        return H2ClientProtocol(self.uri, self.settings, self.conn_lost_deferred)

    def acceptableProtocols(self) -> list[bytes]:
        # Only HTTP/2 ("h2") is acceptable during negotiation.
        return [PROTOCOL_NAME]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/http2/agent.py | scrapy/core/http2/agent.py | from __future__ import annotations
from collections import deque
from typing import TYPE_CHECKING
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
BrowserLikePolicyForHTTPS,
ResponseFailed,
_StandardEndpointFactory,
)
from twisted.web.error import SchemeNotSupported
from scrapy.core.downloader.contextfactory import AcceptableProtocolsContextFactory
from scrapy.core.http2.protocol import H2ClientFactory, H2ClientProtocol
if TYPE_CHECKING:
from twisted.internet.base import ReactorBase
from twisted.internet.endpoints import HostnameEndpoint
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
# Key identifying one pooled HTTP/2 connection: (scheme, host, port).
ConnectionKeyT = tuple[bytes, bytes, int]
class H2ConnectionPool:
    """Pool of HTTP/2 client connections keyed by (scheme, host, port).

    Requests that arrive while a connection is still being established are
    parked as Deferreds in a per-key deque and fired once the connection
    attempt succeeds (or fails).
    """

    def __init__(self, reactor: ReactorBase, settings: Settings) -> None:
        self._reactor = reactor
        self.settings = settings
        # Store a dictionary which is used to get the respective
        # H2ClientProtocolInstance using the key as Tuple(scheme, hostname, port)
        self._connections: dict[ConnectionKeyT, H2ClientProtocol] = {}
        # Save all requests that arrive before the connection is established
        self._pending_requests: dict[
            ConnectionKeyT, deque[Deferred[H2ClientProtocol]]
        ] = {}

    def get_connection(
        self, key: ConnectionKeyT, uri: URI, endpoint: HostnameEndpoint
    ) -> Deferred[H2ClientProtocol]:
        """Return a Deferred that fires with a ready protocol for *key*:
        joins an in-flight connection attempt, reuses an established
        connection, or starts a new one."""
        if key in self._pending_requests:
            # Received a request while connecting to remote
            # Create a deferred which will fire with the H2ClientProtocol
            # instance
            d: Deferred[H2ClientProtocol] = Deferred()
            self._pending_requests[key].append(d)
            return d
        # Check if we already have a connection to the remote
        conn = self._connections.get(key, None)
        if conn:
            # Return this connection instance wrapped inside a deferred
            return defer.succeed(conn)
        # No connection is established for the given URI
        return self._new_connection(key, uri, endpoint)

    def _new_connection(
        self, key: ConnectionKeyT, uri: URI, endpoint: HostnameEndpoint
    ) -> Deferred[H2ClientProtocol]:
        """Start a new connection attempt for *key* and return a Deferred
        that fires once it is usable."""
        self._pending_requests[key] = deque()
        # When the connection is eventually lost, drop it from the pool.
        conn_lost_deferred: Deferred[list[BaseException]] = Deferred()
        conn_lost_deferred.addCallback(self._remove_connection, key)
        factory = H2ClientFactory(uri, self.settings, conn_lost_deferred)
        conn_d = endpoint.connect(factory)
        conn_d.addCallback(self.put_connection, key)
        d: Deferred[H2ClientProtocol] = Deferred()
        self._pending_requests[key].append(d)
        return d

    def put_connection(
        self, conn: H2ClientProtocol, key: ConnectionKeyT
    ) -> H2ClientProtocol:
        """Register an established connection and release all waiters."""
        self._connections[key] = conn
        # Now as we have established a proper HTTP/2 connection
        # we fire all the deferred's with the connection instance
        pending_requests = self._pending_requests.pop(key, None)
        while pending_requests:
            d = pending_requests.popleft()
            d.callback(conn)
        return conn

    def _remove_connection(
        self, errors: list[BaseException], key: ConnectionKeyT
    ) -> None:
        """Drop a lost connection from the pool and fail any requests that
        were still waiting on it."""
        self._connections.pop(key)
        # Call the errback of all the pending requests for this connection
        pending_requests = self._pending_requests.pop(key, None)
        while pending_requests:
            d = pending_requests.popleft()
            d.errback(ResponseFailed(errors))

    def close_connections(self) -> None:
        """Abort all the HTTP/2 connections currently in the pool.

        Note: returns None (the transports are aborted; pool entries are
        presumably removed via each connection's conn-lost callback —
        confirm against H2ClientProtocol.connectionLost).
        """
        for conn in self._connections.values():
            assert conn.transport is not None  # typing
            conn.transport.abortConnection()
class H2Agent:
    """Agent that issues Scrapy requests over pooled HTTP/2 connections."""

    def __init__(
        self,
        reactor: ReactorBase,
        pool: H2ConnectionPool,
        context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(),
        connect_timeout: float | None = None,
        bind_address: bytes | None = None,
    ) -> None:
        self._reactor = reactor
        self._pool = pool
        # Restrict TLS negotiation to HTTP/2 ("h2") only.
        self._context_factory = AcceptableProtocolsContextFactory(
            context_factory, acceptable_protocols=[b"h2"]
        )
        self.endpoint_factory = _StandardEndpointFactory(
            self._reactor, self._context_factory, connect_timeout, bind_address
        )

    def get_endpoint(self, uri: URI) -> HostnameEndpoint:
        """Build the endpoint used to reach *uri*."""
        return self.endpoint_factory.endpointForURI(uri)

    def get_key(self, uri: URI) -> ConnectionKeyT:
        """Connection-pool key for *uri*.

        Arguments:
        uri - URI obtained directly from request URL
        """
        return uri.scheme, uri.host, uri.port

    def request(self, request: Request, spider: Spider) -> Deferred[Response]:
        """Fetch *request* over a pooled HTTP/2 connection."""
        parsed_uri = URI.fromBytes(bytes(request.url, encoding="utf-8"))
        try:
            endpoint = self.get_endpoint(parsed_uri)
        except SchemeNotSupported:
            return defer.fail(Failure())
        connection_d: Deferred[H2ClientProtocol] = self._pool.get_connection(
            self.get_key(parsed_uri), parsed_uri, endpoint
        )
        response_d: Deferred[Response] = connection_d.addCallback(
            lambda conn: conn.request(request, spider)
        )
        return response_d
class ScrapyProxyH2Agent(H2Agent):
    """H2Agent variant that sends every request through a single proxy."""

    def __init__(
        self,
        reactor: ReactorBase,
        proxy_uri: URI,
        pool: H2ConnectionPool,
        context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(),
        connect_timeout: float | None = None,
        bind_address: bytes | None = None,
    ) -> None:
        super().__init__(
            reactor=reactor,
            pool=pool,
            context_factory=context_factory,
            connect_timeout=connect_timeout,
            bind_address=bind_address,
        )
        # URI of the proxy all traffic is routed through.
        self._proxy_uri = proxy_uri

    def get_endpoint(self, uri: URI) -> HostnameEndpoint:
        # Always connect to the proxy, regardless of the request URI.
        return self.endpoint_factory.endpointForURI(self._proxy_uri)

    def get_key(self, uri: URI) -> ConnectionKeyT:
        """We use the proxy uri instead of uri obtained from request url"""
        return b"http-proxy", self._proxy_uri.host, self._proxy_uri.port
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/tls.py | scrapy/core/downloader/tls.py | import logging
from typing import Any
from OpenSSL import SSL
from service_identity.exceptions import CertificateError
from twisted.internet._sslverify import (
ClientTLSOptions,
VerificationError,
verifyHostname,
)
from twisted.internet.ssl import AcceptableCiphers
from scrapy.utils.ssl import get_temp_key_info, x509name_to_string
logger = logging.getLogger(__name__)
# Names accepted by the DOWNLOADER_CLIENT_TLS_METHOD setting, mapped below
# to the corresponding pyOpenSSL method constants.
METHOD_TLS = "TLS"
METHOD_TLSv10 = "TLSv1.0"
METHOD_TLSv11 = "TLSv1.1"
METHOD_TLSv12 = "TLSv1.2"
openssl_methods: dict[str, int] = {
    METHOD_TLS: SSL.SSLv23_METHOD,  # protocol negotiation (recommended)
    METHOD_TLSv10: SSL.TLSv1_METHOD,  # TLS 1.0 only
    METHOD_TLSv11: SSL.TLSv1_1_METHOD,  # TLS 1.1 only
    METHOD_TLSv12: SSL.TLSv1_2_METHOD,  # TLS 1.2 only
}
class ScrapyClientTLSOptions(ClientTLSOptions):
    """
    SSL Client connection creator ignoring certificate verification errors
    (for genuinely invalid certificates or bugs in verification code).

    Same as Twisted's private _sslverify.ClientTLSOptions,
    except that VerificationError, CertificateError and ValueError
    exceptions are caught, so that the connection is not closed, only
    logging warnings. Also, HTTPS connection parameters logging is added.
    """

    def __init__(self, hostname: str, ctx: SSL.Context, verbose_logging: bool = False):
        super().__init__(hostname, ctx)
        # When enabled, protocol/cipher/certificate details are logged at
        # DEBUG level after each handshake.
        self.verbose_logging: bool = verbose_logging

    def _identityVerifyingInfoCallback(
        self, connection: SSL.Connection, where: int, ret: Any
    ) -> None:
        # OpenSSL info callback: *where* is a bitmask of SSL_CB_* flags.
        if where & SSL.SSL_CB_HANDSHAKE_START:
            # Send SNI with the target hostname at handshake start.
            connection.set_tlsext_host_name(self._hostnameBytes)
        elif where & SSL.SSL_CB_HANDSHAKE_DONE:
            if self.verbose_logging:
                logger.debug(
                    "SSL connection to %s using protocol %s, cipher %s",
                    self._hostnameASCII,
                    connection.get_protocol_version_name(),
                    connection.get_cipher_name(),
                )
                server_cert = connection.get_peer_certificate()
                if server_cert:
                    logger.debug(
                        'SSL connection certificate: issuer "%s", subject "%s"',
                        x509name_to_string(server_cert.get_issuer()),
                        x509name_to_string(server_cert.get_subject()),
                    )
                key_info = get_temp_key_info(connection._ssl)
                if key_info:
                    logger.debug("SSL temp key: %s", key_info)
            # Verify the certificate matches the hostname, but deliberately
            # only warn (never close the connection) on failure.
            try:
                verifyHostname(connection, self._hostnameASCII)
            except (CertificateError, VerificationError) as e:
                logger.warning(
                    'Remote certificate is not valid for hostname "%s"; %s',
                    self._hostnameASCII,
                    e,
                )
            except ValueError as e:
                logger.warning(
                    "Ignoring error while verifying certificate "
                    'from host "%s" (exception: %r)',
                    self._hostnameASCII,
                    e,
                )
# Cipher policy built from OpenSSL's "DEFAULT" cipher string; used as the
# fallback when no explicit TLS cipher string is configured.
DEFAULT_CIPHERS: AcceptableCiphers = AcceptableCiphers.fromOpenSSLCipherString(
    "DEFAULT"
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/middleware.py | scrapy/core/downloader/middleware.py | """
Downloader Middleware manager
See documentation in docs/topics/downloader-middleware.rst
"""
from __future__ import annotations
import warnings
from functools import wraps
from typing import TYPE_CHECKING, Any, cast
from scrapy.exceptions import ScrapyDeprecationWarning, _InvalidOutput
from scrapy.http import Request, Response
from scrapy.middleware import MiddlewareManager
from scrapy.utils.conf import build_component_list
from scrapy.utils.defer import (
_defer_sleep_async,
deferred_from_coro,
ensure_awaitable,
maybe_deferred_to_future,
)
from scrapy.utils.python import global_object_name
if TYPE_CHECKING:
from collections.abc import Callable, Coroutine
from twisted.internet.defer import Deferred
from scrapy import Spider
from scrapy.settings import BaseSettings
class DownloaderMiddlewareManager(MiddlewareManager):
    """Runs requests, responses and download exceptions through the
    configured downloader middlewares around the actual download call."""

    component_name = "downloader middleware"

    @classmethod
    def _get_mwlist_from_settings(cls, settings: BaseSettings) -> list[Any]:
        # Merge DOWNLOADER_MIDDLEWARES with its *_BASE counterpart.
        return build_component_list(settings.getwithbase("DOWNLOADER_MIDDLEWARES"))

    def _add_middleware(self, mw: Any) -> None:
        # process_request hooks run in declaration order (append);
        # process_response/process_exception run in reverse order (appendleft).
        if hasattr(mw, "process_request"):
            self.methods["process_request"].append(mw.process_request)
            self._check_mw_method_spider_arg(mw.process_request)
        if hasattr(mw, "process_response"):
            self.methods["process_response"].appendleft(mw.process_response)
            self._check_mw_method_spider_arg(mw.process_response)
        if hasattr(mw, "process_exception"):
            self.methods["process_exception"].appendleft(mw.process_exception)
            self._check_mw_method_spider_arg(mw.process_exception)

    def download(
        self,
        download_func: Callable[[Request, Spider], Deferred[Response]],
        request: Request,
        spider: Spider,
    ) -> Deferred[Response | Request]:
        """Deprecated Deferred-based wrapper around :meth:`download_async`."""
        warnings.warn(
            "DownloaderMiddlewareManager.download() is deprecated, use download_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )

        @wraps(download_func)
        async def download_func_wrapped(request: Request) -> Response:
            return await maybe_deferred_to_future(download_func(request, spider))

        self._set_compat_spider(spider)
        return deferred_from_coro(self.download_async(download_func_wrapped, request))

    async def download_async(
        self,
        download_func: Callable[[Request], Coroutine[Any, Any, Response]],
        request: Request,
    ) -> Response | Request:
        """Run *request* through the middleware chain around *download_func*.

        Returns the final Response, or a Request when a middleware asks for
        the request to be rescheduled; middleware exceptions propagate after
        process_exception hooks had a chance to handle them.
        """

        async def process_request(request: Request) -> Response | Request:
            # Give each process_request hook a chance to short-circuit the
            # download by returning a Response or a replacement Request.
            for method in self.methods["process_request"]:
                method = cast("Callable", method)
                if method in self._mw_methods_requiring_spider:
                    response = await ensure_awaitable(
                        method(request=request, spider=self._spider),
                        _warn=global_object_name(method),
                    )
                else:
                    response = await ensure_awaitable(
                        method(request=request), _warn=global_object_name(method)
                    )
                if response is not None and not isinstance(
                    response, (Response, Request)
                ):
                    raise _InvalidOutput(
                        f"Middleware {method.__qualname__} must return None, Response or "
                        f"Request, got {response.__class__.__name__}"
                    )
                if response:
                    return response
            # No middleware short-circuited: perform the actual download.
            return await download_func(request)

        async def process_response(response: Response | Request) -> Response | Request:
            if response is None:
                raise TypeError("Received None in process_response")
            # A Request result skips the response hooks entirely.
            if isinstance(response, Request):
                return response
            for method in self.methods["process_response"]:
                method = cast("Callable", method)
                if method in self._mw_methods_requiring_spider:
                    response = await ensure_awaitable(
                        method(request=request, response=response, spider=self._spider),
                        _warn=global_object_name(method),
                    )
                else:
                    response = await ensure_awaitable(
                        method(request=request, response=response),
                        _warn=global_object_name(method),
                    )
                if not isinstance(response, (Response, Request)):
                    raise _InvalidOutput(
                        f"Middleware {method.__qualname__} must return Response or Request, "
                        f"got {type(response)}"
                    )
                # A middleware turning the response into a Request stops the chain.
                if isinstance(response, Request):
                    return response
            return response

        async def process_exception(exception: Exception) -> Response | Request:
            # Offer each process_exception hook the chance to recover; if
            # none does, re-raise the original exception.
            for method in self.methods["process_exception"]:
                method = cast("Callable", method)
                if method in self._mw_methods_requiring_spider:
                    response = await ensure_awaitable(
                        method(
                            request=request, exception=exception, spider=self._spider
                        ),
                        _warn=global_object_name(method),
                    )
                else:
                    response = await ensure_awaitable(
                        method(request=request, exception=exception),
                        _warn=global_object_name(method),
                    )
                if response is not None and not isinstance(
                    response, (Response, Request)
                ):
                    raise _InvalidOutput(
                        f"Middleware {method.__qualname__} must return None, Response or "
                        f"Request, got {type(response)}"
                    )
                if response:
                    return response
            raise exception

        try:
            result: Response | Request = await process_request(request)
        except Exception as ex:
            await _defer_sleep_async()
            # either returns a request or response (which we pass to process_response())
            # or reraises the exception
            result = await process_exception(ex)
        return await process_response(result)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/contextfactory.py | scrapy/core/downloader/contextfactory.py | from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any
from OpenSSL import SSL
from twisted.internet._sslverify import _setAcceptableProtocols
from twisted.internet.ssl import (
AcceptableCiphers,
CertificateOptions,
optionsForClientTLS,
platformTrust,
)
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.iweb import IPolicyForHTTPS
from zope.interface.declarations import implementer
from zope.interface.verify import verifyObject
from scrapy.core.downloader.tls import (
DEFAULT_CIPHERS,
ScrapyClientTLSOptions,
openssl_methods,
)
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import method_is_overridden
from scrapy.utils.misc import build_from_crawler, load_object
if TYPE_CHECKING:
from twisted.internet._sslverify import ClientTLSOptions
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
@implementer(IPolicyForHTTPS)
class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
    """
    Non-peer-certificate verifying HTTPS context factory

    Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
    which allows TLS protocol negotiation

    'A TLS/SSL connection established with [this method] may
    understand the TLSv1, TLSv1.1 and TLSv1.2 protocols.'
    """

    def __init__(
        self,
        method: int = SSL.SSLv23_METHOD,
        tls_verbose_logging: bool = False,
        tls_ciphers: str | None = None,
        *args: Any,
        **kwargs: Any,
    ):
        super().__init__(*args, **kwargs)
        self._ssl_method: int = method
        self.tls_verbose_logging: bool = tls_verbose_logging
        self.tls_ciphers: AcceptableCiphers
        # An explicit OpenSSL cipher string overrides the default policy.
        if tls_ciphers:
            self.tls_ciphers = AcceptableCiphers.fromOpenSSLCipherString(tls_ciphers)
        else:
            self.tls_ciphers = DEFAULT_CIPHERS
        # Warn subclasses that still override the deprecated getContext().
        if method_is_overridden(type(self), ScrapyClientContextFactory, "getContext"):
            warnings.warn(
                "Overriding ScrapyClientContextFactory.getContext() is deprecated and that method"
                " will be removed in a future Scrapy version. Override creatorForNetloc() instead.",
                category=ScrapyDeprecationWarning,
                stacklevel=2,
            )

    @classmethod
    def from_crawler(
        cls,
        crawler: Crawler,
        method: int = SSL.SSLv23_METHOD,
        *args: Any,
        **kwargs: Any,
    ) -> Self:
        """Build the factory from crawler settings (TLS logging/ciphers)."""
        tls_verbose_logging: bool = crawler.settings.getbool(
            "DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING"
        )
        tls_ciphers: str | None = crawler.settings["DOWNLOADER_CLIENT_TLS_CIPHERS"]
        return cls(  # type: ignore[misc]
            method=method,
            tls_verbose_logging=tls_verbose_logging,
            tls_ciphers=tls_ciphers,
            *args,
            **kwargs,
        )

    def getCertificateOptions(self) -> CertificateOptions:
        # setting verify=True will require you to provide CAs
        # to verify against; in other words: it's not that simple
        return CertificateOptions(
            verify=False,
            method=self._ssl_method,
            fixBrokenPeers=True,
            acceptableCiphers=self.tls_ciphers,
        )

    # kept for old-style HTTP/1.0 downloader context twisted calls,
    # e.g. connectSSL()
    def getContext(self, hostname: Any = None, port: Any = None) -> SSL.Context:
        ctx: SSL.Context = self.getCertificateOptions().getContext()
        # Allow connecting to peers that do not support secure renegotiation.
        ctx.set_options(0x4)  # OP_LEGACY_SERVER_CONNECT
        return ctx

    def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
        # Per-host TLS options that only warn on certificate problems.
        return ScrapyClientTLSOptions(
            hostname.decode("ascii"),
            self.getContext(),
            verbose_logging=self.tls_verbose_logging,
        )
@implementer(IPolicyForHTTPS)
class BrowserLikeContextFactory(ScrapyClientContextFactory):
    """
    Twisted-recommended context factory for web clients.

    Quoting the documentation of the :class:`~twisted.web.client.Agent` class:

        The default is to use a
        :class:`~twisted.web.client.BrowserLikePolicyForHTTPS`, so unless you
        have special requirements you can leave this as-is.

    :meth:`creatorForNetloc` is the same as
    :class:`~twisted.web.client.BrowserLikePolicyForHTTPS` except this context
    factory allows setting the TLS/SSL method to use.

    The default OpenSSL method is ``TLS_METHOD`` (also called
    ``SSLv23_METHOD``) which allows TLS protocol negotiation.
    """

    def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
        # trustRoot set to platformTrust() will use the platform's root CAs.
        #
        # This means that a website like https://www.cacert.org will be rejected
        # by default, since CAcert.org CA certificate is seldom shipped.
        return optionsForClientTLS(
            hostname=hostname.decode("ascii"),
            trustRoot=platformTrust(),
            extraCertificateOptions={"method": self._ssl_method},
        )
@implementer(IPolicyForHTTPS)
class AcceptableProtocolsContextFactory:
    """Wrap another IPolicyForHTTPS so every per-host TLS options object it
    produces advertises a fixed protocol list for NPN/ALPN negotiation.
    """

    def __init__(self, context_factory: Any, acceptable_protocols: list[bytes]):
        # Fail fast if the wrapped object does not implement the policy.
        verifyObject(IPolicyForHTTPS, context_factory)
        self._wrapped_context_factory: Any = context_factory
        self._acceptable_protocols: list[bytes] = acceptable_protocols

    def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
        """Delegate to the wrapped factory, then patch its OpenSSL context
        with the configured acceptable protocols."""
        tls_options: ClientTLSOptions = self._wrapped_context_factory.creatorForNetloc(
            hostname, port
        )
        _setAcceptableProtocols(tls_options._ctx, self._acceptable_protocols)
        return tls_options
def load_context_factory_from_settings(
    settings: BaseSettings, crawler: Crawler
) -> IPolicyForHTTPS:
    """Instantiate the configured DOWNLOADER_CLIENTCONTEXTFACTORY.

    First tries to pass the TLS ``method`` selected via
    DOWNLOADER_CLIENT_TLS_METHOD; factories that do not accept it are
    instantiated with defaults and a warning is emitted.
    """
    ssl_method = openssl_methods[settings.get("DOWNLOADER_CLIENT_TLS_METHOD")]
    context_factory_cls = load_object(settings["DOWNLOADER_CLIENTCONTEXTFACTORY"])
    # try method-aware context factory
    try:
        context_factory = build_from_crawler(
            context_factory_cls,
            crawler,
            method=ssl_method,
        )
    except TypeError:
        # use context factory defaults
        context_factory = build_from_crawler(
            context_factory_cls,
            crawler,
        )
        msg = (
            f"{settings['DOWNLOADER_CLIENTCONTEXTFACTORY']} does not accept "
            "a `method` argument (type OpenSSL.SSL method, e.g. "
            "OpenSSL.SSL.SSLv23_METHOD) and/or a `tls_verbose_logging` "
            "argument and/or a `tls_ciphers` argument. Please, upgrade your "
            "context factory class to handle them or ignore them."
        )
        warnings.warn(msg)
    return context_factory
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/__init__.py | scrapy/core/downloader/__init__.py | from __future__ import annotations
import random
from collections import deque
from datetime import datetime
from time import time
from typing import TYPE_CHECKING, Any
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python.failure import Failure
from scrapy import Request, Spider, signals
from scrapy.core.downloader.handlers import DownloadHandlers
from scrapy.core.downloader.middleware import DownloaderMiddlewareManager
from scrapy.resolver import dnscache
from scrapy.utils.asyncio import (
AsyncioLoopingCall,
CallLaterResult,
call_later,
create_looping_call,
)
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.defer import (
_defer_sleep_async,
_schedule_coro,
deferred_from_coro,
maybe_deferred_to_future,
)
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
from collections.abc import Generator
from twisted.internet.task import LoopingCall
from scrapy.crawler import Crawler
from scrapy.http import Response
from scrapy.settings import BaseSettings
from scrapy.signalmanager import SignalManager
class Slot:
    """Bookkeeping for one downloader slot.

    Tracks the slot's concurrency budget, its configured (optionally
    jittered) download delay, and the requests queued, active and
    currently transferring through it.
    """

    def __init__(
        self,
        concurrency: int,
        delay: float,
        randomize_delay: bool,
    ):
        self.concurrency: int = concurrency
        self.delay: float = delay
        self.randomize_delay: bool = randomize_delay
        # Requests this slot has accepted and not yet finished.
        self.active: set[Request] = set()
        # FIFO of (request, deferred) pairs waiting for a free transfer slot.
        self.queue: deque[tuple[Request, Deferred[Response]]] = deque()
        # Requests currently being downloaded.
        self.transferring: set[Request] = set()
        # Timestamp of the last activity seen on this slot.
        self.lastseen: float = 0
        # Pending delayed call scheduled for this slot, if any.
        self.latercall: CallLaterResult | None = None

    def free_transfer_slots(self) -> int:
        """How many additional downloads may start right now."""
        return self.concurrency - len(self.transferring)

    def download_delay(self) -> float:
        """Delay before the next download, jittered to 0.5x-1.5x of the
        configured value when randomization is enabled."""
        if not self.randomize_delay:
            return self.delay
        return random.uniform(0.5 * self.delay, 1.5 * self.delay)  # noqa: S311

    def close(self) -> None:
        """Cancel the pending delayed call, if one is scheduled."""
        if self.latercall:
            self.latercall.cancel()
            self.latercall = None

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return (
            f"{cls_name}(concurrency={self.concurrency!r}, "
            f"delay={self.delay:.2f}, "
            f"randomize_delay={self.randomize_delay!r})"
        )

    def __str__(self) -> str:
        return (
            f"<downloader.Slot concurrency={self.concurrency!r} "
            f"delay={self.delay:.2f} randomize_delay={self.randomize_delay!r} "
            f"len(active)={len(self.active)} len(queue)={len(self.queue)} "
            f"len(transferring)={len(self.transferring)} "
            f"lastseen={datetime.fromtimestamp(self.lastseen).isoformat()}>"
        )
def _get_concurrency_delay(
    concurrency: int, spider: Spider, settings: BaseSettings
) -> tuple[int, float]:
    """Resolve the effective concurrency and delay for *spider*.

    Settings provide the defaults; spider attributes, when present,
    override them (``max_concurrent_requests`` is deprecated and warns).
    """
    delay: float = getattr(
        spider, "download_delay", settings.getfloat("DOWNLOAD_DELAY")
    )
    if hasattr(spider, "max_concurrent_requests"):  # pragma: no cover
        warn_on_deprecated_spider_attribute(
            "max_concurrent_requests", "CONCURRENT_REQUESTS"
        )
        concurrency = spider.max_concurrent_requests
    return concurrency, delay
class Downloader:
    """Default Scrapy downloader.

    Groups requests into per-domain/IP :class:`Slot` queues, enforcing the
    configured concurrency limits and download delays, and hands each
    request to the download handlers through the downloader middleware
    chain.
    """

    # request.meta key used to pin a request to (and record) its slot.
    DOWNLOAD_SLOT = "download_slot"

    def __init__(self, crawler: Crawler):
        self.crawler: Crawler = crawler
        self.settings: BaseSettings = crawler.settings
        self.signals: SignalManager = crawler.signals
        # Lazily-created slots, keyed by get_slot_key() (domain or IP).
        self.slots: dict[str, Slot] = {}
        # All requests currently inside the downloader, across all slots.
        self.active: set[Request] = set()
        self.handlers: DownloadHandlers = DownloadHandlers(crawler)
        self.total_concurrency: int = self.settings.getint("CONCURRENT_REQUESTS")
        self.domain_concurrency: int = self.settings.getint(
            "CONCURRENT_REQUESTS_PER_DOMAIN"
        )
        self.ip_concurrency: int = self.settings.getint("CONCURRENT_REQUESTS_PER_IP")
        self.randomize_delay: bool = self.settings.getbool("RANDOMIZE_DOWNLOAD_DELAY")
        self.middleware: DownloaderMiddlewareManager = (
            DownloaderMiddlewareManager.from_crawler(crawler)
        )
        # Periodically drop idle slots so self.slots stays bounded.
        self._slot_gc_loop: AsyncioLoopingCall | LoopingCall = create_looping_call(
            self._slot_gc
        )
        self._slot_gc_loop.start(60)
        # Per-slot overrides from the DOWNLOAD_SLOTS setting.
        self.per_slot_settings: dict[str, dict[str, Any]] = self.settings.getdict(
            "DOWNLOAD_SLOTS"
        )

    @inlineCallbacks
    @_warn_spider_arg
    def fetch(
        self, request: Request, spider: Spider | None = None
    ) -> Generator[Deferred[Any], Any, Response | Request]:
        """Download *request* through the middleware chain.

        The request is tracked in :attr:`active` for the whole duration of
        the download.
        """
        self.active.add(request)
        try:
            return (
                yield deferred_from_coro(
                    self.middleware.download_async(self._enqueue_request, request)
                )
            )
        finally:
            self.active.remove(request)

    def needs_backout(self) -> bool:
        # True when the global CONCURRENT_REQUESTS limit has been reached.
        return len(self.active) >= self.total_concurrency

    @_warn_spider_arg
    def _get_slot(
        self, request: Request, spider: Spider | None = None
    ) -> tuple[str, Slot]:
        """Return the (key, slot) pair for *request*, creating the slot if needed."""
        key = self.get_slot_key(request)
        if key not in self.slots:
            assert self.crawler.spider
            slot_settings = self.per_slot_settings.get(key, {})
            # Per-IP concurrency, when enabled, takes precedence over per-domain.
            conc = (
                self.ip_concurrency if self.ip_concurrency else self.domain_concurrency
            )
            conc, delay = _get_concurrency_delay(
                conc, self.crawler.spider, self.settings
            )
            # DOWNLOAD_SLOTS entries override the computed defaults.
            conc, delay = (
                slot_settings.get("concurrency", conc),
                slot_settings.get("delay", delay),
            )
            randomize_delay = slot_settings.get("randomize_delay", self.randomize_delay)
            new_slot = Slot(conc, delay, randomize_delay)
            self.slots[key] = new_slot
        return key, self.slots[key]

    def get_slot_key(self, request: Request) -> str:
        """Return the slot key for *request*: meta override, hostname, or IP."""
        if (meta_slot := request.meta.get(self.DOWNLOAD_SLOT)) is not None:
            return meta_slot
        key = urlparse_cached(request).hostname or ""
        if self.ip_concurrency:
            # Group by resolved IP instead of hostname when per-IP limits apply.
            key = dnscache.get(key, key)
        return key

    # passed as download_func into self.middleware.download() in self.fetch()
    async def _enqueue_request(self, request: Request) -> Response:
        """Queue *request* on its slot and wait for the download to finish."""
        key, slot = self._get_slot(request)
        request.meta[self.DOWNLOAD_SLOT] = key
        slot.active.add(request)
        self.signals.send_catch_log(
            signal=signals.request_reached_downloader,
            request=request,
            spider=self.crawler.spider,
        )
        d: Deferred[Response] = Deferred()
        slot.queue.append((request, d))
        self._process_queue(slot)
        try:
            return await maybe_deferred_to_future(d)  # fired in _wait_for_download()
        finally:
            slot.active.remove(request)

    def _process_queue(self, slot: Slot) -> None:
        """Start queued downloads from *slot*, honoring delay and concurrency."""
        if slot.latercall:
            # block processing until slot.latercall is called
            return
        # Delay queue processing if a download_delay is configured
        now = time()
        delay = slot.download_delay()
        if delay:
            penalty = delay - now + slot.lastseen
            if penalty > 0:
                slot.latercall = call_later(penalty, self._latercall, slot)
                return
        # Process enqueued requests if there are free slots to transfer for this slot
        while slot.queue and slot.free_transfer_slots() > 0:
            slot.lastseen = now
            request, queue_dfd = slot.queue.popleft()
            _schedule_coro(self._wait_for_download(slot, request, queue_dfd))
            # prevent burst if inter-request delays were configured
            if delay:
                self._process_queue(slot)
                break

    def _latercall(self, slot: Slot) -> None:
        # Fires once the inter-request delay elapsed; resume queue processing.
        slot.latercall = None
        self._process_queue(slot)

    async def _download(self, slot: Slot, request: Request) -> Response:
        # The order is very important for the following logic. Do not change!
        slot.transferring.add(request)
        try:
            # 1. Download the response
            response: Response = await self.handlers.download_request_async(request)
            # 2. Notify response_downloaded listeners about the recent download
            # before querying queue for next request
            self.signals.send_catch_log(
                signal=signals.response_downloaded,
                response=response,
                request=request,
                spider=self.crawler.spider,
            )
            return response
        except Exception:
            await _defer_sleep_async()
            raise
        finally:
            # 3. After response arrives, remove the request from transferring
            # state to free up the transferring slot so it can be used by the
            # following requests (perhaps those which came from the downloader
            # middleware itself)
            slot.transferring.remove(request)
            self._process_queue(slot)
            self.signals.send_catch_log(
                signal=signals.request_left_downloader,
                request=request,
                spider=self.crawler.spider,
            )

    async def _wait_for_download(
        self, slot: Slot, request: Request, queue_dfd: Deferred[Response]
    ) -> None:
        """Run the download and relay its outcome into *queue_dfd*."""
        try:
            response = await self._download(slot, request)
        except Exception:
            queue_dfd.errback(Failure())
        else:
            queue_dfd.callback(response)  # awaited in _enqueue_request()

    def close(self) -> None:
        """Stop the slot GC loop and cancel all pending slot delayed calls."""
        self._slot_gc_loop.stop()
        for slot in self.slots.values():
            slot.close()

    def _slot_gc(self, age: float = 60) -> None:
        """Drop slots that have had no activity for more than *age* seconds."""
        mintime = time() - age
        for key, slot in list(self.slots.items()):
            if not slot.active and slot.lastseen + slot.delay < mintime:
                self.slots.pop(key).close()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/webclient.py | scrapy/core/downloader/webclient.py | """Deprecated HTTP/1.0 helper classes used by HTTP10DownloadHandler."""
from __future__ import annotations
import warnings
from time import time
from typing import TYPE_CHECKING
from urllib.parse import urldefrag, urlparse, urlunparse
from twisted.internet import defer
from twisted.internet.protocol import ClientFactory
from twisted.web.http import HTTPClient
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes, to_unicode
if TYPE_CHECKING:
from scrapy import Request
class ScrapyHTTPPageGetter(HTTPClient):
    """Deprecated HTTP/1.0 client protocol used by HTTP10DownloadHandler."""

    delimiter = b"\n"

    def __init__(self):
        warnings.warn(
            "ScrapyHTTPPageGetter is deprecated and will be removed in a future Scrapy version.",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        super().__init__()

    def connectionMade(self):
        """Send the full HTTP request as soon as the connection is open."""
        self.headers = Headers()  # bucket for response headers
        # Method command
        self.sendCommand(self.factory.method, self.factory.path)
        # Headers
        for key, values in self.factory.headers.items():
            for value in values:
                self.sendHeader(key, value)
        self.endHeaders()
        # Body
        if self.factory.body is not None:
            self.transport.write(self.factory.body)

    def lineReceived(self, line):
        # Strip trailing whitespace (incl. \r) since delimiter is only \n.
        return HTTPClient.lineReceived(self, line.rstrip())

    def handleHeader(self, key, value):
        # Collect repeated headers as multi-value lists.
        self.headers.appendlist(key, value)

    def handleStatus(self, version, status, message):
        self.factory.gotStatus(version, status, message)

    def handleEndHeaders(self):
        self.factory.gotHeaders(self.headers)

    def connectionLost(self, reason):
        # Remember why the connection closed; handleResponse() may report it
        # for an incomplete download.
        self._connection_lost_reason = reason
        HTTPClient.connectionLost(self, reason)
        self.factory.noPage(reason)

    def handleResponse(self, response):
        if self.factory.method.upper() == b"HEAD":
            # HEAD responses carry no body.
            self.factory.page(b"")
        elif self.length is not None and self.length > 0:
            # Fewer bytes received than announced: report as an error.
            self.factory.noPage(self._connection_lost_reason)
        else:
            self.factory.page(response)
        self.transport.loseConnection()

    def timeout(self):
        self.transport.loseConnection()
        # transport cleanup needed for HTTPS connections
        if self.factory.url.startswith(b"https"):
            self.transport.stopProducing()
        self.factory.noPage(
            defer.TimeoutError(
                f"Getting {self.factory.url} took longer "
                f"than {self.factory.timeout} seconds."
            )
        )
# This class used to inherit from Twisted’s
# twisted.web.client.HTTPClientFactory. When that class was deprecated in
# Twisted (https://github.com/twisted/twisted/pull/643), we merged its
# non-overridden code into this class.
class ScrapyHTTPClientFactory(ClientFactory):
    """Deprecated HTTP/1.0 client factory used by HTTP10DownloadHandler."""

    protocol = ScrapyHTTPPageGetter

    # 1 while no result (page/noPage) has been delivered yet.
    waiting = 1
    noisy = False
    followRedirect = False
    afterFoundGet = False

    def _build_response(self, body, request):
        """Build a Scrapy Response from the collected status/headers/body."""
        request.meta["download_latency"] = self.headers_time - self.start_time
        status = int(self.status)
        headers = Headers(self.response_headers)
        respcls = responsetypes.from_args(headers=headers, url=self._url, body=body)
        return respcls(
            url=self._url,
            status=status,
            headers=headers,
            body=body,
            protocol=to_unicode(self.version),
        )

    def _set_connection_attributes(self, request):
        """Derive scheme/host/port/netloc/path from the request (or its proxy)."""
        proxy = request.meta.get("proxy")
        if proxy:
            # When proxying, connect to the proxy and request the absolute URL.
            proxy_parsed = urlparse(to_bytes(proxy, encoding="ascii"))
            self.scheme = proxy_parsed.scheme
            self.host = proxy_parsed.hostname
            self.port = proxy_parsed.port
            self.netloc = proxy_parsed.netloc
            if self.port is None:
                self.port = 443 if proxy_parsed.scheme == b"https" else 80
            self.path = self.url
        else:
            parsed = urlparse_cached(request)
            path_str = urlunparse(
                ("", "", parsed.path or "/", parsed.params, parsed.query, "")
            )
            self.path = to_bytes(path_str, encoding="ascii")
            assert parsed.hostname is not None
            self.host = to_bytes(parsed.hostname, encoding="ascii")
            self.port = parsed.port
            self.scheme = to_bytes(parsed.scheme, encoding="ascii")
            self.netloc = to_bytes(parsed.netloc, encoding="ascii")
            if self.port is None:
                self.port = 443 if self.scheme == b"https" else 80

    def __init__(self, request: Request, timeout: float = 180):
        warnings.warn(
            "ScrapyHTTPClientFactory is deprecated and will be removed in a future Scrapy version.",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        self._url: str = urldefrag(request.url)[0]
        # converting to bytes to comply to Twisted interface
        self.url: bytes = to_bytes(self._url, encoding="ascii")
        self.method: bytes = to_bytes(request.method, encoding="ascii")
        self.body: bytes | None = request.body or None
        self.headers: Headers = Headers(request.headers)
        self.response_headers: Headers | None = None
        self.timeout: float = request.meta.get("download_timeout") or timeout
        self.start_time: float = time()
        self.deferred: defer.Deferred[Response] = defer.Deferred().addCallback(
            self._build_response, request
        )
        # Fixes Twisted 11.1.0+ support as HTTPClientFactory is expected
        # to have _disconnectedDeferred. See Twisted r32329.
        # As Scrapy implements its own logic to handle redirects, there is
        # no need to add the callback _waitForDisconnect.
        # Specifically this avoids the AttributeError exception when
        # clientConnectionFailed method is called.
        self._disconnectedDeferred: defer.Deferred[None] = defer.Deferred()
        self._set_connection_attributes(request)
        # set Host header based on url
        self.headers.setdefault("Host", self.netloc)
        # set Content-Length based len of body
        if self.body is not None:
            self.headers["Content-Length"] = len(self.body)
            # just in case a broken http/1.1 decides to keep connection alive
            self.headers.setdefault("Connection", "close")
        # Content-Length must be specified in POST method even with no body
        elif self.method == b"POST":
            self.headers["Content-Length"] = 0

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self._url}>"

    def _cancelTimeout(self, result, timeoutCall):
        # Cancel the pending timeout once any result has arrived.
        if timeoutCall.active():
            timeoutCall.cancel()
        return result

    def buildProtocol(self, addr):
        p = ClientFactory.buildProtocol(self, addr)
        p.followRedirect = self.followRedirect
        p.afterFoundGet = self.afterFoundGet
        if self.timeout:
            from twisted.internet import reactor

            timeoutCall = reactor.callLater(self.timeout, p.timeout)
            self.deferred.addBoth(self._cancelTimeout, timeoutCall)
        return p

    def gotHeaders(self, headers):
        # Record when headers arrived; used to compute download_latency.
        self.headers_time = time()
        self.response_headers = headers

    def gotStatus(self, version, status, message):
        """
        Set the status of the request on us.

        @param version: The HTTP version.
        @type version: L{bytes}
        @param status: The HTTP status code, an integer represented as a
            bytestring.
        @type status: L{bytes}
        @param message: The HTTP status message.
        @type message: L{bytes}
        """
        self.version, self.status, self.message = version, status, message

    def page(self, page):
        """Deliver a successful page body to the deferred (first result wins)."""
        if self.waiting:
            self.waiting = 0
            self.deferred.callback(page)

    def noPage(self, reason):
        """Deliver a failure to the deferred (first result wins)."""
        if self.waiting:
            self.waiting = 0
            self.deferred.errback(reason)

    def clientConnectionFailed(self, _, reason):
        """
        When a connection attempt fails, the request cannot be issued. If no
        result has yet been provided to the result Deferred, provide the
        connection failure reason as an error result.
        """
        if self.waiting:
            self.waiting = 0
            # If the connection attempt failed, there is nothing more to
            # disconnect, so just fire that Deferred now.
            self._disconnectedDeferred.callback(None)
            self.deferred.errback(reason)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/datauri.py | scrapy/core/downloader/handlers/datauri.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from w3lib.url import parse_data_uri
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.http import Response, TextResponse
from scrapy.responsetypes import responsetypes
if TYPE_CHECKING:
from scrapy import Request
class DataURIDownloadHandler(BaseDownloadHandler):
    """Build responses for ``data:`` URIs without any network access."""

    async def download_request(self, request: Request) -> Response:
        parsed = parse_data_uri(request.url)
        response_cls = responsetypes.from_mimetype(parsed.media_type)
        response_kwargs: dict[str, Any] = {}
        # Only text responses honor the charset parameter of the media type.
        if issubclass(response_cls, TextResponse):
            if parsed.media_type.split("/")[0] == "text":
                response_kwargs["encoding"] = parsed.media_type_parameters.get(
                    "charset"
                )
        return response_cls(url=request.url, body=parsed.data, **response_kwargs)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/file.py | scrapy/core/downloader/handlers/file.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from w3lib.url import file_uri_to_path
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.responsetypes import responsetypes
if TYPE_CHECKING:
from scrapy import Request
from scrapy.http import Response
class FileDownloadHandler(BaseDownloadHandler):
    """Serve ``file://`` URLs by reading the local filesystem."""

    async def download_request(self, request: Request) -> Response:
        local_path = file_uri_to_path(request.url)
        data = Path(local_path).read_bytes()
        # Pick the response class from the file name and content.
        response_cls = responsetypes.from_args(filename=local_path, body=data)
        return response_cls(url=request.url, body=data)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/http.py | scrapy/core/downloader/handlers/http.py | import warnings
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import (
HTTP11DownloadHandler as HTTPDownloadHandler,
)
from scrapy.exceptions import ScrapyDeprecationWarning
# Warn at import time: this module survives only to re-export the
# deprecated HTTPDownloadHandler alias of HTTP11DownloadHandler.
warnings.warn(
    "The scrapy.core.downloader.handlers.http module is deprecated,"
    " please import scrapy.core.downloader.handlers.http11.HTTP11DownloadHandler"
    " instead of its deprecated alias scrapy.core.downloader.handlers.http.HTTPDownloadHandler",
    ScrapyDeprecationWarning,
    stacklevel=2,
)

# Names re-exported for backward compatibility.
__all__ = [
    "HTTP10DownloadHandler",
    "HTTPDownloadHandler",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/ftp.py | scrapy/core/downloader/handlers/ftp.py | """
An asynchronous FTP file download handler for scrapy which somehow emulates an http response.
FTP connection parameters are passed using the request meta field:
- ftp_user (required)
- ftp_password (required)
- ftp_passive (by default, enabled) sets FTP connection passive mode
- ftp_local_filename
- If not given, file data will come in the response.body, as a normal scrapy Response,
which will imply that the entire file will be on memory.
- if given, file data will be saved in a local file with the given name
This helps when downloading very big files to avoid memory issues. In addition, for
convenience the local file name will also be given in the response body.
The status of the built html response will be, by default
- 200 in case of success
- 404 in case specified file was not found in the server (ftp code 550)
or raise corresponding ftp exception otherwise
The mapping from server FTP command return codes to HTML response codes is defined in the
CODE_MAPPING attribute of the handler class. The key 'default' is used for any code
that is not explicitly present among the map keys. You may need to overwrite this
mapping if you want a different behaviour than the default.
In case of status 200 request, response.headers will come with two keys:
'Local Filename' - with the value of the local filename if given
'Size' - with size of the downloaded data
"""
from __future__ import annotations
import re
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, BinaryIO
from urllib.parse import unquote
from twisted.internet.protocol import ClientCreator, Protocol
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.http import Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
from twisted.protocols.ftp import FTPClient
from scrapy import Request
from scrapy.crawler import Crawler
class ReceivedDataProtocol(Protocol):
    """Twisted protocol that accumulates downloaded FTP data.

    Bytes go to a local file when *filename* is given, otherwise to an
    in-memory buffer.
    """

    def __init__(self, filename: bytes | None = None):
        self.__filename: bytes | None = filename
        if filename:
            self.body: BinaryIO = Path(filename.decode()).open("wb")
        else:
            self.body = BytesIO()
        self.size: int = 0

    def dataReceived(self, data: bytes) -> None:
        self.size += len(data)
        self.body.write(data)

    @property
    def filename(self) -> bytes | None:
        return self.__filename

    def close(self) -> None:
        # Close a real file; rewind the in-memory buffer for reading.
        if self.filename:
            self.body.close()
        else:
            self.body.seek(0)
# Extracts the numeric FTP reply code from a CommandFailed error message.
_CODE_RE = re.compile(r"\d+")
class FTPDownloadHandler(BaseDownloadHandler):
    """Download handler for ``ftp://`` URLs.

    See the module docstring for the supported ``request.meta`` keys and
    how FTP reply codes are mapped to HTTP status codes.
    """

    # Maps FTP reply codes (as strings) to HTTP status codes; "default"
    # covers any code not listed explicitly.
    CODE_MAPPING: dict[str, int] = {
        "550": 404,
        "default": 503,
    }

    def __init__(self, crawler: Crawler):
        super().__init__(crawler)
        self.default_user = crawler.settings["FTP_USER"]
        self.default_password = crawler.settings["FTP_PASSWORD"]
        self.passive_mode = crawler.settings["FTP_PASSIVE_MODE"]

    async def download_request(self, request: Request) -> Response:
        """Retrieve the file referenced by *request* over FTP."""
        from twisted.internet import reactor
        from twisted.protocols.ftp import CommandFailed, FTPClient

        parsed_url = urlparse_cached(request)
        user = request.meta.get("ftp_user", self.default_user)
        password = request.meta.get("ftp_password", self.default_password)
        passive_mode = (
            1 if bool(request.meta.get("ftp_passive", self.passive_mode)) else 0
        )
        creator = ClientCreator(
            reactor, FTPClient, user, password, passive=passive_mode
        )
        client: FTPClient = await maybe_deferred_to_future(
            creator.connectTCP(parsed_url.hostname, parsed_url.port or 21)
        )
        filepath = unquote(parsed_url.path)
        protocol = ReceivedDataProtocol(request.meta.get("ftp_local_filename"))
        try:
            await maybe_deferred_to_future(client.retrieveFile(filepath, protocol))
        except CommandFailed as e:
            # Translate a failed FTP command into an HTTP-like error response.
            message = str(e)
            if m := _CODE_RE.search(message):
                ftpcode = m.group()
                httpcode = self.CODE_MAPPING.get(ftpcode, self.CODE_MAPPING["default"])
                return Response(url=request.url, status=httpcode, body=message.encode())
            raise
        protocol.close()
        headers = {"local filename": protocol.filename or b"", "size": protocol.size}
        # When saving to a local file, the response body is that filename.
        body = protocol.filename or protocol.body.read()
        respcls = responsetypes.from_args(url=request.url, body=body)
        # hints for Headers-related types may need to be fixed to not use AnyStr
        return respcls(url=request.url, status=200, body=body, headers=headers)  # type: ignore[arg-type]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/http11.py | scrapy/core/downloader/handlers/http11.py | """Download handlers for http and https schemes"""
from __future__ import annotations
import ipaddress
import logging
import re
from contextlib import suppress
from io import BytesIO
from time import time
from typing import TYPE_CHECKING, Any, TypedDict, TypeVar, cast
from urllib.parse import urldefrag, urlparse
from twisted.internet import ssl
from twisted.internet.defer import CancelledError, Deferred, succeed
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.internet.protocol import Factory, Protocol, connectionDone
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
Agent,
HTTPConnectionPool,
ResponseDone,
ResponseFailed,
)
from twisted.web.client import Response as TxResponse
from twisted.web.http import PotentialDataLoss, _DataLoss
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import UNKNOWN_LENGTH, IBodyProducer, IPolicyForHTTPS, IResponse
from zope.interface import implementer
from scrapy import Request, signals
from scrapy.core.downloader.contextfactory import load_context_factory_from_settings
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.exceptions import StopDownload
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes, to_unicode
from scrapy.utils.url import add_http_if_no_scheme
if TYPE_CHECKING:
from twisted.internet.base import ReactorBase
from twisted.internet.interfaces import IConsumer
# typing.NotRequired requires Python 3.11
from typing_extensions import NotRequired
from scrapy.crawler import Crawler
logger = logging.getLogger(__name__)
# Module-level generic type variable.
_T = TypeVar("_T")
class _ResultT(TypedDict):
    """Intermediate result of an HTTP/1.1 download, passed between callbacks."""

    # Raw twisted.web response object.
    txresponse: TxResponse
    # Response body bytes collected so far.
    body: bytes
    # Flags to attach to the final Response, if any — verify against callers.
    flags: list[str] | None
    # TLS certificate of the server, when available.
    certificate: ssl.Certificate | None
    # Remote peer IP address, when available.
    ip_address: ipaddress.IPv4Address | ipaddress.IPv6Address | None
    # Present only when the download ended with an error.
    failure: NotRequired[Failure | None]
class HTTP11DownloadHandler(BaseDownloadHandler):
    """Download handler for the http and https schemes (HTTP/1.1)."""

    def __init__(self, crawler: Crawler):
        super().__init__(crawler)
        self._crawler = crawler
        from twisted.internet import reactor

        # Persistent connection pool shared by all ScrapyAgent instances.
        self._pool: HTTPConnectionPool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = crawler.settings.getint(
            "CONCURRENT_REQUESTS_PER_DOMAIN"
        )
        self._pool._factory.noisy = False
        self._contextFactory: IPolicyForHTTPS = load_context_factory_from_settings(
            crawler.settings, crawler
        )
        self._default_maxsize: int = crawler.settings.getint("DOWNLOAD_MAXSIZE")
        self._default_warnsize: int = crawler.settings.getint("DOWNLOAD_WARNSIZE")
        self._fail_on_dataloss: bool = crawler.settings.getbool(
            "DOWNLOAD_FAIL_ON_DATALOSS"
        )
        # Seconds to wait in close() before forcing pool shutdown.
        self._disconnect_timeout: int = 1

    async def download_request(self, request: Request) -> Response:
        """Download *request* through a freshly configured ScrapyAgent."""
        if hasattr(self._crawler.spider, "download_maxsize"):  # pragma: no cover
            warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
        if hasattr(self._crawler.spider, "download_warnsize"):  # pragma: no cover
            warn_on_deprecated_spider_attribute(
                "download_warnsize", "DOWNLOAD_WARNSIZE"
            )
        # Deprecated spider attributes, when present, override the settings.
        agent = ScrapyAgent(
            contextFactory=self._contextFactory,
            pool=self._pool,
            maxsize=getattr(
                self._crawler.spider, "download_maxsize", self._default_maxsize
            ),
            warnsize=getattr(
                self._crawler.spider, "download_warnsize", self._default_warnsize
            ),
            fail_on_dataloss=self._fail_on_dataloss,
            crawler=self._crawler,
        )
        return await maybe_deferred_to_future(agent.download_request(request))

    async def close(self) -> None:
        """Close pooled connections, forcing completion after a short timeout."""
        from twisted.internet import reactor

        d: Deferred[None] = self._pool.closeCachedConnections()
        # closeCachedConnections will hang on network or server issues, so
        # we'll manually timeout the deferred.
        #
        # Twisted issue addressing this problem can be found here:
        # https://github.com/twisted/twisted/issues/7738
        #
        # closeCachedConnections doesn't handle external errbacks, so we'll
        # issue a callback after `_disconnect_timeout` seconds.
        #
        # See also https://github.com/scrapy/scrapy/issues/2653
        delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])
        try:
            await maybe_deferred_to_future(d)
        finally:
            if delayed_call.active():
                delayed_call.cancel()
class TunnelError(Exception):
    """An HTTP CONNECT tunnel could not be established by the proxy.

    Raised by TunnelingTCP4ClientEndpoint when the proxy answers the
    CONNECT request with a non-200 status.
    """
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
    """An endpoint that tunnels through proxies to allow HTTPS downloads. To
    accomplish that, this endpoint sends an HTTP CONNECT to the proxy.

    The HTTP CONNECT is always sent when using this endpoint, I think this could
    be improved as the CONNECT will be redundant if the connection associated
    with this endpoint comes from the pool and a CONNECT has already been issued
    for it.
    """

    # Maximum number of proxy response bytes echoed in error messages.
    _truncatedLength = 1000
    _responseAnswer = (
        r"HTTP/1\.. (?P<status>\d{3})(?P<reason>.{," + str(_truncatedLength) + r"})"
    )
    _responseMatcher = re.compile(_responseAnswer.encode())

    def __init__(
        self,
        reactor: ReactorBase,
        host: str,
        port: int,
        proxyConf: tuple[str, int, bytes | None],
        contextFactory: IPolicyForHTTPS,
        timeout: float = 30,
        bindAddress: tuple[str, int] | None = None,
    ):
        proxyHost, proxyPort, self._proxyAuthHeader = proxyConf
        # The TCP connection itself goes to the proxy, not the target host.
        super().__init__(reactor, proxyHost, proxyPort, timeout, bindAddress)
        # Fires with the protocol once the CONNECT tunnel is established.
        self._tunnelReadyDeferred: Deferred[Protocol] = Deferred()
        self._tunneledHost: str = host
        self._tunneledPort: int = port
        self._contextFactory: IPolicyForHTTPS = contextFactory
        # Accumulates the proxy's response to the CONNECT request.
        self._connectBuffer: bytearray = bytearray()

    def requestTunnel(self, protocol: Protocol) -> Protocol:
        """Asks the proxy to open a tunnel."""
        assert protocol.transport
        tunnelReq = tunnel_request_data(
            self._tunneledHost, self._tunneledPort, self._proxyAuthHeader
        )
        protocol.transport.write(tunnelReq)
        # Intercept incoming data until the proxy has answered the CONNECT.
        self._protocolDataReceived = protocol.dataReceived
        protocol.dataReceived = self.processProxyResponse  # type: ignore[method-assign]
        self._protocol = protocol
        return protocol

    def processProxyResponse(self, data: bytes) -> None:
        """Processes the response from the proxy. If the tunnel is successfully
        created, notifies the client that we are ready to send requests. If not
        raises a TunnelError.
        """
        assert self._protocol.transport
        self._connectBuffer += data
        # make sure that enough (all) bytes are consumed
        # and that we've got all HTTP headers (ending with a blank line)
        # from the proxy so that we don't send those bytes to the TLS layer
        #
        # see https://github.com/scrapy/scrapy/issues/2491
        if b"\r\n\r\n" not in self._connectBuffer:
            return
        # Restore the original handler before handing the socket to TLS.
        self._protocol.dataReceived = self._protocolDataReceived  # type: ignore[method-assign]
        respm = TunnelingTCP4ClientEndpoint._responseMatcher.match(self._connectBuffer)
        if respm and int(respm.group("status")) == 200:
            # set proper Server Name Indication extension
            sslOptions = self._contextFactory.creatorForNetloc(  # type: ignore[call-arg,misc]
                self._tunneledHost, self._tunneledPort
            )
            self._protocol.transport.startTLS(sslOptions, self._protocolFactory)
            self._tunnelReadyDeferred.callback(self._protocol)
        else:
            extra: Any
            if respm:
                extra = {
                    "status": int(respm.group("status")),
                    "reason": respm.group("reason").strip(),
                }
            else:
                extra = data[: self._truncatedLength]
            self._tunnelReadyDeferred.errback(
                TunnelError(
                    "Could not open CONNECT tunnel with proxy "
                    f"{self._host}:{self._port} [{extra!r}]"
                )
            )

    def connectFailed(self, reason: Failure) -> None:
        """Propagates the errback to the appropriate deferred."""
        self._tunnelReadyDeferred.errback(reason)

    def connect(self, protocolFactory: Factory) -> Deferred[Protocol]:
        # Callers wait for the tunnel, not for the raw TCP connection.
        self._protocolFactory = protocolFactory
        connectDeferred = super().connect(protocolFactory)
        connectDeferred.addCallback(self.requestTunnel)
        connectDeferred.addErrback(self.connectFailed)
        return self._tunnelReadyDeferred
def tunnel_request_data(
    host: str, port: int, proxy_auth_header: bytes | None = None
) -> bytes:
    r"""
    Return binary content of a CONNECT request.

    >>> from scrapy.utils.python import to_unicode as s
    >>> s(tunnel_request_data("example.com", 8080))
    'CONNECT example.com:8080 HTTP/1.1\r\nHost: example.com:8080\r\n\r\n'
    >>> s(tunnel_request_data("example.com", 8080, b"123"))
    'CONNECT example.com:8080 HTTP/1.1\r\nHost: example.com:8080\r\nProxy-Authorization: 123\r\n\r\n'
    >>> s(tunnel_request_data(b"example.com", "8090"))
    'CONNECT example.com:8090 HTTP/1.1\r\nHost: example.com:8090\r\n\r\n'
    """
    netloc = to_bytes(host, encoding="ascii") + b":" + to_bytes(str(port))
    lines = [
        b"CONNECT " + netloc + b" HTTP/1.1",
        b"Host: " + netloc,
    ]
    if proxy_auth_header:
        lines.append(b"Proxy-Authorization: " + proxy_auth_header)
    # Terminate the header section with a blank line.
    return b"\r\n".join(lines) + b"\r\n\r\n"
class TunnelingAgent(Agent):
    """An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS
    downloads. It may look strange that we have chosen to subclass Agent and not
    ProxyAgent but consider that after the tunnel is opened the proxy is
    transparent to the client; thus the agent should behave like there is no
    proxy involved.
    """

    def __init__(
        self,
        *,
        reactor: ReactorBase,
        proxyConf: tuple[str, int, bytes | None],
        contextFactory: IPolicyForHTTPS,
        connectTimeout: float | None = None,
        bindAddress: bytes | None = None,
        pool: HTTPConnectionPool | None = None,
    ):
        super().__init__(reactor, contextFactory, connectTimeout, bindAddress, pool)
        # (host, port, auth header) of the proxy to tunnel through.
        self._proxyConf: tuple[str, int, bytes | None] = proxyConf
        self._contextFactory: IPolicyForHTTPS = contextFactory

    def _getEndpoint(self, uri: URI) -> TunnelingTCP4ClientEndpoint:
        # Every connection is made through a CONNECT tunnel to the proxy.
        return TunnelingTCP4ClientEndpoint(
            reactor=self._reactor,
            host=uri.host,
            port=uri.port,
            proxyConf=self._proxyConf,
            contextFactory=self._contextFactory,
            timeout=self._endpointFactory._connectTimeout,
            bindAddress=self._endpointFactory._bindAddress,
        )

    def _requestWithEndpoint(
        self,
        key: Any,
        endpoint: TCP4ClientEndpoint,
        method: bytes,
        parsedURI: URI,
        headers: TxHeaders | None,
        bodyProducer: IBodyProducer | None,
        requestPath: bytes,
    ) -> Deferred[IResponse]:
        # proxy host and port are required for HTTP pool `key`
        # otherwise, same remote host connection request could reuse
        # a cached tunneled connection to a different proxy
        key += self._proxyConf
        return super()._requestWithEndpoint(
            key=key,
            endpoint=endpoint,
            method=method,
            parsedURI=parsedURI,
            headers=headers,
            bodyProducer=bodyProducer,
            requestPath=requestPath,
        )
class ScrapyProxyAgent(Agent):
    """Agent that sends plain HTTP requests through an HTTP proxy."""

    def __init__(
        self,
        reactor: ReactorBase,
        proxyURI: bytes,
        connectTimeout: float | None = None,
        bindAddress: bytes | None = None,
        pool: HTTPConnectionPool | None = None,
    ):
        super().__init__(
            reactor=reactor,
            connectTimeout=connectTimeout,
            bindAddress=bindAddress,
            pool=pool,
        )
        self._proxyURI: URI = URI.fromBytes(proxyURI)

    def request(
        self,
        method: bytes,
        uri: bytes,
        headers: TxHeaders | None = None,
        bodyProducer: IBodyProducer | None = None,
    ) -> Deferred[IResponse]:
        """
        Issue a new request via the configured proxy.
        """
        # Cache *all* connections under the same key, since we are only
        # connecting to a single destination, the proxy:
        return self._requestWithEndpoint(
            key=(b"http-proxy", self._proxyURI.host, self._proxyURI.port),
            endpoint=self._getEndpoint(self._proxyURI),
            method=method,
            parsedURI=URI.fromBytes(uri),
            headers=headers,
            bodyProducer=bodyProducer,
            requestPath=uri,
        )
class ScrapyAgent:
    """Drive a single HTTP/1.1 download through a twisted web ``Agent``.

    Selects the right agent class (direct, HTTP proxy, or CONNECT tunnel)
    per request, enforces download timeout and max/warn size limits, and
    converts the twisted response into a Scrapy :class:`Response`.
    """

    # Agent classes used; exposed as class attributes so subclasses/tests
    # can substitute them.
    _Agent = Agent
    _ProxyAgent = ScrapyProxyAgent
    _TunnelingAgent = TunnelingAgent

    def __init__(
        self,
        *,
        contextFactory: IPolicyForHTTPS,
        connectTimeout: float = 10,
        bindAddress: bytes | None = None,
        pool: HTTPConnectionPool | None = None,
        maxsize: int = 0,
        warnsize: int = 0,
        fail_on_dataloss: bool = True,
        crawler: Crawler,
    ):
        self._contextFactory: IPolicyForHTTPS = contextFactory
        self._connectTimeout: float = connectTimeout
        self._bindAddress: bytes | None = bindAddress
        self._pool: HTTPConnectionPool | None = pool
        self._maxsize: int = maxsize
        self._warnsize: int = warnsize
        self._fail_on_dataloss: bool = fail_on_dataloss
        # Set in _cb_bodyready(); kept so _cb_timeout() can stop the
        # producer on timeout.
        self._txresponse: TxResponse | None = None
        self._crawler: Crawler = crawler

    def _get_agent(self, request: Request, timeout: float) -> Agent:
        """Return the agent to use for *request*.

        - HTTPS through a proxy -> TunnelingAgent (CONNECT tunnel)
        - HTTP through a proxy  -> ScrapyProxyAgent
        - no proxy              -> plain Agent
        """
        from twisted.internet import reactor

        bindaddress = request.meta.get("bindaddress") or self._bindAddress
        proxy = request.meta.get("proxy")
        if proxy:
            proxy = add_http_if_no_scheme(proxy)
            proxy_parsed = urlparse(proxy)
            proxy_host = proxy_parsed.hostname
            proxy_port = proxy_parsed.port
            if not proxy_port:
                # default proxy ports by proxy URL scheme
                proxy_port = 443 if proxy_parsed.scheme == "https" else 80
            if urlparse_cached(request).scheme == "https":
                assert proxy_host is not None
                proxyAuth = request.headers.get(b"Proxy-Authorization", None)
                proxyConf = (proxy_host, proxy_port, proxyAuth)
                return self._TunnelingAgent(
                    reactor=reactor,
                    proxyConf=proxyConf,
                    contextFactory=self._contextFactory,
                    connectTimeout=timeout,
                    bindAddress=bindaddress,
                    pool=self._pool,
                )
            return self._ProxyAgent(
                reactor=reactor,
                proxyURI=to_bytes(proxy, encoding="ascii"),
                connectTimeout=timeout,
                bindAddress=bindaddress,
                pool=self._pool,
            )

        return self._Agent(
            reactor=reactor,
            contextFactory=self._contextFactory,
            connectTimeout=timeout,
            bindAddress=bindaddress,
            pool=self._pool,
        )

    def download_request(self, request: Request) -> Deferred[Response]:
        """Start the download and return a Deferred firing with the Response.

        Callback chain: agent.request -> latency bookkeeping ->
        body collection (_cb_bodyready) -> Response building (_cb_bodydone),
        with a reactor-scheduled cancel enforcing the download timeout.
        """
        from twisted.internet import reactor

        timeout = request.meta.get("download_timeout") or self._connectTimeout
        agent = self._get_agent(request, timeout)

        # request details
        url = urldefrag(request.url)[0]
        method = to_bytes(request.method)
        headers = TxHeaders(request.headers)
        if isinstance(agent, self._TunnelingAgent):
            # the header was already consumed for the CONNECT handshake;
            # don't leak it to the remote server
            headers.removeHeader(b"Proxy-Authorization")
        bodyproducer = _RequestBodyProducer(request.body) if request.body else None
        start_time = time()
        d: Deferred[IResponse] = agent.request(
            method,
            to_bytes(url, encoding="ascii"),
            headers,
            cast("IBodyProducer", bodyproducer),
        )
        # set download latency
        d.addCallback(self._cb_latency, request, start_time)
        # response body is ready to be consumed
        d2: Deferred[_ResultT] = d.addCallback(self._cb_bodyready, request)
        d3: Deferred[Response] = d2.addCallback(self._cb_bodydone, request, url)
        # check download timeout
        self._timeout_cl = reactor.callLater(timeout, d3.cancel)
        d3.addBoth(self._cb_timeout, request, url, timeout)
        return d3

    def _cb_timeout(self, result: _T, request: Request, url: str, timeout: float) -> _T:
        """Pass through *result*, or raise TimeoutError if the timer fired."""
        if self._timeout_cl.active():
            # finished in time: cancel the pending timeout call
            self._timeout_cl.cancel()
            return result
        # needed for HTTPS requests, otherwise _ResponseReader doesn't
        # receive connectionLost()
        if self._txresponse:
            self._txresponse._transport.stopProducing()

        raise TxTimeoutError(f"Getting {url} took longer than {timeout} seconds.")

    def _cb_latency(self, result: _T, request: Request, start_time: float) -> _T:
        # record time from request start to response headers in request.meta
        request.meta["download_latency"] = time() - start_time
        return result

    @staticmethod
    def _headers_from_twisted_response(response: TxResponse) -> Headers:
        """Convert twisted response headers (plus Content-Length) to Scrapy Headers."""
        headers = Headers()
        if response.length != UNKNOWN_LENGTH:
            headers[b"Content-Length"] = str(response.length).encode()
        headers.update(response.headers.getAllRawHeaders())
        return headers

    def _cb_bodyready(
        self, txresponse: TxResponse, request: Request
    ) -> _ResultT | Deferred[_ResultT]:
        """Fire headers_received, enforce size limits, and collect the body.

        Returns either a result dict directly (empty/stopped bodies) or a
        Deferred fired by :class:`_ResponseReader` once the body is complete.
        """
        headers_received_result = self._crawler.signals.send_catch_log(
            signal=signals.headers_received,
            headers=self._headers_from_twisted_response(txresponse),
            body_length=txresponse.length,
            request=request,
            spider=self._crawler.spider,
        )
        for handler, result in headers_received_result:
            if isinstance(result, Failure) and isinstance(result.value, StopDownload):
                # a headers_received handler raised StopDownload: close the
                # connection and return an empty-body result
                logger.debug(
                    "Download stopped for %(request)s from signal handler %(handler)s",
                    {"request": request, "handler": handler.__qualname__},
                )
                txresponse._transport.stopProducing()
                txresponse._transport.loseConnection()
                return {
                    "txresponse": txresponse,
                    "body": b"",
                    "flags": ["download_stopped"],
                    "certificate": None,
                    "ip_address": None,
                    "failure": result if result.value.fail else None,
                }

        # deliverBody hangs for responses without body
        if txresponse.length == 0:
            return {
                "txresponse": txresponse,
                "body": b"",
                "flags": None,
                "certificate": None,
                "ip_address": None,
            }

        # per-request meta overrides the handler-wide defaults
        maxsize = request.meta.get("download_maxsize", self._maxsize)
        warnsize = request.meta.get("download_warnsize", self._warnsize)
        expected_size = txresponse.length if txresponse.length != UNKNOWN_LENGTH else -1
        fail_on_dataloss = request.meta.get(
            "download_fail_on_dataloss", self._fail_on_dataloss
        )

        if maxsize and expected_size > maxsize:
            # abort early based on the declared Content-Length
            warning_msg = (
                "Cancelling download of %(url)s: expected response "
                "size (%(size)s) larger than download max size (%(maxsize)s)."
            )
            warning_args = {
                "url": request.url,
                "size": expected_size,
                "maxsize": maxsize,
            }

            logger.warning(warning_msg, warning_args)

            txresponse._transport.loseConnection()
            raise CancelledError(warning_msg % warning_args)

        if warnsize and expected_size > warnsize:
            logger.warning(
                "Expected response size (%(size)s) larger than "
                "download warn size (%(warnsize)s) in request %(request)s.",
                {"size": expected_size, "warnsize": warnsize, "request": request},
            )

        def _cancel(_: Any) -> None:
            # Abort connection immediately.
            txresponse._transport._producer.abortConnection()

        d: Deferred[_ResultT] = Deferred(_cancel)
        txresponse.deliverBody(
            _ResponseReader(
                finished=d,
                txresponse=txresponse,
                request=request,
                maxsize=maxsize,
                warnsize=warnsize,
                fail_on_dataloss=fail_on_dataloss,
                crawler=self._crawler,
            )
        )

        # save response for timeouts
        self._txresponse = txresponse

        return d

    def _cb_bodydone(
        self, result: _ResultT, request: Request, url: str
    ) -> Response | Failure:
        """Build the final Scrapy Response (or Failure) from the result dict."""
        headers = self._headers_from_twisted_response(result["txresponse"])
        respcls = responsetypes.from_args(headers=headers, url=url, body=result["body"])
        try:
            version = result["txresponse"].version
            protocol = f"{to_unicode(version[0])}/{version[1]}.{version[2]}"
        except (AttributeError, TypeError, IndexError):
            # version tuple missing or malformed; leave protocol unknown
            protocol = None
        response = respcls(
            url=url,
            status=int(result["txresponse"].code),
            headers=headers,
            body=result["body"],
            flags=result["flags"],
            certificate=result["certificate"],
            ip_address=result["ip_address"],
            protocol=protocol,
        )
        if result.get("failure"):
            assert result["failure"]
            # StopDownload(fail=True): attach the partial response to the
            # exception and propagate it as a Failure
            result["failure"].value.response = response
            return result["failure"]
        return response
@implementer(IBodyProducer)
class _RequestBodyProducer:
    """Minimal ``IBodyProducer`` that emits a fixed byte string in one write."""

    def __init__(self, body: bytes):
        # ``length`` is part of the IBodyProducer contract (known body size).
        self.length = len(body)
        self.body = body

    def startProducing(self, consumer: IConsumer) -> Deferred[None]:
        """Deliver the entire body immediately and signal completion."""
        consumer.write(self.body)
        return succeed(None)

    def pauseProducing(self) -> None:
        """No-op: production finishes within startProducing()."""

    def stopProducing(self) -> None:
        """No-op: there is nothing in flight to stop."""
class _ResponseReader(Protocol):
    """Twisted protocol that buffers a response body delivered via
    ``TxResponse.deliverBody()``.

    Fires *finished* with a result dict (txresponse, body, flags,
    certificate, ip_address, failure) once the body is complete, stopped,
    or aborted. Sends the ``bytes_received`` signal for every chunk and
    enforces the download max/warn size limits.
    """

    def __init__(
        self,
        finished: Deferred[_ResultT],
        txresponse: TxResponse,
        request: Request,
        maxsize: int,
        warnsize: int,
        fail_on_dataloss: bool,
        crawler: Crawler,
    ):
        self._finished: Deferred[_ResultT] = finished
        self._txresponse: TxResponse = txresponse
        self._request: Request = request
        self._bodybuf: BytesIO = BytesIO()
        self._maxsize: int = maxsize
        self._warnsize: int = warnsize
        self._fail_on_dataloss: bool = fail_on_dataloss
        # only log the dataloss warning once per reader
        self._fail_on_dataloss_warned: bool = False
        # only log the warnsize warning once per reader
        self._reached_warnsize: bool = False
        self._bytes_received: int = 0
        self._certificate: ssl.Certificate | None = None
        self._ip_address: ipaddress.IPv4Address | ipaddress.IPv6Address | None = None
        self._crawler: Crawler = crawler

    def _finish_response(
        self, flags: list[str] | None = None, failure: Failure | None = None
    ) -> None:
        """Fire the finished Deferred with the collected body and metadata."""
        self._finished.callback(
            {
                "txresponse": self._txresponse,
                "body": self._bodybuf.getvalue(),
                "flags": flags,
                "certificate": self._certificate,
                "ip_address": self._ip_address,
                "failure": failure,
            }
        )

    def connectionMade(self) -> None:
        assert self.transport
        if self._certificate is None:
            # plain-HTTP transports have no TLS certificate
            with suppress(AttributeError):
                self._certificate = ssl.Certificate(
                    self.transport._producer.getPeerCertificate()
                )

        if self._ip_address is None:
            self._ip_address = ipaddress.ip_address(
                self.transport._producer.getPeer().host
            )

    def dataReceived(self, bodyBytes: bytes) -> None:
        # This maybe called several times after cancel was called with buffered data.
        if self._finished.called:
            return
        assert self.transport

        self._bodybuf.write(bodyBytes)
        self._bytes_received += len(bodyBytes)

        bytes_received_result = self._crawler.signals.send_catch_log(
            signal=signals.bytes_received,
            data=bodyBytes,
            request=self._request,
            spider=self._crawler.spider,
        )
        for handler, result in bytes_received_result:
            if isinstance(result, Failure) and isinstance(result.value, StopDownload):
                logger.debug(
                    "Download stopped for %(request)s from signal handler %(handler)s",
                    {"request": self._request, "handler": handler.__qualname__},
                )
                self.transport.stopProducing()
                self.transport.loseConnection()
                failure = result if result.value.fail else None
                self._finish_response(flags=["download_stopped"], failure=failure)
                # Bug fix: without this return, execution fell through to the
                # maxsize check below, which could cancel/fire the already
                # called Deferred (AlreadyCalledError), and further handlers
                # could call _finish_response() a second time.
                return

        if self._maxsize and self._bytes_received > self._maxsize:
            logger.warning(
                "Received (%(bytes)s) bytes larger than download "
                "max size (%(maxsize)s) in request %(request)s.",
                {
                    "bytes": self._bytes_received,
                    "maxsize": self._maxsize,
                    "request": self._request,
                },
            )
            # Clear buffer earlier to avoid keeping data in memory for a long time.
            self._bodybuf.truncate(0)
            self._finished.cancel()

        if (
            self._warnsize
            and self._bytes_received > self._warnsize
            and not self._reached_warnsize
        ):
            self._reached_warnsize = True
            logger.warning(
                "Received more bytes than download "
                "warn size (%(warnsize)s) in request %(request)s.",
                {"warnsize": self._warnsize, "request": self._request},
            )

    def connectionLost(self, reason: Failure = connectionDone) -> None:
        if self._finished.called:
            return

        if reason.check(ResponseDone):
            # clean end of response
            self._finish_response()
            return

        if reason.check(PotentialDataLoss):
            # server closed without Content-Length: body may be incomplete
            self._finish_response(flags=["partial"])
            return

        if reason.check(ResponseFailed) and any(
            r.check(_DataLoss) for r in reason.value.reasons
        ):
            if not self._fail_on_dataloss:
                self._finish_response(flags=["dataloss"])
                return

            if not self._fail_on_dataloss_warned:
                logger.warning(
                    "Got data loss in %s. If you want to process broken "
                    "responses set the setting DOWNLOAD_FAIL_ON_DATALOSS = False"
                    " -- This message won't be shown in further requests",
                    self._txresponse.request.absoluteURI.decode(),
                )
                self._fail_on_dataloss_warned = True

        self._finished.errback(reason)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/http10.py | scrapy/core/downloader/handlers/http10.py | """Download handlers for http and https schemes"""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import to_unicode
if TYPE_CHECKING:
from twisted.internet.interfaces import IConnector
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
from scrapy.core.downloader.webclient import ScrapyHTTPClientFactory
from scrapy.crawler import Crawler
from scrapy.http import Response
from scrapy.settings import BaseSettings
class HTTP10DownloadHandler:
    """Deprecated HTTP/1.0 download handler based on ScrapyHTTPClientFactory.

    Emits a ScrapyDeprecationWarning on instantiation; kept only for
    backward compatibility.
    """

    # instantiate eagerly at startup (not on first request)
    lazy = False

    def __init__(self, settings: BaseSettings, crawler: Crawler):
        warnings.warn(
            "HTTP10DownloadHandler is deprecated and will be removed in a future Scrapy version.",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        # factory classes are configurable via settings
        self.HTTPClientFactory: type[ScrapyHTTPClientFactory] = load_object(
            settings["DOWNLOADER_HTTPCLIENTFACTORY"]
        )
        self.ClientContextFactory: type[ScrapyClientContextFactory] = load_object(
            settings["DOWNLOADER_CLIENTCONTEXTFACTORY"]
        )
        self._settings: BaseSettings = settings
        self._crawler: Crawler = crawler

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        return cls(crawler.settings, crawler)

    async def download_request(self, request: Request) -> Response:
        """Download *request* and return the response."""
        factory = self.HTTPClientFactory(request)
        self._connect(factory)
        return await maybe_deferred_to_future(factory.deferred)

    def _connect(self, factory: ScrapyHTTPClientFactory) -> IConnector:
        """Open the TCP (or SSL, for https) connection for *factory*."""
        from twisted.internet import reactor

        host, port = to_unicode(factory.host), factory.port
        if factory.scheme == b"https":
            client_context_factory = build_from_crawler(
                self.ClientContextFactory,
                self._crawler,
            )
            return reactor.connectSSL(host, port, factory, client_context_factory)
        return reactor.connectTCP(host, port, factory)

    async def close(self) -> None:
        # nothing to release: connections are per-request
        pass
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/__init__.py | scrapy/core/downloader/handlers/__init__.py | """Download handlers for different schemes"""
from __future__ import annotations
import inspect
import logging
import warnings
from typing import TYPE_CHECKING, Any, Protocol, cast
from scrapy import Request, Spider, signals
from scrapy.exceptions import NotConfigured, NotSupported, ScrapyDeprecationWarning
from scrapy.utils.defer import (
deferred_from_coro,
ensure_awaitable,
maybe_deferred_to_future,
)
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import global_object_name, without_none_values
if TYPE_CHECKING:
from collections.abc import Callable
from twisted.internet.defer import Deferred
from scrapy.crawler import Crawler
from scrapy.http import Response
logger = logging.getLogger(__name__)
# This is the official API but we temporarily support the old deprecated one:
# * lazy is not mandatory (defaults to True).
# * download_request() can return a Deferred[Response] instead of a coroutine,
# and takes a spider argument in this case.
# * close() can return None or Deferred[None] instead of a coroutine.
# * close() is not mandatory.
class DownloadHandlerProtocol(Protocol):
    """Structural interface that per-scheme download handlers must implement."""

    # False: instantiate at startup; True: instantiate on first request for
    # the scheme (True is also assumed when the attribute is missing).
    lazy: bool

    async def download_request(self, request: Request) -> Response: ...

    async def close(self) -> None: ...
class DownloadHandlers:
    """Registry of per-URI-scheme download handlers.

    Handlers come from the DOWNLOAD_HANDLERS(_BASE) settings. Each handler
    is instantiated lazily on first request for its scheme, unless its class
    sets ``lazy = False`` (then it is built at startup). Failures to load a
    handler are remembered so the error is reported consistently.
    """

    def __init__(self, crawler: Crawler):
        self._crawler: Crawler = crawler
        # stores acceptable schemes on instancing
        self._schemes: dict[str, str | Callable[..., Any]] = {}
        # stores instanced handlers for schemes
        self._handlers: dict[str, DownloadHandlerProtocol] = {}
        # remembers failed handlers
        self._notconfigured: dict[str, str] = {}
        # remembers handlers with Deferred-based download_request()
        self._old_style_handlers: set[str] = set()
        handlers: dict[str, str | Callable[..., Any]] = without_none_values(
            cast(
                "dict[str, str | Callable[..., Any]]",
                crawler.settings.getwithbase("DOWNLOAD_HANDLERS"),
            )
        )
        for scheme, clspath in handlers.items():
            self._schemes[scheme] = clspath
            # startup pass: only non-lazy handlers are built here
            self._load_handler(scheme, skip_lazy=True)

        crawler.signals.connect(self._close, signals.engine_stopped)

    def _get_handler(self, scheme: str) -> DownloadHandlerProtocol | None:
        """Lazy-load the downloadhandler for a scheme
        only on the first request for that scheme.
        """
        if scheme in self._handlers:
            return self._handlers[scheme]
        if scheme in self._notconfigured:
            return None
        if scheme not in self._schemes:
            self._notconfigured[scheme] = "no handler available for that scheme"
            return None

        return self._load_handler(scheme)

    def _load_handler(
        self, scheme: str, skip_lazy: bool = False
    ) -> DownloadHandlerProtocol | None:
        """Instantiate and register the handler for *scheme*.

        Returns None (recording the reason) if the handler is lazy and
        *skip_lazy* is set, raises NotConfigured, or fails to build.
        """
        path = self._schemes[scheme]
        try:
            dhcls: type[DownloadHandlerProtocol] = load_object(path)
            if skip_lazy:
                if not hasattr(dhcls, "lazy"):
                    warnings.warn(
                        f"{global_object_name(dhcls)} doesn't define a 'lazy' attribute."
                        f" This is deprecated, please add 'lazy = True' (which is the current"
                        f" default value) to the class definition.",
                        category=ScrapyDeprecationWarning,
                        stacklevel=1,
                    )
                if getattr(dhcls, "lazy", True):
                    # defer instantiation to the first request for this scheme
                    return None
            dh = build_from_crawler(
                dhcls,
                self._crawler,
            )
        except NotConfigured as ex:
            self._notconfigured[scheme] = str(ex)
            return None
        except Exception as ex:
            logger.error(
                'Loading "%(clspath)s" for scheme "%(scheme)s"',
                {"clspath": path, "scheme": scheme},
                exc_info=True,
                extra={"crawler": self._crawler},
            )
            self._notconfigured[scheme] = str(ex)
            return None
        self._handlers[scheme] = dh
        if not inspect.iscoroutinefunction(dh.download_request):  # pragma: no cover
            # deprecated handler API: Deferred-returning download_request(request, spider)
            warnings.warn(
                f"{global_object_name(dh.download_request)} is not a coroutine function."
                f" This is deprecated, please rewrite it to return a coroutine and remove"
                f" the 'spider' argument.",
                category=ScrapyDeprecationWarning,
                stacklevel=1,
            )
            self._old_style_handlers.add(scheme)
        return dh

    def download_request(
        self, request: Request, spider: Spider | None = None
    ) -> Deferred[Response]:  # pragma: no cover
        """Deprecated Deferred-based entry point; use download_request_async()."""
        warnings.warn(
            "DownloadHandlers.download_request() is deprecated, use download_request_async() instead",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.download_request_async(request))

    async def download_request_async(self, request: Request) -> Response:
        """Dispatch *request* to the handler for its URL scheme."""
        scheme = urlparse_cached(request).scheme
        handler = self._get_handler(scheme)
        if not handler:
            raise NotSupported(
                f"Unsupported URL scheme '{scheme}': {self._notconfigured[scheme]}"
            )
        assert self._crawler.spider
        if scheme in self._old_style_handlers:  # pragma: no cover
            # legacy handler: call with the spider argument and await the Deferred
            return await maybe_deferred_to_future(
                cast(
                    "Deferred[Response]",
                    handler.download_request(request, self._crawler.spider),  # type: ignore[call-arg]
                )
            )
        return await handler.download_request(request)

    async def _close(self) -> None:
        """Close all instantiated handlers on engine shutdown."""
        for dh in self._handlers.values():
            if not hasattr(dh, "close"):  # pragma: no cover
                warnings.warn(
                    f"{global_object_name(dh)} doesn't define a close() method."
                    f" This is deprecated, please add an empty 'async def close()' method.",
                    category=ScrapyDeprecationWarning,
                    stacklevel=1,
                )
                continue
            if inspect.iscoroutinefunction(dh.close):
                await dh.close()
            else:  # pragma: no cover
                warnings.warn(
                    f"{global_object_name(dh.close)} is not a coroutine function."
                    f" This is deprecated, please rewrite it to return a coroutine.",
                    category=ScrapyDeprecationWarning,
                    stacklevel=1,
                )
                await ensure_awaitable(dh.close())
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/http2.py | scrapy/core/downloader/handlers/http2.py | from __future__ import annotations
from time import time
from typing import TYPE_CHECKING
from urllib.parse import urldefrag
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.web.client import URI
from scrapy.core.downloader.contextfactory import load_context_factory_from_settings
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.core.http2.agent import H2Agent, H2ConnectionPool, ScrapyProxyH2Agent
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes
if TYPE_CHECKING:
from twisted.internet.base import DelayedCall
from twisted.internet.defer import Deferred
from twisted.web.iweb import IPolicyForHTTPS
from scrapy.crawler import Crawler
from scrapy.http import Request, Response
from scrapy.spiders import Spider
class H2DownloadHandler(BaseDownloadHandler):
    """HTTP/2 download handler built on the H2 connection pool."""

    # instantiate on first https request rather than at startup
    lazy = True

    def __init__(self, crawler: Crawler):
        super().__init__(crawler)
        self._crawler = crawler

        from twisted.internet import reactor

        # connection pool and TLS context are shared across requests
        self._pool = H2ConnectionPool(reactor, crawler.settings)
        self._context_factory = load_context_factory_from_settings(
            crawler.settings, crawler
        )

    async def download_request(self, request: Request) -> Response:
        """Download *request* over HTTP/2 and return the response."""
        agent = ScrapyH2Agent(
            context_factory=self._context_factory,
            pool=self._pool,
            crawler=self._crawler,
        )
        assert self._crawler.spider
        return await maybe_deferred_to_future(
            agent.download_request(request, self._crawler.spider)
        )

    async def close(self) -> None:
        # tear down any live HTTP/2 connections
        self._pool.close_connections()
class ScrapyH2Agent:
    """Per-request driver for HTTP/2 downloads: picks the agent (direct or
    HTTP proxy), records download latency and enforces the download timeout.
    """

    # agent classes, overridable for testing
    _Agent = H2Agent
    _ProxyAgent = ScrapyProxyH2Agent

    def __init__(
        self,
        context_factory: IPolicyForHTTPS,
        pool: H2ConnectionPool,
        connect_timeout: int = 10,
        bind_address: bytes | None = None,
        crawler: Crawler | None = None,
    ) -> None:
        self._context_factory = context_factory
        self._connect_timeout = connect_timeout
        self._bind_address = bind_address
        self._pool = pool
        self._crawler = crawler

    def _get_agent(self, request: Request, timeout: float | None) -> H2Agent:
        """Return the agent for *request*; proxied https is unsupported."""
        from twisted.internet import reactor

        bind_address = request.meta.get("bindaddress") or self._bind_address
        proxy = request.meta.get("proxy")
        if proxy:
            if urlparse_cached(request).scheme == "https":
                # ToDo
                raise NotImplementedError(
                    "Tunneling via CONNECT method using HTTP/2.0 is not yet supported"
                )
            return self._ProxyAgent(
                reactor=reactor,
                context_factory=self._context_factory,
                proxy_uri=URI.fromBytes(to_bytes(proxy, encoding="ascii")),
                connect_timeout=timeout,
                bind_address=bind_address,
                pool=self._pool,
            )

        return self._Agent(
            reactor=reactor,
            context_factory=self._context_factory,
            connect_timeout=timeout,
            bind_address=bind_address,
            pool=self._pool,
        )

    def download_request(self, request: Request, spider: Spider) -> Deferred[Response]:
        """Start the download; the returned Deferred fires with the Response
        or errbacks with TimeoutError if the download timeout elapses."""
        from twisted.internet import reactor

        timeout = request.meta.get("download_timeout") or self._connect_timeout
        agent = self._get_agent(request, timeout)

        start_time = time()
        d = agent.request(request, spider)
        d.addCallback(self._cb_latency, request, start_time)

        timeout_cl = reactor.callLater(timeout, d.cancel)
        d.addBoth(self._cb_timeout, request, timeout, timeout_cl)
        return d

    @staticmethod
    def _cb_latency(
        response: Response, request: Request, start_time: float
    ) -> Response:
        # record elapsed time up to response completion in request.meta
        request.meta["download_latency"] = time() - start_time
        return response

    @staticmethod
    def _cb_timeout(
        response: Response, request: Request, timeout: float, timeout_cl: DelayedCall
    ) -> Response:
        """Cancel the timeout timer, or raise TimeoutError if it already fired."""
        if timeout_cl.active():
            timeout_cl.cancel()
            return response

        url = urldefrag(request.url)[0]
        raise TxTimeoutError(f"Getting {url} took longer than {timeout} seconds.")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/base.py | scrapy/core/downloader/handlers/base.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request
from scrapy.crawler import Crawler
from scrapy.http import Response
class BaseDownloadHandler(ABC):
    """Optional base class for download handlers."""

    # False: the handler is instantiated at startup, not on first request.
    lazy: bool = False

    def __init__(self, crawler: Crawler):
        self.crawler = crawler

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        """Standard Scrapy factory method."""
        return cls(crawler)

    @abstractmethod
    async def download_request(self, request: Request) -> Response:
        """Download *request* and return the resulting response."""
        raise NotImplementedError

    async def close(self) -> None:  # noqa: B027
        """Release resources on shutdown; default implementation is a no-op."""
        pass
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/core/downloader/handlers/s3.py | scrapy/core/downloader/handlers/s3.py | from __future__ import annotations
from typing import TYPE_CHECKING
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.exceptions import NotConfigured
from scrapy.utils.boto import is_botocore_available
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import build_from_crawler
if TYPE_CHECKING:
from scrapy import Request
from scrapy.crawler import Crawler
from scrapy.http import Response
class S3DownloadHandler(BaseDownloadHandler):
    """Handler for ``s3://bucket/key`` URLs.

    Rewrites the request to the corresponding s3.amazonaws.com HTTP(S) URL,
    signs it with botocore credentials from the AWS_* settings (unless both
    key settings are empty, in which case the request is anonymous), and
    delegates the actual download to HTTP11DownloadHandler.
    """

    # only instantiated on the first s3:// request
    lazy = True

    def __init__(self, crawler: Crawler):
        if not is_botocore_available():
            raise NotConfigured("missing botocore library")
        super().__init__(crawler)

        aws_access_key_id = crawler.settings["AWS_ACCESS_KEY_ID"]
        aws_secret_access_key = crawler.settings["AWS_SECRET_ACCESS_KEY"]
        aws_session_token = crawler.settings["AWS_SESSION_TOKEN"]

        # anonymous access when no credentials are configured
        self.anon = not aws_access_key_id and not aws_secret_access_key

        self._signer = None
        if not self.anon:
            import botocore.auth  # noqa: PLC0415
            import botocore.credentials  # noqa: PLC0415

            SignerCls = botocore.auth.AUTH_TYPE_MAPS["s3"]
            # botocore.auth.BaseSigner doesn't have an __init__() with args, only subclasses do
            self._signer = SignerCls(  # type: ignore[call-arg]
                botocore.credentials.Credentials(
                    aws_access_key_id, aws_secret_access_key, aws_session_token
                )
            )

        _http_handler = build_from_crawler(HTTP11DownloadHandler, crawler)
        self._download_http = _http_handler.download_request

    async def download_request(self, request: Request) -> Response:
        """Sign (if needed) and download the S3 object behind *request*."""
        p = urlparse_cached(request)
        # request.meta["is_secure"] selects https vs http for the rewrite
        scheme = "https" if request.meta.get("is_secure") else "http"
        bucket = p.hostname
        path = p.path + "?" + p.query if p.query else p.path
        # virtual-hosted-style URL used for the actual download
        url = f"{scheme}://{bucket}.s3.amazonaws.com{path}"
        if self.anon:
            request = request.replace(url=url)
        else:
            import botocore.awsrequest  # noqa: PLC0415

            # signing is done against the path-style URL; the signed headers
            # are then attached to the virtual-hosted-style request
            awsrequest = botocore.awsrequest.AWSRequest(
                method=request.method,
                url=f"{scheme}://s3.amazonaws.com/{bucket}{path}",
                headers=request.headers.to_unicode_dict(),
                data=request.body,
            )
            assert self._signer
            self._signer.add_auth(awsrequest)
            request = request.replace(url=url, headers=awsrequest.headers.items())
        return await self._download_http(request)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spiders/sitemap.py | scrapy/spiders/sitemap.py | from __future__ import annotations
import logging
import re
# Iterable is needed at the run time for the SitemapSpider._parse_sitemap() annotation
from collections.abc import AsyncIterator, Iterable, Sequence # noqa: TC003
from typing import TYPE_CHECKING, Any, cast
from scrapy.http import Request, Response, XmlResponse
from scrapy.spiders import Spider
from scrapy.utils._compression import _DecompressionMaxSizeExceeded
from scrapy.utils.gz import gunzip, gzip_magic_number
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http.request import CallbackT
logger = logging.getLogger(__name__)
class SitemapSpider(Spider):
    """Spider that crawls URLs discovered through XML sitemaps.

    Starting from ``sitemap_urls`` (sitemap or robots.txt URLs), it follows
    sitemap indexes matching ``sitemap_follow`` and dispatches urlset
    entries to callbacks via ``sitemap_rules``.
    """

    # sitemap or robots.txt URLs to start from
    sitemap_urls: Sequence[str] = ()
    # (url pattern, callback) pairs; the first matching rule wins
    sitemap_rules: Sequence[tuple[re.Pattern[str] | str, str | CallbackT]] = [
        ("", "parse")
    ]
    # sitemap-index entries are followed only if they match one of these
    sitemap_follow: Sequence[re.Pattern[str] | str] = [""]
    # whether to also yield xhtml:link rel="alternate" URLs of each entry
    sitemap_alternate_links: bool = False
    # decompression limits, resolved in from_crawler()
    _max_size: int
    _warn_size: int

    @classmethod
    def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
        spider = super().from_crawler(crawler, *args, **kwargs)
        # spider attributes take precedence over the project-wide settings
        spider._max_size = getattr(
            spider, "download_maxsize", spider.settings.getint("DOWNLOAD_MAXSIZE")
        )
        spider._warn_size = getattr(
            spider, "download_warnsize", spider.settings.getint("DOWNLOAD_WARNSIZE")
        )
        return spider

    def __init__(self, *a: Any, **kw: Any):
        super().__init__(*a, **kw)
        # pre-compile rule patterns and resolve string callbacks to methods
        self._cbs: list[tuple[re.Pattern[str], CallbackT]] = []
        for r, c in self.sitemap_rules:
            if isinstance(c, str):
                c = cast("CallbackT", getattr(self, c))
            self._cbs.append((regex(r), c))
        self._follow: list[re.Pattern[str]] = [regex(x) for x in self.sitemap_follow]

    async def start(self) -> AsyncIterator[Any]:
        for item_or_request in self.start_requests():
            yield item_or_request

    def start_requests(self) -> Iterable[Request]:
        for url in self.sitemap_urls:
            yield Request(url, self._parse_sitemap)

    def sitemap_filter(
        self, entries: Iterable[dict[str, Any]]
    ) -> Iterable[dict[str, Any]]:
        """This method can be used to filter sitemap entries by their
        attributes, for example, you can filter locs with lastmod greater
        than a given date (see docs).
        """
        yield from entries

    def _parse_sitemap(self, response: Response) -> Iterable[Request]:
        """Parse a robots.txt, sitemap index, or urlset response and yield
        the follow-up requests it implies."""
        if response.url.endswith("/robots.txt"):
            for url in sitemap_urls_from_robots(response.text, base_url=response.url):
                yield Request(url, callback=self._parse_sitemap)
        else:
            body = self._get_sitemap_body(response)
            if body is None:
                logger.warning(
                    "Ignoring invalid sitemap: %(response)s",
                    {"response": response},
                    extra={"spider": self},
                )
                return

            s = Sitemap(body)
            it = self.sitemap_filter(s)

            if s.type == "sitemapindex":
                # nested sitemaps: recurse into entries matching sitemap_follow
                for loc in iterloc(it, self.sitemap_alternate_links):
                    if any(x.search(loc) for x in self._follow):
                        yield Request(loc, callback=self._parse_sitemap)
            elif s.type == "urlset":
                # leaf sitemap: dispatch each URL to its first matching rule
                for loc in iterloc(it, self.sitemap_alternate_links):
                    for r, c in self._cbs:
                        if r.search(loc):
                            yield Request(loc, callback=c)
                            break

    def _get_sitemap_body(self, response: Response) -> bytes | None:
        """Return the sitemap body contained in the given response,
        or None if the response is not a sitemap.
        """
        if isinstance(response, XmlResponse):
            return response.body
        if gzip_magic_number(response):
            uncompressed_size = len(response.body)
            max_size = response.meta.get("download_maxsize", self._max_size)
            warn_size = response.meta.get("download_warnsize", self._warn_size)
            try:
                body = gunzip(response.body, max_size=max_size)
            except _DecompressionMaxSizeExceeded:
                return None
            if uncompressed_size < warn_size <= len(body):
                logger.warning(
                    f"{response} body size after decompression ({len(body)} B) "
                    f"is larger than the download warning size ({warn_size} B)."
                )
            return body
        # actual gzipped sitemap files are decompressed above ;
        # if we are here (response body is not gzipped)
        # and have a response for .xml.gz,
        # it usually means that it was already gunzipped
        # by HttpCompression middleware,
        # the HTTP response being sent with "Content-Encoding: gzip"
        # without actually being a .xml.gz file in the first place,
        # merely XML gzip-compressed on the fly,
        # in other word, here, we have plain XML
        if response.url.endswith(".xml") or response.url.endswith(".xml.gz"):
            return response.body
        return None
def regex(x: re.Pattern[str] | str) -> re.Pattern[str]:
    """Return *x* as a compiled pattern, compiling it if given as a string."""
    return re.compile(x) if isinstance(x, str) else x
def iterloc(it: Iterable[dict[str, Any]], alt: bool = False) -> Iterable[str]:
    """Yield the ``loc`` URL of every sitemap entry in *it*.

    When *alt* is true, each entry's alternate URLs
    (xhtml:link rel="alternate") are yielded right after its ``loc``.
    """
    for entry in it:
        yield entry["loc"]
        if alt:
            # Also consider alternate URLs (xhtml:link rel="alternate")
            yield from entry.get("alternate", ())
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spiders/crawl.py | scrapy/spiders/crawl.py | """
This modules implements the CrawlSpider which is the recommended spider to use
for scraping typical websites that requires crawling pages.
See documentation in docs/topics/spiders.rst
"""
from __future__ import annotations
import copy
import warnings
from collections.abc import AsyncIterator, Awaitable, Callable
from typing import TYPE_CHECKING, Any, TypeAlias, TypeVar, cast
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import HtmlResponse, Request, Response
from scrapy.link import Link
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Spider
from scrapy.utils.asyncgen import collect_asyncgen
from scrapy.utils.deprecate import method_is_overridden
from scrapy.utils.python import global_object_name
from scrapy.utils.spider import iterate_spider_output
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from twisted.python.failure import Failure
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http.request import CallbackT
_T = TypeVar("_T")
ProcessLinksT: TypeAlias = Callable[[list[Link]], list[Link]]
ProcessRequestT: TypeAlias = Callable[[Request, Response], Request | None]
def _identity(x: _T) -> _T:
return x
def _identity_process_request(request: Request, response: Response) -> Request | None:
return request
def _get_method(method: Callable | str | None, spider: Spider) -> Callable | None:
if callable(method):
return method
if isinstance(method, str):
return getattr(spider, method, None)
return None
_default_link_extractor = LinkExtractor()
class Rule:
    """A crawling rule for :class:`CrawlSpider`.

    Pairs a link extractor with the callback/errback to run for the links
    it extracts, plus the hooks that may filter links and requests and the
    flag controlling whether extracted pages are crawled further.
    """

    def __init__(
        self,
        link_extractor: LinkExtractor | None = None,
        callback: CallbackT | str | None = None,
        cb_kwargs: dict[str, Any] | None = None,
        follow: bool | None = None,
        process_links: ProcessLinksT | str | None = None,
        process_request: ProcessRequestT | str | None = None,
        errback: Callable[[Failure], Any] | str | None = None,
    ):
        # Every hook falls back to a module-level default when not given.
        self.link_extractor: LinkExtractor = link_extractor or _default_link_extractor
        self.callback: CallbackT | str | None = callback
        self.errback: Callable[[Failure], Any] | str | None = errback
        self.cb_kwargs: dict[str, Any] = cb_kwargs if cb_kwargs else {}
        self.process_links: ProcessLinksT | str = process_links or _identity
        self.process_request: ProcessRequestT | str = (
            process_request or _identity_process_request
        )
        # When `follow` is not given, follow links only if there is no callback.
        self.follow: bool = (not callback) if follow is None else follow

    def _compile(self, spider: Spider) -> None:
        """Replace string method names with bound methods of *spider*."""
        # The string-or-callable attributes are resolved in place; the casts
        # in the original were for type checkers only, so a plain attribute
        # loop is behaviourally equivalent.
        for attr in ("callback", "errback", "process_links", "process_request"):
            setattr(self, attr, _get_method(getattr(self, attr), spider))
class CrawlSpider(Spider):
    """Spider that crawls a site by following links according to :attr:`rules`.

    Every response is matched against each :class:`Rule`; extracted links
    become follow-up requests whose callbacks/errbacks are the ones
    configured on the rule that produced them.
    """

    #: Crawling rules, compiled into :attr:`_rules` at instantiation time.
    rules: Sequence[Rule] = ()
    _rules: list[Rule]
    #: Populated in :meth:`from_crawler` from CRAWLSPIDER_FOLLOW_LINKS.
    _follow_links: bool

    def __init__(self, *a: Any, **kw: Any):
        super().__init__(*a, **kw)
        self._compile_rules()
        if method_is_overridden(self.__class__, CrawlSpider, "_parse_response"):
            warnings.warn(
                f"The CrawlSpider._parse_response method, which the "
                f"{global_object_name(self.__class__)} class overrides, is "
                f"deprecated: it will be removed in future Scrapy releases. "
                f"Please override the CrawlSpider.parse_with_rules method "
                f"instead.",
                # Use the proper deprecation category (and point at the
                # caller) rather than the implicit UserWarning default.
                ScrapyDeprecationWarning,
                stacklevel=2,
            )

    def _parse(self, response: Response, **kwargs: Any) -> Any:
        return self.parse_with_rules(
            response=response,
            callback=self.parse_start_url,
            cb_kwargs=kwargs,
            follow=True,
        )

    def parse_start_url(self, response: Response, **kwargs: Any) -> Any:
        """Overridable hook invoked for the start URL responses."""
        return []

    def process_results(
        self, response: Response, results: Iterable[Any]
    ) -> Iterable[Any]:
        """Overridable hook to post-process the results of a callback."""
        return results

    def _build_request(self, rule_index: int, link: Link) -> Request:
        # The rule index travels in meta so _callback/_errback can recover
        # the Rule that produced this request when the response arrives.
        return Request(
            url=link.url,
            callback=self._callback,
            errback=self._errback,
            meta={"rule": rule_index, "link_text": link.text},
        )

    def _requests_to_follow(self, response: Response) -> Iterable[Request | None]:
        """Yield follow-up requests for every rule matching *response*."""
        if not isinstance(response, HtmlResponse):
            return
        # Deduplicate links across rules: the first matching rule wins.
        seen: set[Link] = set()
        for rule_index, rule in enumerate(self._rules):
            links: list[Link] = [
                lnk
                for lnk in rule.link_extractor.extract_links(response)
                if lnk not in seen
            ]
            for link in cast("ProcessLinksT", rule.process_links)(links):
                seen.add(link)
                request = self._build_request(rule_index, link)
                # process_request may drop a request by returning None.
                yield cast("ProcessRequestT", rule.process_request)(request, response)

    def _callback(self, response: Response, **cb_kwargs: Any) -> Any:
        rule = self._rules[cast("int", response.meta["rule"])]
        return self.parse_with_rules(
            response,
            cast("CallbackT", rule.callback),
            {**rule.cb_kwargs, **cb_kwargs},
            rule.follow,
        )

    def _errback(self, failure: Failure) -> Iterable[Any]:
        rule = self._rules[cast("int", failure.request.meta["rule"])]  # type: ignore[attr-defined]
        return self._handle_failure(
            failure, cast("Callable[[Failure], Any]", rule.errback)
        )

    async def parse_with_rules(
        self,
        response: Response,
        callback: CallbackT | None,
        cb_kwargs: dict[str, Any],
        follow: bool = True,
    ) -> AsyncIterator[Any]:
        """Run *callback* on *response*, yield its results, then (when
        *follow* and CRAWLSPIDER_FOLLOW_LINKS allow it) yield the requests
        produced by the crawling rules."""
        if callback:
            cb_res = callback(response, **cb_kwargs) or ()
            # Normalize async generators and awaitables into plain iterables.
            if isinstance(cb_res, AsyncIterator):
                cb_res = await collect_asyncgen(cb_res)
            elif isinstance(cb_res, Awaitable):
                cb_res = await cb_res
            cb_res = self.process_results(response, cb_res)
            for request_or_item in iterate_spider_output(cb_res):
                yield request_or_item
        if follow and self._follow_links:
            for request_or_item in self._requests_to_follow(response):
                yield request_or_item

    def _parse_response(
        self,
        response: Response,
        callback: CallbackT | None,
        cb_kwargs: dict[str, Any],
        follow: bool = True,
    ) -> AsyncIterator[Any]:
        """Deprecated alias of :meth:`parse_with_rules`."""
        warnings.warn(
            "The CrawlSpider._parse_response method is deprecated: "
            "it will be removed in future Scrapy releases. "
            "Please use the CrawlSpider.parse_with_rules method instead.",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return self.parse_with_rules(response, callback, cb_kwargs, follow)

    def _handle_failure(
        self, failure: Failure, errback: Callable[[Failure], Any] | None
    ) -> Iterable[Any]:
        """Run the rule's errback (if any) and yield whatever it returns."""
        if errback:
            results = errback(failure) or ()
            yield from iterate_spider_output(results)

    def _compile_rules(self) -> None:
        # Compile copies so class-level Rule objects shared between spider
        # instances are never mutated by _compile().
        self._rules = []
        for rule in self.rules:
            self._rules.append(copy.copy(rule))
            self._rules[-1]._compile(self)

    @classmethod
    def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
        spider = super().from_crawler(crawler, *args, **kwargs)
        spider._follow_links = crawler.settings.getbool("CRAWLSPIDER_FOLLOW_LINKS")
        return spider
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spiders/feed.py | scrapy/spiders/feed.py | """
This module implements the XMLFeedSpider which is the recommended spider to use
for scraping from an XML feed.
See documentation in docs/topics/spiders.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from scrapy.exceptions import NotConfigured, NotSupported
from scrapy.http import Response, TextResponse
from scrapy.selector import Selector
from scrapy.spiders import Spider
from scrapy.utils.iterators import csviter, xmliter_lxml
from scrapy.utils.spider import iterate_spider_output
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
class XMLFeedSpider(Spider):
    """
    This class intends to be the base class for spiders that scrape
    from XML feeds.
    You can choose whether to parse the file using the 'iternodes' iterator,
    an 'xml' selector, or an 'html' selector. In most cases, it's convenient
    to use iternodes, since it's faster and cleaner.
    """

    #: Iteration strategy used by _parse(): 'iternodes', 'xml' or 'html'.
    iterator: str = "iternodes"
    #: Tag name whose occurrences are fed to parse_node().
    itertag: str = "item"
    #: (prefix, uri) pairs registered on every selector.
    namespaces: Sequence[tuple[str, str]] = ()

    def process_results(
        self, response: Response, results: Iterable[Any]
    ) -> Iterable[Any]:
        """This overridable method is called for each result (item or request)
        returned by the spider, and it's intended to perform any last time
        processing required before returning the results to the framework core,
        for example setting the item GUIDs. It receives a list of results and
        the response which originated those results. It must return a list of
        results (items or requests).
        """
        return results

    def adapt_response(self, response: Response) -> Response:
        """You can override this function in order to make any changes you want
        to the feed before parsing it. This function must return a
        response.
        """
        return response

    def parse_node(self, response: Response, selector: Selector) -> Any:
        """This method must be overridden with your custom spider functionality"""
        if hasattr(self, "parse_item"):  # backward compatibility
            return self.parse_item(response, selector)
        raise NotImplementedError

    def parse_nodes(self, response: Response, nodes: Iterable[Selector]) -> Any:
        """This method is called for the nodes matching the provided tag name
        (itertag). Receives the response and a Selector for each node.
        Overriding this method is optional; overriding parse_node() is
        mandatory, otherwise your spider won't work.
        This method must return either an item, a request, or a list
        containing any of them.
        """
        for selector in nodes:
            ret = iterate_spider_output(self.parse_node(response, selector))
            yield from self.process_results(response, ret)

    def _parse(self, response: Response, **kwargs: Any) -> Any:
        # NOTE: parse_node is defined on this class, so this guard can only
        # fire if a subclass actively removes it; kept for compatibility.
        if not hasattr(self, "parse_node"):
            raise NotConfigured(
                "You must define parse_node method in order to scrape this XML feed"
            )
        response = self.adapt_response(response)
        nodes: Iterable[Selector]
        if self.iterator == "iternodes":
            nodes = self._iternodes(response)
        elif self.iterator in ("xml", "html"):
            # The two selector-based strategies differ only in the selector
            # type, so they share a single branch.
            if not isinstance(response, TextResponse):
                raise ValueError("Response content isn't text")
            selector = Selector(response, type=self.iterator)
            self._register_namespaces(selector)
            nodes = selector.xpath(f"//{self.itertag}")
        else:
            raise NotSupported("Unsupported node iterator")
        return self.parse_nodes(response, nodes)

    def _iternodes(self, response: Response) -> Iterable[Selector]:
        # Register the spider's namespaces on every yielded node.
        for node in xmliter_lxml(response, self.itertag):
            self._register_namespaces(node)
            yield node

    def _register_namespaces(self, selector: Selector) -> None:
        for prefix, uri in self.namespaces:
            selector.register_namespace(prefix, uri)
class CSVFeedSpider(Spider):
    """Spider for parsing CSV feeds.

    Each row of the CSV response is turned into a dict and handed to
    :meth:`parse_row`. The ``delimiter``, ``quotechar`` and ``headers``
    class attributes configure how the file is read.
    """

    # A value of None below defers to the csv module's own default.
    delimiter: str | None = None
    quotechar: str | None = None
    #: Explicit column names; when None they are detected from the file.
    headers: list[str] | None = None

    def process_results(
        self, response: Response, results: Iterable[Any]
    ) -> Iterable[Any]:
        """Post-processing hook; same purpose as the one in XMLFeedSpider."""
        return results

    def adapt_response(self, response: Response) -> Response:
        """Pre-parsing hook; same purpose as the one in XMLFeedSpider."""
        return response

    def parse_row(self, response: Response, row: dict[str, str]) -> Any:
        """This method must be overridden with your custom spider functionality"""
        raise NotImplementedError

    def parse_rows(self, response: Response) -> Any:
        """Yield the processed results of :meth:`parse_row` for every row.

        Each row is a dict keyed by the provided (or detected) CSV headers.
        ``adapt_response`` and ``process_results`` can be overridden for pre-
        and post-processing.
        """
        rows = csviter(response, self.delimiter, self.headers, quotechar=self.quotechar)
        for row in rows:
            yield from self.process_results(
                response, iterate_spider_output(self.parse_row(response, row))
            )

    def _parse(self, response: Response, **kwargs: Any) -> Any:
        if not hasattr(self, "parse_row"):
            raise NotConfigured(
                "You must define parse_row method in order to scrape this CSV feed"
            )
        return self.parse_rows(self.adapt_response(response))
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spiders/__init__.py | scrapy/spiders/__init__.py | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
from __future__ import annotations
import logging
import warnings
from typing import TYPE_CHECKING, Any, cast
from scrapy import signals
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Request, Response
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterable
from twisted.internet.defer import Deferred
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http.request import CallbackT
from scrapy.settings import BaseSettings, _SettingsKey
from scrapy.utils.log import SpiderLoggerAdapter
class Spider(object_ref):
    """Base class that any spider must subclass.
    It provides a default :meth:`start` implementation that sends
    requests based on the :attr:`start_urls` class attribute and calls the
    :meth:`parse` method for each response.
    """

    # Unique name identifying the spider; mandatory (enforced in __init__).
    name: str
    # Per-spider settings overrides; merged by update_settings().
    custom_settings: dict[_SettingsKey, Any] | None = None
    #: Start URLs. See :meth:`start`.
    start_urls: list[str]

    def __init__(self, name: str | None = None, **kwargs: Any):
        if name is not None:
            self.name: str = name
        elif not getattr(self, "name", None):
            raise ValueError(f"{type(self).__name__} must have a name")
        # Extra keyword arguments (e.g. -a command-line arguments) become
        # instance attributes.
        self.__dict__.update(kwargs)
        if not hasattr(self, "start_urls"):
            self.start_urls: list[str] = []

    @property
    def logger(self) -> SpiderLoggerAdapter:
        # circular import
        from scrapy.utils.log import SpiderLoggerAdapter  # noqa: PLC0415

        # Per-spider logger named after the spider, with the spider object
        # attached for the log formatting machinery.
        logger = logging.getLogger(self.name)
        return SpiderLoggerAdapter(logger, {"spider": self})

    def log(self, message: Any, level: int = logging.DEBUG, **kw: Any) -> None:
        """Log the given message at the given log level
        This helper wraps a log call to the logger within the spider, but you
        can use it directly (e.g. Spider.logger.info('msg')) or use any other
        Python logger too.
        """
        self.logger.log(level, message, **kw)

    @classmethod
    def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        return spider

    def _set_crawler(self, crawler: Crawler) -> None:
        self.crawler: Crawler = crawler
        self.settings: BaseSettings = crawler.settings
        # Let the spider clean up via close() when the crawl ends.
        crawler.signals.connect(self.close, signals.spider_closed)

    async def start(self) -> AsyncIterator[Any]:
        """Yield the initial :class:`~scrapy.Request` objects to send.
        .. versionadded:: 2.13
        For example:
        .. code-block:: python
            from scrapy import Request, Spider
            class MySpider(Spider):
                name = "myspider"
                async def start(self):
                    yield Request("https://toscrape.com/")
        The default implementation reads URLs from :attr:`start_urls` and
        yields a request for each with :attr:`~scrapy.Request.dont_filter`
        enabled. It is functionally equivalent to:
        .. code-block:: python
            async def start(self):
                for url in self.start_urls:
                    yield Request(url, dont_filter=True)
        You can also yield :ref:`items <topics-items>`. For example:
        .. code-block:: python
            async def start(self):
                yield {"foo": "bar"}
        To write spiders that work on Scrapy versions lower than 2.13,
        define also a synchronous ``start_requests()`` method that returns an
        iterable. For example:
        .. code-block:: python
            def start_requests(self):
                yield Request("https://toscrape.com/")
        .. seealso:: :ref:`start-requests`
        """
        # Delegate to the (deprecated) start_requests() for backward
        # compatibility, silencing the deprecation warning that this
        # module's own default implementation emits.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", category=ScrapyDeprecationWarning, module=r"^scrapy\.spiders$"
            )
            for item_or_request in self.start_requests():
                yield item_or_request

    def start_requests(self) -> Iterable[Any]:
        warnings.warn(
            (
                "The Spider.start_requests() method is deprecated, use "
                "Spider.start() instead. If you are calling "
                "super().start_requests() from a Spider.start() override, "
                "iterate super().start() instead."
            ),
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        # Catch the common 'start_url' (missing 's') typo, which would
        # otherwise silently crawl nothing.
        if not self.start_urls and hasattr(self, "start_url"):
            raise AttributeError(
                "Crawling could not start: 'start_urls' not found "
                "or empty (but found 'start_url' attribute instead, "
                "did you miss an 's'?)"
            )
        for url in self.start_urls:
            yield Request(url, dont_filter=True)

    def _parse(self, response: Response, **kwargs: Any) -> Any:
        return self.parse(response, **kwargs)

    if TYPE_CHECKING:
        # For type checkers, parse is just declared as a callback attribute;
        # at runtime the fallback below raises a helpful error instead.
        parse: CallbackT
    else:

        def parse(self, response: Response, **kwargs: Any) -> Any:
            raise NotImplementedError(
                f"{self.__class__.__name__}.parse callback is not defined"
            )

    @classmethod
    def update_settings(cls, settings: BaseSettings) -> None:
        # Apply this spider's custom_settings at 'spider' priority.
        settings.setdict(cls.custom_settings or {}, priority="spider")

    @classmethod
    def handles_request(cls, request: Request) -> bool:
        return url_is_from_spider(request.url, cls)

    @staticmethod
    def close(spider: Spider, reason: str) -> Deferred[None] | None:
        # Invoke the spider's optional closed(reason) hook, if defined.
        closed = getattr(spider, "closed", None)
        if callable(closed):
            return cast("Deferred[None] | None", closed(reason))
        return None

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.name!r} at 0x{id(self):0x}>"
# Top-level imports
# These re-exports sit at the bottom of the module to avoid a circular
# import: the submodules below import Spider from this package.
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.spiders.feed import CSVFeedSpider, XMLFeedSpider
from scrapy.spiders.sitemap import SitemapSpider

# Public API of the scrapy.spiders package.
__all__ = [
    "CSVFeedSpider",
    "CrawlSpider",
    "Rule",
    "SitemapSpider",
    "Spider",
    "XMLFeedSpider",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spiders/init.py | scrapy/spiders/init.py | from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, cast
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.spiders import Spider
from scrapy.utils.spider import iterate_spider_output
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterable
from scrapy import Request
from scrapy.http import Response
class InitSpider(Spider):
    """Base Spider with initialization facilities
    .. warning:: This class is deprecated. Copy its code into your project if needed.
        It will be removed in a future Scrapy version.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            "InitSpider is deprecated. Copy its code from Scrapy's source if needed. "
            "Will be removed in a future version.",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )

    async def start(self) -> AsyncIterator[Any]:
        # Delegate to start_requests() (which returns the init_request chain),
        # silencing the deprecation warning emitted by the base
        # Spider.start_requests() implementation that it calls via super().
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", category=ScrapyDeprecationWarning, module=r"^scrapy\.spiders$"
            )
            for item_or_request in self.start_requests():
                yield item_or_request

    def start_requests(self) -> Iterable[Request]:
        # Stash the regular start requests; they are released only when the
        # initialization chain finishes and initialized() is called.
        self._postinit_reqs: Iterable[Request] = super().start_requests()
        return cast("Iterable[Request]", iterate_spider_output(self.init_request()))

    def initialized(self, response: Response | None = None) -> Any:
        """This method must be set as the callback of your last initialization
        request. See self.init_request() docstring for more info.
        """
        # Release the stashed start requests (and drop the reference to them).
        return self.__dict__.pop("_postinit_reqs")

    def init_request(self) -> Any:
        """This function should return one initialization request, with the
        self.initialized method as callback. When the self.initialized method
        is called this spider is considered initialized. If you need to perform
        several requests for initializing your spider, you can do so by using
        different callbacks. The only requirement is that the final callback
        (of the last initialization request) must be self.initialized.
        The default implementation calls self.initialized immediately, and
        means that no initialization is needed. This method should be
        overridden only when you need to perform requests to initialize your
        spider
        """
        return self.initialized()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/redirect.py | scrapy/downloadermiddlewares/redirect.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, cast
from urllib.parse import urljoin
from w3lib.url import safe_url_string
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import HtmlResponse, Response
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.response import get_meta_refresh
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
logger = logging.getLogger(__name__)
def _build_redirect_request(
    source_request: Request, *, url: str, **kwargs: Any
) -> Request:
    """Build the request that follows a redirect of *source_request* to *url*.

    Proxy state and credential-bearing headers (Cookie, Authorization) are
    stripped whenever the redirect leaves the original origin, so they do
    not leak to a different host, scheme or port.
    """
    redirect_request = source_request.replace(
        url=url,
        **kwargs,
        cls=None,
        # NOTE(review): cookies=None appears intended to let the cookie
        # middleware recompute cookies for the redirected URL instead of
        # replaying the originals verbatim -- confirm.
        cookies=None,
    )
    if "_scheme_proxy" in redirect_request.meta:
        # The proxy was chosen per scheme; if the redirect switches scheme,
        # that proxy (and its auth header) no longer applies.
        source_request_scheme = urlparse_cached(source_request).scheme
        redirect_request_scheme = urlparse_cached(redirect_request).scheme
        if source_request_scheme != redirect_request_scheme:
            redirect_request.meta.pop("_scheme_proxy")
            redirect_request.meta.pop("proxy", None)
            redirect_request.meta.pop("_auth_proxy", None)
            redirect_request.headers.pop(b"Proxy-Authorization", None)
    has_cookie_header = "Cookie" in redirect_request.headers
    has_authorization_header = "Authorization" in redirect_request.headers
    if has_cookie_header or has_authorization_header:
        # Normalize implicit ports so e.g. http://host and http://host:80
        # compare as the same origin.
        default_ports = {"http": 80, "https": 443}
        parsed_source_request = urlparse_cached(source_request)
        source_scheme, source_host, source_port = (
            parsed_source_request.scheme,
            parsed_source_request.hostname,
            parsed_source_request.port
            or default_ports.get(parsed_source_request.scheme),
        )
        parsed_redirect_request = urlparse_cached(redirect_request)
        redirect_scheme, redirect_host, redirect_port = (
            parsed_redirect_request.scheme,
            parsed_redirect_request.hostname,
            parsed_redirect_request.port
            or default_ports.get(parsed_redirect_request.scheme),
        )
        # Keep the Cookie header only when the host is unchanged and the
        # scheme is unchanged or upgraded to https.
        if has_cookie_header and (
            redirect_scheme not in {source_scheme, "https"}
            or source_host != redirect_host
        ):
            del redirect_request.headers["Cookie"]
        # https://fetch.spec.whatwg.org/#ref-for-cors-non-wildcard-request-header-name
        # Authorization survives only exact same-origin redirects
        # (identical scheme, host and port).
        if has_authorization_header and (
            source_scheme != redirect_scheme
            or source_host != redirect_host
            or source_port != redirect_port
        ):
            del redirect_request.headers["Authorization"]
    return redirect_request
class BaseRedirectMiddleware:
    """Shared plumbing for the redirect middlewares: settings wiring,
    redirect bookkeeping in ``request.meta``, and the GET-downgrade helper."""

    crawler: Crawler
    enabled_setting: str = "REDIRECT_ENABLED"

    def __init__(self, settings: BaseSettings):
        if not settings.getbool(self.enabled_setting):
            raise NotConfigured
        self.max_redirect_times: int = settings.getint("REDIRECT_MAX_TIMES")
        self.priority_adjust: int = settings.getint("REDIRECT_PRIORITY_ADJUST")

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        mw = cls(crawler.settings)
        mw.crawler = crawler
        return mw

    def _redirect(self, redirected: Request, request: Request, reason: Any) -> Request:
        """Annotate *redirected* with redirect bookkeeping, or raise
        :exc:`IgnoreRequest` once the redirect budget is exhausted."""
        ttl = request.meta.setdefault("redirect_ttl", self.max_redirect_times)
        redirects = request.meta.get("redirect_times", 0) + 1
        # Guard clause: give up when either the per-chain TTL or the global
        # redirect cap has been reached.
        if not ttl or redirects > self.max_redirect_times:
            logger.debug(
                "Discarding %(request)s: max redirections reached",
                {"request": request},
                extra={"spider": self.crawler.spider},
            )
            raise IgnoreRequest("max redirections reached")
        redirected.meta["redirect_times"] = redirects
        redirected.meta["redirect_ttl"] = ttl - 1
        redirected.meta["redirect_urls"] = [
            *request.meta.get("redirect_urls", []),
            request.url,
        ]
        redirected.meta["redirect_reasons"] = [
            *request.meta.get("redirect_reasons", []),
            reason,
        ]
        redirected.dont_filter = request.dont_filter
        redirected.priority = request.priority + self.priority_adjust
        logger.debug(
            "Redirecting (%(reason)s) to %(redirected)s from %(request)s",
            {"reason": reason, "redirected": redirected, "request": request},
            extra={"spider": self.crawler.spider},
        )
        return redirected

    def _redirect_request_using_get(
        self, request: Request, redirect_url: str
    ) -> Request:
        """Replay *request* at *redirect_url* as a body-less GET."""
        downgraded = _build_redirect_request(
            request,
            url=redirect_url,
            method="GET",
            body="",
        )
        # The body is gone, so the body-describing headers go with it.
        for header in ("Content-Type", "Content-Length"):
            downgraded.headers.pop(header, None)
        return downgraded
class RedirectMiddleware(BaseRedirectMiddleware):
    """Follow HTTP 3xx redirects based on the response status and the
    Location header."""

    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        meta = request.meta
        # Respect the various opt-outs: explicit dont_redirect, or a status
        # the spider/request wants to handle itself.
        if meta.get("dont_redirect", False) or meta.get("handle_httpstatus_all", False):
            return response
        if response.status in getattr(
            self.crawler.spider, "handle_httpstatus_list", []
        ):
            return response
        if response.status in meta.get("handle_httpstatus_list", []):
            return response
        if response.status not in (301, 302, 303, 307, 308):
            return response
        if "Location" not in response.headers:
            return response
        assert response.headers["Location"] is not None
        location = safe_url_string(response.headers["Location"])
        if response.headers["Location"].startswith(b"//"):
            # Scheme-relative Location header: borrow the request's scheme.
            location = urlparse_cached(request).scheme + "://" + location.lstrip("/")
        redirected_url = urljoin(request.url, location)
        redirected = _build_redirect_request(request, url=redirected_url)
        if urlparse_cached(redirected).scheme not in {"http", "https"}:
            return response
        if response.status in (301, 307, 308) or request.method == "HEAD":
            # These statuses (and HEAD requests) preserve the original method.
            return self._redirect(redirected, request, response.status)
        # 302/303 on a non-HEAD request: downgrade to a body-less GET.
        return self._redirect(
            self._redirect_request_using_get(request, redirected_url),
            request,
            response.status,
        )
class MetaRefreshMiddleware(BaseRedirectMiddleware):
    """Follow redirects declared through ``<meta http-equiv="refresh">``
    tags in HTML responses."""

    enabled_setting = "METAREFRESH_ENABLED"

    def __init__(self, settings: BaseSettings):
        super().__init__(settings)
        self._ignore_tags: list[str] = settings.getlist("METAREFRESH_IGNORE_TAGS")
        self._maxdelay: int = settings.getint("METAREFRESH_MAXDELAY")

    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        # Meta refresh only applies to HTML fetched over HTTP(S), and never
        # to HEAD requests or requests that opted out of redirects.
        eligible = (
            not request.meta.get("dont_redirect", False)
            and request.method != "HEAD"
            and isinstance(response, HtmlResponse)
            and urlparse_cached(request).scheme in {"http", "https"}
        )
        if not eligible:
            return response
        interval, url = get_meta_refresh(response, ignore_tags=self._ignore_tags)
        if not url:
            return response
        redirected = self._redirect_request_using_get(request, url)
        if urlparse_cached(redirected).scheme not in {"http", "https"}:
            return response
        if cast("float", interval) >= self._maxdelay:
            # A long refresh delay is treated as content, not as a redirect.
            return response
        return self._redirect(redirected, request, "meta refresh")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/robotstxt.py | scrapy/downloadermiddlewares/robotstxt.py | """
This is a middleware to respect robots.txt policies. To activate it you must
enable this middleware and enable the ROBOTSTXT_OBEY setting.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from twisted.internet.defer import Deferred
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import Request, Response
from scrapy.http.request import NO_CALLBACK
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import load_object
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Spider
from scrapy.crawler import Crawler
from scrapy.robotstxt import RobotParser
logger = logging.getLogger(__name__)
class RobotsTxtMiddleware:
    """Downloader middleware that drops requests disallowed by the target
    site's robots.txt (enabled via the ROBOTSTXT_OBEY setting)."""

    # Priority given to the robots.txt fetches themselves.
    DOWNLOAD_PRIORITY: int = 1000

    def __init__(self, crawler: Crawler):
        if not crawler.settings.getbool("ROBOTSTXT_OBEY"):
            raise NotConfigured
        self._default_useragent: str = crawler.settings["USER_AGENT"]
        self._robotstxt_useragent: str | None = crawler.settings["ROBOTSTXT_USER_AGENT"]
        self.crawler: Crawler = crawler
        # Per-netloc cache: a Deferred while robots.txt is being fetched,
        # then the parsed RobotParser, or None if fetching/parsing failed.
        self._parsers: dict[str, RobotParser | Deferred[RobotParser | None] | None] = {}
        self._parserimpl: RobotParser = load_object(
            crawler.settings.get("ROBOTSTXT_PARSER")
        )
        # check if parser dependencies are met, this should throw an error otherwise.
        self._parserimpl.from_crawler(self.crawler, b"")

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        return cls(crawler)

    @_warn_spider_arg
    async def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> None:
        """Await the robots.txt parser for the request's host and raise
        :exc:`IgnoreRequest` if the URL is disallowed."""
        if request.meta.get("dont_obey_robotstxt"):
            return
        # data: and file: URLs have no host, hence no robots.txt to honor.
        if request.url.startswith("data:") or request.url.startswith("file:"):
            return
        rp = await self.robot_parser(request)
        self.process_request_2(rp, request)

    def process_request_2(self, rp: RobotParser | None, request: Request) -> None:
        # rp is None when robots.txt could not be fetched/parsed: in that
        # case the request is allowed through.
        if rp is None:
            return
        useragent: str | bytes | None = self._robotstxt_useragent
        if not useragent:
            # Fall back to the request's own User-Agent header, then to the
            # USER_AGENT setting.
            useragent = request.headers.get(b"User-Agent", self._default_useragent)
        assert useragent is not None
        if not rp.allowed(request.url, useragent):
            logger.debug(
                "Forbidden by robots.txt: %(request)s",
                {"request": request},
                extra={"spider": self.crawler.spider},
            )
            assert self.crawler.stats
            self.crawler.stats.inc_value("robotstxt/forbidden")
            raise IgnoreRequest("Forbidden by robots.txt")

    async def robot_parser(self, request: Request) -> RobotParser | None:
        """Return the robots.txt parser for the request's netloc, fetching
        robots.txt on first use and caching the result per netloc."""
        url = urlparse_cached(request)
        netloc = url.netloc
        if netloc not in self._parsers:
            # First request for this netloc: park a Deferred in the cache so
            # concurrent callers wait on the same in-flight fetch.
            self._parsers[netloc] = Deferred()
            robotsurl = f"{url.scheme}://{url.netloc}/robots.txt"
            robotsreq = Request(
                robotsurl,
                priority=self.DOWNLOAD_PRIORITY,
                meta={"dont_obey_robotstxt": True},
                callback=NO_CALLBACK,
            )
            assert self.crawler.engine
            assert self.crawler.stats
            try:
                resp = await self.crawler.engine.download_async(robotsreq)
                self._parse_robots(resp, netloc)
            except Exception as e:
                if not isinstance(e, IgnoreRequest):
                    logger.error(
                        "Error downloading %(request)s: %(f_exception)s",
                        {"request": request, "f_exception": e},
                        exc_info=True,
                        extra={"spider": self.crawler.spider},
                    )
                self._robots_error(e, netloc)
            self.crawler.stats.inc_value("robotstxt/request_count")
        parser = self._parsers[netloc]
        if isinstance(parser, Deferred):
            # Another caller's fetch is still in flight; wait for its result.
            return await maybe_deferred_to_future(parser)
        return parser

    def _parse_robots(self, response: Response, netloc: str) -> None:
        # Replace the parked Deferred with the parsed robots.txt and fire it
        # so any waiting callers get the parser.
        assert self.crawler.stats
        self.crawler.stats.inc_value("robotstxt/response_count")
        self.crawler.stats.inc_value(
            f"robotstxt/response_status_count/{response.status}"
        )
        rp = self._parserimpl.from_crawler(self.crawler, response.body)
        rp_dfd = self._parsers[netloc]
        assert isinstance(rp_dfd, Deferred)
        self._parsers[netloc] = rp
        rp_dfd.callback(rp)

    def _robots_error(self, exc: Exception, netloc: str) -> None:
        # Only non-IgnoreRequest errors are counted in the exception stats;
        # either way the netloc is cached as None (no parser available).
        if not isinstance(exc, IgnoreRequest):
            key = f"robotstxt/exception_count/{type(exc)}"
            assert self.crawler.stats
            self.crawler.stats.inc_value(key)
        rp_dfd = self._parsers[netloc]
        assert isinstance(rp_dfd, Deferred)
        self._parsers[netloc] = None
        rp_dfd.callback(None)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/httpproxy.py | scrapy/downloadermiddlewares/httpproxy.py | from __future__ import annotations
import base64
from typing import TYPE_CHECKING
from urllib.parse import unquote, urlunparse
from urllib.request import ( # type: ignore[attr-defined]
_parse_proxy,
getproxies,
proxy_bypass,
)
from scrapy.exceptions import NotConfigured
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.http import Response
class HttpProxyMiddleware:
    """Downloader middleware that assigns a proxy to each request.

    The proxy comes from the ``proxy`` request meta key when present,
    otherwise from the environment variables read by
    :func:`urllib.request.getproxies` (honouring ``no_proxy`` for
    http/https URLs). Credentials embedded in the proxy URL are sent via
    the ``Proxy-Authorization`` header.
    """

    def __init__(self, auth_encoding: str | None = "latin-1"):
        # Encoding used to turn the user:password pair into bytes for
        # HTTP Basic auth.
        self.auth_encoding: str | None = auth_encoding
        # Maps URL scheme -> (basic-auth credentials or None, proxy URL).
        self.proxies: dict[str, tuple[bytes | None, str]] = {}
        for type_, url in getproxies().items():
            try:
                self.proxies[type_] = self._get_proxy(url, type_)
            # some values such as '/var/run/docker.sock' can't be parsed
            # by _parse_proxy and as such should be skipped
            except ValueError:
                continue

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        # Disabled entirely unless HTTPPROXY_ENABLED is true.
        if not crawler.settings.getbool("HTTPPROXY_ENABLED"):
            raise NotConfigured
        auth_encoding: str | None = crawler.settings.get("HTTPPROXY_AUTH_ENCODING")
        return cls(auth_encoding)

    def _basic_auth_header(self, username: str, password: str) -> bytes:
        """Return the base64 payload for a ``Proxy-Authorization: Basic``
        header; credentials are percent-decoded first."""
        user_pass = to_bytes(
            f"{unquote(username)}:{unquote(password)}", encoding=self.auth_encoding
        )
        return base64.b64encode(user_pass)

    def _get_proxy(self, url: str, orig_type: str) -> tuple[bytes | None, str]:
        """Split a proxy URL into ``(credentials, credential-free URL)``.

        Raises ValueError (propagated from ``_parse_proxy``) for values
        that are not parseable proxy URLs.
        """
        proxy_type, user, password, hostport = _parse_proxy(url)
        proxy_url = urlunparse((proxy_type or orig_type, hostport, "", "", "", ""))
        creds = self._basic_auth_header(user, password) if user else None
        return creds, proxy_url

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        creds, proxy_url, scheme = None, None, None
        if "proxy" in request.meta:
            # An explicit per-request proxy (possibly None) always wins
            # over the environment configuration.
            if request.meta["proxy"] is not None:
                creds, proxy_url = self._get_proxy(request.meta["proxy"], "")
        elif self.proxies:
            parsed = urlparse_cached(request)
            _scheme = parsed.scheme
            if (
                # 'no_proxy' is only supported by http schemes
                _scheme not in ("http", "https")
                or (parsed.hostname and not proxy_bypass(parsed.hostname))
            ) and _scheme in self.proxies:
                scheme = _scheme
                creds, proxy_url = self.proxies[scheme]
        self._set_proxy_and_creds(request, proxy_url, creds, scheme)
        return None

    def _set_proxy_and_creds(
        self,
        request: Request,
        proxy_url: str | None,
        creds: bytes | None,
        scheme: str | None,
    ) -> None:
        """Sync the request's proxy meta keys and ``Proxy-Authorization``
        header with the chosen proxy, clearing stale credentials when the
        proxy changed."""
        if scheme:
            # Mark that this proxy came from the per-scheme environment config.
            request.meta["_scheme_proxy"] = True
        if proxy_url:
            request.meta["proxy"] = proxy_url
        elif request.meta.get("proxy") is not None:
            request.meta["proxy"] = None
        if creds:
            request.headers[b"Proxy-Authorization"] = b"Basic " + creds
            # Remember which proxy these credentials belong to.
            request.meta["_auth_proxy"] = proxy_url
        elif "_auth_proxy" in request.meta:
            if proxy_url != request.meta["_auth_proxy"]:
                # Proxy changed: drop credentials belonging to the old one.
                if b"Proxy-Authorization" in request.headers:
                    del request.headers[b"Proxy-Authorization"]
                del request.meta["_auth_proxy"]
        elif b"Proxy-Authorization" in request.headers:
            if proxy_url:
                # Header was set by the user; record the proxy it is for.
                request.meta["_auth_proxy"] = proxy_url
            else:
                # No proxy at all: the auth header must not leak to the server.
                del request.headers[b"Proxy-Authorization"]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/httpcompression.py | scrapy/downloadermiddlewares/httpcompression.py | from __future__ import annotations
import warnings
from itertools import chain
from logging import getLogger
from typing import TYPE_CHECKING, Any
from scrapy import Request, Spider, signals
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import Response, TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.utils._compression import (
_DecompressionMaxSizeExceeded,
_inflate,
_unbrotli,
_unzstd,
)
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
from scrapy.utils.gz import gunzip
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.statscollectors import StatsCollector
logger = getLogger(__name__)

# Encodings advertised in the Accept-Encoding request header. gzip and
# deflate are always supported; b"br" and b"zstd" are appended below only
# when the optional decompression libraries are importable.
ACCEPTED_ENCODINGS: list[bytes] = [b"gzip", b"deflate"]

try:
    try:
        import brotli
    except ImportError:
        # brotlicffi is a drop-in CFFI-based alternative to brotli.
        import brotlicffi as brotli
except ImportError:
    pass
else:
    try:
        # can_accept_more_data is only present in brotli/brotlicffi >= 1.2.0,
        # which the decompression helpers require.
        brotli.Decompressor.can_accept_more_data
    except AttributeError:  # pragma: no cover
        warnings.warn(
            "You have brotli installed. But 'br' encoding support now requires "
            "brotli's or brotlicffi's version >= 1.2.0. Please upgrade "
            "brotli/brotlicffi to make Scrapy decode 'br' encoded responses.",
        )
    else:
        ACCEPTED_ENCODINGS.append(b"br")

try:
    import zstandard  # noqa: F401
except ImportError:
    pass
else:
    ACCEPTED_ENCODINGS.append(b"zstd")
class HttpCompressionMiddleware:
    """This middleware allows compressed (gzip, deflate) traffic to be
    sent/received from websites"""

    def __init__(
        self,
        stats: StatsCollector | None = None,
        *,
        crawler: Crawler | None = None,
    ):
        if not crawler:
            # Legacy construction without a crawler: fall back to the
            # built-in defaults for DOWNLOAD_MAXSIZE / DOWNLOAD_WARNSIZE.
            self.stats = stats
            self._max_size = 1073741824
            self._warn_size = 33554432
            return
        self.stats = crawler.stats
        self._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
        self._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
        crawler.signals.connect(self.open_spider, signals.spider_opened)

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        if not crawler.settings.getbool("COMPRESSION_ENABLED"):
            raise NotConfigured
        return cls(crawler=crawler)

    def open_spider(self, spider: Spider) -> None:
        # Deprecated per-spider size overrides still take precedence.
        if hasattr(spider, "download_maxsize"):
            warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
            self._max_size = spider.download_maxsize
        if hasattr(spider, "download_warnsize"):
            warn_on_deprecated_spider_attribute(
                "download_warnsize", "DOWNLOAD_WARNSIZE"
            )
            self._warn_size = spider.download_warnsize

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        # Advertise every encoding this process can actually decode.
        request.headers.setdefault("Accept-Encoding", b", ".join(ACCEPTED_ENCODINGS))
        return None

    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        if request.method == "HEAD":
            # HEAD responses carry no body to decompress.
            return response
        if isinstance(response, Response):
            content_encoding = response.headers.getlist("Content-Encoding")
            if content_encoding:
                # Per-request meta overrides win over configured limits.
                max_size = request.meta.get("download_maxsize", self._max_size)
                warn_size = request.meta.get("download_warnsize", self._warn_size)
                try:
                    decoded_body, content_encoding = self._handle_encoding(
                        response.body, content_encoding, max_size
                    )
                except _DecompressionMaxSizeExceeded as e:
                    raise IgnoreRequest(
                        f"Ignored response {response} because its body "
                        f"({len(response.body)} B compressed, "
                        f"{e.decompressed_size} B decompressed so far) exceeded "
                        f"DOWNLOAD_MAXSIZE ({max_size} B) during decompression."
                    ) from e
                # Warn only when decompression pushed the size over the limit
                # (the compressed body itself was under warn_size).
                if len(response.body) < warn_size <= len(decoded_body):
                    logger.warning(
                        f"{response} body size after decompression "
                        f"({len(decoded_body)} B) is larger than the "
                        f"download warning size ({warn_size} B)."
                    )
                if content_encoding:
                    # Some encodings could not be decoded; keep them in the
                    # header and tell the user what is missing.
                    self._warn_unknown_encoding(response, content_encoding)
                response.headers["Content-Encoding"] = content_encoding
                if self.stats:
                    self.stats.inc_value(
                        "httpcompression/response_bytes",
                        len(decoded_body),
                    )
                    self.stats.inc_value("httpcompression/response_count")
                # The decompressed body may reveal the real content type.
                respcls = responsetypes.from_args(
                    headers=response.headers, url=response.url, body=decoded_body
                )
                kwargs: dict[str, Any] = {"body": decoded_body}
                if issubclass(respcls, TextResponse):
                    # force recalculating the encoding until we make sure the
                    # responsetypes guessing is reliable
                    kwargs["encoding"] = None
                response = response.replace(cls=respcls, **kwargs)
                if not content_encoding:
                    # Everything was decoded: the header no longer applies.
                    del response.headers["Content-Encoding"]
        return response

    def _handle_encoding(
        self, body: bytes, content_encoding: list[bytes], max_size: int
    ) -> tuple[bytes, list[bytes]]:
        """Decode as many (outermost-first) encodings as supported.

        Returns the (possibly partially) decoded body and the list of
        encodings that remain applied to it.
        """
        to_decode, to_keep = self._split_encodings(content_encoding)
        for encoding in to_decode:
            body = self._decode(body, encoding, max_size)
        return body, to_keep

    @staticmethod
    def _split_encodings(
        content_encoding: list[bytes],
    ) -> tuple[list[bytes], list[bytes]]:
        """Split the Content-Encoding chain into decodable and kept parts.

        Encodings are applied left-to-right by the server, so decoding
        proceeds from the end of the list. Decoding stops at the first
        unsupported encoding, because inner layers cannot be reached
        without decoding the outer ones first.
        """
        supported_encodings = {*ACCEPTED_ENCODINGS, b"x-gzip"}
        # Header values may each contain a comma-separated list.
        to_keep: list[bytes] = [
            encoding.strip().lower()
            for encoding in chain.from_iterable(
                encodings.split(b",") for encodings in content_encoding
            )
        ]
        to_decode: list[bytes] = []
        while to_keep:
            encoding = to_keep.pop()
            if encoding not in supported_encodings:
                # Put the unsupported encoding back; it and everything
                # before it stay applied.
                to_keep.append(encoding)
                return to_decode, to_keep
            to_decode.append(encoding)
        return to_decode, to_keep

    @staticmethod
    def _decode(body: bytes, encoding: bytes, max_size: int) -> bytes:
        """Decompress *body* with the decoder matching *encoding*, enforcing
        *max_size* on the decompressed output."""
        if encoding in {b"gzip", b"x-gzip"}:
            return gunzip(body, max_size=max_size)
        if encoding == b"deflate":
            return _inflate(body, max_size=max_size)
        if encoding == b"br":
            return _unbrotli(body, max_size=max_size)
        if encoding == b"zstd":
            return _unzstd(body, max_size=max_size)
        # shouldn't be reached
        return body  # pragma: no cover

    def _warn_unknown_encoding(
        self, response: Response, encodings: list[bytes]
    ) -> None:
        """Log which encodings could not be decoded and, for br/zstd, which
        optional dependency would enable them."""
        encodings_str = b",".join(encodings).decode()
        msg = (
            f"{self.__class__.__name__} cannot decode the response for {response.url} "
            f"from unsupported encoding(s) '{encodings_str}'."
        )
        if b"br" in encodings:
            msg += " You need to install brotli or brotlicffi >= 1.2.0 to decode 'br'."
        if b"zstd" in encodings:
            msg += " You need to install zstandard to decode 'zstd'."
        logger.warning(msg)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/cookies.py | scrapy/downloadermiddlewares/cookies.py | from __future__ import annotations
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Any
from tldextract import TLDExtract
from scrapy.exceptions import NotConfigured
from scrapy.http import Response
from scrapy.http.cookies import CookieJar
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_unicode
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from http.cookiejar import Cookie
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.http.request import VerboseCookie
logger = logging.getLogger(__name__)
_split_domain = TLDExtract(include_psl_private_domains=True)
_UNSET = object()
def _is_public_domain(domain: str) -> bool:
    # A name whose registrable-domain part is empty (e.g. "com", "co.uk")
    # is itself a public suffix rather than a site-owned domain.
    extracted = _split_domain(domain)
    return not extracted.domain
class CookiesMiddleware:
    """This middleware enables working with sites that need cookies"""

    crawler: Crawler

    def __init__(self, debug: bool = False):
        # One CookieJar per "cookiejar" request meta key (default key: None).
        self.jars: defaultdict[Any, CookieJar] = defaultdict(CookieJar)
        self.debug: bool = debug

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        if not crawler.settings.getbool("COOKIES_ENABLED"):
            raise NotConfigured
        o = cls(crawler.settings.getbool("COOKIES_DEBUG"))
        o.crawler = crawler
        return o

    def _process_cookies(
        self, cookies: Iterable[Cookie], *, jar: CookieJar, request: Request
    ) -> None:
        """Store *cookies* in *jar*, rejecting cookies whose domain is a
        public suffix that does not exactly match the request host."""
        for cookie in cookies:
            cookie_domain = cookie.domain
            cookie_domain = cookie_domain.removeprefix(".")
            hostname = urlparse_cached(request).hostname
            assert hostname is not None
            request_domain = hostname.lower()
            if cookie_domain and _is_public_domain(cookie_domain):
                # A cookie scoped to a public suffix (e.g. "com") would be
                # shared across unrelated sites; only allow it when it is
                # exactly the request host, and pin it to that host.
                if cookie_domain != request_domain:
                    continue
                cookie.domain = request_domain
            jar.set_cookie_if_ok(cookie, request)

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        if request.meta.get("dont_merge_cookies", False):
            return None
        cookiejarkey = request.meta.get("cookiejar")
        jar = self.jars[cookiejarkey]
        # Merge Request.cookies into the jar before building the header.
        cookies = self._get_request_cookies(jar, request)
        self._process_cookies(cookies, jar=jar, request=request)
        # set Cookie header
        request.headers.pop("Cookie", None)
        jar.add_cookie_header(request)
        self._debug_cookie(request)
        return None

    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        if request.meta.get("dont_merge_cookies", False):
            return response
        # extract cookies from Set-Cookie and drop invalid/expired cookies
        cookiejarkey = request.meta.get("cookiejar")
        jar = self.jars[cookiejarkey]
        cookies = jar.make_cookies(response, request)
        self._process_cookies(cookies, jar=jar, request=request)
        self._debug_set_cookie(response)
        return response

    def _debug_cookie(self, request: Request) -> None:
        # Log the outgoing Cookie header(s) when COOKIES_DEBUG is enabled.
        if self.debug:
            cl = [
                to_unicode(c, errors="replace")
                for c in request.headers.getlist("Cookie")
            ]
            if cl:
                cookies = "\n".join(f"Cookie: {c}\n" for c in cl)
                msg = f"Sending cookies to: {request}\n{cookies}"
                logger.debug(msg, extra={"spider": self.crawler.spider})

    def _debug_set_cookie(self, response: Response) -> None:
        # Log the incoming Set-Cookie header(s) when COOKIES_DEBUG is enabled.
        if self.debug:
            cl = [
                to_unicode(c, errors="replace")
                for c in response.headers.getlist("Set-Cookie")
            ]
            if cl:
                cookies = "\n".join(f"Set-Cookie: {c}\n" for c in cl)
                msg = f"Received cookies from: {response}\n{cookies}"
                logger.debug(msg, extra={"spider": self.crawler.spider})

    def _format_cookie(self, cookie: VerboseCookie, request: Request) -> str | None:
        """
        Given a dict consisting of cookie components, return its string representation.
        Decode from bytes if necessary.
        """
        decoded = {}
        flags = set()
        for key in ("name", "value", "path", "domain"):
            value = cookie.get(key)
            if value is None:
                # name and value are mandatory; path/domain are optional.
                if key in ("name", "value"):
                    msg = f"Invalid cookie found in request {request}: {cookie} ('{key}' is missing)"
                    logger.warning(msg)
                    return None
                continue
            if isinstance(value, (bool, float, int, str)):
                decoded[key] = str(value)
            else:
                assert isinstance(value, bytes)
                try:
                    decoded[key] = value.decode("utf8")
                except UnicodeDecodeError:
                    logger.warning(
                        "Non UTF-8 encoded cookie found in request %s: %s",
                        request,
                        cookie,
                    )
                    decoded[key] = value.decode("latin1", errors="replace")
        for flag in ("secure",):
            # _UNSET distinguishes "flag absent" from explicitly falsy values.
            value = cookie.get(flag, _UNSET)
            if value is _UNSET or not value:
                continue
            flags.add(flag)
        cookie_str = f"{decoded.pop('name')}={decoded.pop('value')}"
        for key, value in decoded.items():  # path, domain
            cookie_str += f"; {key.capitalize()}={value}"
        for flag in flags:  # secure
            cookie_str += f"; {flag.capitalize()}"
        return cookie_str

    def _get_request_cookies(
        self, jar: CookieJar, request: Request
    ) -> Sequence[Cookie]:
        """
        Extract cookies from the Request.cookies attribute
        """
        if not request.cookies:
            return []
        cookies: Iterable[VerboseCookie]
        if isinstance(request.cookies, dict):
            # Normalize the simple {name: value} form to verbose dicts.
            cookies = tuple({"name": k, "value": v} for k, v in request.cookies.items())
        else:
            cookies = request.cookies
        for cookie in cookies:
            # Default the secure flag from the request scheme.
            cookie.setdefault("secure", urlparse_cached(request).scheme == "https")
        # Round-trip through a synthetic response so the stdlib machinery
        # parses the cookies into proper Cookie objects.
        formatted = filter(None, (self._format_cookie(c, request) for c in cookies))
        response = Response(request.url, headers={"Set-Cookie": formatted})
        return jar.make_cookies(response, request)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/downloadtimeout.py | scrapy/downloadermiddlewares/downloadtimeout.py | """
Download timeout middleware
See documentation in docs/topics/downloader-middleware.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from scrapy import Request, Spider, signals
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Response
class DownloadTimeoutMiddleware:
    """Sets the default ``download_timeout`` request meta key from the
    ``DOWNLOAD_TIMEOUT`` setting (or the deprecated per-spider
    ``download_timeout`` attribute)."""

    def __init__(self, timeout: float = 180):
        self._timeout: float = timeout

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        mw = cls(crawler.settings.getfloat("DOWNLOAD_TIMEOUT"))
        crawler.signals.connect(mw.spider_opened, signal=signals.spider_opened)
        return mw

    def spider_opened(self, spider: Spider) -> None:
        # Honour the deprecated spider attribute, warning when present.
        if hasattr(spider, "download_timeout"):  # pragma: no cover
            warn_on_deprecated_spider_attribute("download_timeout", "DOWNLOAD_TIMEOUT")
        self._timeout = getattr(spider, "download_timeout", self._timeout)

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        if not self._timeout:
            return None
        # Do not override a timeout already set on the request.
        request.meta.setdefault("download_timeout", self._timeout)
        return None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/offsite.py | scrapy/downloadermiddlewares/offsite.py | from __future__ import annotations
import logging
import re
import warnings
from typing import TYPE_CHECKING
from scrapy import Request, Spider, signals
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.statscollectors import StatsCollector
logger = logging.getLogger(__name__)
class OffsiteMiddleware:
    """Filters out requests whose host does not match the spider's
    ``allowed_domains`` attribute (the domain itself or any subdomain)."""

    crawler: Crawler

    def __init__(self, stats: StatsCollector):
        self.stats = stats
        # Hosts already logged as filtered; each domain is logged only once.
        self.domains_seen: set[str] = set()

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        assert crawler.stats
        o = cls(crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.request_scheduled, signal=signals.request_scheduled)
        o.crawler = crawler
        return o

    def spider_opened(self, spider: Spider) -> None:
        # Build the host-matching regex once per spider.
        self.host_regex: re.Pattern[str] = self.get_host_regex(spider)

    def request_scheduled(self, request: Request, spider: Spider) -> None:
        # Filter requests at scheduling time as well as at download time.
        self.process_request(request)

    @_warn_spider_arg
    def process_request(self, request: Request, spider: Spider | None = None) -> None:
        assert self.crawler.spider
        if (
            request.dont_filter
            or request.meta.get("allow_offsite")
            or self.should_follow(request, self.crawler.spider)
        ):
            return
        domain = urlparse_cached(request).hostname
        if domain and domain not in self.domains_seen:
            self.domains_seen.add(domain)
            logger.debug(
                "Filtered offsite request to %(domain)r: %(request)s",
                {"domain": domain, "request": request},
                extra={"spider": self.crawler.spider},
            )
            self.stats.inc_value("offsite/domains")
        self.stats.inc_value("offsite/filtered")
        raise IgnoreRequest

    def should_follow(self, request: Request, spider: Spider) -> bool:
        """Return True if the request host matches the allowed-domains regex."""
        regex = self.host_regex
        # hostname can be None for wrong urls (like javascript links)
        host = urlparse_cached(request).hostname or ""
        return bool(regex.search(host))

    def get_host_regex(self, spider: Spider) -> re.Pattern[str]:
        """Override this method to implement a different offsite policy"""
        allowed_domains = getattr(spider, "allowed_domains", None)
        if not allowed_domains:
            return re.compile("")  # allow all by default
        url_pattern = re.compile(r"^https?://.*$")
        port_pattern = re.compile(r":\d+$")
        domains = []
        for domain in allowed_domains:
            if domain is None:
                continue
            if url_pattern.match(domain):
                # URLs in allowed_domains are a common mistake: warn and skip.
                message = (
                    "allowed_domains accepts only domains, not URLs. "
                    f"Ignoring URL entry {domain} in allowed_domains."
                )
                warnings.warn(message)
            elif port_pattern.search(domain):
                message = (
                    "allowed_domains accepts only domains without ports. "
                    f"Ignoring entry {domain} in allowed_domains."
                )
                warnings.warn(message)
            else:
                domains.append(re.escape(domain))
        # Match each domain exactly or any of its subdomains.
        regex = rf"^(.*\.)?({'|'.join(domains)})$"
        return re.compile(regex)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/useragent.py | scrapy/downloadermiddlewares/useragent.py | """Set User-Agent header per spider or use a default value from settings"""
from __future__ import annotations
from typing import TYPE_CHECKING
from scrapy import Request, Spider, signals
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Response
class UserAgentMiddleware:
    """This middleware allows spiders to override the user_agent"""

    def __init__(self, user_agent: str = "Scrapy"):
        self.user_agent = user_agent

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        middleware = cls(crawler.settings["USER_AGENT"])
        crawler.signals.connect(
            middleware.spider_opened, signal=signals.spider_opened
        )
        return middleware

    def spider_opened(self, spider: Spider) -> None:
        # Honour the deprecated per-spider user_agent attribute if present.
        if hasattr(spider, "user_agent"):  # pragma: no cover
            warn_on_deprecated_spider_attribute("user_agent", "USER_AGENT")
        self.user_agent = getattr(spider, "user_agent", self.user_agent)

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        if not self.user_agent:
            return None
        # Do not override a User-Agent header already set on the request.
        request.headers.setdefault(b"User-Agent", self.user_agent)
        return None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/ajaxcrawl.py | scrapy/downloadermiddlewares/ajaxcrawl.py | from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING
from warnings import warn
from w3lib import html
from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning
from scrapy.http import HtmlResponse, Response
from scrapy.utils.url import escape_ajax
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
logger = logging.getLogger(__name__)
class AjaxCrawlMiddleware:
    """
    Handle 'AJAX crawlable' pages marked as crawlable via meta tag.
    """

    def __init__(self, settings: BaseSettings):
        if not settings.getbool("AJAXCRAWL_ENABLED"):
            raise NotConfigured
        warn(
            "scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware is deprecated"
            " and will be removed in a future Scrapy version.",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        # XXX: Google parses at least first 100k bytes; scrapy's redirect
        # middleware parses first 4k. 4k turns out to be insufficient
        # for this middleware, and parsing 100k could be slow.
        # We use something in between (32K) by default.
        self.lookup_bytes: int = settings.getint("AJAXCRAWL_MAXSIZE")

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        return cls(crawler.settings)

    def process_response(
        self, request: Request, response: Response, spider: Spider
    ) -> Request | Response:
        """Re-issue the request with the '#!' AJAX-crawlable URL variant when
        the page declares ``<meta name="fragment" content="!">``."""
        if not isinstance(response, HtmlResponse) or response.status != 200:
            return response
        if request.method != "GET":
            # other HTTP methods are either not safe or don't have a body
            return response
        if "ajax_crawlable" in request.meta:  # prevent loops
            return response
        if not self._has_ajax_crawlable_variant(response):
            return response
        ajax_crawl_request = request.replace(url=escape_ajax(request.url + "#!"))
        logger.debug(
            "Downloading AJAX crawlable %(ajax_crawl_request)s instead of %(request)s",
            {"ajax_crawl_request": ajax_crawl_request, "request": request},
            extra={"spider": spider},
        )
        # Mark the replacement so its own response is not rewritten again.
        ajax_crawl_request.meta["ajax_crawlable"] = True
        return ajax_crawl_request

    def _has_ajax_crawlable_variant(self, response: Response) -> bool:
        """
        Return True if a page without hash fragment could be "AJAX crawlable".
        """
        # Only inspect the first AJAXCRAWL_MAXSIZE characters for speed.
        body = response.text[: self.lookup_bytes]
        return _has_ajaxcrawlable_meta(body)
_ajax_crawlable_re: re.Pattern[str] = re.compile(
    r'<meta\s+name=["\']fragment["\']\s+content=["\']!["\']/?>'
)


def _has_ajaxcrawlable_meta(text: str) -> bool:
    """
    >>> _has_ajaxcrawlable_meta('<html><head><meta name="fragment" content="!"/></head><body></body></html>')
    True
    >>> _has_ajaxcrawlable_meta("<html><head><meta name='fragment' content='!'></head></html>")
    True
    >>> _has_ajaxcrawlable_meta('<html><head><!--<meta name="fragment" content="!"/>--></head><body></body></html>')
    False
    >>> _has_ajaxcrawlable_meta('<html></html>')
    False
    """
    # Fail fast: cleaning the markup below is roughly 20x slower than a
    # plain substring test, and most pages contain neither word.
    if "fragment" not in text or "content" not in text:
        return False
    cleaned = html.remove_tags_with_content(text, ("script", "noscript"))
    cleaned = html.replace_entities(cleaned)
    cleaned = html.remove_comments(cleaned)
    return bool(_ajax_crawlable_re.search(cleaned))
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/retry.py | scrapy/downloadermiddlewares/retry.py | """
An extension to retry failed requests that are potentially caused by temporary
problems such as a connection timeout or HTTP 500 error.
You can change the behaviour of this middleware by modifying the scraping settings:
RETRY_TIMES - how many times to retry a failed page
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected on the scraping process and rescheduled at the end,
once the spider has finished crawling all regular (non-failed) pages.
"""
from __future__ import annotations
from logging import Logger, getLogger
from typing import TYPE_CHECKING
from scrapy.exceptions import NotConfigured
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.misc import load_object
from scrapy.utils.python import global_object_name
from scrapy.utils.response import response_status_message
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Response
from scrapy.http.request import Request
from scrapy.settings import BaseSettings
from scrapy.spiders import Spider
retry_logger = getLogger(__name__)
def get_retry_request(
    request: Request,
    *,
    spider: Spider,
    reason: str | Exception | type[Exception] = "unspecified",
    max_retry_times: int | None = None,
    priority_adjust: int | None = None,
    logger: Logger = retry_logger,
    stats_base_key: str = "retry",
) -> Request | None:
    """
    Returns a new :class:`~scrapy.Request` object to retry the specified
    request, or ``None`` if retries of the specified request have been
    exhausted.

    For example, in a :class:`~scrapy.Spider` callback, you could use it as
    follows::

        def parse(self, response):
            if not response.text:
                new_request_or_none = get_retry_request(
                    response.request,
                    spider=self,
                    reason='empty',
                )
                return new_request_or_none

    *spider* is the :class:`~scrapy.Spider` instance which is asking for the
    retry request. It is used to access the :ref:`settings <topics-settings>`
    and :ref:`stats <topics-stats>`, and to provide extra logging context (see
    :func:`logging.debug`).

    *reason* is a string or an :class:`Exception` object that indicates the
    reason why the request needs to be retried. It is used to name retry stats.

    *max_retry_times* is a number that determines the maximum number of times
    that *request* can be retried. If not specified or ``None``, the number is
    read from the :reqmeta:`max_retry_times` meta key of the request. If the
    :reqmeta:`max_retry_times` meta key is not defined or ``None``, the number
    is read from the :setting:`RETRY_TIMES` setting.

    *priority_adjust* is a number that determines how the priority of the new
    request changes in relation to *request*. If not specified, the number is
    read from the :setting:`RETRY_PRIORITY_ADJUST` setting.

    *logger* is the logging.Logger object to be used when logging messages

    *stats_base_key* is a string to be used as the base key for the
    retry-related job stats
    """
    settings = spider.crawler.settings
    assert spider.crawler.stats
    stats = spider.crawler.stats
    # retry_times counts the attempt being created now (1 for the first retry).
    retry_times = request.meta.get("retry_times", 0) + 1
    if max_retry_times is None:
        # Precedence: function argument > request meta > RETRY_TIMES setting.
        max_retry_times = request.meta.get("max_retry_times")
        if max_retry_times is None:
            max_retry_times = settings.getint("RETRY_TIMES")
    if retry_times <= max_retry_times:
        logger.debug(
            "Retrying %(request)s (failed %(retry_times)d times): %(reason)s",
            {"request": request, "retry_times": retry_times, "reason": reason},
            extra={"spider": spider},
        )
        new_request: Request = request.copy()
        new_request.meta["retry_times"] = retry_times
        # The dupefilter has already seen this URL; the retry must bypass it.
        new_request.dont_filter = True
        if priority_adjust is None:
            priority_adjust = settings.getint("RETRY_PRIORITY_ADJUST")
        new_request.priority = request.priority + priority_adjust

        if callable(reason):
            # An exception class was passed; instantiate it first.
            reason = reason()
        if isinstance(reason, Exception):
            # Use the exception's fully qualified class name in stat keys.
            reason = global_object_name(reason.__class__)

        stats.inc_value(f"{stats_base_key}/count")
        stats.inc_value(f"{stats_base_key}/reason_count/{reason}")
        return new_request
    stats.inc_value(f"{stats_base_key}/max_reached")
    logger.error(
        "Gave up retrying %(request)s (failed %(retry_times)d times): %(reason)s",
        {"request": request, "retry_times": retry_times, "reason": reason},
        extra={"spider": spider},
    )
    return None
class RetryMiddleware:
    """Retries requests that failed with a retryable HTTP status code
    (RETRY_HTTP_CODES) or a retryable exception (RETRY_EXCEPTIONS)."""

    crawler: Crawler

    def __init__(self, settings: BaseSettings):
        if not settings.getbool("RETRY_ENABLED"):
            raise NotConfigured
        self.max_retry_times = settings.getint("RETRY_TIMES")
        self.retry_http_codes = {int(x) for x in settings.getlist("RETRY_HTTP_CODES")}
        self.priority_adjust = settings.getint("RETRY_PRIORITY_ADJUST")
        # RETRY_EXCEPTIONS entries may be dotted import paths or classes.
        self.exceptions_to_retry = tuple(
            load_object(x) if isinstance(x, str) else x
            for x in settings.getlist("RETRY_EXCEPTIONS")
        )

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        o = cls(crawler.settings)
        o.crawler = crawler
        return o

    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        if request.meta.get("dont_retry", False):
            return response
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            # Fall back to the original response when retries are exhausted.
            return self._retry(request, reason) or response
        return response

    @_warn_spider_arg
    def process_exception(
        self, request: Request, exception: Exception, spider: Spider | None = None
    ) -> Request | Response | None:
        if isinstance(exception, self.exceptions_to_retry) and not request.meta.get(
            "dont_retry", False
        ):
            return self._retry(request, exception)
        return None

    def _retry(
        self, request: Request, reason: str | Exception | type[Exception]
    ) -> Request | None:
        """Build a retry request via get_retry_request, honouring per-request
        max_retry_times / priority_adjust meta overrides."""
        max_retry_times = request.meta.get("max_retry_times", self.max_retry_times)
        priority_adjust = request.meta.get("priority_adjust", self.priority_adjust)
        assert self.crawler.spider
        return get_retry_request(
            request,
            reason=reason,
            spider=self.crawler.spider,
            max_retry_times=max_retry_times,
            priority_adjust=priority_adjust,
        )
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/__init__.py | scrapy/downloadermiddlewares/__init__.py | python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false | |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/defaultheaders.py | scrapy/downloadermiddlewares/defaultheaders.py | """
DefaultHeaders downloader middleware
See documentation in docs/topics/downloader-middleware.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.python import without_none_values
if TYPE_CHECKING:
from collections.abc import Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.http import Response
class DefaultHeadersMiddleware:
    """Sets every header from DEFAULT_REQUEST_HEADERS on outgoing requests
    without overriding headers already present on the request."""

    def __init__(self, headers: Iterable[tuple[str, str]]):
        self._headers: Iterable[tuple[str, str]] = headers

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        # Drop settings entries whose value is None before using them.
        default_headers = without_none_values(
            crawler.settings["DEFAULT_REQUEST_HEADERS"]
        )
        return cls(default_headers.items())

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        for name, value in self._headers:
            request.headers.setdefault(name, value)
        return None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/stats.py | scrapy/downloadermiddlewares/stats.py | from __future__ import annotations
from typing import TYPE_CHECKING
from twisted.web import http
from scrapy.exceptions import NotConfigured
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.python import global_object_name, to_bytes
from scrapy.utils.request import request_httprepr
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.http import Response
from scrapy.statscollectors import StatsCollector
def get_header_size(
    headers: dict[str, list[str | bytes] | tuple[str | bytes, ...]],
) -> int:
    """Approximate the on-the-wire byte size of an HTTP header block.

    Each value of each header counts ``len(key) + len(b": ") + len(value)``
    bytes, and each header line after the first adds a ``\\r\\n`` separator.
    Values that are not a list/tuple contribute nothing (matching the
    original behavior).

    Fix: the separator term is clamped to zero so an empty mapping returns 0
    instead of the original formula's -2.
    """
    size = 0
    for key, value in headers.items():
        if isinstance(value, (list, tuple)):
            for v in value:
                size += len(b": ") + len(key) + len(v)
    # One CRLF between consecutive header lines; never negative for {}.
    return size + len(b"\r\n") * max(len(headers) - 1, 0)
def get_status_size(response_status: int) -> int:
    """Approximate the byte length of the HTTP status line for
    *response_status*: the reason phrase (empty for unknown codes) plus a
    fixed 15 bytes covering ``b"HTTP/1.1 <100-599> "`` and ``b"\\r\\n"``."""
    reason_phrase = http.RESPONSES.get(response_status, b"")
    return len(to_bytes(reason_phrase)) + 15
class DownloaderStats:
    """Record request/response/exception counters and byte totals in the
    crawler's stats collector. Active only when the ``DOWNLOADER_STATS``
    setting is enabled."""

    def __init__(self, stats: StatsCollector):
        self.stats: StatsCollector = stats

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        if not crawler.settings.getbool("DOWNLOADER_STATS"):
            raise NotConfigured
        assert crawler.stats
        return cls(crawler.stats)

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        """Count the outgoing request and its serialized (raw HTTP) size."""
        self.stats.inc_value("downloader/request_count")
        self.stats.inc_value(f"downloader/request_method_count/{request.method}")
        request_size = len(request_httprepr(request))
        self.stats.inc_value("downloader/request_bytes", request_size)
        return None

    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        """Count the incoming response and estimate its wire size:
        body + headers + status line + 4 bytes for the two CRLF separators."""
        self.stats.inc_value("downloader/response_count")
        self.stats.inc_value(f"downloader/response_status_count/{response.status}")
        body_size = len(response.body)
        header_size = get_header_size(response.headers)
        status_size = get_status_size(response.status)
        response_size = body_size + header_size + status_size + 4
        self.stats.inc_value("downloader/response_bytes", response_size)
        return response

    @_warn_spider_arg
    def process_exception(
        self, request: Request, exception: Exception, spider: Spider | None = None
    ) -> Request | Response | None:
        """Count download failures, broken down by exception class name."""
        exception_class = global_object_name(exception.__class__)
        self.stats.inc_value("downloader/exception_count")
        self.stats.inc_value(f"downloader/exception_type_count/{exception_class}")
        return None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/httpauth.py | scrapy/downloadermiddlewares/httpauth.py | """
HTTP basic auth downloader middleware
See documentation in docs/topics/downloader-middleware.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from w3lib.http import basic_auth_header
from scrapy import Request, Spider, signals
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.url import url_is_from_any_domain
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Response
class HttpAuthMiddleware:
    """Set Basic HTTP Authorization header
    (http_user and http_pass spider class attributes).

    ``http_auth_domain`` restricts which hosts receive the credentials.
    """

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        mw = cls()
        crawler.signals.connect(mw.spider_opened, signal=signals.spider_opened)
        return mw

    def spider_opened(self, spider: Spider) -> None:
        """Precompute the auth header from the spider's credentials. When the
        spider defines neither attribute, ``self.auth`` is never set and the
        middleware is a no-op."""
        user = getattr(spider, "http_user", "")
        password = getattr(spider, "http_pass", "")
        if user or password:
            self.auth = basic_auth_header(user, password)
            self.domain = spider.http_auth_domain  # type: ignore[attr-defined]

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        """Attach the precomputed header unless one is already present or the
        URL falls outside the configured auth domain."""
        auth = getattr(self, "auth", None)
        if not auth or b"Authorization" in request.headers:
            return None
        if not self.domain or url_is_from_any_domain(request.url, [self.domain]):
            request.headers[b"Authorization"] = auth
        return None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/downloadermiddlewares/httpcache.py | scrapy/downloadermiddlewares/httpcache.py | from __future__ import annotations
from email.utils import formatdate
from typing import TYPE_CHECKING
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
DNSLookupError,
TCPTimedOutError,
)
from twisted.internet.error import ConnectionRefusedError as TxConnectionRefusedError
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.web.client import ResponseFailed
from scrapy import signals
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.misc import load_object
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http.request import Request
from scrapy.http.response import Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.statscollectors import StatsCollector
class HttpCacheMiddleware:
    """Downloader middleware implementing HTTP-level caching.

    Serves cached responses that the configured policy deems fresh, stores
    first-hand responses, and can fall back to a stale cached copy when the
    download fails with a network-level error. Enabled only when the
    ``HTTPCACHE_ENABLED`` setting is true.

    Cross-hook protocol via ``request.meta``: ``_dont_cache`` marks requests
    the policy refuses to cache; ``cached_response`` carries a stale cached
    copy from process_request to process_response/process_exception so the
    cache is not queried twice.
    """
    # Network failures after which a stale cached response may be served
    # instead of propagating the error (see process_exception).
    DOWNLOAD_EXCEPTIONS = (
        defer.TimeoutError,
        TxTimeoutError,
        DNSLookupError,
        TxConnectionRefusedError,
        ConnectionDone,
        ConnectError,
        ConnectionLost,
        TCPTimedOutError,
        ResponseFailed,
        OSError,
    )
    # Assigned in from_crawler(); provides the running spider for storage calls.
    crawler: Crawler
    def __init__(self, settings: Settings, stats: StatsCollector) -> None:
        """Instantiate policy and storage objects from settings.

        :raises NotConfigured: if ``HTTPCACHE_ENABLED`` is false.
        """
        if not settings.getbool("HTTPCACHE_ENABLED"):
            raise NotConfigured
        self.policy = load_object(settings["HTTPCACHE_POLICY"])(settings)
        self.storage = load_object(settings["HTTPCACHE_STORAGE"])(settings)
        self.ignore_missing = settings.getbool("HTTPCACHE_IGNORE_MISSING")
        self.stats = stats
    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        """Build the middleware and tie storage open/close to spider signals."""
        assert crawler.stats
        o = cls(crawler.settings, crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
        o.crawler = crawler
        return o
    def spider_opened(self, spider: Spider) -> None:
        """Open the cache storage for this spider run."""
        self.storage.open_spider(spider)
    def spider_closed(self, spider: Spider) -> None:
        """Close the cache storage at the end of the spider run."""
        self.storage.close_spider(spider)
    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        """Return a fresh cached response for *request*, or None to download.

        :raises IgnoreRequest: on a cache miss when
            ``HTTPCACHE_IGNORE_MISSING`` is enabled.
        """
        if request.meta.get("dont_cache", False):
            return None
        # Skip uncacheable requests
        if not self.policy.should_cache_request(request):
            request.meta["_dont_cache"] = True  # flag as uncacheable
            return None
        # Look for cached response and check if expired
        cachedresponse: Response | None = self.storage.retrieve_response(
            self.crawler.spider, request
        )
        if cachedresponse is None:
            self.stats.inc_value("httpcache/miss")
            if self.ignore_missing:
                self.stats.inc_value("httpcache/ignore")
                raise IgnoreRequest(f"Ignored request not in cache: {request}")
            return None  # first time request
        # Return cached response only if not expired
        cachedresponse.flags.append("cached")
        if self.policy.is_cached_response_fresh(cachedresponse, request):
            self.stats.inc_value("httpcache/hit")
            return cachedresponse
        # Keep a reference to cached response to avoid a second cache lookup on
        # process_response hook
        request.meta["cached_response"] = cachedresponse
        return None
    @_warn_spider_arg
    def process_response(
        self, request: Request, response: Response, spider: Spider | None = None
    ) -> Request | Response:
        """Store or validate *response* against the cache.

        First-hand responses are stored (policy permitting). When a stale
        cached copy was stashed by process_request, the policy decides
        whether the fresh response revalidates it (return the cached one) or
        invalidates it (store and return the fresh one).
        """
        if request.meta.get("dont_cache", False):
            return response
        # Skip cached responses and uncacheable requests
        if "cached" in response.flags or "_dont_cache" in request.meta:
            request.meta.pop("_dont_cache", None)
            return response
        # RFC2616 requires origin server to set Date header,
        # https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
        if "Date" not in response.headers:
            response.headers["Date"] = formatdate(usegmt=True)
        # Do not validate first-hand responses
        cachedresponse: Response | None = request.meta.pop("cached_response", None)
        if cachedresponse is None:
            self.stats.inc_value("httpcache/firsthand")
            self._cache_response(response, request)
            return response
        if self.policy.is_cached_response_valid(cachedresponse, response, request):
            self.stats.inc_value("httpcache/revalidate")
            return cachedresponse
        self.stats.inc_value("httpcache/invalidate")
        self._cache_response(response, request)
        return response
    @_warn_spider_arg
    def process_exception(
        self, request: Request, exception: Exception, spider: Spider | None = None
    ) -> Request | Response | None:
        """Serve the stale cached response (if one was stashed) after a
        network failure listed in DOWNLOAD_EXCEPTIONS; otherwise return None
        so the error keeps propagating."""
        cachedresponse: Response | None = request.meta.pop("cached_response", None)
        if cachedresponse is not None and isinstance(
            exception, self.DOWNLOAD_EXCEPTIONS
        ):
            self.stats.inc_value("httpcache/errorrecovery")
            return cachedresponse
        return None
    def _cache_response(self, response: Response, request: Request) -> None:
        """Store *response* if the policy allows caching it; track stats."""
        if self.policy.should_cache_response(response, request):
            self.stats.inc_value("httpcache/store")
            self.storage.store_response(self.crawler.spider, request, response)
        else:
            self.stats.inc_value("httpcache/uncacheable")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/cookies.py | scrapy/http/cookies.py | from __future__ import annotations
import re
import time
from http.cookiejar import Cookie, CookiePolicy, DefaultCookiePolicy
from http.cookiejar import CookieJar as _CookieJar
from typing import TYPE_CHECKING, Any, cast
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_unicode
if TYPE_CHECKING:
from collections.abc import Iterator, Sequence
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request
from scrapy.http import Response
# Defined in the http.cookiejar module, but undocumented:
# https://github.com/python/cpython/blob/v3.9.0/Lib/http/cookiejar.py#L527
IPV4_RE = re.compile(r"\.\d+$", re.ASCII)
class CookieJar:
    """Wrapper around :class:`http.cookiejar.CookieJar` adapted to Scrapy's
    Request/Response objects (via WrappedRequest/WrappedResponse).

    Unlike the stdlib jar, domain matching in :meth:`add_cookie_header` is
    restricted to hosts that could plausibly match the request, and the
    jar's internal lock is replaced with a no-op.
    """
    def __init__(
        self,
        policy: CookiePolicy | None = None,
        check_expired_frequency: int = 10000,
    ):
        # Cookie acceptance/return rules; stdlib default policy if not given.
        self.policy: CookiePolicy = policy or DefaultCookiePolicy()
        self.jar: _CookieJar = _CookieJar(self.policy)
        # Swap the stdlib jar's lock for a no-op one (see _DummyLock).
        self.jar._cookies_lock = _DummyLock()  # type: ignore[attr-defined]
        # Expired cookies are purged once per this many add_cookie_header calls.
        self.check_expired_frequency: int = check_expired_frequency
        self.processed: int = 0
    def extract_cookies(self, response: Response, request: Request) -> None:
        """Store the cookies set by *response*, subject to the policy."""
        wreq = WrappedRequest(request)
        wrsp = WrappedResponse(response)
        self.jar.extract_cookies(wrsp, wreq)  # type: ignore[arg-type]
    def add_cookie_header(self, request: Request) -> None:
        """Add a ``Cookie`` header to *request* with all matching cookies.

        Leaves an existing Cookie header untouched; periodically purges
        expired cookies (every ``check_expired_frequency`` calls).
        """
        wreq = WrappedRequest(request)
        self.policy._now = self.jar._now = int(time.time())  # type: ignore[attr-defined]
        # the cookiejar implementation iterates through all domains
        # instead we restrict to potential matches on the domain
        req_host = urlparse_cached(request).hostname
        if not req_host:
            return
        if not IPV4_RE.search(req_host):
            hosts = potential_domain_matches(req_host)
            if "." not in req_host:
                # Dotless hostnames are additionally tried with ".local".
                hosts += [req_host + ".local"]
        else:
            # IP addresses can only match themselves.
            hosts = [req_host]
        cookies = []
        for host in hosts:
            if host in self.jar._cookies:  # type: ignore[attr-defined]
                cookies += self.jar._cookies_for_domain(host, wreq)  # type: ignore[attr-defined]
        attrs = self.jar._cookie_attrs(cookies)  # type: ignore[attr-defined]
        if attrs and not wreq.has_header("Cookie"):
            wreq.add_unredirected_header("Cookie", "; ".join(attrs))
        self.processed += 1
        if self.processed % self.check_expired_frequency == 0:
            # This is still quite inefficient for large number of cookies
            self.jar.clear_expired_cookies()
    @property
    def _cookies(self) -> dict[str, dict[str, dict[str, Cookie]]]:
        # Underlying jar's domain -> path -> name -> Cookie mapping.
        return self.jar._cookies  # type: ignore[attr-defined,no-any-return]
    def clear_session_cookies(self) -> None:
        """Delegate to the underlying jar's clear_session_cookies()."""
        return self.jar.clear_session_cookies()
    def clear(
        self,
        domain: str | None = None,
        path: str | None = None,
        name: str | None = None,
    ) -> None:
        """Clear cookies, optionally narrowed by domain, path and name."""
        self.jar.clear(domain, path, name)
    def __iter__(self) -> Iterator[Cookie]:
        return iter(self.jar)
    def __len__(self) -> int:
        return len(self.jar)
    def set_policy(self, pol: CookiePolicy) -> None:
        self.jar.set_policy(pol)
    def make_cookies(self, response: Response, request: Request) -> Sequence[Cookie]:
        """Return the Cookie objects *response* would set, without storing them."""
        wreq = WrappedRequest(request)
        wrsp = WrappedResponse(response)
        return self.jar.make_cookies(wrsp, wreq)  # type: ignore[arg-type]
    def set_cookie(self, cookie: Cookie) -> None:
        self.jar.set_cookie(cookie)
    def set_cookie_if_ok(self, cookie: Cookie, request: Request) -> None:
        """Store *cookie* only if the policy allows it for *request*."""
        self.jar.set_cookie_if_ok(cookie, WrappedRequest(request))  # type: ignore[arg-type]
def potential_domain_matches(domain: str) -> list[str]:
    """Potential domain matches for a cookie

    >>> potential_domain_matches('www.example.com')
    ['www.example.com', 'example.com', '.www.example.com', '.example.com']
    """
    bare = [domain]
    try:
        # Walk suffixes between the first and last dot: "www.example.com"
        # also yields "example.com" (but never the bare TLD "com").
        pos = domain.index(".") + 1
        last_dot = domain.rindex(".")
        while pos < last_dot:
            bare.append(domain[pos:])
            pos = domain.index(".", pos) + 1
    except ValueError:
        # No dot at all: only the domain itself can match.
        pass
    dotted = [f".{suffix}" for suffix in bare]
    return bare + dotted
class _DummyLock:
def acquire(self) -> None:
pass
def release(self) -> None:
pass
class WrappedRequest:
    """Wraps a scrapy Request class with methods defined by urllib2.Request class to interact with CookieJar class

    see http://docs.python.org/library/urllib2.html#urllib2.Request
    """

    def __init__(self, request: Request):
        self.request = request

    def get_full_url(self) -> str:
        """Return the complete request URL."""
        return self.request.url

    def get_host(self) -> str:
        """Return the network location (host[:port]) of the request URL."""
        return urlparse_cached(self.request).netloc

    def get_type(self) -> str:
        """Return the URL scheme (e.g. "http")."""
        return urlparse_cached(self.request).scheme

    def is_unverifiable(self) -> bool:
        """Unverifiable should indicate whether the request is unverifiable, as defined by RFC 2965.

        It defaults to False. An unverifiable request is one whose URL the user did not have the
        option to approve. For example, if the request is for an image in an
        HTML document, and the user had no option to approve the automatic
        fetching of the image, this should be true.
        """
        return cast("bool", self.request.meta.get("is_unverifiable", False))

    @property
    def full_url(self) -> str:
        return self.get_full_url()

    @property
    def host(self) -> str:
        return self.get_host()

    @property
    def type(self) -> str:
        return self.get_type()

    @property
    def unverifiable(self) -> bool:
        return self.is_unverifiable()

    @property
    def origin_req_host(self) -> str:
        return cast("str", urlparse_cached(self.request).hostname)

    def has_header(self, name: str) -> bool:
        """True if the request already carries header *name*."""
        return name in self.request.headers

    def get_header(self, name: str, default: str | None = None) -> str | None:
        """Return header *name* decoded to str, or *default* when absent."""
        raw = self.request.headers.get(name, default)
        if raw is None:
            return None
        return to_unicode(raw, errors="replace")

    def header_items(self) -> list[tuple[str, list[str]]]:
        """Return all headers as (name, [values...]) pairs, decoded to str."""
        pairs = []
        for raw_key, raw_values in self.request.headers.items():
            key = to_unicode(raw_key, errors="replace")
            values = [to_unicode(raw, errors="replace") for raw in raw_values]
            pairs.append((key, values))
        return pairs

    def add_unredirected_header(self, name: str, value: str) -> None:
        """Append *value* for header *name* (http.cookiejar calls this to
        set the Cookie header)."""
        self.request.headers.appendlist(name, value)
class WrappedResponse:
    """Adapt a scrapy Response to the interface http.cookiejar expects from
    urllib response objects."""

    def __init__(self, response: Response):
        self.response = response

    def info(self) -> Self:
        # cookiejar calls response.info() to reach the headers object; this
        # wrapper plays both roles, so it returns itself.
        return self

    def get_all(self, name: str, default: Any = None) -> list[str]:
        """Return every value of header *name*, decoded to str."""
        raw_values = self.response.headers.getlist(name)
        return [to_unicode(raw, errors="replace") for raw in raw_values]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/__init__.py | scrapy/http/__init__.py | """
Module containing all HTTP related classes
Use this module (instead of the more specific ones) when importing Headers,
Request and Response outside this module.
"""
from scrapy.http.headers import Headers
from scrapy.http.request import Request
from scrapy.http.request.form import FormRequest
from scrapy.http.request.json_request import JsonRequest
from scrapy.http.request.rpc import XmlRpcRequest
from scrapy.http.response import Response
from scrapy.http.response.html import HtmlResponse
from scrapy.http.response.json import JsonResponse
from scrapy.http.response.text import TextResponse
from scrapy.http.response.xml import XmlResponse
__all__ = [
"FormRequest",
"Headers",
"HtmlResponse",
"JsonRequest",
"JsonResponse",
"Request",
"Response",
"TextResponse",
"XmlResponse",
"XmlRpcRequest",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/headers.py | scrapy/http/headers.py | from __future__ import annotations
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, AnyStr, TypeAlias, cast
from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaseInsensitiveDict, CaselessDict
from scrapy.utils.python import to_unicode
if TYPE_CHECKING:
from collections.abc import Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
_RawValue: TypeAlias = bytes | str | int
# isn't fully compatible typing-wise with either dict or CaselessDict,
# but it needs refactoring anyway, see also https://github.com/scrapy/scrapy/pull/5146
class Headers(CaselessDict):
    """Case insensitive http headers dictionary.

    Values are stored internally as lists of bytes, one entry per header
    occurrence: ``__getitem__``/``get`` return only the *last* value, while
    ``getlist`` returns all of them. Keys are title-cased and keys/values
    are converted to bytes using ``encoding`` (utf-8 by default).
    """
    def __init__(
        self,
        seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        encoding: str = "utf-8",
    ):
        # Encoding used whenever str/int keys or values are turned into bytes.
        self.encoding: str = encoding
        super().__init__(seq)
    def update(  # type: ignore[override]
        self, seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]]
    ) -> None:
        """Merge *seq* in; repeated keys within *seq* have their values
        concatenated into one list before the base-class update."""
        seq = seq.items() if isinstance(seq, Mapping) else seq
        iseq: dict[bytes, list[bytes]] = {}
        for k, v in seq:
            iseq.setdefault(self.normkey(k), []).extend(self.normvalue(v))
        super().update(iseq)
    def normkey(self, key: AnyStr) -> bytes:  # type: ignore[override]
        """Normalize key to bytes"""
        # title() gives the conventional Header-Name capitalization.
        return self._tobytes(key.title())
    def normvalue(self, value: _RawValue | Iterable[_RawValue]) -> list[bytes]:
        """Normalize values to a list of bytes (None becomes an empty list,
        scalars become single-element lists)."""
        _value: Iterable[_RawValue]
        if value is None:
            _value = []
        elif isinstance(value, (str, bytes)):
            _value = [value]
        elif hasattr(value, "__iter__"):
            _value = value
        else:
            _value = [value]
        return [self._tobytes(x) for x in _value]
    def _tobytes(self, x: _RawValue) -> bytes:
        # bytes pass through; str/int are encoded with self.encoding.
        if isinstance(x, bytes):
            return x
        if isinstance(x, str):
            return x.encode(self.encoding)
        if isinstance(x, int):
            return str(x).encode(self.encoding)
        raise TypeError(f"Unsupported value type: {type(x)}")
    def __getitem__(self, key: AnyStr) -> bytes | None:
        """Return the last stored value for *key*; None when the stored list
        is empty (a missing key still raises KeyError from the base class)."""
        try:
            return cast("list[bytes]", super().__getitem__(key))[-1]
        except IndexError:
            return None
    def get(self, key: AnyStr, def_val: Any = None) -> bytes | None:
        """Return the last value for *key*, falling back to *def_val*."""
        try:
            return cast("list[bytes]", super().get(key, def_val))[-1]
        except IndexError:
            return None
    def getlist(self, key: AnyStr, def_val: Any = None) -> list[bytes]:
        """Return all values for *key*; a missing key yields the normalized
        *def_val* (if given) or an empty list."""
        try:
            return cast("list[bytes]", super().__getitem__(key))
        except KeyError:
            if def_val is not None:
                return self.normvalue(def_val)
            return []
    def setlist(self, key: AnyStr, list_: Iterable[_RawValue]) -> None:
        """Replace all values stored for *key*."""
        self[key] = list_
    def setlistdefault(
        self, key: AnyStr, default_list: Iterable[_RawValue] = ()
    ) -> Any:
        """Store *default_list* for *key* only when the key is absent."""
        return self.setdefault(key, default_list)
    def appendlist(self, key: AnyStr, value: Iterable[_RawValue]) -> None:
        """Append value(s) for *key*, keeping any existing ones."""
        lst = self.getlist(key)
        lst.extend(self.normvalue(value))
        self[key] = lst
    def items(self) -> Iterable[tuple[bytes, list[bytes]]]:  # type: ignore[override]
        """Iterate over (key, all-values-list) pairs."""
        return ((k, self.getlist(k)) for k in self.keys())
    def values(self) -> list[bytes | None]:  # type: ignore[override]
        """Return the last value of every header."""
        return [
            self[k]
            for k in self.keys()  # pylint: disable=consider-using-dict-items
        ]
    def to_string(self) -> bytes:
        """Serialize to a raw HTTP header block."""
        return headers_dict_to_raw(self)
    def to_unicode_dict(self) -> CaseInsensitiveDict:
        """Return headers as a CaseInsensitiveDict with str keys
        and str values. Multiple values are joined with ','.
        """
        return CaseInsensitiveDict(
            (
                to_unicode(key, encoding=self.encoding),
                to_unicode(b",".join(value), encoding=self.encoding),
            )
            for key, value in self.items()
        )
    def __copy__(self) -> Self:
        return self.__class__(self)
    # Alias so copy.copy(headers) and headers.copy() behave identically.
    copy = __copy__
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/response/html.py | scrapy/http/response/html.py | """
This module implements the HtmlResponse class which adds encoding
discovering through HTML encoding declarations to the TextResponse class.
See documentation in docs/topics/request-response.rst
"""
from scrapy.http.response.text import TextResponse
class HtmlResponse(TextResponse):
    """Response subclass used for HTML content; adds no behavior of its own
    (encoding discovery via HTML declarations lives in TextResponse)."""
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/response/xml.py | scrapy/http/response/xml.py | """
This module implements the XmlResponse class which adds encoding
discovering through XML encoding declarations to the TextResponse class.
See documentation in docs/topics/request-response.rst
"""
from scrapy.http.response.text import TextResponse
class XmlResponse(TextResponse):
    """Response subclass used for XML content; adds no behavior of its own
    (encoding discovery via XML declarations lives in TextResponse)."""
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/response/__init__.py | scrapy/http/response/__init__.py | """
This module implements the Response class which is used to represent HTTP
responses in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, AnyStr, TypeVar, overload
from urllib.parse import urljoin
from scrapy.exceptions import NotSupported
from scrapy.http.headers import Headers
from scrapy.http.request import Request
from scrapy.link import Link
from scrapy.utils.trackref import object_ref
if TYPE_CHECKING:
from collections.abc import Callable, Iterable, Mapping
from ipaddress import IPv4Address, IPv6Address
from twisted.internet.ssl import Certificate
from twisted.python.failure import Failure
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.http.request import CallbackT, CookiesT
from scrapy.selector import SelectorList
ResponseTypeVar = TypeVar("ResponseTypeVar", bound="Response")
class Response(object_ref):
    """An object that represents an HTTP response, which is usually
    downloaded (by the Downloader) and fed to the Spiders for processing.

    This base class holds raw bytes only; use :class:`TextResponse` (or its
    subclasses) for decoded-text access, selectors, etc.
    """
    attributes: tuple[str, ...] = (
        "url",
        "status",
        "headers",
        "body",
        "flags",
        "request",
        "certificate",
        "ip_address",
        "protocol",
    )
    """A tuple of :class:`str` objects containing the name of all public
    attributes of the class that are also keyword parameters of the
    ``__init__()`` method.

    Currently used by :meth:`Response.replace`.
    """
    def __init__(
        self,
        url: str,
        status: int = 200,
        headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        body: bytes = b"",
        flags: list[str] | None = None,
        request: Request | None = None,
        certificate: Certificate | None = None,
        ip_address: IPv4Address | IPv6Address | None = None,
        protocol: str | None = None,
    ):
        self.headers: Headers = Headers(headers or {})
        self.status: int = int(status)
        self._set_body(body)
        self._set_url(url)
        self.request: Request | None = request
        # Copy the caller's list so later mutation of it doesn't leak in.
        self.flags: list[str] = [] if flags is None else list(flags)
        self.certificate: Certificate | None = certificate
        self.ip_address: IPv4Address | IPv6Address | None = ip_address
        self.protocol: str | None = protocol
    @property
    def cb_kwargs(self) -> dict[str, Any]:
        """Shortcut to ``self.request.cb_kwargs``.

        :raises AttributeError: if this response has no associated request.
        """
        try:
            return self.request.cb_kwargs  # type: ignore[union-attr]
        except AttributeError:
            raise AttributeError(
                "Response.cb_kwargs not available, this response "
                "is not tied to any request"
            )
    @property
    def meta(self) -> dict[str, Any]:
        """Shortcut to ``self.request.meta``.

        :raises AttributeError: if this response has no associated request.
        """
        try:
            return self.request.meta  # type: ignore[union-attr]
        except AttributeError:
            raise AttributeError(
                "Response.meta not available, this response is not tied to any request"
            )
    @property
    def url(self) -> str:
        """The response URL (read-only; use :meth:`replace` to change it)."""
        return self._url
    def _set_url(self, url: str) -> None:
        # Validate at construction time; only str URLs are accepted.
        if isinstance(url, str):
            self._url: str = url
        else:
            raise TypeError(
                f"{type(self).__name__} url must be str, got {type(url).__name__}"
            )
    @property
    def body(self) -> bytes:
        """The raw response body as bytes (read-only)."""
        return self._body
    def _set_body(self, body: bytes | None) -> None:
        # None is normalized to b""; anything non-bytes is rejected.
        if body is None:
            self._body = b""
        elif not isinstance(body, bytes):
            raise TypeError(
                "Response body must be bytes. "
                "If you want to pass unicode body use TextResponse "
                "or HtmlResponse."
            )
        else:
            self._body = body
    def __repr__(self) -> str:
        return f"<{self.status} {self.url}>"
    def copy(self) -> Self:
        """Return a copy of this Response"""
        return self.replace()
    @overload
    def replace(
        self, *args: Any, cls: type[ResponseTypeVar], **kwargs: Any
    ) -> ResponseTypeVar: ...
    @overload
    def replace(self, *args: Any, cls: None = None, **kwargs: Any) -> Self: ...
    def replace(
        self, *args: Any, cls: type[Response] | None = None, **kwargs: Any
    ) -> Response:
        """Create a new Response with the same attributes except for those given new values"""
        # Every attribute listed in `attributes` is carried over unless
        # explicitly overridden in kwargs.
        for x in self.attributes:
            kwargs.setdefault(x, getattr(self, x))
        if cls is None:
            cls = self.__class__
        return cls(*args, **kwargs)
    def urljoin(self, url: str) -> str:
        """Join this Response's url with a possible relative url to form an
        absolute interpretation of the latter."""
        return urljoin(self.url, url)
    @property
    def text(self) -> str:
        """For subclasses of TextResponse, this will return the body
        as str
        """
        raise AttributeError("Response content isn't text")
    def css(self, *a: Any, **kw: Any) -> SelectorList:
        """Shortcut method implemented only by responses whose content
        is text (subclasses of TextResponse).
        """
        raise NotSupported("Response content isn't text")
    def jmespath(self, *a: Any, **kw: Any) -> SelectorList:
        """Shortcut method implemented only by responses whose content
        is text (subclasses of TextResponse).
        """
        raise NotSupported("Response content isn't text")
    def xpath(self, *a: Any, **kw: Any) -> SelectorList:
        """Shortcut method implemented only by responses whose content
        is text (subclasses of TextResponse).
        """
        raise NotSupported("Response content isn't text")
    def follow(
        self,
        url: str | Link,
        callback: CallbackT | None = None,
        method: str = "GET",
        headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        body: bytes | str | None = None,
        cookies: CookiesT | None = None,
        meta: dict[str, Any] | None = None,
        encoding: str | None = "utf-8",
        priority: int = 0,
        dont_filter: bool = False,
        errback: Callable[[Failure], Any] | None = None,
        cb_kwargs: dict[str, Any] | None = None,
        flags: list[str] | None = None,
    ) -> Request:
        """
        Return a :class:`~.Request` instance to follow a link ``url``.
        It accepts the same arguments as ``Request.__init__()`` method,
        but ``url`` can be a relative URL or a :class:`~scrapy.link.Link` object,
        not only an absolute URL.

        :class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
        method which supports selectors in addition to absolute/relative URLs
        and Link objects.

        .. versionadded:: 2.0
           The *flags* parameter.

        :raises ValueError: if ``url`` or ``encoding`` is None.
        """
        if encoding is None:
            raise ValueError("encoding can't be None")
        if isinstance(url, Link):
            url = url.url
        elif url is None:
            raise ValueError("url can't be None")
        # Relative URLs are resolved against this response's URL.
        url = self.urljoin(url)
        return Request(
            url=url,
            callback=callback,
            method=method,
            headers=headers,
            body=body,
            cookies=cookies,
            meta=meta,
            encoding=encoding,
            priority=priority,
            dont_filter=dont_filter,
            errback=errback,
            cb_kwargs=cb_kwargs,
            flags=flags,
        )
    def follow_all(
        self,
        urls: Iterable[str | Link],
        callback: CallbackT | None = None,
        method: str = "GET",
        headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        body: bytes | str | None = None,
        cookies: CookiesT | None = None,
        meta: dict[str, Any] | None = None,
        encoding: str | None = "utf-8",
        priority: int = 0,
        dont_filter: bool = False,
        errback: Callable[[Failure], Any] | None = None,
        cb_kwargs: dict[str, Any] | None = None,
        flags: list[str] | None = None,
    ) -> Iterable[Request]:
        """
        .. versionadded:: 2.0

        Return an iterable of :class:`~.Request` instances to follow all links
        in ``urls``. It accepts the same arguments as ``Request.__init__()`` method,
        but elements of ``urls`` can be relative URLs or :class:`~scrapy.link.Link` objects,
        not only absolute URLs.

        :class:`~.TextResponse` provides a :meth:`~.TextResponse.follow_all`
        method which supports selectors in addition to absolute/relative URLs
        and Link objects.
        """
        if not hasattr(urls, "__iter__"):
            raise TypeError("'urls' argument must be an iterable")
        # Lazily delegate to follow() for each URL.
        return (
            self.follow(
                url=url,
                callback=callback,
                method=method,
                headers=headers,
                body=body,
                cookies=cookies,
                meta=meta,
                encoding=encoding,
                priority=priority,
                dont_filter=dont_filter,
                errback=errback,
                cb_kwargs=cb_kwargs,
                flags=flags,
            )
            for url in urls
        )
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/response/json.py | scrapy/http/response/json.py | """
This module implements the JsonResponse class that is used when the response
has a JSON MIME type in its Content-Type header.
See documentation in docs/topics/request-response.rst
"""
from scrapy.http.response.text import TextResponse
class JsonResponse(TextResponse):
    """Response subclass used when the Content-Type header declares a JSON
    MIME type; adds no behavior beyond TextResponse."""
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/response/text.py | scrapy/http/response/text.py | """
This module implements the TextResponse class which adds encoding handling and
discovering (through HTTP headers) to base Response class.
See documentation in docs/topics/request-response.rst
"""
from __future__ import annotations
import json
from contextlib import suppress
from typing import TYPE_CHECKING, Any, AnyStr, cast
from urllib.parse import urljoin
import parsel
from w3lib.encoding import (
html_body_declared_encoding,
html_to_unicode,
http_content_type_encoding,
read_bom,
resolve_encoding,
)
from w3lib.html import strip_html5_whitespace
from scrapy.http.response import Response
from scrapy.utils.python import memoizemethod_noargs, to_unicode
from scrapy.utils.response import get_base_url
if TYPE_CHECKING:
from collections.abc import Callable, Iterable, Mapping
from twisted.python.failure import Failure
from scrapy.http.request import CallbackT, CookiesT, Request
from scrapy.link import Link
from scrapy.selector import Selector, SelectorList
_NONE = object()
class TextResponse(Response):
    """A :class:`Response` subclass that adds encoding discovery (from an
    explicit argument, a byte-order mark, the ``Content-Type`` header, or an
    in-body declaration) plus convenience accessors for the decoded text,
    JSON, and parsel-based selectors.
    """

    # Fallback passed to html_to_unicode when nothing declares an encoding.
    _DEFAULT_ENCODING = "ascii"
    # Cache for json(); the _NONE sentinel distinguishes "not decoded yet"
    # from a JSON document that legitimately decodes to None.
    _cached_decoded_json = _NONE
    attributes: tuple[str, ...] = (*Response.attributes, "encoding")

    def __init__(self, *args: Any, **kwargs: Any):
        # Encoding explicitly passed by the caller; None means "detect it
        # from BOM/headers/body on first access".
        self._encoding: str | None = kwargs.pop("encoding", None)
        self._cached_benc: str | None = None
        self._cached_ubody: str | None = None
        self._cached_selector: Selector | None = None
        super().__init__(*args, **kwargs)

    def _set_body(self, body: str | bytes | None) -> None:
        """Store the body as bytes; ``str`` input requires a declared encoding."""
        self._body: bytes = b"" # used by encoding detection
        if isinstance(body, str):
            if self._encoding is None:
                raise TypeError(
                    "Cannot convert unicode body - "
                    f"{type(self).__name__} has no encoding"
                )
            self._body = body.encode(self._encoding)
        else:
            super()._set_body(body)

    @property
    def encoding(self) -> str:
        """The encoding of this response: declared if known, else inferred."""
        return self._declared_encoding() or self._body_inferred_encoding()

    def _declared_encoding(self) -> str | None:
        # Precedence: constructor argument > byte-order mark > HTTP headers
        # > declaration inside the body (e.g. <meta charset=...>).
        return (
            self._encoding
            or self._bom_encoding()
            or self._headers_encoding()
            or self._body_declared_encoding()
        )

    def json(self) -> Any:
        """
        .. versionadded:: 2.2
        Deserialize a JSON document to a Python object.
        """
        # Decoded at most once; see _cached_decoded_json above.
        if self._cached_decoded_json is _NONE:
            self._cached_decoded_json = json.loads(self.body)
        return self._cached_decoded_json

    @property
    def text(self) -> str:
        """Body as unicode"""
        # access self.encoding before _cached_ubody to make sure
        # _body_inferred_encoding is called
        benc = self.encoding
        if self._cached_ubody is None:
            charset = f"charset={benc}"
            self._cached_ubody = html_to_unicode(charset, self.body)[1]
        return self._cached_ubody

    def urljoin(self, url: str) -> str:
        """Join this Response's url with a possible relative url to form an
        absolute interpretation of the latter."""
        return urljoin(get_base_url(self), url)

    @memoizemethod_noargs
    def _headers_encoding(self) -> str | None:
        # Encoding declared in the Content-Type HTTP header, if any.
        content_type = cast("bytes", self.headers.get(b"Content-Type", b""))
        return http_content_type_encoding(to_unicode(content_type, encoding="latin-1"))

    def _body_inferred_encoding(self) -> str:
        """Detect the encoding from the body bytes, caching both the detected
        encoding and the decoded text (html_to_unicode yields both)."""
        if self._cached_benc is None:
            content_type = to_unicode(
                cast("bytes", self.headers.get(b"Content-Type", b"")),
                encoding="latin-1",
            )
            benc, ubody = html_to_unicode(
                content_type,
                self.body,
                auto_detect_fun=self._auto_detect_fun,
                default_encoding=self._DEFAULT_ENCODING,
            )
            self._cached_benc = benc
            self._cached_ubody = ubody
        return self._cached_benc

    def _auto_detect_fun(self, text: bytes) -> str | None:
        # Return the first candidate encoding that decodes the body without
        # errors, normalized through resolve_encoding; None if none do.
        for enc in (self._DEFAULT_ENCODING, "utf-8", "cp1252"):
            try:
                text.decode(enc)
            except UnicodeError:
                continue
            return resolve_encoding(enc)
        return None

    @memoizemethod_noargs
    def _body_declared_encoding(self) -> str | None:
        # Encoding declared inside the body itself (e.g. HTML <meta>).
        return html_body_declared_encoding(self.body)

    @memoizemethod_noargs
    def _bom_encoding(self) -> str | None:
        # Encoding indicated by a byte-order mark at the start of the body.
        return read_bom(self.body)[0]

    @property
    def selector(self) -> Selector:
        """A lazily-built, cached :class:`~scrapy.Selector` for this response."""
        # circular import
        from scrapy.selector import Selector # noqa: PLC0415
        if self._cached_selector is None:
            self._cached_selector = Selector(self)
        return self._cached_selector

    def jmespath(self, query: str, **kwargs: Any) -> SelectorList:
        """Shortcut for ``response.selector.jmespath(query, ...)``."""
        if not hasattr(self.selector, "jmespath"):
            raise AttributeError(
                "Please install parsel >= 1.8.1 to get jmespath support"
            )
        return cast("SelectorList", self.selector.jmespath(query, **kwargs))

    def xpath(self, query: str, **kwargs: Any) -> SelectorList:
        """Shortcut for ``response.selector.xpath(query, ...)``."""
        return cast("SelectorList", self.selector.xpath(query, **kwargs))

    def css(self, query: str) -> SelectorList:
        """Shortcut for ``response.selector.css(query)``."""
        return cast("SelectorList", self.selector.css(query))

    def follow(
        self,
        url: str | Link | parsel.Selector,
        callback: CallbackT | None = None,
        method: str = "GET",
        headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        body: bytes | str | None = None,
        cookies: CookiesT | None = None,
        meta: dict[str, Any] | None = None,
        encoding: str | None = None,
        priority: int = 0,
        dont_filter: bool = False,
        errback: Callable[[Failure], Any] | None = None,
        cb_kwargs: dict[str, Any] | None = None,
        flags: list[str] | None = None,
    ) -> Request:
        """
        Return a :class:`~.Request` instance to follow a link ``url``.
        It accepts the same arguments as ``Request.__init__()`` method,
        but ``url`` can be not only an absolute URL, but also

        * a relative URL
        * a :class:`~scrapy.link.Link` object, e.g. the result of
          :ref:`topics-link-extractors`
        * a :class:`~scrapy.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
          ``response.css('a.my_link')[0]``
        * an attribute :class:`~scrapy.Selector` (not SelectorList), e.g.
          ``response.css('a::attr(href)')[0]`` or
          ``response.xpath('//img/@src')[0]``

        See :ref:`response-follow-example` for usage examples.
        """
        if isinstance(url, parsel.Selector):
            url = _url_from_selector(url)
        elif isinstance(url, parsel.SelectorList):
            raise ValueError("SelectorList is not supported")
        # Default to this response's encoding so the new request encodes its
        # URL/body consistently with the page it was extracted from.
        encoding = self.encoding if encoding is None else encoding
        return super().follow(
            url=url,
            callback=callback,
            method=method,
            headers=headers,
            body=body,
            cookies=cookies,
            meta=meta,
            encoding=encoding,
            priority=priority,
            dont_filter=dont_filter,
            errback=errback,
            cb_kwargs=cb_kwargs,
            flags=flags,
        )

    def follow_all(
        self,
        urls: Iterable[str | Link] | parsel.SelectorList | None = None,
        callback: CallbackT | None = None,
        method: str = "GET",
        headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        body: bytes | str | None = None,
        cookies: CookiesT | None = None,
        meta: dict[str, Any] | None = None,
        encoding: str | None = None,
        priority: int = 0,
        dont_filter: bool = False,
        errback: Callable[[Failure], Any] | None = None,
        cb_kwargs: dict[str, Any] | None = None,
        flags: list[str] | None = None,
        css: str | None = None,
        xpath: str | None = None,
    ) -> Iterable[Request]:
        """
        A generator that produces :class:`~.Request` instances to follow all
        links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s
        ``__init__()`` method, except that each ``urls`` element does not need to be
        an absolute URL, it can be any of the following:

        * a relative URL
        * a :class:`~scrapy.link.Link` object, e.g. the result of
          :ref:`topics-link-extractors`
        * a :class:`~scrapy.Selector` object for a ``<link>`` or ``<a>`` element, e.g.
          ``response.css('a.my_link')[0]``
        * an attribute :class:`~scrapy.Selector` (not SelectorList), e.g.
          ``response.css('a::attr(href)')[0]`` or
          ``response.xpath('//img/@src')[0]``

        In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction
        within the ``follow_all()`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted).

        Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or
        using the ``css`` or ``xpath`` parameters, this method will not produce requests for
        selectors from which links cannot be obtained (for instance, anchor tags without an
        ``href`` attribute)
        """
        arguments = [x for x in (urls, css, xpath) if x is not None]
        if len(arguments) != 1:
            raise ValueError(
                "Please supply exactly one of the following arguments: urls, css, xpath"
            )
        if not urls:
            if css:
                urls = self.css(css)
            if xpath:
                urls = self.xpath(xpath)
        if isinstance(urls, parsel.SelectorList):
            selectors = urls
            urls = []
            for sel in selectors:
                # Selectors that yield no URL (e.g. <a> with no href) are
                # silently skipped, as documented above.
                with suppress(_InvalidSelector):
                    urls.append(_url_from_selector(sel))
        return super().follow_all(
            urls=cast("Iterable[str | Link]", urls),
            callback=callback,
            method=method,
            headers=headers,
            body=body,
            cookies=cookies,
            meta=meta,
            encoding=encoding,
            priority=priority,
            dont_filter=dont_filter,
            errback=errback,
            cb_kwargs=cb_kwargs,
            flags=flags,
        )
class _InvalidSelector(ValueError):
"""
Raised when a URL cannot be obtained from a Selector
"""
def _url_from_selector(sel: parsel.Selector) -> str:
    """Return the URL carried by *sel*, stripped of HTML5 whitespace.

    Accepts either an attribute selector (whose root is a plain string) or an
    element selector for an ``<a>``/``<link>`` tag with an ``href`` attribute.

    Raises:
        _InvalidSelector: when no URL can be extracted from *sel*.
    """
    root = sel.root
    if isinstance(root, str):
        # e.g. the result of an ::attr(href) or @src query
        return strip_html5_whitespace(root)
    if not hasattr(root, "tag"):
        raise _InvalidSelector(f"Unsupported selector: {sel}")
    if root.tag not in ("a", "link"):
        raise _InvalidSelector(
            f"Only <a> and <link> elements are supported; got <{root.tag}>"
        )
    url = root.get("href")
    if url is None:
        raise _InvalidSelector(f"<{root.tag}> element has no href attribute: {sel}")
    return strip_html5_whitespace(url)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/request/rpc.py | scrapy/http/request/rpc.py | """
This module implements the XmlRpcRequest class which is a more convenient class
(than Request) to generate xml-rpc requests.
See documentation in docs/topics/request-response.rst
"""
from __future__ import annotations
import xmlrpc.client as xmlrpclib
from typing import Any
import defusedxml.xmlrpc
from scrapy.http.request import Request
from scrapy.utils.python import get_func_args
defusedxml.xmlrpc.monkey_patch()
DUMPS_ARGS = get_func_args(xmlrpclib.dumps)
class XmlRpcRequest(Request):
    """A :class:`Request` for XML-RPC endpoints.

    Serializes ``params`` (and any other ``xmlrpclib.dumps`` keyword
    arguments) into the request body, defaulting to a POST request with a
    ``text/xml`` Content-Type and duplicate filtering disabled.
    """

    def __init__(self, *args: Any, encoding: str | None = None, **kwargs: Any):
        # Build the body from the xmlrpclib.dumps-compatible kwargs unless an
        # explicit body was given.
        if "body" not in kwargs and "params" in kwargs:
            kw = {k: kwargs.pop(k) for k in DUMPS_ARGS if k in kwargs}
            kwargs["body"] = xmlrpclib.dumps(**kw)
        # spec defines that requests must use POST method
        kwargs.setdefault("method", "POST")
        # XML-RPC clients typically query the same URL multiple times, so
        # disable duplicate request filtering by default
        kwargs.setdefault("dont_filter", True)
        # restore encoding (popped here so it is not consumed by dumps kwargs)
        if encoding is not None:
            kwargs["encoding"] = encoding
        super().__init__(*args, **kwargs)
        self.headers.setdefault("Content-Type", "text/xml")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/request/form.py | scrapy/http/request/form.py | """
This module implements the FormRequest class which is a more convenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, TypeAlias, cast
from urllib.parse import urlencode, urljoin, urlsplit, urlunsplit
from parsel.csstranslator import HTMLTranslator
from w3lib.html import strip_html5_whitespace
from scrapy.http.request import Request
from scrapy.utils.python import is_listlike, to_bytes
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from lxml.html import (
FormElement,
InputElement,
MultipleSelectOptions,
SelectElement,
TextareaElement,
)
from typing_extensions import Self
from scrapy.http.response.text import TextResponse
FormdataVType: TypeAlias = str | Iterable[str]
FormdataKVType: TypeAlias = tuple[str, FormdataVType]
FormdataType: TypeAlias = dict[str, FormdataVType] | list[FormdataKVType] | None
class FormRequest(Request):
    """A :class:`Request` that encodes form data, either into the body
    (POST, ``application/x-www-form-urlencoded``) or into the URL query
    string (GET)."""

    # Form methods other than these are coerced to GET in from_response().
    valid_form_methods = ["GET", "POST"]

    def __init__(
        self, *args: Any, formdata: FormdataType = None, **kwargs: Any
    ) -> None:
        # Form submissions default to POST when a method is not given.
        if formdata and kwargs.get("method") is None:
            kwargs["method"] = "POST"
        super().__init__(*args, **kwargs)
        if formdata:
            items = formdata.items() if isinstance(formdata, dict) else formdata
            form_query_str = _urlencode(items, self.encoding)
            if self.method == "POST":
                self.headers.setdefault(
                    b"Content-Type", b"application/x-www-form-urlencoded"
                )
                self._set_body(form_query_str)
            else:
                # GET: replace the URL's query string with the encoded data.
                self._set_url(
                    urlunsplit(urlsplit(self.url)._replace(query=form_query_str))
                )

    @classmethod
    def from_response(
        cls,
        response: TextResponse,
        formname: str | None = None,
        formid: str | None = None,
        formnumber: int = 0,
        formdata: FormdataType = None,
        clickdata: dict[str, str | int] | None = None,
        dont_click: bool = False,
        formxpath: str | None = None,
        formcss: str | None = None,
        **kwargs: Any,
    ) -> Self:
        """Build a FormRequest pre-filled from a ``<form>`` in *response*.

        The form is located via ``formname``/``formid``/``formxpath``/
        ``formcss``/``formnumber`` (see ``_get_form``); its input values are
        merged with *formdata*, and the form's action URL and method are used
        unless overridden via ``url``/``method`` keyword arguments.
        """
        kwargs.setdefault("encoding", response.encoding)
        if formcss is not None:
            formxpath = HTMLTranslator().css_to_xpath(formcss)
        form = _get_form(response, formname, formid, formnumber, formxpath)
        formdata = _get_inputs(form, formdata, dont_click, clickdata)
        url = _get_form_url(form, kwargs.pop("url", None))
        method = kwargs.pop("method", form.method)
        if method is not None:
            method = method.upper()
            # Anything other than GET/POST falls back to GET.
            if method not in cls.valid_form_methods:
                method = "GET"
        return cls(url=url, method=method, formdata=formdata, **kwargs)
def _get_form_url(form: FormElement, url: str | None) -> str:
assert form.base_url is not None # typing
if url is None:
action = form.get("action")
if action is None:
return form.base_url
return urljoin(form.base_url, strip_html5_whitespace(action))
return urljoin(form.base_url, url)
def _urlencode(seq: Iterable[FormdataKVType], enc: str) -> str:
    """Encode (key, value-or-values) pairs as an
    ``application/x-www-form-urlencoded`` string, using *enc* for bytes
    conversion. List-like values produce one pair per element."""
    pairs: list[tuple[bytes, bytes]] = []
    for key, val in seq:
        if is_listlike(val):
            candidates = cast("Iterable[str]", val)
        else:
            candidates = [cast("str", val)]
        encoded_key = to_bytes(key, enc)
        for item in candidates:
            pairs.append((encoded_key, to_bytes(item, enc)))
    return urlencode(pairs, doseq=True)
def _get_form(
    response: TextResponse,
    formname: str | None,
    formid: str | None,
    formnumber: int,
    formxpath: str | None,
) -> FormElement:
    """Find the wanted form element within the given response.

    Lookup precedence: ``formname``, then ``formid``, then ``formxpath``
    (climbing from a matched descendant up to the nearest enclosing <form>),
    and finally the ``formnumber``-th <form> in the document.

    Raises:
        ValueError: if the response has no <form>, or *formxpath* matches nothing.
        IndexError: if *formnumber* is out of range.
    """
    root = response.selector.root
    forms = root.xpath("//form")
    if not forms:
        raise ValueError(f"No <form> element found in {response}")
    if formname is not None:
        f = root.xpath(f'//form[@name="{formname}"]')
        if f:
            return cast("FormElement", f[0])
    if formid is not None:
        f = root.xpath(f'//form[@id="{formid}"]')
        if f:
            return cast("FormElement", f[0])
    # Get form element from xpath, if not found, go up
    if formxpath is not None:
        nodes = root.xpath(formxpath)
        if nodes:
            el = nodes[0]
            while True:
                if el.tag == "form":
                    return cast("FormElement", el)
                el = el.getparent()
                if el is None:
                    break
        raise ValueError(f"No <form> element found with {formxpath}")
    # If we get here, it means that either formname was None or invalid
    try:
        form = forms[formnumber]
    except IndexError:
        # The bare list-lookup IndexError adds nothing; suppress exception
        # chaining so the user only sees the descriptive message.
        raise IndexError(f"Form number {formnumber} not found in {response}") from None
    return cast("FormElement", form)
def _get_inputs(
    form: FormElement,
    formdata: FormdataType,
    dont_click: bool,
    clickdata: dict[str, str | int] | None,
) -> list[FormdataKVType]:
    """Return a list of key-value pairs for the inputs found in the given form.

    Values supplied in *formdata* override the form's defaults; entries whose
    value is None are dropped. Unless *dont_click* is True, the chosen
    clickable element (see ``_get_clickable``) is also included.

    Raises:
        ValueError: if *formdata* is not a dict or an iterable of pairs.
    """
    try:
        formdata_keys = dict(formdata or ()).keys()
    except (ValueError, TypeError):
        # Suppress chaining: the dict() conversion error is an implementation
        # detail and would only obscure this message.
        raise ValueError("formdata should be a dict or iterable of tuples") from None
    if not formdata:
        formdata = []
    # Submittable controls: textareas, selects, and inputs that are neither
    # submit/image/reset buttons nor unchecked checkboxes/radios.
    inputs = form.xpath(
        "descendant::textarea"
        "|descendant::select"
        "|descendant::input[not(@type) or @type["
        ' not(re:test(., "^(?:submit|image|reset)$", "i"))'
        " and (../@checked or"
        ' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
        namespaces={"re": "http://exslt.org/regular-expressions"},
    )
    values: list[FormdataKVType] = [
        (k, "" if v is None else v)
        for k, v in (_value(e) for e in inputs)
        if k and k not in formdata_keys
    ]
    if not dont_click:
        clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and clickable[0] is not None:
            values.append(clickable)
    # User-supplied data goes last so it wins in doseq-style consumers.
    formdata_items = formdata.items() if isinstance(formdata, dict) else formdata
    values.extend((k, v) for k, v in formdata_items if v is not None)
    return values
def _value(
ele: InputElement | SelectElement | TextareaElement,
) -> tuple[str | None, str | MultipleSelectOptions | None]:
n = ele.name
v = ele.value
if ele.tag == "select":
return _select_value(cast("SelectElement", ele), n, v)
return n, v
def _select_value(
ele: SelectElement, n: str | None, v: str | MultipleSelectOptions | None
) -> tuple[str | None, str | MultipleSelectOptions | None]:
multiple = ele.multiple
if v is None and not multiple:
# Match browser behaviour on simple select tag without options selected
# And for select tags without options
o = ele.value_options
return (n, o[0]) if o else (None, None)
return n, v
def _get_clickable(
    clickdata: dict[str, str | int] | None, form: FormElement
) -> tuple[str, str] | None:
    """
    Returns the clickable element specified in clickdata,
    if the latter is given. If not, it returns the first
    clickable element found

    Returns None when the form has no clickable elements at all.
    Raises ValueError when clickdata matches zero or multiple elements.
    """
    # Clickable elements: submit/image inputs, and buttons that are
    # explicitly or implicitly (no @type) submit buttons.
    clickables = list(
        form.xpath(
            'descendant::input[re:test(@type, "^(submit|image)$", "i")]'
            '|descendant::button[not(@type) or re:test(@type, "^submit$", "i")]',
            namespaces={"re": "http://exslt.org/regular-expressions"},
        )
    )
    if not clickables:
        return None
    # If we don't have clickdata, we just use the first clickable element
    if clickdata is None:
        el = clickables[0]
        return (el.get("name"), el.get("value") or "")
    # If clickdata is given, we compare it to the clickable elements to find a
    # match. We first look to see if the number is specified in clickdata,
    # because that uniquely identifies the element
    nr = clickdata.get("nr", None)
    if nr is not None:
        assert isinstance(nr, int)
        try:
            el = list(form.inputs)[nr]
        except IndexError:
            pass
        else:
            return (cast("str", el.get("name")), el.get("value") or "")
    # We didn't find it, so now we build an XPath expression out of the other
    # arguments, because they can be used as such
    # NOTE(review): after a failed "nr" lookup, "nr" itself is still embedded
    # in the XPath below (as [@nr="..."]) — presumably matching nothing;
    # confirm this fall-through is intended.
    xpath = ".//*" + "".join(f'[@{k}="{v}"]' for k, v in clickdata.items())
    el = form.xpath(xpath)
    if len(el) == 1:
        return (el[0].get("name"), el[0].get("value") or "")
    if len(el) > 1:
        raise ValueError(
            f"Multiple elements found ({el!r}) matching the "
            f"criteria in clickdata: {clickdata!r}"
        )
    raise ValueError(f"No clickable element matching clickdata: {clickdata!r}")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/request/__init__.py | scrapy/http/request/__init__.py | """
This module implements the Request class which is used to represent HTTP
requests in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from __future__ import annotations
import inspect
from typing import (
TYPE_CHECKING,
Any,
AnyStr,
Concatenate,
NoReturn,
TypeAlias,
TypedDict,
TypeVar,
overload,
)
from w3lib.url import safe_url_string
# a workaround for the docs "more than one target found" problem
import scrapy # noqa: TC001
from scrapy.http.headers import Headers
from scrapy.utils.curl import curl_to_request_kwargs
from scrapy.utils.python import to_bytes
from scrapy.utils.trackref import object_ref
if TYPE_CHECKING:
from collections.abc import Callable, Iterable, Mapping
from twisted.python.failure import Failure
# typing.NotRequired and typing.Self require Python 3.11
from typing_extensions import NotRequired, Self
# circular import
from scrapy.http import Response
CallbackT: TypeAlias = Callable[Concatenate[Response, ...], Any]
class VerboseCookie(TypedDict):
name: str | bytes
value: str | bytes | bool | float | int
domain: NotRequired[str | bytes]
path: NotRequired[str | bytes]
secure: NotRequired[bool]
CookiesT: TypeAlias = dict[str, str] | list[VerboseCookie]
RequestTypeVar = TypeVar("RequestTypeVar", bound="Request")
def NO_CALLBACK(*args: Any, **kwargs: Any) -> NoReturn:
    """Sentinel callback marking a request as having no spider callback.

    Assign it to the ``callback`` parameter of :class:`~scrapy.Request` when
    the request is created and handled entirely by a :ref:`component
    <topics-components>` (e.g. through
    :meth:`scrapy.core.engine.ExecutionEngine.download`), so that downloader
    middlewares can distinguish such requests from ones destined for the
    spider's :meth:`~scrapy.Spider.parse` callback:

    .. code-block:: python

        Request("https://example.com", callback=NO_CALLBACK)

    Calling it is always an error.
    """
    raise RuntimeError(
        "The NO_CALLBACK callback has been called. This is a special callback "
        "value intended for requests whose callback is never meant to be "
        "called."
    )
class Request(object_ref):
    """Represents an HTTP request, which is usually generated in a Spider and
    executed by the Downloader, thus generating a :class:`~scrapy.http.Response`.
    """

    attributes: tuple[str, ...] = (
        "url",
        "callback",
        "method",
        "headers",
        "body",
        "cookies",
        "meta",
        "encoding",
        "priority",
        "dont_filter",
        "errback",
        "flags",
        "cb_kwargs",
    )
    """A tuple of :class:`str` objects containing the name of all public
    attributes of the class that are also keyword parameters of the
    ``__init__()`` method.
    Currently used by :meth:`.Request.replace`, :meth:`.Request.to_dict` and
    :func:`~scrapy.utils.request.request_from_dict`.
    """

    def __init__(
        self,
        url: str,
        callback: CallbackT | None = None,
        method: str = "GET",
        headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
        body: bytes | str | None = None,
        cookies: CookiesT | None = None,
        meta: dict[str, Any] | None = None,
        encoding: str = "utf-8",
        priority: int = 0,
        dont_filter: bool = False,
        errback: Callable[[Failure], Any] | None = None,
        flags: list[str] | None = None,
        cb_kwargs: dict[str, Any] | None = None,
    ) -> None:
        self._encoding: str = encoding # this one has to be set first
        self.method: str = str(method).upper()
        self._set_url(url)
        self._set_body(body)
        if not isinstance(priority, int):
            raise TypeError(f"Request priority not an integer: {priority!r}")
        #: Default: ``0``
        #:
        #: Value that the :ref:`scheduler <topics-scheduler>` may use for
        #: request prioritization.
        #:
        #: Built-in schedulers prioritize requests with a higher priority
        #: value.
        #:
        #: Negative values are allowed.
        self.priority: int = priority
        if not (callable(callback) or callback is None):
            raise TypeError(
                f"callback must be a callable, got {type(callback).__name__}"
            )
        if not (callable(errback) or errback is None):
            raise TypeError(f"errback must be a callable, got {type(errback).__name__}")
        #: :class:`~collections.abc.Callable` to parse the
        #: :class:`~scrapy.http.Response` to this request once received.
        #:
        #: The callable must expect the response as its first parameter, and
        #: support any additional keyword arguments set through
        #: :attr:`cb_kwargs`.
        #:
        #: In addition to an arbitrary callable, the following values are also
        #: supported:
        #:
        #: - ``None`` (default), which indicates that the
        #:   :meth:`~scrapy.Spider.parse` method of the spider must be used.
        #:
        #: - :func:`~scrapy.http.request.NO_CALLBACK`.
        #:
        #: If an unhandled exception is raised during request or response
        #: processing, i.e. by a :ref:`spider middleware
        #: <topics-spider-middleware>`, :ref:`downloader middleware
        #: <topics-downloader-middleware>` or download handler
        #: (:setting:`DOWNLOAD_HANDLERS`), :attr:`errback` is called instead.
        #:
        #: .. tip::
        #:     :class:`~scrapy.spidermiddlewares.httperror.HttpErrorMiddleware`
        #:     raises exceptions for non-2xx responses by default, sending them
        #:     to the :attr:`errback` instead.
        #:
        #: .. seealso::
        #:     :ref:`topics-request-response-ref-request-callback-arguments`
        self.callback: CallbackT | None = callback
        #: :class:`~collections.abc.Callable` to handle exceptions raised
        #: during request or response processing.
        #:
        #: The callable must expect a :exc:`~twisted.python.failure.Failure` as
        #: its first parameter.
        #:
        #: .. seealso:: :ref:`topics-request-response-ref-errbacks`
        self.errback: Callable[[Failure], Any] | None = errback
        self.cookies: CookiesT = cookies or {}
        self.headers: Headers = Headers(headers or {}, encoding=encoding)
        #: Whether this request may be filtered out by :ref:`components
        #: <topics-components>` that support filtering out requests (``False``,
        #: default), or those components should not filter out this request
        #: (``True``).
        #:
        #: This attribute is commonly set to ``True`` to prevent duplicate
        #: requests from being filtered out.
        #:
        #: When defining the start URLs of a spider through
        #: :attr:`~scrapy.Spider.start_urls`, this attribute is enabled by
        #: default. See :meth:`~scrapy.Spider.start`.
        self.dont_filter: bool = dont_filter
        # meta and cb_kwargs are stored as None until first use to avoid
        # allocating a dict per request; see the properties below.
        self._meta: dict[str, Any] | None = dict(meta) if meta else None
        self._cb_kwargs: dict[str, Any] | None = dict(cb_kwargs) if cb_kwargs else None
        self.flags: list[str] = [] if flags is None else list(flags)

    @property
    def cb_kwargs(self) -> dict[str, Any]:
        """Keyword arguments passed to the :attr:`callback` (lazily created)."""
        if self._cb_kwargs is None:
            self._cb_kwargs = {}
        return self._cb_kwargs

    @property
    def meta(self) -> dict[str, Any]:
        """Arbitrary request metadata dict (lazily created)."""
        if self._meta is None:
            self._meta = {}
        return self._meta

    @property
    def url(self) -> str:
        """The request URL as a safe-encoded string (read-only)."""
        return self._url

    def _set_url(self, url: str) -> None:
        if not isinstance(url, str):
            raise TypeError(f"Request url must be str, got {type(url).__name__}")
        # Percent-encode unsafe characters using this request's encoding.
        self._url = safe_url_string(url, self.encoding)
        if (
            "://" not in self._url
            and not self._url.startswith("about:")
            and not self._url.startswith("data:")
        ):
            raise ValueError(f"Missing scheme in request url: {self._url}")

    @property
    def body(self) -> bytes:
        """The request body as bytes (read-only)."""
        return self._body

    def _set_body(self, body: str | bytes | None) -> None:
        # None becomes an empty body; str is encoded with self.encoding.
        self._body = b"" if body is None else to_bytes(body, self.encoding)

    @property
    def encoding(self) -> str:
        """Encoding used for the URL, body and headers of this request."""
        return self._encoding

    def __repr__(self) -> str:
        return f"<{self.method} {self.url}>"

    def copy(self) -> Self:
        """Return a copy of this Request."""
        return self.replace()

    @overload
    def replace(
        self, *args: Any, cls: type[RequestTypeVar], **kwargs: Any
    ) -> RequestTypeVar: ...
    @overload
    def replace(self, *args: Any, cls: None = None, **kwargs: Any) -> Self: ...
    def replace(
        self, *args: Any, cls: type[Request] | None = None, **kwargs: Any
    ) -> Request:
        """Create a new Request with the same attributes except for those given new values"""
        for x in self.attributes:
            kwargs.setdefault(x, getattr(self, x))
        if cls is None:
            cls = self.__class__
        return cls(*args, **kwargs)

    @classmethod
    def from_curl(
        cls,
        curl_command: str,
        ignore_unknown_options: bool = True,
        **kwargs: Any,
    ) -> Self:
        """Create a Request object from a string containing a `cURL
        <https://curl.se/>`_ command. It populates the HTTP method, the
        URL, the headers, the cookies and the body. It accepts the same
        arguments as the :class:`Request` class, taking preference and
        overriding the values of the same arguments contained in the cURL
        command.

        Unrecognized options are ignored by default. To raise an error when
        finding unknown options call this method by passing
        ``ignore_unknown_options=False``.

        .. caution:: Using :meth:`from_curl` from :class:`~scrapy.Request`
            subclasses, such as :class:`~scrapy.http.JsonRequest`, or
            :class:`~scrapy.http.XmlRpcRequest`, as well as having
            :ref:`downloader middlewares <topics-downloader-middleware>`
            and
            :ref:`spider middlewares <topics-spider-middleware>`
            enabled, such as
            :class:`~scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware`,
            :class:`~scrapy.downloadermiddlewares.useragent.UserAgentMiddleware`,
            or
            :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`,
            may modify the :class:`~scrapy.Request` object.

        To translate a cURL command into a Scrapy request,
        you may use `curl2scrapy <https://michael-shub.github.io/curl2scrapy/>`_.
        """
        request_kwargs = curl_to_request_kwargs(curl_command, ignore_unknown_options)
        # Explicit keyword arguments win over values parsed from the command.
        request_kwargs.update(kwargs)
        return cls(**request_kwargs)

    def to_dict(self, *, spider: scrapy.Spider | None = None) -> dict[str, Any]:
        """Return a dictionary containing the Request's data.
        Use :func:`~scrapy.utils.request.request_from_dict` to convert back into a :class:`~scrapy.Request` object.
        If a spider is given, this method will try to find out the name of the spider methods used as callback
        and errback and include them in the output dict, raising an exception if they cannot be found.
        """
        d = {
            "url": self.url, # urls are safe (safe_string_url)
            "callback": (
                _find_method(spider, self.callback)
                if callable(self.callback)
                else self.callback
            ),
            "errback": (
                _find_method(spider, self.errback)
                if callable(self.errback)
                else self.errback
            ),
            "headers": dict(self.headers),
        }
        # Remaining serializable attributes are copied verbatim.
        for attr in self.attributes:
            d.setdefault(attr, getattr(self, attr))
        # Record the subclass path so request_from_dict can rebuild it.
        if type(self) is not Request: # pylint: disable=unidiomatic-typecheck
            d["_class"] = self.__module__ + "." + self.__class__.__name__
        return d
def _find_method(obj: Any, func: Callable[..., Any]) -> str:
"""Helper function for Request.to_dict"""
# Only instance methods contain ``__func__``
if obj and hasattr(func, "__func__"):
members = inspect.getmembers(obj, predicate=inspect.ismethod)
for name, obj_func in members:
# We need to use __func__ to access the original function object because instance
# method objects are generated each time attribute is retrieved from instance.
#
# Reference: The standard type hierarchy
# https://docs.python.org/3/reference/datamodel.html
if obj_func.__func__ is func.__func__:
return name
raise ValueError(f"Function {func} is not an instance method in: {obj}")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/http/request/json_request.py | scrapy/http/request/json_request.py | """
This module implements the JsonRequest class which is a more convenient class
(than Request) to generate JSON Requests.
See documentation in docs/topics/request-response.rst
"""
from __future__ import annotations
import copy
import json
import warnings
from typing import TYPE_CHECKING, Any, overload
from scrapy.http.request import Request, RequestTypeVar
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
class JsonRequest(Request):
    """A :class:`Request` that serializes a ``data`` keyword argument to a
    JSON body, defaulting to POST with JSON Content-Type/Accept headers.

    If both ``body`` and ``data`` are passed, ``body`` wins and a warning is
    emitted. ``dumps_kwargs`` are forwarded to :func:`json.dumps` (with
    ``sort_keys=True`` by default).
    """

    attributes: tuple[str, ...] = (*Request.attributes, "dumps_kwargs")

    def __init__(
        self, *args: Any, dumps_kwargs: dict[str, Any] | None = None, **kwargs: Any
    ) -> None:
        # Deep-copy so later mutation by the caller cannot affect this request.
        dumps_kwargs = copy.deepcopy(dumps_kwargs) if dumps_kwargs is not None else {}
        dumps_kwargs.setdefault("sort_keys", True)
        self._dumps_kwargs: dict[str, Any] = dumps_kwargs
        body_passed = kwargs.get("body") is not None
        data: Any = kwargs.pop("data", None)
        data_passed: bool = data is not None
        if body_passed and data_passed:
            warnings.warn("Both body and data passed. data will be ignored")
        elif not body_passed and data_passed:
            kwargs["body"] = self._dumps(data)
            if "method" not in kwargs:
                kwargs["method"] = "POST"
        super().__init__(*args, **kwargs)
        self.headers.setdefault("Content-Type", "application/json")
        self.headers.setdefault(
            "Accept", "application/json, text/javascript, */*; q=0.01"
        )

    @property
    def dumps_kwargs(self) -> dict[str, Any]:
        """Keyword arguments forwarded to :func:`json.dumps`."""
        return self._dumps_kwargs

    @overload
    def replace(
        self, *args: Any, cls: type[RequestTypeVar], **kwargs: Any
    ) -> RequestTypeVar: ...
    @overload
    def replace(self, *args: Any, cls: None = None, **kwargs: Any) -> Self: ...
    def replace(
        self, *args: Any, cls: type[Request] | None = None, **kwargs: Any
    ) -> Request:
        """Like :meth:`Request.replace`, additionally accepting ``data`` to
        re-serialize a new JSON body (ignored if ``body`` is also given)."""
        body_passed = kwargs.get("body") is not None
        data: Any = kwargs.pop("data", None)
        data_passed: bool = data is not None
        if body_passed and data_passed:
            warnings.warn("Both body and data passed. data will be ignored")
        elif not body_passed and data_passed:
            kwargs["body"] = self._dumps(data)
        return super().replace(*args, cls=cls, **kwargs)

    def _dumps(self, data: Any) -> str:
        """Convert to JSON"""
        return json.dumps(data, **self._dumps_kwargs)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/referer.py | scrapy/spidermiddlewares/referer.py | """
RefererMiddleware: populates Request referer field, based on the Response which
originated it.
"""
from __future__ import annotations
import warnings
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, cast
from urllib.parse import urlparse
from w3lib.url import safe_url_string
from scrapy import Spider, signals
from scrapy.exceptions import NotConfigured
from scrapy.http import Request, Response
from scrapy.spidermiddlewares.base import BaseSpiderMiddleware
from scrapy.utils.misc import load_object
from scrapy.utils.python import to_unicode
from scrapy.utils.url import strip_url
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
LOCAL_SCHEMES: tuple[str, ...] = (
"about",
"blob",
"data",
"filesystem",
)
POLICY_NO_REFERRER = "no-referrer"
POLICY_NO_REFERRER_WHEN_DOWNGRADE = "no-referrer-when-downgrade"
POLICY_SAME_ORIGIN = "same-origin"
POLICY_ORIGIN = "origin"
POLICY_STRICT_ORIGIN = "strict-origin"
POLICY_ORIGIN_WHEN_CROSS_ORIGIN = "origin-when-cross-origin"
POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN = "strict-origin-when-cross-origin"
POLICY_UNSAFE_URL = "unsafe-url"
POLICY_SCRAPY_DEFAULT = "scrapy-default"
class ReferrerPolicy(ABC):
    """Abstract base class for referrer policies.

    Subclasses set :attr:`name` and implement :meth:`referrer`, which decides
    what (if anything) to send as the ``Referer`` header for a request
    originated by a given response.
    """

    # URL schemes for which no referrer is ever sent.
    NOREFERRER_SCHEMES: tuple[str, ...] = LOCAL_SCHEMES
    # Policy identifier string; set by each concrete subclass.
    name: str

    @abstractmethod
    def referrer(self, response_url: str, request_url: str) -> str | None:
        """Return the Referer value to send, or None to omit the header."""
        raise NotImplementedError

    def stripped_referrer(self, url: str) -> str | None:
        # Full URL (credentials/fragment removed), unless the scheme is local.
        if urlparse(url).scheme not in self.NOREFERRER_SCHEMES:
            return self.strip_url(url)
        return None

    def origin_referrer(self, url: str) -> str | None:
        # Origin only, unless the scheme is local.
        if urlparse(url).scheme not in self.NOREFERRER_SCHEMES:
            return self.origin(url)
        return None

    def strip_url(self, url: str, origin_only: bool = False) -> str | None:
        """
        https://www.w3.org/TR/referrer-policy/#strip-url
        If url is null, return no referrer.
        If url's scheme is a local scheme, then return no referrer.
        Set url's username to the empty string.
        Set url's password to null.
        Set url's fragment to null.
        If the origin-only flag is true, then:
        Set url's path to null.
        Set url's query to null.
        Return url.
        """
        if not url:
            return None
        return strip_url(
            url,
            strip_credentials=True,
            strip_fragment=True,
            strip_default_port=True,
            origin_only=origin_only,
        )

    def origin(self, url: str) -> str | None:
        """Return serialized origin (scheme, host, path) for a request or response URL."""
        return self.strip_url(url, origin_only=True)

    def potentially_trustworthy(self, url: str) -> bool:
        # Note: this does not follow https://w3c.github.io/webappsec-secure-contexts/#is-url-trustworthy
        parsed_url = urlparse(url)
        if parsed_url.scheme in ("data",):
            return False
        return self.tls_protected(url)

    def tls_protected(self, url: str) -> bool:
        """Whether *url* uses a TLS-protected scheme (https/ftps)."""
        return urlparse(url).scheme in ("https", "ftps")
class NoReferrerPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-no-referrer

    Never send referrer information: whatever the origins involved, the
    "Referer" header is omitted entirely.
    """

    name: str = POLICY_NO_REFERRER

    def referrer(self, response_url: str, request_url: str) -> str | None:
        # no referrer, unconditionally
        return None
class NoReferrerWhenDowngradePolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-no-referrer-when-downgrade

    Send the full (stripped) URL as referrer, except on a security
    downgrade: requests going from a TLS-protected response URL to a
    non-TLS-protected request URL carry no referrer information at all.

    This is a user agent's default behavior, if no policy is otherwise
    specified.
    """

    name: str = POLICY_NO_REFERRER_WHEN_DOWNGRADE

    def referrer(self, response_url: str, request_url: str) -> str | None:
        downgrade = self.tls_protected(response_url) and not self.tls_protected(
            request_url
        )
        return None if downgrade else self.stripped_referrer(response_url)
class SameOriginPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-same-origin

    Send the full (stripped) URL as referrer for same-origin requests
    only; cross-origin requests carry no referrer information.
    """

    name: str = POLICY_SAME_ORIGIN

    def referrer(self, response_url: str, request_url: str) -> str | None:
        if self.origin(response_url) != self.origin(request_url):
            # cross-origin: no Referer header
            return None
        return self.stripped_referrer(response_url)
class OriginPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-origin

    Always send only the ASCII serialization of the origin of the
    request client, for same-origin and cross-origin requests alike.
    """

    name: str = POLICY_ORIGIN

    def referrer(self, response_url: str, request_url: str) -> str | None:
        # origin only, regardless of the destination
        return self.origin_referrer(response_url)
class StrictOriginPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-strict-origin

    Send only the origin of the request client, except when a
    TLS-protected response would leak it to a non-potentially-trustworthy
    URL, in which case no referrer information is sent at all.
    """

    name: str = POLICY_STRICT_ORIGIN

    def referrer(self, response_url: str, request_url: str) -> str | None:
        if self.tls_protected(response_url) and not self.potentially_trustworthy(
            request_url
        ):
            # TLS-protected source, untrusted destination: leak nothing
            return None
        return self.origin_referrer(response_url)
class OriginWhenCrossOriginPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-origin-when-cross-origin

    Send the full (stripped) URL for same-origin requests, and only the
    ASCII serialization of the origin for cross-origin requests.
    """

    name: str = POLICY_ORIGIN_WHEN_CROSS_ORIGIN

    def referrer(self, response_url: str, request_url: str) -> str | None:
        response_origin = self.origin(response_url)
        if response_origin != self.origin(request_url):
            # cross-origin: fall back to the origin only
            return response_origin
        return self.stripped_referrer(response_url)
class StrictOriginWhenCrossOriginPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-strict-origin-when-cross-origin

    Send the full (stripped) URL for same-origin requests. For
    cross-origin requests, send only the origin of the request client —
    unless that would leak it from a TLS-protected response to a
    non-potentially-trustworthy URL, in which case nothing is sent.
    """

    name: str = POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN

    def referrer(self, response_url: str, request_url: str) -> str | None:
        response_origin = self.origin(response_url)
        if response_origin == self.origin(request_url):
            return self.stripped_referrer(response_url)
        if self.tls_protected(response_url) and not self.potentially_trustworthy(
            request_url
        ):
            # cross-origin downgrade: leak nothing
            return None
        return self.origin_referrer(response_url)
class UnsafeUrlPolicy(ReferrerPolicy):
    """
    https://www.w3.org/TR/referrer-policy/#referrer-policy-unsafe-url

    Always send the full (stripped) URL, for same-origin and
    cross-origin requests alike.

    Note: the name doesn't lie; this policy leaks origins and paths from
    TLS-protected resources to insecure origins. Consider the impact
    carefully before using it for potentially sensitive documents.
    """

    name: str = POLICY_UNSAFE_URL

    def referrer(self, response_url: str, request_url: str) -> str | None:
        # full stripped URL, regardless of the destination
        return self.stripped_referrer(response_url)
class DefaultReferrerPolicy(NoReferrerWhenDowngradePolicy):
    """
    Scrapy's default policy: "no-referrer-when-downgrade", extended so
    that no "Referer" is sent either when the parent request used the
    ``file://`` or ``s3://`` scheme.
    """

    NOREFERRER_SCHEMES: tuple[str, ...] = LOCAL_SCHEMES + ("file", "s3")
    name: str = POLICY_SCRAPY_DEFAULT
# Registry mapping standard policy names to their implementing classes,
# used when resolving the REFERRER_POLICY setting or a Referrer-Policy
# header value.
_policy_classes: dict[str, type[ReferrerPolicy]] = {
    p.name: p
    for p in (
        NoReferrerPolicy,
        NoReferrerWhenDowngradePolicy,
        SameOriginPolicy,
        OriginPolicy,
        StrictOriginPolicy,
        OriginWhenCrossOriginPolicy,
        StrictOriginWhenCrossOriginPolicy,
        UnsafeUrlPolicy,
        DefaultReferrerPolicy,
    )
}

# Reference: https://www.w3.org/TR/referrer-policy/#referrer-policy-empty-string
_policy_classes[""] = NoReferrerWhenDowngradePolicy
def _load_policy_class(
    policy: str, warning_only: bool = False
) -> type[ReferrerPolicy] | None:
    """
    Resolve *policy* into a ReferrerPolicy subclass.

    *policy* is either an import path to a policy class, or one or more
    comma-separated standard policy names from
    https://www.w3.org/TR/referrer-policy/#referrer-policies

    Unresolvable policies raise RuntimeError, or emit a RuntimeWarning
    and return None when *warning_only* is true.
    """
    try:
        return cast("type[ReferrerPolicy]", load_object(policy))
    except ValueError:
        # https://www.w3.org/TR/referrer-policy/#parse-referrer-policy-from-header
        # scan tokens right-to-left: the last recognized one wins
        for token in reversed([part.strip() for part in policy.lower().split(",")]):
            if token in _policy_classes:
                return _policy_classes[token]

        msg = f"Could not load referrer policy {policy!r}"
        if not warning_only:
            raise RuntimeError(msg)
        warnings.warn(msg, RuntimeWarning)
        return None
class RefererMiddleware(BaseSpiderMiddleware):
    """Populate the "Referer" header of outgoing requests, based on the
    response that originated them and the configured referrer policy."""

    def __init__(self, settings: BaseSettings | None = None):  # pylint: disable=super-init-not-called
        # Policy used when neither the request meta nor the parent
        # response headers provide a (valid) policy of their own.
        self.default_policy: type[ReferrerPolicy] = DefaultReferrerPolicy
        if settings is not None:
            settings_policy = _load_policy_class(settings.get("REFERRER_POLICY"))
            assert settings_policy
            self.default_policy = settings_policy

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        if not crawler.settings.getbool("REFERER_ENABLED"):
            raise NotConfigured
        mw = cls(crawler.settings)
        # Note: this hook is a bit of a hack to intercept redirections
        crawler.signals.connect(mw.request_scheduled, signal=signals.request_scheduled)
        return mw

    def policy(self, resp_or_url: Response | str, request: Request) -> ReferrerPolicy:
        """
        Determine Referrer-Policy to use from a parent Response (or URL),
        and a Request to be sent.

        - if a valid policy is set in Request meta, it is used.
        - if the policy is set in meta but is wrong (e.g. a typo error),
          the policy from settings is used
        - if the policy is not set in Request meta,
          but there is a Referrer-policy header in the parent response,
          it is used if valid
        - otherwise, the policy from settings is used.
        """
        policy_name = request.meta.get("referrer_policy")
        if policy_name is None and isinstance(resp_or_url, Response):
            policy_header = resp_or_url.headers.get("Referrer-Policy")
            if policy_header is not None:
                policy_name = to_unicode(policy_header.decode("latin1"))
        if policy_name is None:
            return self.default_policy()
        # a bad policy name in meta/headers only warns and falls back
        cls = _load_policy_class(policy_name, warning_only=True)
        return cls() if cls else self.default_policy()

    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        """Attach a "Referer" header computed by the active policy."""
        if response is None:
            # start requests
            return request
        referrer = self.policy(response, request).referrer(response.url, request.url)
        if referrer is not None:
            # don't override a Referer the spider set explicitly
            request.headers.setdefault("Referer", referrer)
        return request

    def request_scheduled(self, request: Request, spider: Spider) -> None:
        """Signal handler: re-apply the policy to redirected requests."""
        # check redirected request to patch "Referer" header if necessary
        redirected_urls = request.meta.get("redirect_urls", [])
        if redirected_urls:
            request_referrer = request.headers.get("Referer")
            # we don't patch the referrer value if there is none
            if request_referrer is not None:
                # the request's referrer header value acts as a surrogate
                # for the parent response URL
                #
                # Note: if the 3xx response contained a Referrer-Policy header,
                # the information is not available using this hook
                parent_url = safe_url_string(request_referrer)
                policy_referrer = self.policy(parent_url, request).referrer(
                    parent_url, request.url
                )
                if policy_referrer != request_referrer.decode("latin1"):
                    if policy_referrer is None:
                        request.headers.pop("Referer")
                    else:
                        request.headers["Referer"] = policy_referrer
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/start.py | scrapy/spidermiddlewares/start.py | from __future__ import annotations
from typing import TYPE_CHECKING
from .base import BaseSpiderMiddleware
if TYPE_CHECKING:
from scrapy.http import Request
from scrapy.http.response import Response
class StartSpiderMiddleware(BaseSpiderMiddleware):
    """Set :reqmeta:`is_start_request`.

    .. reqmeta:: is_start_request

    is_start_request
    ----------------

    :attr:`~scrapy.Request.meta` key set to ``True`` on :ref:`start
    requests <start-requests>`, so that start requests can be told apart
    from other requests, e.g. in :ref:`downloader middlewares
    <topics-downloader-middleware>`.
    """

    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        # Only start requests arrive without an originating response.
        if response is not None:
            return request
        request.meta.setdefault("is_start_request", True)
        return request
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/httperror.py | scrapy/spidermiddlewares/httperror.py | """
HttpError Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.decorators import _warn_spider_arg
if TYPE_CHECKING:
from collections.abc import Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Spider
from scrapy.crawler import Crawler
from scrapy.http import Response
from scrapy.settings import BaseSettings
logger = logging.getLogger(__name__)
class HttpError(IgnoreRequest):
    """A non-200 response was filtered"""

    def __init__(self, response: Response, *args: Any, **kwargs: Any):
        # keep the offending response around so error handlers can inspect it
        self.response = response
        super().__init__(*args, **kwargs)
class HttpErrorMiddleware:
    """Filter out unsuccessful (non-2xx) responses, honoring the
    ``HTTPERROR_ALLOW_ALL`` / ``HTTPERROR_ALLOWED_CODES`` settings and
    their per-request (meta) and per-spider (attribute) counterparts."""

    crawler: Crawler

    def __init__(self, settings: BaseSettings):
        # Global defaults; request meta keys take precedence (see
        # process_spider_input below).
        self.handle_httpstatus_all: bool = settings.getbool("HTTPERROR_ALLOW_ALL")
        self.handle_httpstatus_list: list[int] = settings.getlist(
            "HTTPERROR_ALLOWED_CODES"
        )

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        o = cls(crawler.settings)
        o.crawler = crawler
        return o

    @_warn_spider_arg
    def process_spider_input(
        self, response: Response, spider: Spider | None = None
    ) -> None:
        """Raise :class:`HttpError` for responses whose status is neither
        successful (2xx) nor explicitly allowed."""
        if 200 <= response.status < 300:  # common case
            return
        meta = response.meta
        if meta.get("handle_httpstatus_all", False):
            return
        if "handle_httpstatus_list" in meta:
            # per-request allowance overrides everything else
            allowed_statuses = meta["handle_httpstatus_list"]
        elif self.handle_httpstatus_all:
            return
        else:
            # fall back to the spider attribute, then to the setting
            allowed_statuses = getattr(
                self.crawler.spider,
                "handle_httpstatus_list",
                self.handle_httpstatus_list,
            )
        if response.status in allowed_statuses:
            return
        raise HttpError(response, "Ignoring non-200 response")

    @_warn_spider_arg
    def process_spider_exception(
        self, response: Response, exception: Exception, spider: Spider | None = None
    ) -> Iterable[Any] | None:
        """Silence :class:`HttpError` (recording it in stats); return None
        for any other exception so it keeps propagating."""
        if isinstance(exception, HttpError):
            assert self.crawler.stats
            self.crawler.stats.inc_value("httperror/response_ignored_count")
            self.crawler.stats.inc_value(
                f"httperror/response_ignored_status_count/{response.status}"
            )
            logger.info(
                "Ignoring response %(response)r: HTTP status code is not handled or not allowed",
                {"response": response},
                extra={"spider": self.crawler.spider},
            )
            # an empty result list stops further processing of this response
            return []
        return None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/urllength.py | scrapy/spidermiddlewares/urllength.py | """
Url Length Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from scrapy.exceptions import NotConfigured
from scrapy.spidermiddlewares.base import BaseSpiderMiddleware
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Request, Response
logger = logging.getLogger(__name__)
class UrlLengthMiddleware(BaseSpiderMiddleware):
    """Drop requests whose URL exceeds ``URLLENGTH_LIMIT`` characters."""

    crawler: Crawler

    def __init__(self, maxlength: int):  # pylint: disable=super-init-not-called
        self.maxlength: int = maxlength

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        maxlength = crawler.settings.getint("URLLENGTH_LIMIT")
        if not maxlength:
            # a zero/absent limit disables the middleware entirely
            raise NotConfigured
        mw = cls(maxlength)
        mw.crawler = crawler
        return mw

    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        """Return *request* unchanged, or None when its URL is too long."""
        if len(request.url) > self.maxlength:
            logger.info(
                "Ignoring link (url length > %(maxlength)d): %(url)s ",
                {"maxlength": self.maxlength, "url": request.url},
                extra={"spider": self.crawler.spider},
            )
            assert self.crawler.stats
            self.crawler.stats.inc_value("urllength/request_ignored_count")
            return None
        return request
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/__init__.py | scrapy/spidermiddlewares/__init__.py | python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false | |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/depth.py | scrapy/spidermiddlewares/depth.py | """
Depth Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from scrapy.spidermiddlewares.base import BaseSpiderMiddleware
from scrapy.utils.decorators import _warn_spider_arg
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Spider
from scrapy.crawler import Crawler
from scrapy.http import Request, Response
from scrapy.statscollectors import StatsCollector
logger = logging.getLogger(__name__)
class DepthMiddleware(BaseSpiderMiddleware):
    """Track the crawl depth of every request, optionally limiting it
    (``DEPTH_LIMIT``), adjusting request priorities (``DEPTH_PRIORITY``)
    and recording per-depth stats (``DEPTH_STATS_VERBOSE``)."""

    crawler: Crawler

    def __init__(  # pylint: disable=super-init-not-called
        self,
        maxdepth: int,
        stats: StatsCollector,
        verbose_stats: bool = False,
        prio: int = 1,
    ):
        self.maxdepth = maxdepth
        self.stats = stats
        self.verbose_stats = verbose_stats
        self.prio = prio

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        settings = crawler.settings
        maxdepth = settings.getint("DEPTH_LIMIT")
        verbose = settings.getbool("DEPTH_STATS_VERBOSE")
        prio = settings.getint("DEPTH_PRIORITY")
        assert crawler.stats
        o = cls(maxdepth, crawler.stats, verbose, prio)
        o.crawler = crawler
        return o

    @_warn_spider_arg
    def process_spider_output(
        self, response: Response, result: Iterable[Any], spider: Spider | None = None
    ) -> Iterable[Any]:
        # seed the response's depth before delegating to the base helper
        self._init_depth(response)
        yield from super().process_spider_output(response, result)

    @_warn_spider_arg
    async def process_spider_output_async(
        self,
        response: Response,
        result: AsyncIterator[Any],
        spider: Spider | None = None,
    ) -> AsyncIterator[Any]:
        # async twin of process_spider_output
        self._init_depth(response)
        async for o in super().process_spider_output_async(response, result):
            yield o

    def _init_depth(self, response: Response) -> None:
        # base case (depth=0)
        if "depth" not in response.meta:
            response.meta["depth"] = 0
            if self.verbose_stats:
                self.stats.inc_value("request_depth_count/0")

    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        """Stamp the request with its depth; return None (dropping it)
        once the configured maximum depth is exceeded."""
        if response is None:
            # start requests
            return request
        depth = response.meta["depth"] + 1
        request.meta["depth"] = depth
        if self.prio:
            # deeper requests get their priority shifted by depth * prio
            request.priority -= depth * self.prio
        if self.maxdepth and depth > self.maxdepth:
            logger.debug(
                "Ignoring link (depth > %(maxdepth)d): %(requrl)s ",
                {"maxdepth": self.maxdepth, "requrl": request.url},
                extra={"spider": self.crawler.spider},
            )
            return None
        if self.verbose_stats:
            self.stats.inc_value(f"request_depth_count/{depth}")
            self.stats.max_value("request_depth_max", depth)
        return request
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/scrapy/spidermiddlewares/base.py | scrapy/spidermiddlewares/base.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from scrapy import Request, Spider
from scrapy.utils.decorators import _warn_spider_arg
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Response
class BaseSpiderMiddleware:
    """Optional base class for spider middlewares.

    .. versionadded:: 2.13

    Provides ready-made asynchronous-friendly ``process_start()`` and
    ``process_spider_output()`` implementations that feed every object
    through a pair of overridable hooks:
    :meth:`~scrapy.spidermiddlewares.base.BaseSpiderMiddleware.get_processed_request`
    for requests and
    :meth:`~scrapy.spidermiddlewares.base.BaseSpiderMiddleware.get_processed_item`
    for items. Each hook receives a single request or item and returns a
    request or item (the same or a new one), or ``None`` to drop it.
    Middlewares that implement neither hook don't need this class.
    """

    def __init__(self, crawler: Crawler):
        self.crawler: Crawler = crawler

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        return cls(crawler)

    def process_start_requests(
        self, start: Iterable[Any], spider: Spider
    ) -> Iterable[Any]:
        for obj in start:
            processed = self._get_processed(obj, None)
            if processed is not None:
                yield processed

    async def process_start(self, start: AsyncIterator[Any]) -> AsyncIterator[Any]:
        async for obj in start:
            processed = self._get_processed(obj, None)
            if processed is not None:
                yield processed

    @_warn_spider_arg
    def process_spider_output(
        self, response: Response, result: Iterable[Any], spider: Spider | None = None
    ) -> Iterable[Any]:
        for obj in result:
            processed = self._get_processed(obj, response)
            if processed is not None:
                yield processed

    @_warn_spider_arg
    async def process_spider_output_async(
        self,
        response: Response,
        result: AsyncIterator[Any],
        spider: Spider | None = None,
    ) -> AsyncIterator[Any]:
        async for obj in result:
            processed = self._get_processed(obj, response)
            if processed is not None:
                yield processed

    def _get_processed(self, o: Any, response: Response | None) -> Any:
        # dispatch requests and items to their dedicated hooks
        handler = (
            self.get_processed_request
            if isinstance(o, Request)
            else self.get_processed_item
        )
        return handler(o, response)

    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        """Return a processed request from the spider output.

        Called with a single request from the start seeds or the spider
        output. Return the same or a different request, or ``None`` to
        ignore it.

        :param request: the input request
        :type request: :class:`~scrapy.Request` object

        :param response: the response being processed
        :type response: :class:`~scrapy.http.Response` object or ``None`` for
            start seeds

        :return: the processed request or ``None``
        """
        return request

    def get_processed_item(self, item: Any, response: Response | None) -> Any:
        """Return a processed item from the spider output.

        Called with a single item from the start seeds or the spider
        output. Return the same or a different item, or ``None`` to
        ignore it.

        :param item: the input item
        :type item: item object

        :param response: the response being processed
        :type response: :class:`~scrapy.http.Response` object or ``None`` for
            start seeds

        :return: the processed item or ``None``
        """
        return item
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_loader.py | tests/test_loader.py | from __future__ import annotations
import dataclasses
import attr
import pytest
from itemadapter import ItemAdapter
from itemloaders.processors import Compose, Identity, MapCompose, TakeFirst
from scrapy.http import HtmlResponse, Response
from scrapy.item import Field, Item
from scrapy.loader import ItemLoader
from scrapy.selector import Selector
# test items
class NameItem(Item):
    """Minimal test item with a single ``name`` field."""

    name = Field()
class SummaryItem(NameItem):
    """NameItem extended with ``url`` and ``summary`` fields."""

    url = Field()
    summary = Field()
class NestedItem(Item):
    """Test item with several fields used by nested-loader tests."""

    name = Field()
    name_div = Field()
    name_value = Field()
    url = Field()
    image = Field()
# attrs-based counterpart of NameItem (name defaults to an empty string)
@attr.s
class AttrsNameItem:
    name = attr.ib(default="")
# dataclass counterpart of NameItem (name defaults to an empty list)
@dataclasses.dataclass
class NameDataClass:
    name: list = dataclasses.field(default_factory=list)
# test item loaders
class NameItemLoader(ItemLoader):
    # produces SummaryItem instances unless another item is supplied
    default_item_class = SummaryItem
class NestedItemLoader(ItemLoader):
    # produces NestedItem instances unless another item is supplied
    default_item_class = NestedItem
class ProcessorItemLoader(NameItemLoader):
    # title-cases every value added to the "name" field
    name_in = MapCompose(lambda v: v.title())
class DefaultedItemLoader(NameItemLoader):
    # drops the last character of every input value, for all fields
    default_input_processor = MapCompose(lambda v: v[:-1])
# test processors
def processor_with_args(value, other=None, loader_context=None):
    """Echo *value*, unless the loader context provides a ``"key"`` override."""
    has_override = "key" in loader_context
    return loader_context["key"] if has_override else value
class TestBasicItemLoader:
    """Basic ItemLoader behavior: unknown fields, default and custom loaders."""

    def test_add_value_on_unknown_field(self):
        il = ProcessorItemLoader()
        with pytest.raises(KeyError):
            il.add_value("wrong_field", ["lala", "lolo"])

    def test_load_item_using_default_loader(self):
        i = SummaryItem()
        i["summary"] = "lala"
        il = ItemLoader(item=i)
        il.add_value("name", "marta")
        item = il.load_item()
        # the loader mutates and returns the very item it was given
        assert item is i
        assert item["summary"] == ["lala"]
        assert item["name"] == ["marta"]

    def test_load_item_using_custom_loader(self):
        il = ProcessorItemLoader()
        il.add_value("name", "marta")
        item = il.load_item()
        # the name_in processor title-cases input values
        assert item["name"] == ["Marta"]
class InitializationTestMixin:
    """Shared assertions for ItemLoaders initialized from a pre-populated
    item: initial values must be kept, appended to, and exposed without
    being consumed.

    Subclasses set ``item_class`` to the container type under test
    (dict, Item subclass, attrs class or dataclass).
    """

    item_class: type | None = None

    def test_keep_single_value(self):
        """Loaded item should contain values from the initial item"""
        input_item = self.item_class(name="foo")
        il = ItemLoader(item=input_item)
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo"]}

    def test_keep_list(self):
        """Loaded item should contain values from the initial item"""
        input_item = self.item_class(name=["foo", "bar"])
        il = ItemLoader(item=input_item)
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar"]}

    def test_add_value_singlevalue_singlevalue(self):
        """Values added after initialization should be appended"""
        input_item = self.item_class(name="foo")
        il = ItemLoader(item=input_item)
        il.add_value("name", "bar")
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar"]}

    def test_add_value_singlevalue_list(self):
        """Values added after initialization should be appended"""
        input_item = self.item_class(name="foo")
        il = ItemLoader(item=input_item)
        il.add_value("name", ["item", "loader"])
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "item", "loader"]}

    def test_add_value_list_singlevalue(self):
        """Values added after initialization should be appended"""
        input_item = self.item_class(name=["foo", "bar"])
        il = ItemLoader(item=input_item)
        il.add_value("name", "qwerty")
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar", "qwerty"]}

    def test_add_value_list_list(self):
        """Values added after initialization should be appended"""
        input_item = self.item_class(name=["foo", "bar"])
        il = ItemLoader(item=input_item)
        il.add_value("name", ["item", "loader"])
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {
            "name": ["foo", "bar", "item", "loader"]
        }

    def test_get_output_value_singlevalue(self):
        """Getting output value must not remove value from item"""
        input_item = self.item_class(name="foo")
        il = ItemLoader(item=input_item)
        assert il.get_output_value("name") == ["foo"]
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo"]}

    def test_get_output_value_list(self):
        """Getting output value must not remove value from item"""
        input_item = self.item_class(name=["foo", "bar"])
        il = ItemLoader(item=input_item)
        assert il.get_output_value("name") == ["foo", "bar"]
        loaded_item = il.load_item()
        assert isinstance(loaded_item, self.item_class)
        assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar"]}

    def test_values_single(self):
        """Values from initial item must be added to loader._values"""
        input_item = self.item_class(name="foo")
        il = ItemLoader(item=input_item)
        assert il._values.get("name") == ["foo"]

    def test_values_list(self):
        """Values from initial item must be added to loader._values"""
        input_item = self.item_class(name=["foo", "bar"])
        il = ItemLoader(item=input_item)
        assert il._values.get("name") == ["foo", "bar"]
class TestInitializationFromDict(InitializationTestMixin):
    # plain dicts as items
    item_class = dict
class TestInitializationFromItem(InitializationTestMixin):
    # scrapy.Item subclasses as items
    item_class = NameItem
class TestInitializationFromAttrsItem(InitializationTestMixin):
    # attrs classes as items
    item_class = AttrsNameItem
class TestInitializationFromDataClass(InitializationTestMixin):
    # dataclasses as items
    item_class = NameDataClass
class BaseNoInputReprocessingLoader(ItemLoader):
    # uppercases titles on input, keeps only the first one on output
    title_in = MapCompose(str.upper)
    title_out = TakeFirst()
class NoInputReprocessingItem(Item):
    """Item with a single ``title`` field."""

    title = Field()
class NoInputReprocessingItemLoader(BaseNoInputReprocessingLoader):
    # same processors as the base loader, but building NoInputReprocessingItem
    default_item_class = NoInputReprocessingItem
class TestNoInputReprocessingFromItem:
    """
    Loaders initialized from loaded items must not reprocess fields (Item instances)
    """

    def test_avoid_reprocessing_with_initial_values_single(self):
        il = NoInputReprocessingItemLoader(item=NoInputReprocessingItem(title="foo"))
        il_loaded = il.load_item()
        assert il_loaded == {"title": "foo"}
        # feeding the loaded item back must not re-apply title_in (str.upper)
        assert NoInputReprocessingItemLoader(item=il_loaded).load_item() == {
            "title": "foo"
        }

    def test_avoid_reprocessing_with_initial_values_list(self):
        il = NoInputReprocessingItemLoader(
            item=NoInputReprocessingItem(title=["foo", "bar"])
        )
        il_loaded = il.load_item()
        # title_out = TakeFirst() keeps only the first value
        assert il_loaded == {"title": "foo"}
        assert NoInputReprocessingItemLoader(item=il_loaded).load_item() == {
            "title": "foo"
        }

    def test_avoid_reprocessing_without_initial_values_single(self):
        il = NoInputReprocessingItemLoader()
        il.add_value("title", "FOO")
        il_loaded = il.load_item()
        assert il_loaded == {"title": "FOO"}
        assert NoInputReprocessingItemLoader(item=il_loaded).load_item() == {
            "title": "FOO"
        }

    def test_avoid_reprocessing_without_initial_values_list(self):
        il = NoInputReprocessingItemLoader()
        il.add_value("title", ["foo", "bar"])
        il_loaded = il.load_item()
        # values added via add_value ARE processed (uppercased, first taken)
        assert il_loaded == {"title": "FOO"}
        assert NoInputReprocessingItemLoader(item=il_loaded).load_item() == {
            "title": "FOO"
        }
class TestOutputProcessorItem:
    """Output processors must see default values set by the item itself."""

    def test_output_processor(self):
        class TempItem(Item):
            temp = Field()

            def __init__(self, *args, **kwargs):
                # NOTE(review): passing `self` as a positional argument to the
                # superclass looks suspicious (harmless here because the item
                # is still empty at this point) -- confirm this shouldn't be
                # super().__init__(*args, **kwargs)
                super().__init__(self, *args, **kwargs)
                self.setdefault("temp", 0.3)

        class TempLoader(ItemLoader):
            default_item_class = TempItem
            default_input_processor = Identity()
            default_output_processor = Compose(TakeFirst())

        loader = TempLoader()
        item = loader.load_item()
        assert isinstance(item, TempItem)
        # the default value survives loading and output processing
        assert dict(item) == {"temp": 0.3}
class TestSelectortemLoader:
    """ItemLoader backed by a Selector/Response: the XPath and CSS APIs."""

    response = HtmlResponse(
        url="",
        encoding="utf-8",
        body=b"""
    <html>
    <body>
    <div id="id">marta</div>
    <p>paragraph</p>
    <a href="http://www.scrapy.org">homepage</a>
    <img src="/images/logo.png" width="244" height="65" alt="Scrapy">
    </body>
    </html>
    """,
    )

    def test_init_method(self):
        """A loader created without arguments has no selector."""
        loader = ProcessorItemLoader()
        assert loader.selector is None

    def test_init_method_errors(self):
        """Every selector-based API must raise RuntimeError without a selector."""
        loader = ProcessorItemLoader()
        for attempt in (
            lambda: loader.add_xpath("url", "//a/@href"),
            lambda: loader.replace_xpath("url", "//a/@href"),
            lambda: loader.get_xpath("//a/@href"),
            lambda: loader.add_css("name", "#name::text"),
            lambda: loader.replace_css("name", "#name::text"),
            lambda: loader.get_css("#name::text"),
        ):
            with pytest.raises(RuntimeError):
                attempt()

    def test_init_method_with_selector(self):
        """An explicitly passed selector is used as-is."""
        selector = Selector(text="<html><body><div>marta</div></body></html>")
        loader = ProcessorItemLoader(selector=selector)
        assert loader.selector is selector
        loader.add_xpath("name", "//div/text()")
        # NOTE: ProcessorItemLoader's processors (defined elsewhere in this
        # module) appear to title-case extracted text ("marta" -> "Marta").
        assert loader.get_output_value("name") == ["Marta"]

    def test_init_method_with_selector_css(self):
        """Same as the XPath variant, exercised through a CSS expression."""
        selector = Selector(text="<html><body><div>marta</div></body></html>")
        loader = ProcessorItemLoader(selector=selector)
        assert loader.selector is selector
        loader.add_css("name", "div::text")
        assert loader.get_output_value("name") == ["Marta"]

    def test_init_method_with_base_response(self):
        """Selector should be None after initialization"""
        base_response = Response("https://scrapy.org")
        loader = ProcessorItemLoader(response=base_response)
        assert loader.selector is None

    def test_init_method_with_response(self):
        """An HtmlResponse yields a working selector automatically."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.selector
        loader.add_xpath("name", "//div/text()")
        assert loader.get_output_value("name") == ["Marta"]

    def test_init_method_with_response_css(self):
        """CSS extraction works on a response, and mixes with XPath."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.selector
        loader.add_css("name", "div::text")
        assert loader.get_output_value("name") == ["Marta"]
        loader.add_css("url", "a::attr(href)")
        assert loader.get_output_value("url") == ["http://www.scrapy.org"]
        # CSS selectors and XPath expressions can be combined; their results
        # accumulate on the same field.
        loader.add_xpath("name", "//div/text()")
        assert loader.get_output_value("name") == ["Marta", "Marta"]
        loader.add_xpath("url", "//img/@src")
        assert loader.get_output_value("url") == [
            "http://www.scrapy.org",
            "/images/logo.png",
        ]

    def test_add_xpath_re(self):
        """The ``re=`` argument filters the extracted text."""
        loader = ProcessorItemLoader(response=self.response)
        loader.add_xpath("name", "//div/text()", re="ma")
        assert loader.get_output_value("name") == ["Ma"]

    def test_replace_xpath(self):
        """replace_xpath() discards previously collected values."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.selector
        loader.add_xpath("name", "//div/text()")
        assert loader.get_output_value("name") == ["Marta"]
        loader.replace_xpath("name", "//p/text()")
        assert loader.get_output_value("name") == ["Paragraph"]
        loader.replace_xpath("name", ["//p/text()", "//div/text()"])
        assert loader.get_output_value("name") == ["Paragraph", "Marta"]

    def test_get_xpath(self):
        """get_xpath() extracts without touching any field."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.get_xpath("//p/text()") == ["paragraph"]
        assert loader.get_xpath("//p/text()", TakeFirst()) == "paragraph"
        assert loader.get_xpath("//p/text()", TakeFirst(), re="pa") == "pa"
        assert loader.get_xpath(["//p/text()", "//div/text()"]) == [
            "paragraph",
            "marta",
        ]

    def test_replace_xpath_multi_fields(self):
        """With field=None, a mapping processor routes values to fields."""
        loader = ProcessorItemLoader(response=self.response)
        loader.add_xpath(None, "//div/text()", TakeFirst(), lambda x: {"name": x})
        assert loader.get_output_value("name") == ["Marta"]
        loader.replace_xpath(None, "//p/text()", TakeFirst(), lambda x: {"name": x})
        assert loader.get_output_value("name") == ["Paragraph"]

    def test_replace_xpath_re(self):
        """replace_xpath() honours the ``re=`` filter."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.selector
        loader.add_xpath("name", "//div/text()")
        assert loader.get_output_value("name") == ["Marta"]
        loader.replace_xpath("name", "//div/text()", re="ma")
        assert loader.get_output_value("name") == ["Ma"]

    def test_add_css_re(self):
        """add_css() honours the ``re=`` filter, including capture groups."""
        loader = ProcessorItemLoader(response=self.response)
        loader.add_css("name", "div::text", re="ma")
        assert loader.get_output_value("name") == ["Ma"]
        loader.add_css("url", "a::attr(href)", re="http://(.+)")
        assert loader.get_output_value("url") == ["www.scrapy.org"]

    def test_replace_css(self):
        """replace_css() discards previously collected values."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.selector
        loader.add_css("name", "div::text")
        assert loader.get_output_value("name") == ["Marta"]
        loader.replace_css("name", "p::text")
        assert loader.get_output_value("name") == ["Paragraph"]
        loader.replace_css("name", ["p::text", "div::text"])
        assert loader.get_output_value("name") == ["Paragraph", "Marta"]
        loader.add_css("url", "a::attr(href)", re="http://(.+)")
        assert loader.get_output_value("url") == ["www.scrapy.org"]
        loader.replace_css("url", "img::attr(src)")
        assert loader.get_output_value("url") == ["/images/logo.png"]

    def test_get_css(self):
        """get_css() extracts without touching any field."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.get_css("p::text") == ["paragraph"]
        assert loader.get_css("p::text", TakeFirst()) == "paragraph"
        assert loader.get_css("p::text", TakeFirst(), re="pa") == "pa"
        assert loader.get_css(["p::text", "div::text"]) == ["paragraph", "marta"]
        assert loader.get_css(["a::attr(href)", "img::attr(src)"]) == [
            "http://www.scrapy.org",
            "/images/logo.png",
        ]

    def test_replace_css_multi_fields(self):
        """With field=None, a mapping processor routes CSS values to fields."""
        loader = ProcessorItemLoader(response=self.response)
        loader.add_css(None, "div::text", TakeFirst(), lambda x: {"name": x})
        assert loader.get_output_value("name") == ["Marta"]
        loader.replace_css(None, "p::text", TakeFirst(), lambda x: {"name": x})
        assert loader.get_output_value("name") == ["Paragraph"]
        loader.add_css(None, "a::attr(href)", TakeFirst(), lambda x: {"url": x})
        assert loader.get_output_value("url") == ["http://www.scrapy.org"]
        loader.replace_css(None, "img::attr(src)", TakeFirst(), lambda x: {"url": x})
        assert loader.get_output_value("url") == ["/images/logo.png"]

    def test_replace_css_re(self):
        """replace_css() honours the ``re=`` filter with a capture group."""
        loader = ProcessorItemLoader(response=self.response)
        assert loader.selector
        loader.add_css("url", "a::attr(href)")
        assert loader.get_output_value("url") == ["http://www.scrapy.org"]
        loader.replace_css("url", "a::attr(href)", re=r"http://www\.(.+)")
        assert loader.get_output_value("url") == ["scrapy.org"]
class TestSubselectorLoader:
    """nested_xpath()/nested_css() sub-loaders share state with their parent."""

    response = HtmlResponse(
        url="",
        encoding="utf-8",
        body=b"""
    <html>
    <body>
    <header>
    <div id="id">marta</div>
    <p>paragraph</p>
    </header>
    <footer class="footer">
    <a href="http://www.scrapy.org">homepage</a>
    <img src="/images/logo.png" width="244" height="65" alt="Scrapy">
    </footer>
    </body>
    </html>
    """,
    )

    def test_nested_xpath(self):
        """A nested XPath loader scopes expressions to its sub-selector."""
        loader = NestedItemLoader(response=self.response)
        header = loader.nested_xpath("//header")
        header.add_xpath("name", "div/text()")
        header.add_css("name_div", "#id")
        header.add_value(
            "name_value", header.selector.xpath('div[@id = "id"]/text()').getall()
        )
        # Values collected through the sub-loader are visible on the parent.
        assert loader.get_output_value("name") == ["marta"]
        assert loader.get_output_value("name_div") == ['<div id="id">marta</div>']
        assert loader.get_output_value("name_value") == ["marta"]
        assert loader.get_output_value("name") == header.get_output_value("name")
        assert loader.get_output_value("name_div") == header.get_output_value(
            "name_div"
        )
        assert loader.get_output_value("name_value") == header.get_output_value(
            "name_value"
        )

    def test_nested_css(self):
        """Same as test_nested_xpath, but the sub-loader is built via CSS."""
        loader = NestedItemLoader(response=self.response)
        header = loader.nested_css("header")
        header.add_xpath("name", "div/text()")
        header.add_css("name_div", "#id")
        header.add_value(
            "name_value", header.selector.xpath('div[@id = "id"]/text()').getall()
        )
        assert loader.get_output_value("name") == ["marta"]
        assert loader.get_output_value("name_div") == ['<div id="id">marta</div>']
        assert loader.get_output_value("name_value") == ["marta"]
        assert loader.get_output_value("name") == header.get_output_value("name")
        assert loader.get_output_value("name_div") == header.get_output_value(
            "name_div"
        )
        assert loader.get_output_value("name_value") == header.get_output_value(
            "name_value"
        )

    def test_nested_replace(self):
        """replace_xpath() on any nesting level replaces the shared value."""
        loader = NestedItemLoader(response=self.response)
        footer = loader.nested_xpath("//footer")
        anchor = footer.nested_xpath("a")
        loader.add_xpath("url", "//footer/a/@href")
        assert loader.get_output_value("url") == ["http://www.scrapy.org"]
        footer.replace_xpath("url", "img/@src")
        assert loader.get_output_value("url") == ["/images/logo.png"]
        anchor.replace_xpath("url", "@href")
        assert loader.get_output_value("url") == ["http://www.scrapy.org"]

    def test_nested_ordering(self):
        """Values come out in add order, regardless of which loader added them."""
        loader = NestedItemLoader(response=self.response)
        footer = loader.nested_xpath("//footer")
        anchor = footer.nested_xpath("a")
        footer.add_xpath("url", "img/@src")
        loader.add_xpath("url", "//footer/a/@href")
        anchor.add_xpath("url", "text()")
        loader.add_xpath("url", "//footer/a/@href")
        assert loader.get_output_value("url") == [
            "/images/logo.png",
            "http://www.scrapy.org",
            "homepage",
            "http://www.scrapy.org",
        ]

    def test_nested_load_item(self):
        """All nesting levels share one and the same item instance."""
        loader = NestedItemLoader(response=self.response)
        footer = loader.nested_xpath("//footer")
        image = footer.nested_xpath("img")
        loader.add_xpath("name", "//header/div/text()")
        footer.add_xpath("url", "a/@href")
        image.add_xpath("image", "@src")
        item = loader.load_item()
        assert item is loader.item
        assert item is footer.item
        assert item is image.item
        assert item["name"] == ["marta"]
        assert item["url"] == ["http://www.scrapy.org"]
        assert item["image"] == ["/images/logo.png"]
# Processors declared as plain functions (rather than processor objects).
def function_processor_strip(iterable):
    """Return a list with surrounding whitespace removed from each element."""
    return [element.strip() for element in iterable]
def function_processor_upper(iterable):
    """Return a list with every element upper-cased."""
    return [element.upper() for element in iterable]
class FunctionProcessorItem(Item):
    """Item whose field declares plain functions as its processors."""

    foo = Field(
        input_processor=function_processor_strip,
        output_processor=function_processor_upper,
    )
class FunctionProcessorItemLoader(ItemLoader):
    """Loader that builds FunctionProcessorItem instances by default."""

    default_item_class = FunctionProcessorItem
class TestFunctionProcessor:
    """Processors declared as plain functions on ``Field()`` must be honoured."""

    def test_processor_defined_in_item(self):
        """Input processor strips each value; output processor upper-cases."""
        loader = FunctionProcessorItemLoader()
        loader.add_value("foo", " bar ")
        loader.add_value("foo", [" asdf ", " qwerty "])
        assert dict(loader.load_item()) == {"foo": ["BAR", "ASDF", "QWERTY"]}
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderstate.py | tests/test_spiderstate.py | from datetime import datetime, timezone
import pytest
from scrapy.exceptions import NotConfigured
from scrapy.extensions.spiderstate import SpiderState
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
def test_store_load(tmp_path):
    """State stored via one SpiderState must be restored by a fresh one."""
    jobdir = str(tmp_path)
    spider = Spider(name="default")
    dt = datetime.now(tz=timezone.utc)
    ss = SpiderState(jobdir)
    ss.spider_opened(spider)
    spider.state["one"] = 1
    spider.state["dt"] = dt
    ss.spider_closed(spider)
    spider2 = Spider(name="default")
    ss2 = SpiderState(jobdir)
    ss2.spider_opened(spider2)
    # Bug fix: assert on the *reloaded* spider's state. The previous
    # ``assert spider.state == ...`` was trivially true (that dict was never
    # touched after spider_closed) and did not verify that SpiderState
    # actually restored anything from the job directory.
    assert spider2.state == {"one": 1, "dt": dt}
    ss2.spider_closed(spider2)
def test_state_attribute():
    """Without a jobdir, ``spider.state`` must still exist (as an empty dict).

    This keeps the interface consistent whether or not persistence is on.
    """
    spider = Spider(name="default")
    state_ext = SpiderState()
    state_ext.spider_opened(spider)
    assert spider.state == {}
    state_ext.spider_closed(spider)
def test_not_configured():
    """from_crawler() must raise NotConfigured for a default-settings crawler.

    NOTE(review): presumably because no JOBDIR is configured -- confirm
    against SpiderState.from_crawler().
    """
    crawler = get_crawler(Spider)
    with pytest.raises(NotConfigured):
        SpiderState.from_crawler(crawler)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_engine.py | tests/test_engine.py | from __future__ import annotations
import asyncio
import re
import subprocess
import sys
from collections import defaultdict
from dataclasses import dataclass
from logging import DEBUG
from typing import TYPE_CHECKING, cast
from unittest.mock import Mock, call
from urllib.parse import urlparse
import attr
import pytest
from itemadapter import ItemAdapter
from pydispatch import dispatcher
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from scrapy import signals
from scrapy.core.engine import ExecutionEngine, _Slot
from scrapy.core.scheduler import BaseScheduler
from scrapy.exceptions import CloseSpider, IgnoreRequest
from scrapy.http import Request, Response
from scrapy.item import Field, Item
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Spider
from scrapy.utils.defer import (
_schedule_coro,
deferred_f_from_coro_f,
deferred_from_coro,
maybe_deferred_to_future,
)
from scrapy.utils.signal import disconnect_all
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests import get_testdata
if TYPE_CHECKING:
from scrapy.core.scheduler import Scheduler
from scrapy.crawler import Crawler
from scrapy.statscollectors import MemoryStatsCollector
from tests.mockserver.http import MockServer
class MyItem(Item):
    """Scrapy ``Item`` flavor of the scraped record (name/url/price)."""

    name = Field()
    url = Field()
    price = Field()
@attr.s
class AttrsItem:
    """attrs flavor of the scraped record (name/url/price)."""

    name = attr.ib(default="")
    url = attr.ib(default="")
    price = attr.ib(default=0)
@dataclass
class DataClassItem:
    """dataclass flavor of the scraped record (name/url/price)."""

    name: str = ""
    url: str = ""
    price: int = 0
class MySpider(Spider):
    """Crawl the mock site's pages and scrape name/url/price from item pages."""

    name = "scrapytest.org"
    # Fix: escape the dot -- the previous r"item\d+.html" let "." match any
    # character, so e.g. "item1xhtml" would also have matched.
    itemurl_re = re.compile(r"item\d+\.html")
    name_re = re.compile(r"<h1>(.*?)</h1>", re.MULTILINE)
    price_re = re.compile(r">Price: \$(.*?)<", re.MULTILINE)
    # Item flavor to emit; subclasses override this (dict/attrs/dataclass).
    item_cls: type = MyItem

    def parse(self, response):
        """Follow every extracted link that looks like an item page."""
        xlink = LinkExtractor()
        # itemurl_re is already a compiled pattern; use it directly instead
        # of re-wrapping it with re.compile() on every call as before.
        for link in xlink.extract_links(response):
            if self.itemurl_re.search(link.url):
                yield Request(url=link.url, callback=self.parse_item)

    def parse_item(self, response):
        """Build an item of ``item_cls`` from an item page's HTML."""
        adapter = ItemAdapter(self.item_cls())
        m = self.name_re.search(response.text)
        if m:
            adapter["name"] = m.group(1)
        adapter["url"] = response.url
        m = self.price_re.search(response.text)
        if m:
            adapter["price"] = m.group(1)
        return adapter.item
class DupeFilterSpider(MySpider):
    """Variant whose start requests are subject to duplicate filtering."""

    async def start(self):
        for url in self.start_urls:
            yield Request(url)  # no dont_filter=True
class DictItemsSpider(MySpider):
    """MySpider emitting plain dicts."""

    item_cls = dict
class AttrsItemsSpider(MySpider):
    """MySpider emitting attrs items."""

    item_cls = AttrsItem
class DataClassItemsSpider(MySpider):
    """MySpider emitting dataclass items."""

    item_cls = DataClassItem
class ItemZeroDivisionErrorSpider(MySpider):
    """MySpider with a pipeline that raises ZeroDivisionError per item."""

    custom_settings = {
        "ITEM_PIPELINES": {
            "tests.pipelines.ProcessWithZeroDivisionErrorPipeline": 300,
        }
    }
class ChangeCloseReasonSpider(MySpider):
    """Closes itself with a custom reason as soon as the engine goes idle."""

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        # Listen for spider_idle so we can abort with our own close reason.
        crawler.signals.connect(spider.spider_idle, signals.spider_idle)
        return spider

    def spider_idle(self):
        raise CloseSpider(reason="custom_reason")
class CrawlerRun:
    """A class to run the crawler and keep track of events occurred"""

    def __init__(self, spider_class):
        # Each container records the arguments of one signal, in firing order.
        self.respplug = []  # (response, spider) from response_downloaded
        self.reqplug = []  # (request, spider) from request_scheduled
        self.reqdropped = []  # (request, spider) from request_dropped
        self.reqreached = []  # (request, spider) from request_reached_downloader
        self.itemerror = []  # (item, response, spider, failure) from item_error
        self.itemresp = []  # (item, response) from item_scraped
        self.headers = {}  # request -> headers from headers_received
        self.bytes = defaultdict(list)  # request -> received data chunks
        self.signals_caught = {}  # signal -> kwargs of its last firing
        self.spider_class = spider_class

    async def run(self, mockserver: MockServer) -> None:
        """Crawl the mock server with ``spider_class`` until the engine stops."""
        self.mockserver = mockserver
        start_urls = [
            self.geturl("/static/"),
            self.geturl("/redirect"),
            self.geturl("/redirect"),  # duplicate
            self.geturl("/numbers"),
        ]
        # Record every public Scrapy signal that fires during the crawl.
        for name, signal in vars(signals).items():
            if not name.startswith("_"):
                dispatcher.connect(self.record_signal, signal)
        self.crawler = get_crawler(self.spider_class)
        self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
        self.crawler.signals.connect(self.item_error, signals.item_error)
        self.crawler.signals.connect(self.headers_received, signals.headers_received)
        self.crawler.signals.connect(self.bytes_received, signals.bytes_received)
        self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
        self.crawler.signals.connect(self.request_dropped, signals.request_dropped)
        self.crawler.signals.connect(
            self.request_reached, signals.request_reached_downloader
        )
        self.crawler.signals.connect(
            self.response_downloaded, signals.response_downloaded
        )
        self.crawler.crawl(start_urls=start_urls)
        # Resolved by stop() once the engine_stopped signal arrives.
        self.deferred: defer.Deferred[None] = defer.Deferred()
        dispatcher.connect(self.stop, signals.engine_stopped)
        await maybe_deferred_to_future(self.deferred)

    async def stop(self):
        """engine_stopped handler: undo signal wiring and stop the crawler."""
        for name, signal in vars(signals).items():
            if not name.startswith("_"):
                disconnect_all(signal)
        self.deferred.callback(None)
        await self.crawler.stop_async()

    def geturl(self, path: str) -> str:
        """Absolute URL for *path* on the mock server."""
        return self.mockserver.url(path)

    def getpath(self, url):
        """Path component of *url*."""
        u = urlparse(url)
        return u.path

    def item_error(self, item, response, spider, failure):
        self.itemerror.append((item, response, spider, failure))

    def item_scraped(self, item, spider, response):
        self.itemresp.append((item, response))

    def headers_received(self, headers, body_length, request, spider):
        self.headers[request] = headers

    def bytes_received(self, data, request, spider):
        self.bytes[request].append(data)

    def request_scheduled(self, request, spider):
        self.reqplug.append((request, spider))

    def request_reached(self, request, spider):
        self.reqreached.append((request, spider))

    def request_dropped(self, request, spider):
        self.reqdropped.append((request, spider))

    def response_downloaded(self, response, spider):
        self.respplug.append((response, spider))

    def record_signal(self, *args, **kwargs):
        """Record a signal and its parameters"""
        # Later firings of the same signal overwrite earlier ones.
        signalargs = kwargs.copy()
        sig = signalargs.pop("signal")
        signalargs.pop("sender", None)
        self.signals_caught[sig] = signalargs
class TestEngineBase:
    """Assertion helpers shared by the engine end-to-end tests."""

    @staticmethod
    def _assert_visited_urls(run: CrawlerRun) -> None:
        """Every must-visit path appears among the downloaded responses."""
        must_be_visited = [
            "/static/",
            "/redirect",
            "/redirected",
            "/static/item1.html",
            "/static/item2.html",
            "/static/item999.html",
        ]
        urls_visited = {rp[0].url for rp in run.respplug}
        urls_expected = {run.geturl(p) for p in must_be_visited}
        assert urls_expected <= urls_visited, (
            f"URLs not visited: {list(urls_expected - urls_visited)}"
        )

    @staticmethod
    def _assert_scheduled_requests(run: CrawlerRun, count: int) -> None:
        """Exactly *count* requests were scheduled, and bookkeeping balances."""
        assert len(run.reqplug) == count
        paths_expected = [
            "/static/item999.html",
            "/static/item2.html",
            "/static/item1.html",
        ]
        urls_requested = {rq[0].url for rq in run.reqplug}
        urls_expected = {run.geturl(p) for p in paths_expected}
        assert urls_expected <= urls_requested
        # Every scheduled request is either dropped or ends in a response.
        scheduled_requests_count = len(run.reqplug)
        dropped_requests_count = len(run.reqdropped)
        responses_count = len(run.respplug)
        assert scheduled_requests_count == dropped_requests_count + responses_count
        assert len(run.reqreached) == responses_count

    @staticmethod
    def _assert_dropped_requests(run: CrawlerRun) -> None:
        """Exactly one request was dropped (the duplicate start URL)."""
        assert len(run.reqdropped) == 1

    @staticmethod
    def _assert_downloaded_responses(run: CrawlerRun, count: int) -> None:
        # response tests
        assert len(run.respplug) == count
        assert len(run.reqreached) == count
        for response, _ in run.respplug:
            if run.getpath(response.url) == "/static/item999.html":
                assert response.status == 404
            if run.getpath(response.url) == "/redirect":
                assert response.status == 302

    @staticmethod
    def _assert_items_error(run: CrawlerRun) -> None:
        """Both item pages produced an item_error carrying ZeroDivisionError."""
        assert len(run.itemerror) == 2
        for item, response, spider, failure in run.itemerror:
            assert failure.value.__class__ is ZeroDivisionError
            assert spider == run.crawler.spider
            assert item["url"] == response.url
            if "item1.html" in item["url"]:
                assert item["name"] == "Item 1 name"
                assert item["price"] == "100"
            if "item2.html" in item["url"]:
                assert item["name"] == "Item 2 name"
                assert item["price"] == "200"

    @staticmethod
    def _assert_scraped_items(run: CrawlerRun) -> None:
        """Both item pages were scraped with the expected field values."""
        assert len(run.itemresp) == 2
        for item, response in run.itemresp:
            item = ItemAdapter(item)
            assert item["url"] == response.url
            if "item1.html" in item["url"]:
                assert item["name"] == "Item 1 name"
                assert item["price"] == "100"
            if "item2.html" in item["url"]:
                assert item["name"] == "Item 2 name"
                assert item["price"] == "200"

    @staticmethod
    def _assert_headers_received(run: CrawlerRun) -> None:
        """Each headers_received payload carries standard server headers."""
        for headers in run.headers.values():
            assert b"Server" in headers
            assert b"TwistedWeb" in headers[b"Server"]
            assert b"Date" in headers
            assert b"Content-Type" in headers

    @staticmethod
    def _assert_bytes_received(run: CrawlerRun) -> None:
        """bytes_received chunks reassemble each response body exactly."""
        assert len(run.bytes) == 9
        for request, data in run.bytes.items():
            joined_data = b"".join(data)
            if run.getpath(request.url) == "/static/":
                assert joined_data == get_testdata("test_site", "index.html")
            elif run.getpath(request.url) == "/static/item1.html":
                assert joined_data == get_testdata("test_site", "item1.html")
            elif run.getpath(request.url) == "/static/item2.html":
                assert joined_data == get_testdata("test_site", "item2.html")
            elif run.getpath(request.url) == "/redirected":
                assert joined_data == b"Redirected here"
            elif run.getpath(request.url) == "/redirect":
                assert (
                    joined_data == b"\n<html>\n"
                    b"  <head>\n"
                    b'    <meta http-equiv="refresh" content="0;URL=/redirected">\n'
                    b"  </head>\n"
                    b'  <body bgcolor="#FFFFFF" text="#000000">\n'
                    b'    <a href="/redirected">click here</a>\n'
                    b"  </body>\n"
                    b"</html>\n"
                )
            elif run.getpath(request.url) == "/static/item999.html":
                assert (
                    joined_data == b"\n<html>\n"
                    b"  <head><title>404 - No Such Resource</title></head>\n"
                    b"  <body>\n"
                    b"    <h1>No Such Resource</h1>\n"
                    b"    <p>File not found.</p>\n"
                    b"  </body>\n"
                    b"</html>\n"
                )
            elif run.getpath(request.url) == "/numbers":
                # signal was fired multiple times
                assert len(data) > 1
                # bytes were received in order
                numbers = [str(x).encode("utf8") for x in range(2**18)]
                assert joined_data == b"".join(numbers)

    @staticmethod
    def _assert_signals_caught(run: CrawlerRun) -> None:
        """The expected lifecycle signals fired, with the expected kwargs."""
        assert signals.engine_started in run.signals_caught
        assert signals.engine_stopped in run.signals_caught
        assert signals.spider_opened in run.signals_caught
        assert signals.spider_idle in run.signals_caught
        assert signals.spider_closed in run.signals_caught
        assert signals.headers_received in run.signals_caught
        assert {"spider": run.crawler.spider} == run.signals_caught[
            signals.spider_opened
        ]
        assert {"spider": run.crawler.spider} == run.signals_caught[signals.spider_idle]
        assert {
            "spider": run.crawler.spider,
            "reason": "finished",
        } == run.signals_caught[signals.spider_closed]
class TestEngine(TestEngineBase):
    """End-to-end engine crawls against the mock HTTP server."""

    @deferred_f_from_coro_f
    async def test_crawler(self, mockserver: MockServer) -> None:
        """A full crawl behaves identically for every supported item flavor."""
        for spider in (
            MySpider,
            DictItemsSpider,
            AttrsItemsSpider,
            DataClassItemsSpider,
        ):
            run = CrawlerRun(spider)
            await run.run(mockserver)
            self._assert_visited_urls(run)
            self._assert_scheduled_requests(run, count=9)
            self._assert_downloaded_responses(run, count=9)
            self._assert_scraped_items(run)
            self._assert_signals_caught(run)
            self._assert_bytes_received(run)

    @deferred_f_from_coro_f
    async def test_crawler_dupefilter(self, mockserver: MockServer) -> None:
        """Without dont_filter, the duplicate start URL is dropped."""
        run = CrawlerRun(DupeFilterSpider)
        await run.run(mockserver)
        self._assert_scheduled_requests(run, count=8)
        self._assert_dropped_requests(run)

    @deferred_f_from_coro_f
    async def test_crawler_itemerror(self, mockserver: MockServer) -> None:
        """Pipeline exceptions surface through the item_error signal."""
        run = CrawlerRun(ItemZeroDivisionErrorSpider)
        await run.run(mockserver)
        self._assert_items_error(run)

    @deferred_f_from_coro_f
    async def test_crawler_change_close_reason_on_idle(
        self, mockserver: MockServer
    ) -> None:
        """CloseSpider raised from a spider_idle handler sets the close reason."""
        run = CrawlerRun(ChangeCloseReasonSpider)
        await run.run(mockserver)
        assert {
            "spider": run.crawler.spider,
            "reason": "custom_reason",
        } == run.signals_caught[signals.spider_closed]

    @deferred_f_from_coro_f
    async def test_close_downloader(self):
        """close_async() on a freshly built engine must not raise."""
        e = ExecutionEngine(get_crawler(MySpider), lambda _: None)
        await e.close_async()

    def test_close_without_downloader(self):
        """If the downloader fails to build, its error propagates unchanged."""

        class CustomException(Exception):
            pass

        class BadDownloader:
            def __init__(self, crawler):
                raise CustomException

        with pytest.raises(CustomException):
            ExecutionEngine(
                get_crawler(MySpider, {"DOWNLOADER": BadDownloader}), lambda _: None
            )

    @inlineCallbacks
    def test_start_already_running_exception(self):
        """Starting an already-running engine raises RuntimeError."""
        crawler = get_crawler(DefaultSpider)
        crawler.spider = crawler._create_spider()
        e = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = e
        yield deferred_from_coro(e.open_spider_async())
        # First start is scheduled in the background; the second must fail.
        _schedule_coro(e.start_async())
        with pytest.raises(RuntimeError, match="Engine already running"):
            yield deferred_from_coro(e.start_async())
        yield deferred_from_coro(e.stop_async())

    @pytest.mark.only_asyncio
    @deferred_f_from_coro_f
    async def test_start_already_running_exception_asyncio(self):
        """Same double-start check, driven through asyncio.gather()."""
        crawler = get_crawler(DefaultSpider)
        crawler.spider = crawler._create_spider()
        e = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = e
        await e.open_spider_async()
        with pytest.raises(RuntimeError, match="Engine already running"):
            await asyncio.gather(e.start_async(), e.start_async())
        await e.stop_async()

    @inlineCallbacks
    def test_start_request_processing_exception(self):
        """A failure while scheduling start requests is logged, then shutdown."""

        class BadRequestFingerprinter:
            def fingerprint(self, request):
                raise ValueError  # to make Scheduler.enqueue_request() fail

        class SimpleSpider(Spider):
            name = "simple"

            async def start(self):
                yield Request("data:,")

        crawler = get_crawler(
            SimpleSpider, {"REQUEST_FINGERPRINTER_CLASS": BadRequestFingerprinter}
        )
        with LogCapture() as log:
            yield crawler.crawl()
        assert "Error while processing requests from start()" in str(log)
        assert "Spider closed (shutdown)" in str(log)

    def test_short_timeout(self):
        """A tiny CLOSESPIDER_TIMEOUT must shut down cleanly (no tracebacks)."""
        args = (
            sys.executable,
            "-m",
            "scrapy.cmdline",
            "fetch",
            "-s",
            "CLOSESPIDER_TIMEOUT=0.001",
            "-s",
            "LOG_LEVEL=DEBUG",
            "http://toscrape.com",
        )
        p = subprocess.Popen(
            args,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE,
        )
        try:
            _, stderr = p.communicate(timeout=15)
        except subprocess.TimeoutExpired:
            p.kill()
            p.communicate()
            pytest.fail("Command took too much time to complete")
        stderr_str = stderr.decode("utf-8")
        assert "AttributeError" not in stderr_str, stderr_str
        assert "AssertionError" not in stderr_str, stderr_str
class TestEngineDownloadAsync:
    """Test cases for ExecutionEngine.download_async()."""

    @pytest.fixture
    def engine(self) -> ExecutionEngine:
        # Engine with its real downloader replaced by a Mock and a mocked
        # slot, so fetch results can be scripted without any network.
        crawler = get_crawler(MySpider)
        engine = ExecutionEngine(crawler, lambda _: None)
        engine.downloader.close()
        engine.downloader = Mock()
        engine._slot = Mock()
        engine._slot.inprogress = set()
        return engine

    @staticmethod
    async def _download(engine: ExecutionEngine, request: Request) -> Response:
        # Indirection point: a subclass overrides this to exercise the
        # Deferred-based download() API with the same test bodies.
        return await engine.download_async(request)

    @deferred_f_from_coro_f
    async def test_download_async_success(self, engine):
        """Test basic successful async download of a request."""
        request = Request("http://example.com")
        response = Response("http://example.com", body=b"test body")
        engine.spider = Mock()
        engine.downloader.fetch.return_value = defer.succeed(response)
        engine._slot.add_request = Mock()
        engine._slot.remove_request = Mock()
        result = await self._download(engine, request)
        assert result == response
        # The request must be tracked in the slot for the duration of the fetch.
        engine._slot.add_request.assert_called_once_with(request)
        engine._slot.remove_request.assert_called_once_with(request)
        engine.downloader.fetch.assert_called_once_with(request)

    @deferred_f_from_coro_f
    async def test_download_async_redirect(self, engine):
        """Test async download with a redirect request."""
        original_request = Request("http://example.com")
        redirect_request = Request("http://example.com/redirect")
        final_response = Response("http://example.com/redirect", body=b"redirected")
        # First call returns redirect request, second call returns final response
        engine.downloader.fetch.side_effect = [
            defer.succeed(redirect_request),
            defer.succeed(final_response),
        ]
        engine.spider = Mock()
        engine._slot.add_request = Mock()
        engine._slot.remove_request = Mock()
        result = await self._download(engine, original_request)
        assert result == final_response
        assert engine.downloader.fetch.call_count == 2
        # Both the original and the redirect request pass through the slot.
        engine._slot.add_request.assert_has_calls(
            [call(original_request), call(redirect_request)]
        )
        engine._slot.remove_request.assert_has_calls(
            [call(original_request), call(redirect_request)]
        )

    @deferred_f_from_coro_f
    async def test_download_async_no_spider(self, engine):
        """Test async download attempt when no spider is available."""
        request = Request("http://example.com")
        engine.spider = None
        with pytest.raises(RuntimeError, match="No open spider to crawl:"):
            await self._download(engine, request)

    @deferred_f_from_coro_f
    async def test_download_async_failure(self, engine):
        """Test async download when the downloader raises an exception."""
        request = Request("http://example.com")
        error = RuntimeError("Download failed")
        engine.spider = Mock()
        engine.downloader.fetch.return_value = defer.fail(error)
        engine._slot.add_request = Mock()
        engine._slot.remove_request = Mock()
        with pytest.raises(RuntimeError, match="Download failed"):
            await self._download(engine, request)
        # The slot must be cleaned up even when the fetch fails.
        engine._slot.add_request.assert_called_once_with(request)
        engine._slot.remove_request.assert_called_once_with(request)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestEngineDownload(TestEngineDownloadAsync):
    """Test cases for ExecutionEngine.download()."""

    # Reuses every inherited test, routed through the Deferred-returning
    # download() API (hence the deprecation-warning filter above).
    @staticmethod
    async def _download(engine: ExecutionEngine, request: Request) -> Response:
        return await maybe_deferred_to_future(engine.download(request))
def test_request_scheduled_signal(caplog):
    """request_scheduled handlers can veto a request by raising IgnoreRequest."""

    class TestScheduler(BaseScheduler):
        # Minimal scheduler that only records what gets enqueued.
        def __init__(self):
            self.enqueued = []

        def enqueue_request(self, request: Request) -> bool:
            self.enqueued.append(request)
            return True

    def signal_handler(request: Request, spider: Spider) -> None:
        if "drop" in request.url:
            raise IgnoreRequest

    crawler = get_crawler(MySpider)
    engine = ExecutionEngine(crawler, lambda _: None)
    # Stop the downloader's background slot-cleanup loop so nothing runs
    # concurrently with this test.
    engine.downloader._slot_gc_loop.stop()
    scheduler = TestScheduler()

    async def start():
        # "return" before "yield" makes this an empty async generator,
        # standing in for the engine's pending start-request iterator.
        return
        yield

    engine._start = start()
    engine._slot = _Slot(False, Mock(), scheduler)
    crawler.signals.connect(signal_handler, signals.request_scheduled)
    keep_request = Request("https://keep.example")
    engine._schedule_request(keep_request)
    drop_request = Request("https://drop.example")
    caplog.set_level(DEBUG)
    engine._schedule_request(drop_request)
    # Only the non-vetoed request must have reached the scheduler.
    assert scheduler.enqueued == [keep_request], (
        f"{scheduler.enqueued!r} != [{keep_request!r}]"
    )
    crawler.signals.disconnect(signal_handler, signals.request_scheduled)
class TestEngineCloseSpider:
    """Tests for exception handling coverage during close_spider_async()."""

    @pytest.fixture
    def crawler(self) -> Crawler:
        # Crawler with a spider instance already attached.
        crawler = get_crawler(DefaultSpider)
        crawler.spider = crawler._create_spider()
        return crawler

    @deferred_f_from_coro_f
    async def test_no_slot(self, crawler: Crawler) -> None:
        """Closing with no slot assigned raises; restoring the slot recovers."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        slot = engine._slot
        engine._slot = None
        with pytest.raises(RuntimeError, match="Engine slot not assigned"):
            await engine.close_spider_async()
        # close it correctly
        engine._slot = slot
        await engine.close_spider_async()

    @deferred_f_from_coro_f
    async def test_no_spider(self, crawler: Crawler) -> None:
        """Closing before open_spider_async() raises RuntimeError."""
        engine = ExecutionEngine(crawler, lambda _: None)
        with pytest.raises(RuntimeError, match="Spider not opened"):
            await engine.close_spider_async()
        engine.downloader.close()  # cleanup

    @deferred_f_from_coro_f
    async def test_exception_slot(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A broken slot is logged as 'Slot close failure', not raised."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        assert engine._slot
        # Fault injection: removing the attribute makes slot shutdown blow up.
        del engine._slot.heartbeat
        await engine.close_spider_async()
        assert "Slot close failure" in caplog.text

    @deferred_f_from_coro_f
    async def test_exception_downloader(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A broken downloader is logged as 'Downloader close failure'."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        del engine.downloader.slots
        await engine.close_spider_async()
        assert "Downloader close failure" in caplog.text

    @deferred_f_from_coro_f
    async def test_exception_scraper(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A broken scraper is logged as 'Scraper close failure'."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        engine.scraper.slot = None
        await engine.close_spider_async()
        assert "Scraper close failure" in caplog.text

    @deferred_f_from_coro_f
    async def test_exception_scheduler(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A broken scheduler is logged as 'Scheduler close failure'."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        assert engine._slot
        del cast("Scheduler", engine._slot.scheduler).dqs
        await engine.close_spider_async()
        assert "Scheduler close failure" in caplog.text

    @deferred_f_from_coro_f
    async def test_exception_signal(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A failure sending spider_closed is logged, not raised."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        signal_manager = engine.signals
        del engine.signals
        await engine.close_spider_async()
        assert "Error while sending spider_close signal" in caplog.text
        # send the spider_closed signal to close various components
        await signal_manager.send_catch_log_async(
            signal=signals.spider_closed,
            spider=engine.spider,
            reason="cancelled",
        )

    @deferred_f_from_coro_f
    async def test_exception_stats(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A broken stats collector is logged as 'Stats close failure'."""
        engine = ExecutionEngine(crawler, lambda _: None)
        crawler.engine = engine
        await engine.open_spider_async()
        del cast("MemoryStatsCollector", crawler.stats).spider_stats
        await engine.close_spider_async()
        assert "Stats close failure" in caplog.text

    @deferred_f_from_coro_f
    async def test_exception_callback(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A failing Deferred-based close callback is logged, not raised."""
        engine = ExecutionEngine(crawler, lambda _: defer.fail(ValueError()))
        crawler.engine = engine
        await engine.open_spider_async()
        await engine.close_spider_async()
        assert "Error running spider_closed_callback" in caplog.text

    @deferred_f_from_coro_f
    async def test_exception_async_callback(
        self, crawler: Crawler, caplog: pytest.LogCaptureFixture
    ) -> None:
        """A failing coroutine close callback is logged, not raised."""

        async def cb(_):
            raise ValueError

        engine = ExecutionEngine(crawler, cb)
        crawler.engine = engine
        await engine.open_spider_async()
        await engine.close_spider_async()
        assert "Error running spider_closed_callback" in caplog.text
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_request_dict.py | tests/test_request_dict.py | import pytest
from scrapy import Request, Spider
from scrapy.http import FormRequest, JsonRequest
from scrapy.utils.request import request_from_dict
class CustomRequest(Request):
    """Request subclass used to verify that custom classes round-trip."""
class TestRequestSerialization:
    """Round-trip Request objects through Request.to_dict()/request_from_dict().

    Callbacks and errbacks are serialized by attribute *name*, so most tests
    need a spider instance (MethodsSpider) exposing the referenced methods.
    """

    def setup_method(self):
        self.spider = MethodsSpider()

    def test_basic(self):
        r = Request("http://www.example.com")
        self._assert_serializes_ok(r)

    def test_all_attributes(self):
        r = Request(
            url="http://www.example.com",
            callback=self.spider.parse_item,
            errback=self.spider.handle_error,
            method="POST",
            body=b"some body",
            headers={"content-encoding": "text/html; charset=latin-1"},
            cookies={"currency": "руб"},
            encoding="latin-1",
            priority=20,
            meta={"a": "b"},
            cb_kwargs={"k": "v"},
            flags=["testFlag"],
        )
        self._assert_serializes_ok(r, spider=self.spider)

    def test_latin1_body(self):
        # Not valid UTF-8; the body must survive the round trip byte-for-byte.
        r = Request("http://www.example.com", body=b"\xa3")
        self._assert_serializes_ok(r)

    def test_utf8_body(self):
        r = Request("http://www.example.com", body=b"\xc2\xa3")
        self._assert_serializes_ok(r)

    def _assert_serializes_ok(self, request, spider=None):
        # Serialize, deserialize, then compare attribute by attribute.
        d = request.to_dict(spider=spider)
        request2 = request_from_dict(d, spider=spider)
        self._assert_same_request(request, request2)

    def _assert_same_request(self, r1, r2):
        assert r1.__class__ == r2.__class__
        assert r1.url == r2.url
        assert r1.callback == r2.callback
        assert r1.errback == r2.errback
        assert r1.method == r2.method
        assert r1.body == r2.body
        assert r1.headers == r2.headers
        assert r1.cookies == r2.cookies
        assert r1.meta == r2.meta
        assert r1.cb_kwargs == r2.cb_kwargs
        assert r1.encoding == r2.encoding
        assert r1._encoding == r2._encoding
        assert r1.priority == r2.priority
        assert r1.dont_filter == r2.dont_filter
        assert r1.flags == r2.flags
        if isinstance(r1, JsonRequest):
            assert r1.dumps_kwargs == r2.dumps_kwargs

    def test_request_class(self):
        # Subclasses must keep their class identity through serialization.
        r1 = FormRequest("http://www.example.com")
        self._assert_serializes_ok(r1, spider=self.spider)
        r2 = CustomRequest("http://www.example.com")
        self._assert_serializes_ok(r2, spider=self.spider)
        r3 = JsonRequest("http://www.example.com", dumps_kwargs={"indent": 4})
        self._assert_serializes_ok(r3, spider=self.spider)

    def test_callback_serialization(self):
        r = Request(
            "http://www.example.com",
            callback=self.spider.parse_item,
            errback=self.spider.handle_error,
        )
        self._assert_serializes_ok(r, spider=self.spider)

    def test_reference_callback_serialization(self):
        # Class attributes referencing module-level functions serialize under
        # the attribute name, not the referenced function's own name.
        r = Request(
            "http://www.example.com",
            callback=self.spider.parse_item_reference,
            errback=self.spider.handle_error_reference,
        )
        self._assert_serializes_ok(r, spider=self.spider)
        request_dict = r.to_dict(spider=self.spider)
        assert request_dict["callback"] == "parse_item_reference"
        assert request_dict["errback"] == "handle_error_reference"

    def test_private_reference_callback_serialization(self):
        # Double-underscore attributes serialize under their mangled names.
        r = Request(
            "http://www.example.com",
            callback=self.spider._MethodsSpider__parse_item_reference,
            errback=self.spider._MethodsSpider__handle_error_reference,
        )
        self._assert_serializes_ok(r, spider=self.spider)
        request_dict = r.to_dict(spider=self.spider)
        assert request_dict["callback"] == "_MethodsSpider__parse_item_reference"
        assert request_dict["errback"] == "_MethodsSpider__handle_error_reference"

    def test_private_callback_serialization(self):
        r = Request(
            "http://www.example.com",
            callback=self.spider._MethodsSpider__parse_item_private,
            errback=self.spider.handle_error,
        )
        self._assert_serializes_ok(r, spider=self.spider)

    def test_mixin_private_callback_serialization(self):
        # Private methods inherited from a mixin mangle to the mixin's name.
        r = Request(
            "http://www.example.com",
            callback=self.spider._SpiderMixin__mixin_callback,
            errback=self.spider.handle_error,
        )
        self._assert_serializes_ok(r, spider=self.spider)

    def test_delegated_callback_serialization(self):
        # Bound method of another object, attached as an instance attribute.
        r = Request(
            "http://www.example.com",
            callback=self.spider.delegated_callback,
            errback=self.spider.handle_error,
        )
        self._assert_serializes_ok(r, spider=self.spider)

    def test_unserializable_callback1(self):
        # Lambdas are not spider instance methods and cannot be serialized.
        r = Request("http://www.example.com", callback=lambda x: x)
        with pytest.raises(
            ValueError, match="is not an instance method in: <MethodsSpider"
        ):
            r.to_dict(spider=self.spider)

    def test_unserializable_callback2(self):
        # Without a spider there is nothing to resolve the method against.
        r = Request("http://www.example.com", callback=self.spider.parse_item)
        with pytest.raises(ValueError, match="is not an instance method in: None"):
            r.to_dict(spider=None)

    def test_unserializable_callback3(self):
        """Parser method is removed or replaced dynamically."""

        class MySpider(Spider):
            name = "my_spider"

            def parse(self, response):
                pass

        spider = MySpider()
        r = Request("http://www.example.com", callback=spider.parse)
        spider.parse = None
        with pytest.raises(ValueError, match="is not an instance method in: <MySpider"):
            r.to_dict(spider=spider)

    def test_callback_not_available(self):
        """Callback method is not available in the spider passed to from_dict"""
        spider = SpiderDelegation()
        r = Request("http://www.example.com", callback=spider.delegated_callback)
        d = r.to_dict(spider=spider)
        with pytest.raises(
            ValueError, match="Method 'delegated_callback' not found in: <Spider"
        ):
            request_from_dict(d, spider=Spider("foo"))
class SpiderMixin:
    # Private method: name-mangled to _SpiderMixin__mixin_callback, which is
    # exactly the attribute name the serialization tests look up.
    def __mixin_callback(self, response):  # pylint: disable=unused-private-member
        pass
class SpiderDelegation:
    """Standalone class whose bound method gets delegated to a spider."""

    def delegated_callback(self, response):
        pass
def parse_item(response):
    """Module-level stub stored as MethodsSpider.parse_item_reference."""
    pass


def handle_error(failure):
    """Module-level stub stored as MethodsSpider.handle_error_reference."""
    pass


def private_parse_item(response):
    """Stub stored under a name-mangled MethodsSpider attribute."""
    pass


def private_handle_error(failure):
    """Stub stored under a name-mangled MethodsSpider attribute."""
    pass
class MethodsSpider(Spider, SpiderMixin):
    """Spider exposing every callback flavour the serialization tests need."""

    name = "test"
    # Spider attributes referencing module-level functions.
    parse_item_reference = parse_item
    handle_error_reference = handle_error
    # Name-mangled to _MethodsSpider__{parse_item,handle_error}_reference.
    __parse_item_reference = private_parse_item
    __handle_error_reference = private_handle_error

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Bound method of another object, attached per instance.
        self.delegated_callback = SpiderDelegation().delegated_callback

    def parse_item(self, response):
        pass

    def handle_error(self, failure):
        pass

    def __parse_item_private(self, response):  # pylint: disable=unused-private-member
        pass
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_stats.py | tests/test_downloadermiddleware_stats.py | from scrapy.downloadermiddlewares.stats import DownloaderStats
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class MyException(Exception):
    """Raised on purpose to exercise per-type exception stats."""
class TestDownloaderStats:
    """DownloaderStats must count requests, responses and exceptions."""

    def setup_method(self):
        self.crawler = get_crawler(Spider)
        self.mw = DownloaderStats(self.crawler.stats)
        self.crawler.stats.open_spider()
        self.req = Request("http://scrapytest.org")
        self.res = Response("scrapytest.org", status=400)

    def teardown_method(self):
        self.crawler.stats.close_spider()

    def assertStatsEqual(self, key, value):
        # Include the full stats dump in the failure message for debugging.
        stats = self.crawler.stats
        assert stats.get_value(key) == value, str(stats.get_stats())

    def test_process_request(self):
        self.mw.process_request(self.req)
        self.assertStatsEqual("downloader/request_count", 1)

    def test_process_response(self):
        self.mw.process_response(self.req, self.res)
        self.assertStatsEqual("downloader/response_count", 1)

    def test_process_exception(self):
        self.mw.process_exception(self.req, MyException())
        self.assertStatsEqual("downloader/exception_count", 1)
        self.assertStatsEqual(
            "downloader/exception_type_count/tests.test_downloadermiddleware_stats.MyException",
            1,
        )
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_engine_loop.py | tests/test_engine_loop.py | from __future__ import annotations
from collections import deque
from logging import ERROR
from typing import TYPE_CHECKING
from twisted.internet.defer import Deferred
from scrapy import Request, Spider, signals
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.test_scheduler import MemoryScheduler
if TYPE_CHECKING:
import pytest
from scrapy.http import Response
async def sleep(seconds: float = 0.001) -> None:
    """Non-blocking sleep driven by the Twisted reactor.

    Schedules a ``callLater`` and awaits the resulting Deferred, so other
    reactor work keeps running while the caller waits.
    """
    from twisted.internet import reactor

    deferred: Deferred[None] = Deferred()
    reactor.callLater(seconds, deferred.callback, None)
    await maybe_deferred_to_future(deferred)
class TestMain:
    @deferred_f_from_coro_f
    async def test_sleep(self):
        """Neither asynchronous sleeps on Spider.start() nor the equivalent on
        the scheduler (returning no requests while also returning True from
        the has_pending_requests() method) should cause the spider to miss the
        processing of any later requests."""
        seconds = 2

        class TestSpider(Spider):
            name = "test"

            async def start(self):
                from twisted.internet import reactor

                yield Request("data:,a")
                await sleep(seconds)
                self.crawler.engine._slot.scheduler.pause()
                self.crawler.engine._slot.scheduler.enqueue_request(Request("data:,b"))
                # During this time, the scheduler reports having requests but
                # returns None.
                await sleep(seconds)
                self.crawler.engine._slot.scheduler.unpause()
                # The scheduler request is processed.
                await sleep(seconds)
                yield Request("data:,c")
                await sleep(seconds)
                self.crawler.engine._slot.scheduler.pause()
                self.crawler.engine._slot.scheduler.enqueue_request(Request("data:,d"))
                # The last start request is processed during the time until the
                # delayed call below, proving that the start iteration can
                # finish before a scheduler “sleep” without causing the
                # scheduler to finish.
                reactor.callLater(seconds, self.crawler.engine._slot.scheduler.unpause)

            def parse(self, response):
                pass

        actual_urls = []

        def track_url(request, spider):
            actual_urls.append(request.url)

        settings = {"SCHEDULER": MemoryScheduler}
        crawler = get_crawler(TestSpider, settings_dict=settings)
        crawler.signals.connect(track_url, signals.request_reached_downloader)
        await crawler.crawl_async()
        assert crawler.stats.get_value("finish_reason") == "finished"
        expected_urls = ["data:,a", "data:,b", "data:,c", "data:,d"]
        assert actual_urls == expected_urls, f"{actual_urls=} != {expected_urls=}"

    @deferred_f_from_coro_f
    async def test_close_during_start_iteration(
        self, caplog: pytest.LogCaptureFixture
    ) -> None:
        """Closing the engine from inside start() must shut down cleanly:
        no errors logged, no request reaches the downloader, and the finish
        reason is "shutdown" rather than "finished"."""

        class TestSpider(Spider):
            name = "test"

            async def start(self):
                assert self.crawler.engine is not None
                await self.crawler.engine.close_async()
                yield Request("data:,a")

            def parse(self, response):
                pass

        actual_urls = []

        def track_url(request, spider):
            actual_urls.append(request.url)

        settings = {"SCHEDULER": MemoryScheduler}
        crawler = get_crawler(TestSpider, settings_dict=settings)
        crawler.signals.connect(track_url, signals.request_reached_downloader)
        caplog.clear()
        with caplog.at_level(ERROR):
            await crawler.crawl_async()
        assert not caplog.records
        assert crawler.stats
        assert crawler.stats.get_value("finish_reason") == "shutdown"
        assert not actual_urls
class TestRequestSendOrder:
    """Order in which requests reach the downloader, mixing start requests,
    callback requests and directly-scheduled requests."""

    seconds = 0.1  # increase if flaky

    @classmethod
    def setup_class(cls):
        cls.mockserver = MockServer()
        cls.mockserver.__enter__()

    @classmethod
    def teardown_class(cls):
        cls.mockserver.__exit__(None, None, None)

    def request(self, num, response_seconds, download_slots, priority=0):
        # The trailing &<num> tags the URL so get_num() can recover it.
        url = self.mockserver.url(f"/delay?n={response_seconds}&{num}")
        meta = {"download_slot": str(num % download_slots)}
        return Request(url, meta=meta, priority=priority)

    def get_num(self, request_or_response: Request | Response):
        return int(request_or_response.url.rsplit("&", maxsplit=1)[1])

    async def _test_request_order(
        self,
        start_nums,
        cb_nums=None,
        settings=None,
        response_seconds=None,
        download_slots=1,
        start_fn=None,
        parse_fn=None,
    ):
        """Run a crawl and assert requests reach the downloader in sorted
        numeric order (i.e. in the order the test deliberately arranged)."""
        cb_nums = cb_nums or []
        settings = settings or {}
        response_seconds = response_seconds or self.seconds
        cb_requests = deque(
            [self.request(num, response_seconds, download_slots) for num in cb_nums]
        )
        if start_fn is None:

            async def start_fn(spider):
                for num in start_nums:
                    yield self.request(num, response_seconds, download_slots)

        if parse_fn is None:

            def parse_fn(spider, response):
                while cb_requests:
                    yield cb_requests.popleft()

        class TestSpider(Spider):
            name = "test"
            start = start_fn
            parse = parse_fn

        actual_nums = []

        def track_num(request, spider):
            actual_nums.append(self.get_num(request))

        crawler = get_crawler(TestSpider, settings_dict=settings)
        crawler.signals.connect(track_num, signals.request_reached_downloader)
        await crawler.crawl_async()
        assert crawler.stats.get_value("finish_reason") == "finished"
        expected_nums = sorted(start_nums + cb_nums)
        assert actual_nums == expected_nums, f"{actual_nums=} != {expected_nums=}"

    @deferred_f_from_coro_f
    async def test_default(self):
        """By default, callback requests take priority over start requests and
        are sent in order. Priority matters, but given the same priority, a
        callback request takes precedence."""
        nums = [1, 2, 3, 4, 5, 6]
        response_seconds = 0
        download_slots = 1

        def _request(num, priority=0):
            return self.request(
                num, response_seconds, download_slots, priority=priority
            )

        async def start(spider):
            # The first CONCURRENT_REQUESTS start requests are sent
            # immediately.
            yield _request(1)
            for request in (
                _request(2, priority=1),
                _request(5),
            ):
                spider.crawler.engine._slot.scheduler.enqueue_request(request)
            yield _request(6)
            yield _request(3, priority=1)
            yield _request(4, priority=1)

        def parse(spider, response):
            return
            yield

        await self._test_request_order(
            start_nums=nums,
            settings={"CONCURRENT_REQUESTS": 1},
            response_seconds=response_seconds,
            start_fn=start,
            parse_fn=parse,
        )

    @deferred_f_from_coro_f
    async def test_lifo_start(self):
        """Changing the queues of start requests to LIFO, matching the queues
        of non-start requests, does not cause all requests to be stored in the
        same queue objects, it only affects the order of start requests."""
        nums = [1, 2, 3, 4, 5, 6]
        response_seconds = 0
        download_slots = 1

        def _request(num, priority=0):
            return self.request(
                num, response_seconds, download_slots, priority=priority
            )

        async def start(spider):
            # The first CONCURRENT_REQUESTS start requests are sent
            # immediately.
            yield _request(1)
            for request in (
                _request(2, priority=1),
                _request(5),
            ):
                spider.crawler.engine._slot.scheduler.enqueue_request(request)
            yield _request(6)
            yield _request(4, priority=1)
            yield _request(3, priority=1)

        def parse(spider, response):
            return
            yield

        await self._test_request_order(
            start_nums=nums,
            settings={
                "CONCURRENT_REQUESTS": 1,
                "SCHEDULER_START_MEMORY_QUEUE": "scrapy.squeues.LifoMemoryQueue",
            },
            response_seconds=response_seconds,
            start_fn=start,
            parse_fn=parse,
        )

    @deferred_f_from_coro_f
    async def test_shared_queues(self):
        """If SCHEDULER_START_*_QUEUE is falsy, start requests and other
        requests share the same queue, i.e. start requests are not priorized
        over other requests if their priority matches."""
        nums = list(range(1, 14))
        response_seconds = 0
        download_slots = 1

        def _request(num, priority=0):
            return self.request(
                num, response_seconds, download_slots, priority=priority
            )

        async def start(spider):
            # The first CONCURRENT_REQUESTS start requests are sent
            # immediately.
            yield _request(1)
            # Below, priority 1 requests are sent first, and requests are sent
            # in LIFO order.
            for request in (
                _request(7, priority=1),
                _request(6, priority=1),
                _request(13),
                _request(12),
            ):
                spider.crawler.engine._slot.scheduler.enqueue_request(request)
            yield _request(11)
            yield _request(10)
            yield _request(5, priority=1)
            yield _request(4, priority=1)
            for request in (
                _request(3, priority=1),
                _request(2, priority=1),
                _request(9),
                _request(8),
            ):
                spider.crawler.engine._slot.scheduler.enqueue_request(request)

        def parse(spider, response):
            return
            yield

        await self._test_request_order(
            start_nums=nums,
            settings={
                "CONCURRENT_REQUESTS": 1,
                "SCHEDULER_START_MEMORY_QUEUE": None,
            },
            response_seconds=response_seconds,
            start_fn=start,
            parse_fn=parse,
        )

    # Examples from the “Start requests” section of the documentation about
    # spiders.

    @deferred_f_from_coro_f
    async def test_lazy(self):
        start_nums = [1, 2, 4]
        cb_nums = [3]
        response_seconds = self.seconds * 2**1  # increase if flaky
        download_slots = 1

        async def start(spider):
            for num in start_nums:
                if spider.crawler.engine.needs_backout():
                    await spider.crawler.signals.wait_for(signals.scheduler_empty)
                request = self.request(num, response_seconds, download_slots)
                yield request

        await self._test_request_order(
            start_nums=start_nums,
            cb_nums=cb_nums,
            settings={
                "CONCURRENT_REQUESTS": 1,
            },
            response_seconds=response_seconds,
            start_fn=start,
        )
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_python.py | tests/test_utils_python.py | from __future__ import annotations
import functools
import operator
import platform
import sys
from typing import TYPE_CHECKING, TypeVar
import pytest
from scrapy.utils.asyncgen import as_async_generator, collect_asyncgen
from scrapy.utils.defer import aiter_errback, deferred_f_from_coro_f
from scrapy.utils.python import (
MutableAsyncChain,
MutableChain,
binary_is_text,
get_func_args,
memoizemethod_noargs,
to_bytes,
to_unicode,
without_none_values,
)
if TYPE_CHECKING:
from collections.abc import Iterable, Mapping
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")
def test_mutablechain():
    """MutableChain lazily chains iterables, including ones added later."""
    chain = MutableChain(range(2), [2, 3], (4, 5))
    chain.extend(range(6, 7))
    chain.extend([7, 8])
    chain.extend([9, 10], (11, 12))
    assert next(chain) == 0
    assert next(chain) == 1
    assert list(chain) == list(range(2, 13))
class TestMutableAsyncChain:
    """MutableAsyncChain chains async generators and supports extend()."""

    @staticmethod
    async def g1():
        # Yields 0, 1, 2.
        for i in range(3):
            yield i

    @staticmethod
    async def g2():
        # Empty async generator (bare yield is unreachable on purpose).
        return
        yield

    @staticmethod
    async def g3():
        # Yields 7, 8, 9.
        for i in range(7, 10):
            yield i

    @staticmethod
    async def g4():
        # Yields 3, 4, then raises ZeroDivisionError; 5 and 6 are unreachable.
        for i in range(3, 5):
            yield i
        1 / 0
        for i in range(5, 7):
            yield i

    @deferred_f_from_coro_f
    async def test_mutableasyncchain(self):
        m = MutableAsyncChain(self.g1(), as_async_generator(range(3, 7)))
        m.extend(self.g2())
        m.extend(self.g3())
        assert await m.__anext__() == 0
        results = await collect_asyncgen(m)
        assert results == list(range(1, 10))

    @deferred_f_from_coro_f
    async def test_mutableasyncchain_exc(self):
        # The error in g4 ends the whole chain; aiter_errback swallows it, so
        # only 0..4 come through and g3 is never reached.
        m = MutableAsyncChain(self.g1())
        m.extend(self.g4())
        m.extend(self.g3())
        results = await collect_asyncgen(aiter_errback(m, lambda _: None))
        assert results == list(range(5))
class TestToUnicode:
    """to_unicode() decodes bytes and passes str through unchanged."""

    def test_converting_an_utf8_encoded_string_to_unicode(self):
        decoded = to_unicode(b"lel\xc3\xb1e")
        assert decoded == "lel\xf1e"

    def test_converting_a_latin_1_encoded_string_to_unicode(self):
        decoded = to_unicode(b"lel\xf1e", "latin-1")
        assert decoded == "lel\xf1e"

    def test_converting_a_unicode_to_unicode_should_return_the_same_object(self):
        text = "\xf1e\xf1e\xf1e"
        assert to_unicode(text) == text

    def test_converting_a_strange_object_should_raise_type_error(self):
        with pytest.raises(TypeError):
            to_unicode(423)

    def test_errors_argument(self):
        replaced = to_unicode(b"a\xedb", "utf-8", errors="replace")
        assert replaced == "a\ufffdb"
class TestToBytes:
    """to_bytes() encodes str and passes bytes through unchanged."""

    def test_converting_a_unicode_object_to_an_utf_8_encoded_string(self):
        encoded = to_bytes("\xa3 49")
        assert encoded == b"\xc2\xa3 49"

    def test_converting_a_unicode_object_to_a_latin_1_encoded_string(self):
        encoded = to_bytes("\xa3 49", "latin-1")
        assert encoded == b"\xa3 49"

    def test_converting_a_regular_bytes_to_bytes_should_return_the_same_object(self):
        raw = b"lel\xf1e"
        assert to_bytes(raw) == raw

    def test_converting_a_strange_object_should_raise_type_error(self):
        with pytest.raises(TypeError):
            to_bytes(pytest)

    def test_errors_argument(self):
        encoded = to_bytes("a\ufffdb", "latin-1", errors="replace")
        assert encoded == b"a?b"
def test_memoizemethod_noargs():
    """The decorator caches the first return value per instance."""

    class Dummy:
        @memoizemethod_noargs
        def cached(self):
            return object()

        def noncached(self):
            return object()

    obj = Dummy()
    first = obj.cached()
    second = obj.cached()
    uncached = obj.noncached()
    assert first is second
    assert first is not uncached
@pytest.mark.parametrize(
    ("value", "expected"),
    [
        (b"hello", True),
        ("hello".encode("utf-16"), True),
        (b"<div>Price \xa3</div>", True),
        # Contains a control character (\x02), so it is treated as binary.
        (b"\x02\xa3", False),
    ],
)
def test_binaryistext(value: bytes, expected: bool) -> None:
    """binary_is_text() distinguishes text-like from binary byte strings."""
    assert binary_is_text(value) is expected
def test_get_func_args():
    """get_func_args() must report parameter names for plain functions,
    keyword-only arguments, classes, bound methods, partials, callable
    objects and (where introspectable) builtins."""

    def f1(a, b, c):
        pass

    def f2(a, b=None, c=None):
        pass

    def f3(a, b=None, *, c=None):
        pass

    class A:
        def __init__(self, a, b, c):
            pass

        def method(self, a, b, c):
            pass

    class Callable:
        def __call__(self, a, b, c):
            pass

    a = A(1, 2, 3)
    cal = Callable()
    # Partials: arguments already bound must be dropped from the result.
    partial_f1 = functools.partial(f1, None)
    partial_f2 = functools.partial(f1, b=None)
    partial_f3 = functools.partial(partial_f2, None)
    assert get_func_args(f1) == ["a", "b", "c"]
    assert get_func_args(f2) == ["a", "b", "c"]
    assert get_func_args(f3) == ["a", "b", "c"]
    assert get_func_args(A) == ["a", "b", "c"]
    assert get_func_args(a.method) == ["a", "b", "c"]
    assert get_func_args(partial_f1) == ["b", "c"]
    assert get_func_args(partial_f2) == ["a", "c"]
    assert get_func_args(partial_f3) == ["c"]
    assert get_func_args(cal) == ["a", "b", "c"]
    assert get_func_args(object) == []  # pylint: disable=use-implicit-booleaness-not-comparison
    # Builtin/bound C functions: introspection quality varies by interpreter.
    assert get_func_args(str.split, stripself=True) == ["sep", "maxsplit"]
    assert get_func_args(" ".join, stripself=True) == ["iterable"]
    if sys.version_info >= (3, 13) or platform.python_implementation() == "PyPy":
        # the correct and correctly extracted signature
        assert get_func_args(operator.itemgetter(2), stripself=True) == ["obj"]
    elif platform.python_implementation() == "CPython":
        # ["args", "kwargs"] is a correct result for the pre-3.13 incorrect function signature
        # [] is an incorrect result on even older CPython (https://github.com/python/cpython/issues/86951)
        assert get_func_args(operator.itemgetter(2), stripself=True) in [
            [],
            ["args", "kwargs"],
        ]
@pytest.mark.parametrize(
    ("value", "expected"),
    [
        ([1, None, 3, 4], [1, 3, 4]),
        ((1, None, 3, 4), (1, 3, 4)),
        (
            {"one": 1, "none": None, "three": 3, "four": 4},
            {"one": 1, "three": 3, "four": 4},
        ),
    ],
)
def test_without_none_values(
    value: Mapping[_KT, _VT] | Iterable[_KT], expected: dict[_KT, _VT] | Iterable[_KT]
) -> None:
    """without_none_values() drops None entries, keeping the container type."""
    assert without_none_values(value) == expected
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_log.py | tests/test_utils_log.py | from __future__ import annotations
import json
import logging
import re
import sys
from io import StringIO
from typing import TYPE_CHECKING, Any
import pytest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (
LogCounterHandler,
SpiderLoggerAdapter,
StreamLogger,
TopLevelFormatter,
failure_to_exc_info,
)
from scrapy.utils.test import get_crawler
from tests.spiders import LogSpider
if TYPE_CHECKING:
from collections.abc import Generator, Mapping, MutableMapping
from scrapy.crawler import Crawler
class TestFailureToExcInfo:
    """failure_to_exc_info() converts Failures and rejects anything else."""

    def test_failure(self):
        try:
            0 / 0
        except ZeroDivisionError:
            exc_info = sys.exc_info()
            failure = Failure()
        assert failure_to_exc_info(failure) == exc_info

    def test_non_failure(self):
        assert failure_to_exc_info("test") is None
class TestTopLevelFormatter:
    """TopLevelFormatter rewrites child logger names ("test.test1") to the
    configured top-level name ("test"), leaving unrelated loggers alone."""

    def setup_method(self):
        self.handler = LogCapture()
        self.handler.addFilter(TopLevelFormatter(["test"]))

    def _log_and_check(self, logger_name, expected_name):
        # Emit one WARNING on the named logger and verify the recorded name.
        with self.handler as log:
            logging.getLogger(logger_name).warning("test log msg")
        log.check((expected_name, "WARNING", "test log msg"))

    def test_top_level_logger(self):
        self._log_and_check("test", "test")

    def test_children_logger(self):
        self._log_and_check("test.test1", "test")

    def test_overlapping_name_logger(self):
        self._log_and_check("test2", "test2")

    def test_different_name_logger(self):
        self._log_and_check("different", "different")
class TestLogCounterHandler:
    """LogCounterHandler increments ``log_count/<LEVEL>`` crawler stats."""

    @pytest.fixture
    def crawler(self) -> Crawler:
        settings = {"LOG_LEVEL": "WARNING"}
        return get_crawler(settings_dict=settings)

    @pytest.fixture
    def logger(self, crawler: Crawler) -> Generator[logging.Logger]:
        # Isolated logger wired to the handler under test; detached afterwards.
        logger = logging.getLogger("test")
        logger.setLevel(logging.NOTSET)
        logger.propagate = False
        handler = LogCounterHandler(crawler)
        logger.addHandler(handler)
        yield logger
        logger.propagate = True
        logger.removeHandler(handler)

    def test_init(self, crawler: Crawler, logger: logging.Logger) -> None:
        # No log_count stats recorded before anything is logged.
        assert crawler.stats
        assert crawler.stats.get_value("log_count/DEBUG") is None
        assert crawler.stats.get_value("log_count/INFO") is None
        assert crawler.stats.get_value("log_count/WARNING") is None
        assert crawler.stats.get_value("log_count/ERROR") is None
        assert crawler.stats.get_value("log_count/CRITICAL") is None

    def test_accepted_level(self, crawler: Crawler, logger: logging.Logger) -> None:
        logger.error("test log msg")
        assert crawler.stats
        assert crawler.stats.get_value("log_count/ERROR") == 1

    def test_filtered_out_level(self, crawler: Crawler, logger: logging.Logger) -> None:
        # DEBUG is below LOG_LEVEL=WARNING, so nothing must be counted.
        logger.debug("test log msg")
        assert crawler.stats
        assert crawler.stats.get_value("log_count/INFO") is None
class TestStreamLogger:
    """StreamLogger redirects writes on a file-like object to a logger."""

    def test_redirect(self):
        """print() output must surface as an ERROR record on the target logger.

        The original version restored ``sys.stdout`` only on success; a
        failing assertion would leak the redirected stream into every later
        test. Restore it in a ``finally`` block instead.
        """
        logger = logging.getLogger("test")
        logger.setLevel(logging.WARNING)
        old_stdout = sys.stdout
        sys.stdout = StreamLogger(logger, logging.ERROR)
        try:
            with LogCapture() as log:
                print("test log msg")
            log.check(("test", "ERROR", "test log msg"))
        finally:
            sys.stdout = old_stdout
@pytest.mark.parametrize(
    ("base_extra", "log_extra", "expected_extra"),
    [
        # Base extra is merged into the per-call extra.
        (
            {"spider": "test"},
            {"extra": {"log_extra": "info"}},
            {"extra": {"log_extra": "info", "spider": "test"}},
        ),
        # A None per-call extra is replaced with the base extra.
        (
            {"spider": "test"},
            {"extra": None},
            {"extra": {"spider": "test"}},
        ),
        # The base extra wins over conflicting per-call keys.
        (
            {"spider": "test"},
            {"extra": {"spider": "test2"}},
            {"extra": {"spider": "test"}},
        ),
    ],
)
def test_spider_logger_adapter_process(
    base_extra: Mapping[str, Any], log_extra: MutableMapping, expected_extra: dict
) -> None:
    """SpiderLoggerAdapter.process() merges extras, leaving the message as-is."""
    logger = logging.getLogger("test")
    spider_logger_adapter = SpiderLoggerAdapter(logger, base_extra)

    log_message = "test_log_message"
    result_message, result_kwargs = spider_logger_adapter.process(
        log_message, log_extra
    )

    assert result_message == log_message
    assert result_kwargs == expected_extra
class TestLogging:
    """Each LogSpider helper writes its message at the matching level."""

    @pytest.fixture
    def log_stream(self) -> StringIO:
        return StringIO()

    @pytest.fixture
    def spider(self) -> LogSpider:
        return LogSpider()

    @pytest.fixture(autouse=True)
    def logger(self, log_stream: StringIO) -> Generator[logging.Logger]:
        handler = logging.StreamHandler(log_stream)
        logger = logging.getLogger("log_spider")
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        yield logger
        logger.removeHandler(handler)

    @staticmethod
    def _assert_logged(stream: StringIO, message: str) -> None:
        # The bare StreamHandler writes just the message plus a newline.
        assert stream.getvalue() == f"{message}\n"

    def test_debug_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_debug("Foo message")
        self._assert_logged(log_stream, "Foo message")

    def test_info_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_info("Bar message")
        self._assert_logged(log_stream, "Bar message")

    def test_warning_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_warning("Baz message")
        self._assert_logged(log_stream, "Baz message")

    def test_error_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_error("Foo bar message")
        self._assert_logged(log_stream, "Foo bar message")

    def test_critical_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_critical("Foo bar baz message")
        self._assert_logged(log_stream, "Foo bar baz message")
class TestLoggingWithExtra:
    """LogSpider helpers merge ``extra`` dicts into the formatted record."""

    regex_pattern = re.compile(r"^<LogSpider\s'log_spider'\sat\s[^>]+>$")

    @pytest.fixture
    def log_stream(self) -> StringIO:
        return StringIO()

    @pytest.fixture
    def spider(self) -> LogSpider:
        return LogSpider()

    @pytest.fixture(autouse=True)
    def logger(self, log_stream: StringIO) -> Generator[logging.Logger]:
        handler = logging.StreamHandler(log_stream)
        formatter = logging.Formatter(
            '{"levelname": "%(levelname)s", "message": "%(message)s", "spider": "%(spider)s", "important_info": "%(important_info)s"}'
        )
        handler.setFormatter(formatter)
        logger = logging.getLogger("log_spider")
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        yield logger
        logger.removeHandler(handler)

    def _check_record(self, stream, levelname, message, important_info):
        # The formatter above emits one JSON object per record.
        record = json.loads(stream.getvalue())
        assert record["levelname"] == levelname
        assert record["message"] == message
        assert self.regex_pattern.match(record["spider"])
        assert record["important_info"] == important_info

    def test_debug_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_debug("Foo message", {"important_info": "foo"})
        self._check_record(log_stream, "DEBUG", "Foo message", "foo")

    def test_info_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_info("Bar message", {"important_info": "bar"})
        self._check_record(log_stream, "INFO", "Bar message", "bar")

    def test_warning_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_warning("Baz message", {"important_info": "baz"})
        self._check_record(log_stream, "WARNING", "Baz message", "baz")

    def test_error_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_error("Foo bar message", {"important_info": "foo bar"})
        self._check_record(log_stream, "ERROR", "Foo bar message", "foo bar")

    def test_critical_logging(self, log_stream: StringIO, spider: LogSpider) -> None:
        spider.log_critical("Foo bar baz message", {"important_info": "foo bar baz"})
        self._check_record(log_stream, "CRITICAL", "Foo bar baz message", "foo bar baz")

    def test_overwrite_spider_extra(
        self, log_stream: StringIO, spider: LogSpider
    ) -> None:
        # A user-supplied "spider" key must not override the adapter's own.
        extra = {"important_info": "foo", "spider": "shouldn't change"}
        spider.log_error("Foo message", extra)
        self._check_record(log_stream, "ERROR", "Foo message", "foo")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_ajaxcrawlable.py | tests/test_downloadermiddleware_ajaxcrawlable.py | import pytest
from scrapy.downloadermiddlewares.ajaxcrawl import AjaxCrawlMiddleware
from scrapy.http import HtmlResponse, Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestAjaxCrawlMiddleware:
    """AjaxCrawlMiddleware rewrites AJAX-crawlable pages into requests for
    their ``_escaped_fragment_`` variant and leaves everything else alone."""

    def setup_method(self):
        crawler = get_crawler(Spider, {"AJAXCRAWL_ENABLED": True})
        self.spider = crawler._create_spider("foo")
        self.mw = AjaxCrawlMiddleware.from_crawler(crawler)

    def _ajaxcrawlable_body(self):
        # The <meta name="fragment" content="!"> tag marks AJAX-crawlable pages.
        return b'<html><head><meta name="fragment" content="!"/></head><body></body></html>'

    def _req_resp(self, url, req_kwargs=None, resp_kwargs=None):
        req = Request(url, **(req_kwargs or {}))
        resp = HtmlResponse(url, request=req, **(resp_kwargs or {}))
        return req, resp

    def test_non_get(self):
        # HEAD requests are passed through untouched.
        req, resp = self._req_resp("http://example.com/", {"method": "HEAD"})
        resp2 = self.mw.process_response(req, resp, self.spider)
        assert resp == resp2

    def test_binary_response(self):
        # Non-HTML responses are passed through untouched.
        req = Request("http://example.com/")
        resp = Response("http://example.com/", body=b"foobar\x00\x01\x02", request=req)
        resp2 = self.mw.process_response(req, resp, self.spider)
        assert resp is resp2

    def test_ajaxcrawl(self):
        # Crawlable pages yield a rewritten request with _escaped_fragment_,
        # preserving the original request meta.
        req, resp = self._req_resp(
            "http://example.com/",
            {"meta": {"foo": "bar"}},
            {"body": self._ajaxcrawlable_body()},
        )
        req2 = self.mw.process_response(req, resp, self.spider)
        assert req2.url == "http://example.com/?_escaped_fragment_="
        assert req2.meta["foo"] == "bar"

    def test_ajaxcrawl_loop(self):
        # The rewritten request's response must not be rewritten again.
        req, resp = self._req_resp(
            "http://example.com/", {}, {"body": self._ajaxcrawlable_body()}
        )
        req2 = self.mw.process_response(req, resp, self.spider)
        resp2 = HtmlResponse(req2.url, body=resp.body, request=req2)
        resp3 = self.mw.process_response(req2, resp2, self.spider)
        assert isinstance(resp3, HtmlResponse), (resp3.__class__, resp3)
        assert resp3.request.url == "http://example.com/?_escaped_fragment_="
        assert resp3 is resp2

    def test_noncrawlable_body(self):
        # HTML without the fragment meta tag is passed through untouched.
        req, resp = self._req_resp(
            "http://example.com/", {}, {"body": b"<html></html>"}
        )
        resp2 = self.mw.process_response(req, resp, self.spider)
        assert resp is resp2
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_sitemap.py | tests/test_utils_sitemap.py | from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
def test_sitemap():
    """A plain <urlset> document is parsed into one dict per <url>, keyed by tag name."""
    body = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc>http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc>http://www.example.com/Special-Offers.html</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
</urlset>"""
    sitemap = Sitemap(body)
    assert sitemap.type == "urlset"
    expected = [
        {
            "priority": "1",
            "loc": "http://www.example.com/",
            "lastmod": "2009-08-16",
            "changefreq": "daily",
        },
        {
            "priority": "0.8",
            "loc": "http://www.example.com/Special-Offers.html",
            "lastmod": "2009-08-16",
            "changefreq": "weekly",
        },
    ]
    assert list(sitemap) == expected
def test_sitemap_index():
    """A <sitemapindex> document is detected and yields one dict per child sitemap."""
    s = Sitemap(
        b"""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>http://www.example.com/sitemap1.xml.gz</loc>
<lastmod>2004-10-01T18:23:17+00:00</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml.gz</loc>
<lastmod>2005-01-01</lastmod>
</sitemap>
</sitemapindex>"""
    )
    assert s.type == "sitemapindex"
    assert list(s) == [
        {
            "loc": "http://www.example.com/sitemap1.xml.gz",
            "lastmod": "2004-10-01T18:23:17+00:00",
        },
        {
            "loc": "http://www.example.com/sitemap2.xml.gz",
            "lastmod": "2005-01-01",
        },
    ]
def test_sitemap_strip():
    """Assert we can deal with trailing spaces inside <loc> tags - we've
    seen those
    """
    s = Sitemap(
        b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
"""
    )
    # Leading whitespace inside <loc> is stripped; an empty <lastmod/> yields "".
    assert list(s) == [
        {
            "priority": "1",
            "loc": "http://www.example.com/",
            "lastmod": "2009-08-16",
            "changefreq": "daily",
        },
        {"loc": "http://www.example.com/2", "lastmod": ""},
    ]
def test_sitemap_wrong_ns():
    """We have seen sitemaps with wrongs ns. Presumably, Google still works
    with these, though is not 100% confirmed"""
    # <url> children reset their namespace with xmlns=""; parsing must still work.
    s = Sitemap(
        b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
"""
    )
    assert list(s) == [
        {
            "priority": "1",
            "loc": "http://www.example.com/",
            "lastmod": "2009-08-16",
            "changefreq": "daily",
        },
        {"loc": "http://www.example.com/2", "lastmod": ""},
    ]
def test_sitemap_wrong_ns2():
    """We have seen sitemaps with wrongs ns. Presumably, Google still works
    with these, though is not 100% confirmed"""
    # No namespace on <urlset> at all; type detection must still yield "urlset".
    s = Sitemap(
        b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset>
<url xmlns="">
<loc> http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url xmlns="">
<loc> http://www.example.com/2</loc>
<lastmod />
</url>
</urlset>
"""
    )
    assert s.type == "urlset"
    assert list(s) == [
        {
            "priority": "1",
            "loc": "http://www.example.com/",
            "lastmod": "2009-08-16",
            "changefreq": "daily",
        },
        {"loc": "http://www.example.com/2", "lastmod": ""},
    ]
def test_sitemap_urls_from_robots():
    """Sitemap: directives are extracted case-insensitively and resolved
    against base_url (relative entries become absolute)."""
    robots = """User-agent: *
Disallow: /aff/
Disallow: /wl/
# Search and shopping refining
Disallow: /s*/*facet
Disallow: /s*/*tags
# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
Sitemap: HTTP://example.com/sitemap-uppercase.xml
Sitemap: /sitemap-relative-url.xml
# Forums
Disallow: /forum/search/
Disallow: /forum/active/
"""
    found = sitemap_urls_from_robots(robots, base_url="http://example.com")
    expected = [
        "http://example.com/sitemap.xml",
        "http://example.com/sitemap-product-index.xml",
        "http://example.com/sitemap-uppercase.xml",
        "http://example.com/sitemap-relative-url.xml",
    ]
    assert list(found) == expected
def test_sitemap_blanklines():
    """Assert we can deal with starting blank lines before <xml> tag"""
    s = Sitemap(
        b"""
<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<!-- cache: cached = yes name = sitemap_jspCache key = sitemap -->
<sitemap>
<loc>http://www.example.com/sitemap1.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap3.xml</loc>
<lastmod>2013-07-15</lastmod>
</sitemap>
<!-- end cache -->
</sitemapindex>
"""
    )
    # Comments between entries are ignored; all three child sitemaps are yielded.
    assert list(s) == [
        {"lastmod": "2013-07-15", "loc": "http://www.example.com/sitemap1.xml"},
        {"lastmod": "2013-07-15", "loc": "http://www.example.com/sitemap2.xml"},
        {"lastmod": "2013-07-15", "loc": "http://www.example.com/sitemap3.xml"},
    ]
def test_comment():
    """An XML comment inside a <url> element is ignored rather than breaking parsing."""
    body = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/</loc>
<!-- this is a comment on which the parser might raise an exception if implemented incorrectly -->
</url>
</urlset>"""
    entries = list(Sitemap(body))
    assert entries == [{"loc": "http://www.example.com/"}]
def test_alternate():
    """xhtml:link rel="alternate" hrefs are collected under the "alternate" key;
    a malformed link without href is silently skipped."""
    s = Sitemap(
        b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/</loc>
<xhtml:link rel="alternate" hreflang="de"
href="http://www.example.com/deutsch/"/>
<xhtml:link rel="alternate" hreflang="de-ch"
href="http://www.example.com/schweiz-deutsch/"/>
<xhtml:link rel="alternate" hreflang="en"
href="http://www.example.com/english/"/>
<xhtml:link rel="alternate" hreflang="en"/><!-- wrong tag without href -->
</url>
</urlset>"""
    )
    assert list(s) == [
        {
            "loc": "http://www.example.com/english/",
            "alternate": [
                "http://www.example.com/deutsch/",
                "http://www.example.com/schweiz-deutsch/",
                "http://www.example.com/english/",
            ],
        }
    ]
def test_xml_entity_expansion():
    """External entities (XXE) must NOT be expanded: the &xxe; reference is
    dropped instead of leaking file contents into the parsed URL."""
    s = Sitemap(
        b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE foo [
<!ELEMENT foo ANY >
<!ENTITY xxe SYSTEM "file:///etc/passwd" >
]>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url>
<loc>http://127.0.0.1:8000/&xxe;</loc>
</url>
</urlset>
"""
    )
    assert list(s) == [{"loc": "http://127.0.0.1:8000/"}]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_urllength.py | tests/test_spidermiddleware_urllength.py | from __future__ import annotations
from logging import INFO
from typing import TYPE_CHECKING
import pytest
from scrapy.http import Request, Response
from scrapy.spidermiddlewares.urllength import UrlLengthMiddleware
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from scrapy.crawler import Crawler
from scrapy.statscollectors import StatsCollector
# URL length limit applied by the middleware fixture below.
maxlength = 25

response = Response("http://scrapytest.org")
short_url_req = Request("http://scrapytest.org/")  # within the limit
long_url_req = Request("http://scrapytest.org/this_is_a_long_url")  # exceeds maxlength
reqs: list[Request] = [short_url_req, long_url_req]
@pytest.fixture
def crawler() -> Crawler:
    """Crawler configured with the URL length limit under test."""
    return get_crawler(Spider, {"URLLENGTH_LIMIT": maxlength})
@pytest.fixture
def stats(crawler: Crawler) -> StatsCollector:
    """The crawler's stats collector (asserted non-None for type narrowing)."""
    assert crawler.stats is not None
    return crawler.stats
@pytest.fixture
def mw(crawler: Crawler) -> UrlLengthMiddleware:
    """UrlLengthMiddleware built from the configured crawler."""
    return UrlLengthMiddleware.from_crawler(crawler)
def process_spider_output(mw: UrlLengthMiddleware) -> list[Request]:
    """Feed the module-level request pair through the middleware and collect the output."""
    output = mw.process_spider_output(response, reqs)
    return [*output]
def test_middleware_works(mw: UrlLengthMiddleware) -> None:
    """Only the short URL survives; the over-long one is dropped."""
    assert process_spider_output(mw) == [short_url_req]
def test_logging(
    stats: StatsCollector, mw: UrlLengthMiddleware, caplog: pytest.LogCaptureFixture
) -> None:
    """Dropping an over-long URL increments the stat and logs at INFO level."""
    with caplog.at_level(INFO):
        process_spider_output(mw)
    ric = stats.get_value("urllength/request_ignored_count")
    assert ric == 1
    assert f"Ignoring link (url length > {maxlength})" in caplog.text
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_robotstxt_interface.py | tests/test_robotstxt_interface.py | import pytest
from scrapy.robotstxt import (
ProtegoRobotParser,
PythonRobotParser,
RerpRobotParser,
decode_robotstxt,
)
def rerp_available():
    """Report whether the optional robotexclusionrulesparser package is importable."""
    try:
        from robotexclusionrulesparser import RobotExclusionRulesParser  # noqa: F401,PLC0415
    except ImportError:
        return False
    else:
        return True
class BaseRobotParserTest:
    """Shared test suite run against each robots.txt parser implementation.

    Subclasses call ``_setUp`` with the parser class under test and skip
    the precedence test that does not apply to that implementation
    (length-based vs order-based rule precedence are mutually exclusive).
    """

    def _setUp(self, parser_cls):
        # Parser class under test, set by the concrete subclass.
        self.parser_cls = parser_cls

    def test_allowed(self):
        """Basic Allow/Disallow handling."""
        robotstxt_robotstxt_body = (
            b"User-agent: * \nDisallow: /disallowed \nAllow: /allowed \nCrawl-delay: 10"
        )
        rp = self.parser_cls.from_crawler(
            crawler=None, robotstxt_body=robotstxt_robotstxt_body
        )
        assert rp.allowed("https://www.site.local/allowed", "*")
        assert not rp.allowed("https://www.site.local/disallowed", "*")

    def test_allowed_wildcards(self):
        """``*`` and ``$`` wildcards in rules, per user agent."""
        robotstxt_robotstxt_body = b"""User-agent: first
Disallow: /disallowed/*/end$
User-agent: second
Allow: /*allowed
Disallow: /
"""
        rp = self.parser_cls.from_crawler(
            crawler=None, robotstxt_body=robotstxt_robotstxt_body
        )
        assert rp.allowed("https://www.site.local/disallowed", "first")
        assert not rp.allowed("https://www.site.local/disallowed/xyz/end", "first")
        assert not rp.allowed("https://www.site.local/disallowed/abc/end", "first")
        assert rp.allowed("https://www.site.local/disallowed/xyz/endinglater", "first")
        assert rp.allowed("https://www.site.local/allowed", "second")
        assert rp.allowed("https://www.site.local/is_still_allowed", "second")
        assert rp.allowed("https://www.site.local/is_allowed_too", "second")

    def test_length_based_precedence(self):
        """Longest matching rule wins (Google-style precedence)."""
        robotstxt_robotstxt_body = b"User-agent: * \nDisallow: / \nAllow: /page"
        rp = self.parser_cls.from_crawler(
            crawler=None, robotstxt_body=robotstxt_robotstxt_body
        )
        assert rp.allowed("https://www.site.local/page", "*")

    def test_order_based_precedence(self):
        """First matching rule wins (same input as above, opposite expectation)."""
        robotstxt_robotstxt_body = b"User-agent: * \nDisallow: / \nAllow: /page"
        rp = self.parser_cls.from_crawler(
            crawler=None, robotstxt_body=robotstxt_robotstxt_body
        )
        assert not rp.allowed("https://www.site.local/page", "*")

    def test_empty_response(self):
        """empty response should equal 'allow all'"""
        rp = self.parser_cls.from_crawler(crawler=None, robotstxt_body=b"")
        assert rp.allowed("https://site.local/", "*")
        assert rp.allowed("https://site.local/", "chrome")
        assert rp.allowed("https://site.local/index.html", "*")
        assert rp.allowed("https://site.local/disallowed", "*")

    def test_garbage_response(self):
        """garbage response should be discarded, equal 'allow all'"""
        # Non-UTF-8 binary junk (a truncated GIF header).
        robotstxt_robotstxt_body = b"GIF89a\xd3\x00\xfe\x00\xa2"
        rp = self.parser_cls.from_crawler(
            crawler=None, robotstxt_body=robotstxt_robotstxt_body
        )
        assert rp.allowed("https://site.local/", "*")
        assert rp.allowed("https://site.local/", "chrome")
        assert rp.allowed("https://site.local/index.html", "*")
        assert rp.allowed("https://site.local/disallowed", "*")

    def test_unicode_url_and_useragent(self):
        """Percent-encoded and raw non-ASCII paths, and a non-ASCII user agent."""
        robotstxt_robotstxt_body = """
User-Agent: *
Disallow: /admin/
Disallow: /static/
# taken from https://en.wikipedia.org/robots.txt
Disallow: /wiki/K%C3%A4ytt%C3%A4j%C3%A4:
Disallow: /wiki/Käyttäjä:
User-Agent: UnicödeBöt
Disallow: /some/randome/page.html""".encode()
        rp = self.parser_cls.from_crawler(
            crawler=None, robotstxt_body=robotstxt_robotstxt_body
        )
        assert rp.allowed("https://site.local/", "*")
        assert not rp.allowed("https://site.local/admin/", "*")
        assert not rp.allowed("https://site.local/static/", "*")
        assert rp.allowed("https://site.local/admin/", "UnicödeBöt")
        assert not rp.allowed("https://site.local/wiki/K%C3%A4ytt%C3%A4j%C3%A4:", "*")
        assert not rp.allowed("https://site.local/wiki/Käyttäjä:", "*")
        assert rp.allowed("https://site.local/some/randome/page.html", "*")
        assert not rp.allowed("https://site.local/some/randome/page.html", "UnicödeBöt")
class TestDecodeRobotsTxt:
    """Tests for decode_robotstxt() bytes-to-text conversion."""

    def test_native_string_conversion(self):
        robotstxt_body = b"User-agent: *\nDisallow: /\n"
        decoded_content = decode_robotstxt(
            robotstxt_body, spider=None, to_native_str_type=True
        )
        assert decoded_content == "User-agent: *\nDisallow: /\n"

    def test_decode_utf8(self):
        robotstxt_body = b"User-agent: *\nDisallow: /\n"
        decoded_content = decode_robotstxt(robotstxt_body, spider=None)
        assert decoded_content == "User-agent: *\nDisallow: /\n"

    def test_decode_non_utf8(self):
        """Undecodable bytes are dropped rather than raising."""
        robotstxt_body = b"User-agent: *\n\xffDisallow: /\n"
        decoded_content = decode_robotstxt(robotstxt_body, spider=None)
        assert decoded_content == "User-agent: *\nDisallow: /\n"

    # UTF-8 BOM at the beginning of the file ignored
    def test_decode_utf8_bom(self):
        robotstxt_body = b"\xef\xbb\xbfUser-agent: *\nDisallow: /\n"
        decoded_content = decode_robotstxt(robotstxt_body, spider=None)
        assert decoded_content == "User-agent: *\nDisallow: /\n"
class TestPythonRobotParser(BaseRobotParserTest):
    """Shared parser tests against the stdlib-based PythonRobotParser."""

    def setup_method(self):
        super()._setUp(PythonRobotParser)

    def test_length_based_precedence(self):
        pytest.skip(
            "RobotFileParser does not support length based directives precedence."
        )

    def test_allowed_wildcards(self):
        pytest.skip("RobotFileParser does not support wildcards.")
@pytest.mark.skipif(not rerp_available(), reason="Rerp parser is not installed")
class TestRerpRobotParser(BaseRobotParserTest):
    """Shared parser tests against the optional robotexclusionrulesparser backend."""

    def setup_method(self):
        super()._setUp(RerpRobotParser)

    def test_length_based_precedence(self):
        pytest.skip("Rerp does not support length based directives precedence.")
class TestProtegoRobotParser(BaseRobotParserTest):
    """Shared parser tests against the Protego backend (Scrapy's default)."""

    def setup_method(self):
        super()._setUp(ProtegoRobotParser)

    def test_order_based_precedence(self):
        pytest.skip("Protego does not support order based directives precedence.")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_process_start.py | tests/test_spidermiddleware_process_start.py | import warnings
from asyncio import sleep
import pytest
from scrapy import Spider, signals
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.test import get_crawler
from tests.test_spider_start import SLEEP_SECONDS
from .utils import twisted_sleep
# Sentinel items yielded by the spiders/middlewares below; the tests assert on
# the exact order in which these reach the item_scraped signal.
ITEM_A = {"id": "a"}
ITEM_B = {"id": "b"}
ITEM_C = {"id": "c"}
ITEM_D = {"id": "d"}
class AsyncioSleepSpiderMiddleware:
    """process_start that awaits an asyncio sleep before passing items through."""

    async def process_start(self, start):
        await sleep(SLEEP_SECONDS)
        async for item_or_request in start:
            yield item_or_request
class NoOpSpiderMiddleware:
    """process_start that passes every start item/request through unchanged."""

    async def process_start(self, start):
        async for item_or_request in start:
            yield item_or_request
class TwistedSleepSpiderMiddleware:
    """process_start that awaits a Twisted-based sleep before passing items through."""

    async def process_start(self, start):
        await maybe_deferred_to_future(twisted_sleep(SLEEP_SECONDS))
        async for item_or_request in start:
            yield item_or_request
class UniversalSpiderMiddleware:
    """Declares both the modern and the deprecated hook; the deprecated one
    raises so the tests can prove it is never called."""

    async def process_start(self, start):
        async for item_or_request in start:
            yield item_or_request

    def process_start_requests(self, start_requests, spider):
        raise NotImplementedError
# Spiders and spider middlewares for TestMain._test_wrap
class ModernWrapSpider(Spider):
    """Spider with only the modern ``start()``; yields ITEM_B."""

    name = "test"

    async def start(self):
        yield ITEM_B
class ModernWrapSpiderSubclass(ModernWrapSpider):
    """Subclass used to check error messages mention the inheriting class."""

    name = "test"
class UniversalWrapSpider(Spider):
    """Spider defining both entry points: modern ``start()`` yields ITEM_B,
    deprecated ``start_requests()`` yields ITEM_D."""

    name = "test"

    async def start(self):
        yield ITEM_B

    def start_requests(self):
        yield ITEM_D
class DeprecatedWrapSpider(Spider):
    """Spider with only the deprecated ``start_requests()``; yields ITEM_B."""

    name = "test"

    def start_requests(self):
        yield ITEM_B
class ModernWrapSpiderMiddleware:
    """Modern-only middleware: brackets the start items with ITEM_A / ITEM_C."""

    async def process_start(self, start):
        yield ITEM_A
        async for item_or_request in start:
            yield item_or_request
        yield ITEM_C
class UniversalWrapSpiderMiddleware:
    """Universal middleware: implements both hooks with the same ITEM_A / ITEM_C
    bracketing, so it works with modern and deprecated pipelines alike."""

    async def process_start(self, start):
        yield ITEM_A
        async for item_or_request in start:
            yield item_or_request
        yield ITEM_C

    def process_start_requests(self, start, spider):
        yield ITEM_A
        yield from start
        yield ITEM_C
class DeprecatedWrapSpiderMiddleware:
    """Deprecated-only middleware: implements just ``process_start_requests()``."""

    def process_start_requests(self, start, spider):
        yield ITEM_A
        yield from start
        yield ITEM_C
class TestMain:
    """Compatibility matrix of spider middlewares and spider start methods.

    Combines modern (``process_start``), universal (both hooks) and deprecated
    (``process_start_requests``) middlewares with modern (``start``), universal
    (both) and deprecated (``start_requests``) spiders, asserting the scraped
    item order and which deprecation warnings or errors are produced.

    Fix: renamed the internal helper ``_test_douple_wrap`` (typo) to
    ``_test_double_wrap``; all call sites are inside this class.
    """

    async def _test(self, spider_middlewares, spider_cls, expected_items):
        """Run a crawl with the given middleware stack and compare scraped items."""
        actual_items = []

        def track_item(item, response, spider):
            actual_items.append(item)

        settings = {
            "SPIDER_MIDDLEWARES": {cls: n for n, cls in enumerate(spider_middlewares)},
        }
        crawler = get_crawler(spider_cls, settings_dict=settings)
        crawler.signals.connect(track_item, signals.item_scraped)
        await crawler.crawl_async()
        assert crawler.stats.get_value("finish_reason") == "finished"
        assert actual_items == expected_items, f"{actual_items=} != {expected_items=}"

    async def _test_wrap(self, spider_middleware, spider_cls, expected_items=None):
        """One wrapping middleware: default expectation is A, B, C."""
        expected_items = expected_items or [ITEM_A, ITEM_B, ITEM_C]
        await self._test([spider_middleware], spider_cls, expected_items)

    async def _test_double_wrap(self, smw1, smw2, spider_cls, expected_items=None):
        """Two wrapping middlewares: default expectation is A, A, B, C, C."""
        expected_items = expected_items or [ITEM_A, ITEM_A, ITEM_B, ITEM_C, ITEM_C]
        await self._test([smw1, smw2], spider_cls, expected_items)

    @deferred_f_from_coro_f
    async def test_modern_mw_modern_spider(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            await self._test_wrap(ModernWrapSpiderMiddleware, ModernWrapSpider)

    @deferred_f_from_coro_f
    async def test_modern_mw_universal_spider(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            await self._test_wrap(ModernWrapSpiderMiddleware, UniversalWrapSpider)

    @deferred_f_from_coro_f
    async def test_modern_mw_deprecated_spider(self):
        with pytest.warns(
            ScrapyDeprecationWarning, match=r"deprecated start_requests\(\)"
        ):
            await self._test_wrap(ModernWrapSpiderMiddleware, DeprecatedWrapSpider)

    @deferred_f_from_coro_f
    async def test_universal_mw_modern_spider(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            await self._test_wrap(UniversalWrapSpiderMiddleware, ModernWrapSpider)

    @deferred_f_from_coro_f
    async def test_universal_mw_universal_spider(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            await self._test_wrap(UniversalWrapSpiderMiddleware, UniversalWrapSpider)

    @deferred_f_from_coro_f
    async def test_universal_mw_deprecated_spider(self):
        with pytest.warns(
            ScrapyDeprecationWarning, match=r"deprecated start_requests\(\)"
        ):
            await self._test_wrap(UniversalWrapSpiderMiddleware, DeprecatedWrapSpider)

    @deferred_f_from_coro_f
    async def test_deprecated_mw_modern_spider(self):
        with (
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
            ),
            pytest.raises(
                ValueError, match=r"only compatible with \(deprecated\) spiders"
            ),
        ):
            await self._test_wrap(DeprecatedWrapSpiderMiddleware, ModernWrapSpider)

    @deferred_f_from_coro_f
    async def test_deprecated_mw_modern_spider_subclass(self):
        # The error must name both the base spider and the inheriting subclass.
        with (
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
            ),
            pytest.raises(
                ValueError,
                match=r"^\S+?\.ModernWrapSpider \(inherited by \S+?.ModernWrapSpiderSubclass\) .*? only compatible with \(deprecated\) spiders",
            ),
        ):
            await self._test_wrap(
                DeprecatedWrapSpiderMiddleware, ModernWrapSpiderSubclass
            )

    @deferred_f_from_coro_f
    async def test_deprecated_mw_universal_spider(self):
        # The deprecated hook consumes start_requests(), so ITEM_D replaces ITEM_B.
        with pytest.warns(
            ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
        ):
            await self._test_wrap(
                DeprecatedWrapSpiderMiddleware,
                UniversalWrapSpider,
                [ITEM_A, ITEM_D, ITEM_C],
            )

    @deferred_f_from_coro_f
    async def test_deprecated_mw_deprecated_spider(self):
        with (
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
            ),
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated start_requests\(\)"
            ),
        ):
            await self._test_wrap(DeprecatedWrapSpiderMiddleware, DeprecatedWrapSpider)

    @deferred_f_from_coro_f
    async def test_modern_mw_universal_mw_modern_spider(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            await self._test_double_wrap(
                ModernWrapSpiderMiddleware,
                UniversalWrapSpiderMiddleware,
                ModernWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_modern_mw_deprecated_mw_modern_spider(self):
        with pytest.raises(ValueError, match=r"trying to combine spider middlewares"):
            await self._test_double_wrap(
                ModernWrapSpiderMiddleware,
                DeprecatedWrapSpiderMiddleware,
                ModernWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_universal_mw_deprecated_mw_modern_spider(self):
        with (
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
            ),
            pytest.raises(
                ValueError, match=r"only compatible with \(deprecated\) spiders"
            ),
        ):
            await self._test_double_wrap(
                UniversalWrapSpiderMiddleware,
                DeprecatedWrapSpiderMiddleware,
                ModernWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_modern_mw_universal_mw_universal_spider(self):
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            await self._test_double_wrap(
                ModernWrapSpiderMiddleware,
                UniversalWrapSpiderMiddleware,
                UniversalWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_modern_mw_deprecated_mw_universal_spider(self):
        with pytest.raises(ValueError, match=r"trying to combine spider middlewares"):
            await self._test_double_wrap(
                ModernWrapSpiderMiddleware,
                DeprecatedWrapSpiderMiddleware,
                UniversalWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_universal_mw_deprecated_mw_universal_spider(self):
        with pytest.warns(
            ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
        ):
            await self._test_double_wrap(
                UniversalWrapSpiderMiddleware,
                DeprecatedWrapSpiderMiddleware,
                UniversalWrapSpider,
                [ITEM_A, ITEM_A, ITEM_D, ITEM_C, ITEM_C],
            )

    @deferred_f_from_coro_f
    async def test_modern_mw_universal_mw_deprecated_spider(self):
        with pytest.warns(
            ScrapyDeprecationWarning, match=r"deprecated start_requests\(\)"
        ):
            await self._test_double_wrap(
                ModernWrapSpiderMiddleware,
                UniversalWrapSpiderMiddleware,
                DeprecatedWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_modern_mw_deprecated_mw_deprecated_spider(self):
        with pytest.raises(ValueError, match=r"trying to combine spider middlewares"):
            await self._test_double_wrap(
                ModernWrapSpiderMiddleware,
                DeprecatedWrapSpiderMiddleware,
                DeprecatedWrapSpider,
            )

    @deferred_f_from_coro_f
    async def test_universal_mw_deprecated_mw_deprecated_spider(self):
        with (
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated process_start_requests\(\)"
            ),
            pytest.warns(
                ScrapyDeprecationWarning, match=r"deprecated start_requests\(\)"
            ),
        ):
            await self._test_double_wrap(
                UniversalWrapSpiderMiddleware,
                DeprecatedWrapSpiderMiddleware,
                DeprecatedWrapSpider,
            )

    async def _test_sleep(self, spider_middlewares):
        """A sleeping middleware must not lose or reorder the single start item."""

        class TestSpider(Spider):
            name = "test"

            async def start(self):
                yield ITEM_A

        await self._test(spider_middlewares, TestSpider, [ITEM_A])

    @pytest.mark.only_asyncio
    @deferred_f_from_coro_f
    async def test_asyncio_sleep_single(self):
        await self._test_sleep([AsyncioSleepSpiderMiddleware])

    @pytest.mark.only_asyncio
    @deferred_f_from_coro_f
    async def test_asyncio_sleep_multiple(self):
        await self._test_sleep(
            [NoOpSpiderMiddleware, AsyncioSleepSpiderMiddleware, NoOpSpiderMiddleware]
        )

    @deferred_f_from_coro_f
    async def test_twisted_sleep_single(self):
        await self._test_sleep([TwistedSleepSpiderMiddleware])

    @deferred_f_from_coro_f
    async def test_twisted_sleep_multiple(self):
        await self._test_sleep(
            [NoOpSpiderMiddleware, TwistedSleepSpiderMiddleware, NoOpSpiderMiddleware]
        )
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_retry.py | tests/test_downloadermiddleware_retry.py | import logging
import pytest
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.internet.error import (
ConnectError,
ConnectionDone,
ConnectionLost,
DNSLookupError,
TCPTimedOutError,
)
from twisted.internet.error import ConnectionRefusedError as TxConnectionRefusedError
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.web.client import ResponseFailed
from scrapy.downloadermiddlewares.retry import RetryMiddleware, get_retry_request
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response
from scrapy.settings.default_settings import RETRY_EXCEPTIONS
from scrapy.spiders import Spider
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
class TestRetry:
    """Tests for RetryMiddleware.process_response/process_exception."""

    def setup_method(self):
        self.crawler = get_crawler(DefaultSpider)
        self.crawler.spider = self.crawler._create_spider()
        self.mw = RetryMiddleware.from_crawler(self.crawler)
        # Cap retries at 2 so the "discard" branch is reached quickly.
        self.mw.max_retry_times = 2

    def test_priority_adjust(self):
        """A retried request is rescheduled with a lower priority."""
        req = Request("http://www.scrapytest.org/503")
        rsp = Response("http://www.scrapytest.org/503", body=b"", status=503)
        req2 = self.mw.process_response(req, rsp)
        assert req2.priority < req.priority

    def test_404(self):
        req = Request("http://www.scrapytest.org/404")
        rsp = Response("http://www.scrapytest.org/404", body=b"", status=404)
        # dont retry 404s
        assert self.mw.process_response(req, rsp) is rsp

    def test_dont_retry(self):
        """The dont_retry meta key suppresses retrying a 503 response."""
        req = Request("http://www.scrapytest.org/503", meta={"dont_retry": True})
        rsp = Response("http://www.scrapytest.org/503", body=b"", status=503)
        # first retry
        r = self.mw.process_response(req, rsp)
        assert r is rsp
        # Test retry when dont_retry set to False
        req = Request("http://www.scrapytest.org/503", meta={"dont_retry": False})
        rsp = Response("http://www.scrapytest.org/503")
        # NOTE(review): this Response uses the default status (200), so it is
        # passed through regardless of dont_retry — the assertion below is
        # vacuous as written; ``status=503`` looks intended. Confirm.
        # first retry
        r = self.mw.process_response(req, rsp)
        assert r is rsp

    def test_dont_retry_exc(self):
        """With dont_retry, exceptions are not retried either (None is returned)."""
        req = Request("http://www.scrapytest.org/503", meta={"dont_retry": True})
        r = self.mw.process_exception(req, DNSLookupError())
        assert r is None

    def test_503(self):
        """A 503 is retried max_retry_times times, then discarded; stats recorded."""
        req = Request("http://www.scrapytest.org/503")
        rsp = Response("http://www.scrapytest.org/503", body=b"", status=503)
        # first retry
        req = self.mw.process_response(req, rsp)
        assert isinstance(req, Request)
        assert req.meta["retry_times"] == 1
        # second retry
        req = self.mw.process_response(req, rsp)
        assert isinstance(req, Request)
        assert req.meta["retry_times"] == 2
        # discard it
        assert self.mw.process_response(req, rsp) is rsp
        assert self.crawler.stats.get_value("retry/max_reached") == 1
        assert (
            self.crawler.stats.get_value("retry/reason_count/503 Service Unavailable")
            == 2
        )
        assert self.crawler.stats.get_value("retry/count") == 2

    def test_twistederrors(self):
        """Each default network exception type triggers the retry cycle."""
        exceptions = [
            ConnectError,
            ConnectionDone,
            ConnectionLost,
            TxConnectionRefusedError,
            defer.TimeoutError,
            DNSLookupError,
            ResponseFailed,
            TCPTimedOutError,
            TxTimeoutError,
        ]
        for exc in exceptions:
            req = Request(f"http://www.scrapytest.org/{exc.__name__}")
            self._test_retry_exception(req, exc("foo"))
        stats = self.crawler.stats
        assert stats.get_value("retry/max_reached") == len(exceptions)
        assert stats.get_value("retry/count") == len(exceptions) * 2
        assert (
            stats.get_value("retry/reason_count/twisted.internet.defer.TimeoutError")
            == 2
        )

    def test_exception_to_retry_added(self):
        """Exception classes appended to RETRY_EXCEPTIONS are retried too."""
        exc = ValueError
        settings_dict = {
            "RETRY_EXCEPTIONS": [*RETRY_EXCEPTIONS, exc],
        }
        crawler = get_crawler(DefaultSpider, settings_dict=settings_dict)
        crawler.spider = crawler._create_spider()
        mw = RetryMiddleware.from_crawler(crawler)
        req = Request(f"http://www.scrapytest.org/{exc.__name__}")
        self._test_retry_exception(req, exc("foo"), mw)

    def _test_retry_exception(self, req, exception, mw=None):
        """Drive two retries for *exception*, then assert the request is dropped."""
        if mw is None:
            mw = self.mw
        # first retry
        req = mw.process_exception(req, exception)
        assert isinstance(req, Request)
        assert req.meta["retry_times"] == 1
        # second retry
        req = mw.process_exception(req, exception)
        assert isinstance(req, Request)
        assert req.meta["retry_times"] == 2
        # discard it
        req = mw.process_exception(req, exception)
        assert req is None
class TestMaxRetryTimes:
    """Interaction of the RETRY_TIMES setting with the max_retry_times meta key."""

    invalid_url = "http://www.scrapytest.org/invalid_url"

    def get_middleware(self, settings=None):
        """Build a RetryMiddleware from a fresh crawler with *settings*."""
        crawler = get_crawler(DefaultSpider, settings or {})
        crawler.spider = crawler._create_spider()
        return RetryMiddleware.from_crawler(crawler)

    def test_with_settings_zero(self):
        """RETRY_TIMES=0 drops the request on the first failure."""
        max_retry_times = 0
        settings = {"RETRY_TIMES": max_retry_times}
        middleware = self.get_middleware(settings)
        req = Request(self.invalid_url)
        self._test_retry(
            req,
            DNSLookupError("foo"),
            max_retry_times,
            middleware=middleware,
        )

    def test_with_metakey_zero(self):
        """max_retry_times=0 in meta drops the request on the first failure."""
        max_retry_times = 0
        middleware = self.get_middleware()
        meta = {"max_retry_times": max_retry_times}
        req = Request(self.invalid_url, meta=meta)
        self._test_retry(
            req,
            DNSLookupError("foo"),
            max_retry_times,
            middleware=middleware,
        )

    def test_without_metakey(self):
        """Without the meta key, the RETRY_TIMES setting alone applies."""
        max_retry_times = 5
        settings = {"RETRY_TIMES": max_retry_times}
        middleware = self.get_middleware(settings)
        req = Request(self.invalid_url)
        self._test_retry(
            req,
            DNSLookupError("foo"),
            max_retry_times,
            middleware=middleware,
        )

    def test_with_metakey_greater(self):
        """The meta key overrides the setting even when it is greater."""
        meta_max_retry_times = 3
        middleware_max_retry_times = 2
        req1 = Request(self.invalid_url, meta={"max_retry_times": meta_max_retry_times})
        req2 = Request(self.invalid_url)
        settings = {"RETRY_TIMES": middleware_max_retry_times}
        middleware = self.get_middleware(settings)
        self._test_retry(
            req1,
            DNSLookupError("foo"),
            meta_max_retry_times,
            middleware=middleware,
        )
        self._test_retry(
            req2,
            DNSLookupError("foo"),
            middleware_max_retry_times,
            middleware=middleware,
        )

    def test_with_metakey_lesser(self):
        """The meta key overrides the setting even when it is smaller."""
        meta_max_retry_times = 4
        middleware_max_retry_times = 5
        req1 = Request(self.invalid_url, meta={"max_retry_times": meta_max_retry_times})
        req2 = Request(self.invalid_url)
        settings = {"RETRY_TIMES": middleware_max_retry_times}
        middleware = self.get_middleware(settings)
        self._test_retry(
            req1,
            DNSLookupError("foo"),
            meta_max_retry_times,
            middleware=middleware,
        )
        self._test_retry(
            req2,
            DNSLookupError("foo"),
            middleware_max_retry_times,
            middleware=middleware,
        )

    def test_with_dont_retry(self):
        """dont_retry wins over max_retry_times: no retries at all."""
        max_retry_times = 4
        middleware = self.get_middleware()
        meta = {
            "max_retry_times": max_retry_times,
            "dont_retry": True,
        }
        req = Request(self.invalid_url, meta=meta)
        self._test_retry(
            req,
            DNSLookupError("foo"),
            0,
            middleware=middleware,
        )

    def _test_retry(
        self,
        req,
        exception,
        max_retry_times,
        middleware=None,
    ):
        """Assert *req* is retried exactly *max_retry_times* times, then dropped.

        Fix: the previous fallback was ``middleware or self.mw``, but this
        class never sets ``self.mw`` (that attribute belongs to TestRetry),
        so calling without *middleware* raised AttributeError. Fall back to a
        default-configured middleware instead.
        """
        if middleware is None:
            middleware = self.get_middleware()
        for _ in range(max_retry_times):
            req = middleware.process_exception(req, exception)
            assert isinstance(req, Request)
        # beyond the limit the request is discarded (None returned)
        req = middleware.process_exception(req, exception)
        assert req is None
class TestGetRetryRequest:
    """Tests for the ``get_retry_request`` helper.

    Each test builds a spider bound to a crawler (so stats are recorded) and
    checks the returned request, the ``retry/*`` stats, and the exact log line
    emitted by ``scrapy.downloadermiddlewares.retry``.
    """

    def get_spider(self, settings=None):
        """Return a spider named 'foo' bound to a crawler with *settings*."""
        crawler = get_crawler(Spider, settings or {})
        return crawler._create_spider("foo")

    def test_basic_usage(self):
        """Defaults: one retry copy, retry_times=1, priority -1, DEBUG log."""
        request = Request("https://example.com")
        spider = self.get_spider()
        with LogCapture() as log:
            new_request = get_retry_request(
                request,
                spider=spider,
            )
        assert isinstance(new_request, Request)
        assert new_request != request
        # Retries must bypass the dupe filter to be scheduled again.
        assert new_request.dont_filter
        expected_retry_times = 1
        assert new_request.meta["retry_times"] == expected_retry_times
        assert new_request.priority == -1
        expected_reason = "unspecified"
        for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
            assert spider.crawler.stats.get_value(stat) == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_max_retries_reached(self):
        """max_retry_times=0: returns None, bumps retry/max_reached, logs ERROR."""
        request = Request("https://example.com")
        spider = self.get_spider()
        max_retry_times = 0
        with LogCapture() as log:
            new_request = get_retry_request(
                request,
                spider=spider,
                max_retry_times=max_retry_times,
            )
        assert new_request is None
        assert spider.crawler.stats.get_value("retry/max_reached") == 1
        # The logged failure count includes the initial (non-retry) attempt.
        failure_count = max_retry_times + 1
        expected_reason = "unspecified"
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "ERROR",
                f"Gave up retrying {request} (failed {failure_count} times): "
                f"{expected_reason}",
            )
        )

    def test_one_retry(self):
        """max_retry_times=1 allows exactly one retry copy."""
        request = Request("https://example.com")
        spider = self.get_spider()
        with LogCapture() as log:
            new_request = get_retry_request(
                request,
                spider=spider,
                max_retry_times=1,
            )
        assert isinstance(new_request, Request)
        assert new_request != request
        assert new_request.dont_filter
        expected_retry_times = 1
        assert new_request.meta["retry_times"] == expected_retry_times
        assert new_request.priority == -1
        expected_reason = "unspecified"
        for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
            assert spider.crawler.stats.get_value(stat) == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_two_retries(self):
        """retry_times/priority accumulate per retry; the N+1th attempt gives up."""
        spider = self.get_spider()
        request = Request("https://example.com")
        new_request = request
        max_retry_times = 2
        for index in range(max_retry_times):
            with LogCapture() as log:
                new_request = get_retry_request(
                    new_request,
                    spider=spider,
                    max_retry_times=max_retry_times,
                )
            assert isinstance(new_request, Request)
            assert new_request != request
            assert new_request.dont_filter
            expected_retry_times = index + 1
            assert new_request.meta["retry_times"] == expected_retry_times
            # Priority decreases by one per retry.
            assert new_request.priority == -expected_retry_times
            expected_reason = "unspecified"
            for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
                value = spider.crawler.stats.get_value(stat)
                assert value == expected_retry_times
            log.check_present(
                (
                    "scrapy.downloadermiddlewares.retry",
                    "DEBUG",
                    f"Retrying {request} (failed {expected_retry_times} times): "
                    f"{expected_reason}",
                )
            )
        # A further attempt exceeds the budget and returns None.
        with LogCapture() as log:
            new_request = get_retry_request(
                new_request,
                spider=spider,
                max_retry_times=max_retry_times,
            )
        assert new_request is None
        assert spider.crawler.stats.get_value("retry/max_reached") == 1
        failure_count = max_retry_times + 1
        expected_reason = "unspecified"
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "ERROR",
                f"Gave up retrying {request} (failed {failure_count} times): "
                f"{expected_reason}",
            )
        )

    def test_no_spider(self):
        """The spider keyword argument is mandatory."""
        request = Request("https://example.com")
        with pytest.raises(TypeError):
            get_retry_request(request)  # pylint: disable=missing-kwoa

    def test_max_retry_times_setting(self):
        """RETRY_TIMES=0 from settings prevents any retry."""
        max_retry_times = 0
        spider = self.get_spider({"RETRY_TIMES": max_retry_times})
        request = Request("https://example.com")
        new_request = get_retry_request(
            request,
            spider=spider,
        )
        assert new_request is None

    def test_max_retry_times_meta(self):
        """Request meta max_retry_times overrides the RETRY_TIMES setting."""
        max_retry_times = 0
        spider = self.get_spider({"RETRY_TIMES": max_retry_times + 1})
        meta = {"max_retry_times": max_retry_times}
        request = Request("https://example.com", meta=meta)
        new_request = get_retry_request(
            request,
            spider=spider,
        )
        assert new_request is None

    def test_max_retry_times_argument(self):
        """The max_retry_times argument overrides both setting and meta."""
        max_retry_times = 0
        spider = self.get_spider({"RETRY_TIMES": max_retry_times + 1})
        meta = {"max_retry_times": max_retry_times + 1}
        request = Request("https://example.com", meta=meta)
        new_request = get_retry_request(
            request,
            spider=spider,
            max_retry_times=max_retry_times,
        )
        assert new_request is None

    def test_priority_adjust_setting(self):
        """RETRY_PRIORITY_ADJUST from settings is applied to the retry request."""
        priority_adjust = 1
        spider = self.get_spider({"RETRY_PRIORITY_ADJUST": priority_adjust})
        request = Request("https://example.com")
        new_request = get_retry_request(
            request,
            spider=spider,
        )
        assert new_request.priority == priority_adjust

    def test_priority_adjust_argument(self):
        """The priority_adjust argument overrides the setting."""
        priority_adjust = 1
        spider = self.get_spider({"RETRY_PRIORITY_ADJUST": priority_adjust + 1})
        request = Request("https://example.com")
        new_request = get_retry_request(
            request,
            spider=spider,
            priority_adjust=priority_adjust,
        )
        assert new_request.priority == priority_adjust

    def test_log_extra_retry_success(self):
        """The retry DEBUG log record carries the spider in its extra data."""
        request = Request("https://example.com")
        spider = self.get_spider()
        with LogCapture(attributes=("spider",)) as log:
            get_retry_request(
                request,
                spider=spider,
            )
        log.check_present(spider)

    def test_log_extra_retries_exceeded(self):
        """The give-up ERROR log record carries the spider in its extra data."""
        request = Request("https://example.com")
        spider = self.get_spider()
        with LogCapture(attributes=("spider",)) as log:
            get_retry_request(
                request,
                spider=spider,
                max_retry_times=0,
            )
        log.check_present(spider)

    def test_reason_string(self):
        """A string reason appears verbatim in stats keys and log messages."""
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = "because"
        with LogCapture() as log:
            get_retry_request(
                request,
                spider=spider,
                reason=expected_reason,
            )
        expected_retry_times = 1
        for stat in ("retry/count", f"retry/reason_count/{expected_reason}"):
            assert spider.crawler.stats.get_value(stat) == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_reason_builtin_exception(self):
        """A builtin exception instance is recorded by its dotted class path."""
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = NotImplementedError()
        expected_reason_string = "builtins.NotImplementedError"
        with LogCapture() as log:
            get_retry_request(
                request,
                spider=spider,
                reason=expected_reason,
            )
        expected_retry_times = 1
        stat = spider.crawler.stats.get_value(
            f"retry/reason_count/{expected_reason_string}"
        )
        assert stat == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_reason_builtin_exception_class(self):
        """A builtin exception class is recorded by its dotted class path."""
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = NotImplementedError
        expected_reason_string = "builtins.NotImplementedError"
        with LogCapture() as log:
            get_retry_request(
                request,
                spider=spider,
                reason=expected_reason,
            )
        expected_retry_times = 1
        stat = spider.crawler.stats.get_value(
            f"retry/reason_count/{expected_reason_string}"
        )
        assert stat == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_reason_custom_exception(self):
        """A custom exception instance is recorded by its dotted class path."""
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = IgnoreRequest()
        expected_reason_string = "scrapy.exceptions.IgnoreRequest"
        with LogCapture() as log:
            get_retry_request(
                request,
                spider=spider,
                reason=expected_reason,
            )
        expected_retry_times = 1
        stat = spider.crawler.stats.get_value(
            f"retry/reason_count/{expected_reason_string}"
        )
        assert stat == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_reason_custom_exception_class(self):
        """A custom exception class is recorded by its dotted class path."""
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = IgnoreRequest
        expected_reason_string = "scrapy.exceptions.IgnoreRequest"
        with LogCapture() as log:
            get_retry_request(
                request,
                spider=spider,
                reason=expected_reason,
            )
        expected_retry_times = 1
        stat = spider.crawler.stats.get_value(
            f"retry/reason_count/{expected_reason_string}"
        )
        assert stat == 1
        log.check_present(
            (
                "scrapy.downloadermiddlewares.retry",
                "DEBUG",
                f"Retrying {request} (failed {expected_retry_times} times): "
                f"{expected_reason}",
            )
        )

    def test_custom_logger(self):
        """A caller-supplied logger receives the retry DEBUG message."""
        logger = logging.getLogger("custom-logger")
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = "because"
        with LogCapture() as log:
            get_retry_request(
                request,
                spider=spider,
                reason=expected_reason,
                logger=logger,
            )
        log.check_present(
            (
                "custom-logger",
                "DEBUG",
                f"Retrying {request} (failed 1 times): {expected_reason}",
            )
        )

    def test_custom_stats_key(self):
        """stats_base_key replaces the 'retry' prefix in the stats keys."""
        request = Request("https://example.com")
        spider = self.get_spider()
        expected_reason = "because"
        stats_key = "custom_retry"
        get_retry_request(
            request,
            spider=spider,
            reason=expected_reason,
            stats_base_key=stats_key,
        )
        for stat in (
            f"{stats_key}/count",
            f"{stats_key}/reason_count/{expected_reason}",
        ):
            assert spider.crawler.stats.get_value(stat) == 1
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_item.py | tests/test_item.py | from abc import ABCMeta
from unittest import mock
import pytest
from scrapy.item import Field, Item, ItemMeta
class TestItem:
    """Tests for scrapy.item.Item field declaration, access, and inheritance."""

    def assertSortedEqual(self, first, second, msg=None):
        """Assert two iterables hold the same elements, ignoring order."""
        assert sorted(first) == sorted(second), msg

    def test_simple(self):
        """A declared field can be set and read via item access."""

        class TestItem(Item):
            name = Field()

        i = TestItem()
        i["name"] = "name"
        assert i["name"] == "name"

    def test_init(self):
        """Items accept kwargs, dicts, and other items; unknown keys raise."""

        class TestItem(Item):
            name = Field()

        i = TestItem()
        with pytest.raises(KeyError):
            i["name"]

        i2 = TestItem(name="john doe")
        assert i2["name"] == "john doe"

        i3 = TestItem({"name": "john doe"})
        assert i3["name"] == "john doe"

        i4 = TestItem(i3)
        assert i4["name"] == "john doe"

        # Keys not declared as fields are rejected at construction time.
        with pytest.raises(KeyError):
            TestItem({"name": "john doe", "other": "foo"})

    def test_invalid_field(self):
        """Getting or setting an undeclared field raises KeyError."""

        class TestItem(Item):
            pass

        i = TestItem()
        with pytest.raises(KeyError):
            i["field"] = "text"
        with pytest.raises(KeyError):
            i["field"]

    def test_repr(self):
        """repr() is a dict literal that eval()s back to equivalent data."""

        class TestItem(Item):
            name = Field()
            number = Field()

        i = TestItem()
        i["name"] = "John Doe"
        i["number"] = 123
        itemrepr = repr(i)
        assert itemrepr == "{'name': 'John Doe', 'number': 123}"
        i2 = eval(itemrepr)  # pylint: disable=eval-used
        assert i2["name"] == "John Doe"
        assert i2["number"] == 123

    def test_private_attr(self):
        """Underscore-prefixed attributes bypass field handling."""

        class TestItem(Item):
            name = Field()

        i = TestItem()
        i._private = "test"
        assert i._private == "test"

    def test_raise_getattr(self):
        """Fields are not exposed as attributes for reading."""

        class TestItem(Item):
            name = Field()

        i = TestItem()
        with pytest.raises(AttributeError):
            i.name

    def test_raise_setattr(self):
        """Fields cannot be assigned as attributes."""

        class TestItem(Item):
            name = Field()

        i = TestItem()
        with pytest.raises(AttributeError):
            i.name = "john"

    def test_custom_methods(self):
        """User-defined methods on an Item subclass work with item access."""

        class TestItem(Item):
            name = Field()

            def get_name(self):
                return self["name"]

            def change_name(self, name):
                self["name"] = name

        i = TestItem()
        with pytest.raises(KeyError):
            i.get_name()
        i["name"] = "lala"
        assert i.get_name() == "lala"
        i.change_name("other")
        assert i.get_name() == "other"

    def test_metaclass(self):
        """Fields named like dict methods (keys, values) do not shadow them."""

        class TestItem(Item):
            name = Field()
            keys = Field()
            values = Field()

        i = TestItem()
        i["name"] = "John"
        assert list(i.keys()) == ["name"]
        assert list(i.values()) == ["John"]

        i["keys"] = "Keys"
        i["values"] = "Values"
        self.assertSortedEqual(list(i.keys()), ["keys", "values", "name"])
        self.assertSortedEqual(list(i.values()), ["Keys", "Values", "John"])

    def test_metaclass_with_fields_attribute(self):
        """A class-level ``fields`` dict declares fields directly."""

        class TestItem(Item):
            fields = {"new": Field(default="X")}

        item = TestItem(new="New")
        self.assertSortedEqual(list(item.keys()), ["new"])
        self.assertSortedEqual(list(item.values()), ["New"])

    def test_metaclass_inheritance(self):
        """Subclass field declarations merge with (and override) the parent's."""

        class ParentItem(Item):
            name = Field()
            keys = Field()
            values = Field()

        class TestItem(ParentItem):
            keys = Field()

        i = TestItem()
        i["keys"] = 3
        assert list(i.keys()) == ["keys"]
        assert list(i.values()) == [3]

    def test_metaclass_multiple_inheritance_simple(self):
        """With multiple bases, field defaults resolve in MRO order."""

        class A(Item):
            fields = {"load": Field(default="A")}
            save = Field(default="A")

        class B(A):
            pass

        class C(Item):
            fields = {"load": Field(default="C")}
            save = Field(default="C")

        class D(B, C):
            pass

        item = D(save="X", load="Y")
        assert item["save"] == "X"
        assert item["load"] == "Y"
        # D's MRO visits B(->A) before C, so A's declarations win.
        assert D.fields == {"load": {"default": "A"}, "save": {"default": "A"}}

        # D class inverted
        class E(C, B):
            pass

        assert E(save="X")["save"] == "X"
        assert E(load="X")["load"] == "X"
        assert E.fields == {"load": {"default": "C"}, "save": {"default": "C"}}

    def test_metaclass_multiple_inheritance_diamond(self):
        """Diamond inheritance: the closest declaration in MRO order wins."""

        class A(Item):
            fields = {"update": Field(default="A")}
            save = Field(default="A")
            load = Field(default="A")

        class B(A):
            pass

        class C(A):
            fields = {"update": Field(default="C")}
            save = Field(default="C")

        class D(B, C):
            fields = {"update": Field(default="D")}
            load = Field(default="D")

        assert D(save="X")["save"] == "X"
        assert D(load="X")["load"] == "X"
        assert D.fields == {
            "save": {"default": "C"},
            "load": {"default": "D"},
            "update": {"default": "D"},
        }

        # D class inverted
        class E(C, B):
            load = Field(default="E")

        assert E(save="X")["save"] == "X"
        assert E(load="X")["load"] == "X"
        assert E.fields == {
            "save": {"default": "C"},
            "load": {"default": "E"},
            "update": {"default": "C"},
        }

    def test_metaclass_multiple_inheritance_without_metaclass(self):
        """A plain (non-Item) base contributes no fields to the item."""

        class A(Item):
            fields = {"load": Field(default="A")}
            save = Field(default="A")

        class B(A):
            pass

        class C:
            fields = {"load": Field(default="C")}
            not_allowed = Field(default="not_allowed")
            save = Field(default="C")

        class D(B, C):
            pass

        with pytest.raises(KeyError):
            D(not_allowed="value")
        assert D(save="X")["save"] == "X"
        assert D.fields == {"save": {"default": "A"}, "load": {"default": "A"}}

        # D class inverted
        class E(C, B):
            pass

        with pytest.raises(KeyError):
            E(not_allowed="value")
        assert E(save="X")["save"] == "X"
        assert E.fields == {"save": {"default": "A"}, "load": {"default": "A"}}

    def test_to_dict(self):
        """dict(item) yields the populated field mapping."""

        class TestItem(Item):
            name = Field()

        i = TestItem()
        i["name"] = "John"
        assert dict(i) == {"name": "John"}

    def test_copy(self):
        """copy() makes an independent shallow copy."""

        class TestItem(Item):
            name = Field()

        item = TestItem({"name": "lower"})
        copied_item = item.copy()
        assert id(item) != id(copied_item)
        copied_item["name"] = copied_item["name"].upper()
        assert item["name"] != copied_item["name"]

    def test_deepcopy(self):
        """deepcopy() also duplicates mutable field values."""

        class TestItem(Item):
            tags = Field()

        item = TestItem({"tags": ["tag1"]})
        copied_item = item.deepcopy()
        item["tags"].append("tag2")
        assert item["tags"] != copied_item["tags"]
class TestItemMeta:
    """Tests for the ItemMeta metaclass internals."""

    def test_new_method_propagates_classcell(self):
        """ItemMeta must forward __classcell__ to the base metaclass __new__.

        ItemMeta calls the base ``__new__`` twice per class creation; only the
        second (real) call must carry ``__classcell__`` so that zero-argument
        ``super()`` / ``__class__`` work inside methods.
        """
        # Spy on the base metaclass __new__ while preserving its behavior.
        new_mock = mock.Mock(side_effect=ABCMeta.__new__)
        base = ItemMeta.__bases__[0]
        with mock.patch.object(base, "__new__", new_mock):

            class MyItem(Item):
                def f(self):
                    # For rationale of this see:
                    # https://github.com/python/cpython/blob/ee1a81b77444c6715cbe610e951c655b6adab88b/Lib/test/test_super.py#L222
                    return __class__

            MyItem()
        (first_call, second_call) = new_mock.call_args_list[-2:]
        # First internal call: __classcell__ must have been stripped out.
        *_, attrs = first_call[0]
        assert "__classcell__" not in attrs
        # Second call builds the actual class and must include __classcell__.
        *_, attrs = second_call[0]
        assert "__classcell__" in attrs
class TestItemMetaClassCellRegression:
    """Regression test: defining an Item subclass that uses super() must not fail."""

    def test_item_meta_classcell_regression(self):
        class MyItem(Item, metaclass=ItemMeta):
            def __init__(self, *args, **kwargs):  # pylint: disable=useless-parent-delegation
                # This call to super() trigger the __classcell__ propagation
                # requirement. When not done properly raises an error:
                # TypeError: __class__ set to <class '__main__.MyItem'>
                # defining 'MyItem' as <class '__main__.MyItem'>
                super().__init__(*args, **kwargs)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.