Columns: repo (string), file_url (string), file_path (string), content (string), language (string, 1 class), license (string, 7 classes), commit_sha (string, 40 chars), retrieved_at (date), truncated (bool)
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_extension_periodic_log.py
tests/test_extension_periodic_log.py
from __future__ import annotations

import datetime
from typing import TYPE_CHECKING, Any

from scrapy.extensions.periodic_log import PeriodicLog
from scrapy.utils.test import get_crawler

from .spiders import MetaSpider

if TYPE_CHECKING:
    from collections.abc import Callable


stats_dump_1 = {
    "log_count/INFO": 10,
    "log_count/WARNING": 1,
    "start_time": datetime.datetime(2023, 6, 16, 8, 59, 18, 993170),
    "scheduler/enqueued/memory": 190,
    "scheduler/enqueued": 190,
    "scheduler/dequeued/memory": 166,
    "scheduler/dequeued": 166,
    "downloader/request_count": 166,
    "downloader/request_method_count/GET": 166,
    "downloader/request_bytes": 56803,
    "downloader/response_count": 150,
    "downloader/response_status_count/200": 150,
    "downloader/response_bytes": 595698,
    "httpcompression/response_bytes": 3186068,
    "httpcompression/response_count": 150,
    "response_received_count": 150,
    "request_depth_max": 9,
    "dupefilter/filtered": 180,
    "item_scraped_count": 140,
}
stats_dump_2 = {
    "log_count/INFO": 12,
    "log_count/WARNING": 1,
    "start_time": datetime.datetime(2023, 6, 16, 8, 59, 18, 993170),
    "scheduler/enqueued/memory": 337,
    "scheduler/enqueued": 337,
    "scheduler/dequeued/memory": 280,
    "scheduler/dequeued": 280,
    "downloader/request_count": 280,
    "downloader/request_method_count/GET": 280,
    "downloader/request_bytes": 95754,
    "downloader/response_count": 264,
    "downloader/response_status_count/200": 264,
    "downloader/response_bytes": 1046274,
    "httpcompression/response_bytes": 5614484,
    "httpcompression/response_count": 264,
    "response_received_count": 264,
    "request_depth_max": 16,
    "dupefilter/filtered": 320,
    "item_scraped_count": 248,
}


class CustomPeriodicLog(PeriodicLog):
    def set_a(self):
        self.stats._stats = stats_dump_1

    def set_b(self):
        self.stats._stats = stats_dump_2


def extension(settings: dict[str, Any] | None = None) -> CustomPeriodicLog:
    crawler = get_crawler(MetaSpider, settings)
    return CustomPeriodicLog.from_crawler(crawler)


class TestPeriodicLog:
    def test_extension_enabled(self):
        # Expected that settings for this extension load successfully,
        # and that on certain conditions the extension raises NotConfigured.

        # "PERIODIC_LOG_STATS": True -> set to {"enabled": True}
        # due to TypeError exception from settings.getdict
        assert extension({"PERIODIC_LOG_STATS": True, "LOGSTATS_INTERVAL": 60})

        # "PERIODIC_LOG_STATS": "True" -> set to {"enabled": True}
        # due to JSONDecodeError(ValueError) exception from settings.getdict
        assert extension({"PERIODIC_LOG_STATS": "True", "LOGSTATS_INTERVAL": 60})

        # The same for PERIODIC_LOG_DELTA:
        assert extension({"PERIODIC_LOG_DELTA": True, "LOGSTATS_INTERVAL": 60})
        assert extension({"PERIODIC_LOG_DELTA": "True", "LOGSTATS_INTERVAL": 60})

    def test_log_delta(self):
        def emulate(settings=None):
            spider = MetaSpider()
            ext = extension(settings)
            ext.spider_opened(spider)
            ext.set_a()
            a = ext.log_delta()
            ext.set_b()
            b = ext.log_delta()
            ext.spider_closed(spider, reason="finished")
            return ext, a, b

        def check(settings: dict[str, Any], condition: Callable) -> None:
            ext, a, b = emulate(settings)
            assert list(a["delta"].keys()) == [
                k for k, v in ext.stats._stats.items() if condition(k, v)
            ]
            assert list(b["delta"].keys()) == [
                k for k, v in ext.stats._stats.items() if condition(k, v)
            ]

        # Including all
        check({"PERIODIC_LOG_DELTA": True}, lambda k, v: isinstance(v, (int, float)))

        # include:
        check(
            {"PERIODIC_LOG_DELTA": {"include": ["downloader/"]}},
            lambda k, v: isinstance(v, (int, float)) and "downloader/" in k,
        )

        # include multiple
        check(
            {"PERIODIC_LOG_DELTA": {"include": ["downloader/", "scheduler/"]}},
            lambda k, v: isinstance(v, (int, float))
            and ("downloader/" in k or "scheduler/" in k),
        )

        # exclude
        check(
            {"PERIODIC_LOG_DELTA": {"exclude": ["downloader/"]}},
            lambda k, v: isinstance(v, (int, float)) and "downloader/" not in k,
        )

        # exclude multiple
        check(
            {"PERIODIC_LOG_DELTA": {"exclude": ["downloader/", "scheduler/"]}},
            lambda k, v: isinstance(v, (int, float))
            and ("downloader/" not in k and "scheduler/" not in k),
        )

        # include exclude combined
        check(
            {"PERIODIC_LOG_DELTA": {"include": ["downloader/"], "exclude": ["bytes"]}},
            lambda k, v: isinstance(v, (int, float))
            and ("downloader/" in k and "bytes" not in k),
        )

    def test_log_stats(self):
        def emulate(settings=None):
            spider = MetaSpider()
            ext = extension(settings)
            ext.spider_opened(spider)
            ext.set_a()
            a = ext.log_crawler_stats()
            ext.set_b()
            b = ext.log_crawler_stats()
            ext.spider_closed(spider, reason="finished")
            return ext, a, b

        def check(settings: dict[str, Any], condition: Callable) -> None:
            ext, a, b = emulate(settings)
            assert list(a["stats"].keys()) == [
                k for k, v in ext.stats._stats.items() if condition(k, v)
            ]
            assert list(b["stats"].keys()) == [
                k for k, v in ext.stats._stats.items() if condition(k, v)
            ]

        # Including all
        check({"PERIODIC_LOG_STATS": True}, lambda k, v: True)

        # include:
        check(
            {"PERIODIC_LOG_STATS": {"include": ["downloader/"]}},
            lambda k, v: "downloader/" in k,
        )

        # include multiple
        check(
            {"PERIODIC_LOG_STATS": {"include": ["downloader/", "scheduler/"]}},
            lambda k, v: "downloader/" in k or "scheduler/" in k,
        )

        # exclude
        check(
            {"PERIODIC_LOG_STATS": {"exclude": ["downloader/"]}},
            lambda k, v: "downloader/" not in k,
        )

        # exclude multiple
        check(
            {"PERIODIC_LOG_STATS": {"exclude": ["downloader/", "scheduler/"]}},
            lambda k, v: "downloader/" not in k and "scheduler/" not in k,
        )

        # include exclude combined
        check(
            {"PERIODIC_LOG_STATS": {"include": ["downloader/"], "exclude": ["bytes"]}},
            lambda k, v: "downloader/" in k and "bytes" not in k,
        )
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloader_handlers.py
tests/test_downloader_handlers.py
"""Tests for DownloadHandlers and for specific non-HTTP download handlers.""" from __future__ import annotations import contextlib import os from pathlib import Path from tempfile import mkdtemp, mkstemp from unittest import mock import pytest from w3lib.url import path_to_file_uri from scrapy.core.downloader.handlers import DownloadHandlers from scrapy.core.downloader.handlers.datauri import DataURIDownloadHandler from scrapy.core.downloader.handlers.file import FileDownloadHandler from scrapy.core.downloader.handlers.s3 import S3DownloadHandler from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning from scrapy.http import Request from scrapy.responsetypes import responsetypes from scrapy.utils.boto import is_botocore_available from scrapy.utils.defer import deferred_f_from_coro_f from scrapy.utils.misc import build_from_crawler from scrapy.utils.test import get_crawler class DummyDH: lazy = False async def download_request(self, request): pass class DummyLazyDH: # Default (but deprecated) is lazy for backward compatibility async def download_request(self, request): pass class OffDH: lazy = False def __init__(self, crawler): raise NotConfigured @classmethod def from_crawler(cls, crawler): return cls(crawler) class BuggyDH: lazy = False def __init__(self, crawler): raise ValueError @classmethod def from_crawler(cls, crawler): return cls(crawler) class TestLoad: def test_enabled_handler(self): handlers = {"scheme": DummyDH} crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers}) dh = DownloadHandlers(crawler) assert "scheme" in dh._schemes assert "scheme" in dh._handlers assert "scheme" not in dh._notconfigured def test_not_configured_handler(self): handlers = {"scheme": OffDH} crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers}) dh = DownloadHandlers(crawler) assert "scheme" in dh._schemes assert "scheme" not in dh._handlers assert "scheme" in dh._notconfigured def test_buggy_handler(self, caplog: pytest.LogCaptureFixture) -> None: handlers = {"scheme": BuggyDH} crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers}) dh = DownloadHandlers(crawler) assert "scheme" in dh._schemes assert "scheme" not in dh._handlers assert "scheme" in dh._notconfigured assert ( 'Loading "<class \'tests.test_downloader_handlers.BuggyDH\'>" for scheme "scheme"' in caplog.text ) def test_disabled_handler(self): handlers = {"scheme": None} crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers}) dh = DownloadHandlers(crawler) assert "scheme" not in dh._schemes for scheme in handlers: # force load handlers dh._get_handler(scheme) assert "scheme" not in dh._handlers assert "scheme" in dh._notconfigured def test_lazy_handlers(self): handlers = {"scheme": DummyLazyDH} crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers}) with pytest.warns( ScrapyDeprecationWarning, match="DummyLazyDH doesn't define a 'lazy' attribute", ): dh = DownloadHandlers(crawler) assert "scheme" in dh._schemes assert "scheme" not in dh._handlers for scheme in handlers: # force load lazy handler dh._get_handler(scheme) assert "scheme" in dh._handlers assert "scheme" not in dh._notconfigured class TestFile: def setup_method(self): # add a special char to check that they are handled correctly self.fd, self.tmpname = mkstemp(suffix="^") Path(self.tmpname).write_text("0123456789", encoding="utf-8") download_handler = build_from_crawler(FileDownloadHandler, get_crawler()) self.download_request = download_handler.download_request def teardown_method(self): 
os.close(self.fd) Path(self.tmpname).unlink() @deferred_f_from_coro_f async def test_download(self): request = Request(path_to_file_uri(self.tmpname)) assert request.url.upper().endswith("%5E") response = await self.download_request(request) assert response.url == request.url assert response.status == 200 assert response.body == b"0123456789" assert response.protocol is None @deferred_f_from_coro_f async def test_non_existent(self): request = Request(path_to_file_uri(mkdtemp())) # the specific exception differs between platforms with pytest.raises(OSError): # noqa: PT011 await self.download_request(request) class HttpDownloadHandlerMock: def __init__(self, *args, **kwargs): pass async def download_request(self, request): return request @pytest.mark.requires_botocore class TestS3Anon: def setup_method(self): crawler = get_crawler() with mock.patch( "scrapy.core.downloader.handlers.s3.HTTP11DownloadHandler", HttpDownloadHandlerMock, ): self.s3reqh = build_from_crawler(S3DownloadHandler, crawler) self.download_request = self.s3reqh.download_request @deferred_f_from_coro_f async def test_anon_request(self): req = Request("s3://aws-publicdatasets/") httpreq = await self.download_request(req) assert hasattr(self.s3reqh, "anon") assert self.s3reqh.anon assert httpreq.url == "http://aws-publicdatasets.s3.amazonaws.com/" @pytest.mark.requires_botocore class TestS3: def setup_method(self): # test use same example keys than amazon developer guide # http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf # and the tests described here are the examples from that manual crawler = get_crawler( settings_dict={ "AWS_ACCESS_KEY_ID": "0PN5J17HBGZHT7JJ3X82", "AWS_SECRET_ACCESS_KEY": "uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o", } ) with mock.patch( "scrapy.core.downloader.handlers.s3.HTTP11DownloadHandler", HttpDownloadHandlerMock, ): s3reqh = build_from_crawler(S3DownloadHandler, crawler) self.download_request = s3reqh.download_request @contextlib.contextmanager def _mocked_date(self, date): try: import botocore.auth # noqa: F401,PLC0415 except ImportError: yield else: # We need to mock botocore.auth.formatdate, because otherwise # botocore overrides Date header with current date and time # and Authorization header is different each time with mock.patch("botocore.auth.formatdate") as mock_formatdate: mock_formatdate.return_value = date yield @deferred_f_from_coro_f async def test_request_signing1(self): # gets an object from the johnsmith bucket. date = "Tue, 27 Mar 2007 19:36:42 +0000" req = Request("s3://johnsmith/photos/puppy.jpg", headers={"Date": date}) with self._mocked_date(date): httpreq = await self.download_request(req) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=" ) @deferred_f_from_coro_f async def test_request_signing2(self): # puts an object into the johnsmith bucket. date = "Tue, 27 Mar 2007 21:15:45 +0000" req = Request( "s3://johnsmith/photos/puppy.jpg", method="PUT", headers={ "Content-Type": "image/jpeg", "Date": date, "Content-Length": "94328", }, ) with self._mocked_date(date): httpreq = await self.download_request(req) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=" ) @deferred_f_from_coro_f async def test_request_signing3(self): # lists the content of the johnsmith bucket. 
date = "Tue, 27 Mar 2007 19:42:41 +0000" req = Request( "s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy", method="GET", headers={ "User-Agent": "Mozilla/5.0", "Date": date, }, ) with self._mocked_date(date): httpreq = await self.download_request(req) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=" ) @deferred_f_from_coro_f async def test_request_signing4(self): # fetches the access control policy sub-resource for the 'johnsmith' bucket. date = "Tue, 27 Mar 2007 19:44:46 +0000" req = Request("s3://johnsmith/?acl", method="GET", headers={"Date": date}) with self._mocked_date(date): httpreq = await self.download_request(req) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=" ) @deferred_f_from_coro_f async def test_request_signing6(self): # uploads an object to a CNAME style virtual hosted bucket with metadata. date = "Tue, 27 Mar 2007 21:06:08 +0000" req = Request( "s3://static.johnsmith.net:8080/db-backup.dat.gz", method="PUT", headers={ "User-Agent": "curl/7.15.5", "Host": "static.johnsmith.net:8080", "Date": date, "x-amz-acl": "public-read", "content-type": "application/x-download", "Content-MD5": "4gJE4saaMU4BqNR0kLY+lw==", "X-Amz-Meta-ReviewedBy": "joe@johnsmith.net,jane@johnsmith.net", "X-Amz-Meta-FileChecksum": "0x02661779", "X-Amz-Meta-ChecksumAlgorithm": "crc32", "Content-Disposition": "attachment; filename=database.dat", "Content-Encoding": "gzip", "Content-Length": "5913339", }, ) with self._mocked_date(date): httpreq = await self.download_request(req) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=" ) @deferred_f_from_coro_f async def test_request_signing7(self): # ensure that spaces are quoted properly before signing date = "Tue, 27 Mar 2007 19:42:41 +0000" req = Request( "s3://johnsmith/photos/my puppy.jpg?response-content-disposition=my puppy.jpg", method="GET", headers={"Date": date}, ) with self._mocked_date(date): httpreq = await self.download_request(req) assert ( httpreq.headers["Authorization"] == b"AWS 0PN5J17HBGZHT7JJ3X82:+CfvG8EZ3YccOrRVMXNaK2eKZmM=" ) @pytest.mark.skipif(is_botocore_available(), reason="Requires not having botocore") def test_s3_no_botocore() -> None: crawler = get_crawler() with pytest.raises(NotConfigured, match="missing botocore library"): build_from_crawler(S3DownloadHandler, crawler) class TestDataURI: def setup_method(self): crawler = get_crawler() download_handler = build_from_crawler(DataURIDownloadHandler, crawler) self.download_request = download_handler.download_request @deferred_f_from_coro_f async def test_response_attrs(self): uri = "data:,A%20brief%20note" request = Request(uri) response = await self.download_request(request) assert response.url == uri assert not response.headers @deferred_f_from_coro_f async def test_default_mediatype_encoding(self): request = Request("data:,A%20brief%20note") response = await self.download_request(request) assert response.text == "A brief note" assert type(response) is responsetypes.from_mimetype("text/plain") # pylint: disable=unidiomatic-typecheck assert response.encoding == "US-ASCII" @deferred_f_from_coro_f async def test_default_mediatype(self): request = Request("data:;charset=iso-8859-7,%be%d3%be") response = await self.download_request(request) assert response.text == "\u038e\u03a3\u038e" assert type(response) is responsetypes.from_mimetype("text/plain") # pylint: disable=unidiomatic-typecheck assert response.encoding == 
"iso-8859-7" @deferred_f_from_coro_f async def test_text_charset(self): request = Request("data:text/plain;charset=iso-8859-7,%be%d3%be") response = await self.download_request(request) assert response.text == "\u038e\u03a3\u038e" assert response.body == b"\xbe\xd3\xbe" assert response.encoding == "iso-8859-7" @deferred_f_from_coro_f async def test_mediatype_parameters(self): request = Request( "data:text/plain;foo=%22foo;bar%5C%22%22;" "charset=utf-8;bar=%22foo;%5C%22 foo ;/,%22" ",%CE%8E%CE%A3%CE%8E" ) response = await self.download_request(request) assert response.text == "\u038e\u03a3\u038e" assert type(response) is responsetypes.from_mimetype("text/plain") # pylint: disable=unidiomatic-typecheck assert response.encoding == "utf-8" @deferred_f_from_coro_f async def test_base64(self): request = Request("data:text/plain;base64,SGVsbG8sIHdvcmxkLg%3D%3D") response = await self.download_request(request) assert response.text == "Hello, world." @deferred_f_from_coro_f async def test_protocol(self): request = Request("data:,") response = await self.download_request(request) assert response.protocol is None
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_cookies.py
tests/test_downloadermiddleware_cookies.py
import logging

import pytest
from testfixtures import LogCapture

from scrapy.downloadermiddlewares.cookies import CookiesMiddleware
from scrapy.downloadermiddlewares.defaultheaders import DefaultHeadersMiddleware
from scrapy.downloadermiddlewares.redirect import RedirectMiddleware
from scrapy.exceptions import NotConfigured
from scrapy.http import Request, Response
from scrapy.utils.python import to_bytes
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler

UNSET = object()


def _cookie_to_set_cookie_value(cookie):
    """Given a cookie defined as a dictionary with name and value keys, and
    optional path and domain keys, return the equivalent string that can be
    associated with a ``Set-Cookie`` header."""
    decoded = {}
    for key in ("name", "value", "path", "domain"):
        if cookie.get(key) is None:
            if key in ("name", "value"):
                return None
            continue
        if isinstance(cookie[key], (bool, float, int, str)):
            decoded[key] = str(cookie[key])
        else:
            try:
                decoded[key] = cookie[key].decode("utf8")
            except UnicodeDecodeError:
                decoded[key] = cookie[key].decode("latin1", errors="replace")

    cookie_str = f"{decoded.pop('name')}={decoded.pop('value')}"
    for key, value in decoded.items():  # path, domain
        cookie_str += f"; {key.capitalize()}={value}"
    return cookie_str


def _cookies_to_set_cookie_list(cookies):
    """Given a group of cookies defined either as a dictionary or as a list of
    dictionaries (i.e. in a format supported by the cookies parameter of
    Request), return the equivalent list of strings that can be associated
    with a ``Set-Cookie`` header."""
    if not cookies:
        return []
    if isinstance(cookies, dict):
        cookies = ({"name": k, "value": v} for k, v in cookies.items())
    return filter(None, (_cookie_to_set_cookie_value(cookie) for cookie in cookies))


class TestCookiesMiddleware:
    def assertCookieValEqual(self, first, second, msg=None):
        def split_cookies(cookies):
            return sorted([s.strip() for s in to_bytes(cookies).split(b";")])

        assert split_cookies(first) == split_cookies(second), msg

    def setup_method(self):
        crawler = get_crawler(DefaultSpider)
        crawler.spider = crawler._create_spider()
        self.mw = CookiesMiddleware.from_crawler(crawler)
        self.redirect_middleware = RedirectMiddleware.from_crawler(crawler)

    def teardown_method(self):
        del self.mw
        del self.redirect_middleware

    def test_basic(self):
        req = Request("http://scrapytest.org/")
        assert self.mw.process_request(req) is None
        assert "Cookie" not in req.headers

        headers = {"Set-Cookie": "C1=value1; path=/"}
        res = Response("http://scrapytest.org/", headers=headers)
        assert self.mw.process_response(req, res) is res

        req2 = Request("http://scrapytest.org/sub1/")
        assert self.mw.process_request(req2) is None
        assert req2.headers.get("Cookie") == b"C1=value1"

    def test_setting_false_cookies_enabled(self):
        with pytest.raises(NotConfigured):
            CookiesMiddleware.from_crawler(
                get_crawler(settings_dict={"COOKIES_ENABLED": False})
            )

    def test_setting_default_cookies_enabled(self):
        assert isinstance(
            CookiesMiddleware.from_crawler(get_crawler()), CookiesMiddleware
        )

    def test_setting_true_cookies_enabled(self):
        assert isinstance(
            CookiesMiddleware.from_crawler(
                get_crawler(settings_dict={"COOKIES_ENABLED": True})
            ),
            CookiesMiddleware,
        )

    def test_setting_enabled_cookies_debug(self):
        crawler = get_crawler(settings_dict={"COOKIES_DEBUG": True})
        mw = CookiesMiddleware.from_crawler(crawler)
        with LogCapture(
            "scrapy.downloadermiddlewares.cookies",
            propagate=False,
            level=logging.DEBUG,
        ) as log:
            req = Request("http://scrapytest.org/")
            res = Response(
                "http://scrapytest.org/", headers={"Set-Cookie": "C1=value1; path=/"}
            )
            mw.process_response(req, res)
            req2 = Request("http://scrapytest.org/sub1/")
            mw.process_request(req2)

            log.check(
                (
                    "scrapy.downloadermiddlewares.cookies",
                    "DEBUG",
                    "Received cookies from: <200 http://scrapytest.org/>\n"
                    "Set-Cookie: C1=value1; path=/\n",
                ),
                (
                    "scrapy.downloadermiddlewares.cookies",
                    "DEBUG",
                    "Sending cookies to: <GET http://scrapytest.org/sub1/>\n"
                    "Cookie: C1=value1\n",
                ),
            )

    def test_setting_disabled_cookies_debug(self):
        crawler = get_crawler(settings_dict={"COOKIES_DEBUG": False})
        mw = CookiesMiddleware.from_crawler(crawler)
        with LogCapture(
            "scrapy.downloadermiddlewares.cookies",
            propagate=False,
            level=logging.DEBUG,
        ) as log:
            req = Request("http://scrapytest.org/")
            res = Response(
                "http://scrapytest.org/", headers={"Set-Cookie": "C1=value1; path=/"}
            )
            mw.process_response(req, res)
            req2 = Request("http://scrapytest.org/sub1/")
            mw.process_request(req2)

            log.check()

    def test_do_not_break_on_non_utf8_header(self):
        req = Request("http://scrapytest.org/")
        assert self.mw.process_request(req) is None
        assert "Cookie" not in req.headers

        headers = {"Set-Cookie": b"C1=in\xa3valid; path=/", "Other": b"ignore\xa3me"}
        res = Response("http://scrapytest.org/", headers=headers)
        assert self.mw.process_response(req, res) is res

        req2 = Request("http://scrapytest.org/sub1/")
        assert self.mw.process_request(req2) is None
        assert "Cookie" in req2.headers

    def test_dont_merge_cookies(self):
        # merge some cookies into jar
        headers = {"Set-Cookie": "C1=value1; path=/"}
        req = Request("http://scrapytest.org/")
        res = Response("http://scrapytest.org/", headers=headers)
        assert self.mw.process_response(req, res) is res

        # test that the Cookie header is not set on the request
        req = Request("http://scrapytest.org/dontmerge", meta={"dont_merge_cookies": 1})
        assert self.mw.process_request(req) is None
        assert "Cookie" not in req.headers

        # check that returned cookies are not merged back to jar
        res = Response(
            "http://scrapytest.org/dontmerge",
            headers={"Set-Cookie": "dont=mergeme; path=/"},
        )
        assert self.mw.process_response(req, res) is res

        # check that cookies are merged back
        req = Request("http://scrapytest.org/mergeme")
        assert self.mw.process_request(req) is None
        assert req.headers.get("Cookie") == b"C1=value1"

        # check that cookies are merged when dont_merge_cookies is passed as 0
        req = Request("http://scrapytest.org/mergeme", meta={"dont_merge_cookies": 0})
        assert self.mw.process_request(req) is None
        assert req.headers.get("Cookie") == b"C1=value1"

    def test_complex_cookies(self):
        # merge some cookies into jar
        cookies = [
            {
                "name": "C1",
                "value": "value1",
                "path": "/foo",
                "domain": "scrapytest.org",
            },
            {
                "name": "C2",
                "value": "value2",
                "path": "/bar",
                "domain": "scrapytest.org",
            },
            {
                "name": "C3",
                "value": "value3",
                "path": "/foo",
                "domain": "scrapytest.org",
            },
            {"name": "C4", "value": "value4", "path": "/foo", "domain": "scrapy.org"},
        ]

        req = Request("http://scrapytest.org/", cookies=cookies)
        self.mw.process_request(req)

        # embed C1 and C3 for scrapytest.org/foo
        req = Request("http://scrapytest.org/foo")
        self.mw.process_request(req)
        assert req.headers.get("Cookie") in (
            b"C1=value1; C3=value3",
            b"C3=value3; C1=value1",
        )

        # embed C2 for scrapytest.org/bar
        req = Request("http://scrapytest.org/bar")
        self.mw.process_request(req)
        assert req.headers.get("Cookie") == b"C2=value2"

        # embed nothing for scrapytest.org/baz
        req = Request("http://scrapytest.org/baz")
        self.mw.process_request(req)
        assert "Cookie" not in req.headers

    def test_merge_request_cookies(self):
        req = Request("http://scrapytest.org/", cookies={"galleta": "salada"})
        assert self.mw.process_request(req) is None
        assert req.headers.get("Cookie") == b"galleta=salada"

        headers = {"Set-Cookie": "C1=value1; path=/"}
        res = Response("http://scrapytest.org/", headers=headers)
        assert self.mw.process_response(req, res) is res

        req2 = Request("http://scrapytest.org/sub1/")
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(
            req2.headers.get("Cookie"), b"C1=value1; galleta=salada"
        )

    def test_cookiejar_key(self):
        req = Request(
            "http://scrapytest.org/",
            cookies={"galleta": "salada"},
            meta={"cookiejar": "store1"},
        )
        assert self.mw.process_request(req) is None
        assert req.headers.get("Cookie") == b"galleta=salada"

        headers = {"Set-Cookie": "C1=value1; path=/"}
        res = Response("http://scrapytest.org/", headers=headers, request=req)
        assert self.mw.process_response(req, res) is res

        req2 = Request("http://scrapytest.org/", meta=res.meta)
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(
            req2.headers.get("Cookie"), b"C1=value1; galleta=salada"
        )

        req3 = Request(
            "http://scrapytest.org/",
            cookies={"galleta": "dulce"},
            meta={"cookiejar": "store2"},
        )
        assert self.mw.process_request(req3) is None
        assert req3.headers.get("Cookie") == b"galleta=dulce"

        headers = {"Set-Cookie": "C2=value2; path=/"}
        res2 = Response("http://scrapytest.org/", headers=headers, request=req3)
        assert self.mw.process_response(req3, res2) is res2

        req4 = Request("http://scrapytest.org/", meta=res2.meta)
        assert self.mw.process_request(req4) is None
        self.assertCookieValEqual(
            req4.headers.get("Cookie"), b"C2=value2; galleta=dulce"
        )

        # cookies from hosts with port
        req5_1 = Request("http://scrapytest.org:1104/")
        assert self.mw.process_request(req5_1) is None

        headers = {"Set-Cookie": "C1=value1; path=/"}
        res5_1 = Response(
            "http://scrapytest.org:1104/", headers=headers, request=req5_1
        )
        assert self.mw.process_response(req5_1, res5_1) is res5_1

        req5_2 = Request("http://scrapytest.org:1104/some-redirected-path")
        assert self.mw.process_request(req5_2) is None
        assert req5_2.headers.get("Cookie") == b"C1=value1"

        req5_3 = Request("http://scrapytest.org/some-redirected-path")
        assert self.mw.process_request(req5_3) is None
        assert req5_3.headers.get("Cookie") == b"C1=value1"

        # skip cookie retrieval for non-HTTP requests
        req6 = Request("file:///scrapy/sometempfile")
        assert self.mw.process_request(req6) is None
        assert req6.headers.get("Cookie") is None

    def test_local_domain(self):
        request = Request("http://example-host/", cookies={"currencyCookie": "USD"})
        assert self.mw.process_request(request) is None
        assert "Cookie" in request.headers
        assert request.headers["Cookie"] == b"currencyCookie=USD"

    @pytest.mark.xfail(reason="Cookie header is not currently being processed")
    def test_keep_cookie_from_default_request_headers_middleware(self):
        DEFAULT_REQUEST_HEADERS = {"Cookie": "default=value; asdf=qwerty"}
        mw_default_headers = DefaultHeadersMiddleware(DEFAULT_REQUEST_HEADERS.items())
        # overwrite with values from 'cookies' request argument
        req1 = Request("http://example.org", cookies={"default": "something"})
        assert mw_default_headers.process_request(req1) is None
        assert self.mw.process_request(req1) is None
        self.assertCookieValEqual(
            req1.headers["Cookie"], b"default=something; asdf=qwerty"
        )
        # keep both
        req2 = Request("http://example.com", cookies={"a": "b"})
        assert mw_default_headers.process_request(req2) is None
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(
            req2.headers["Cookie"], b"default=value; a=b; asdf=qwerty"
        )

    @pytest.mark.xfail(reason="Cookie header is not currently being processed")
    def test_keep_cookie_header(self):
        # keep only cookies from 'Cookie' request header
        req1 = Request("http://scrapytest.org", headers={"Cookie": "a=b; c=d"})
        assert self.mw.process_request(req1) is None
        self.assertCookieValEqual(req1.headers["Cookie"], "a=b; c=d")
        # keep cookies from both 'Cookie' request header and 'cookies' keyword
        req2 = Request(
            "http://scrapytest.org", headers={"Cookie": "a=b; c=d"}, cookies={"e": "f"}
        )
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(req2.headers["Cookie"], "a=b; c=d; e=f")
        # overwrite values from 'Cookie' request header with 'cookies' keyword
        req3 = Request(
            "http://scrapytest.org",
            headers={"Cookie": "a=b; c=d"},
            cookies={"a": "new", "e": "f"},
        )
        assert self.mw.process_request(req3) is None
        self.assertCookieValEqual(req3.headers["Cookie"], "a=new; c=d; e=f")

    def test_request_cookies_encoding(self):
        # 1) UTF8-encoded bytes
        req1 = Request("http://example.org", cookies={"a": "á".encode()})
        assert self.mw.process_request(req1) is None
        self.assertCookieValEqual(req1.headers["Cookie"], b"a=\xc3\xa1")

        # 2) Non UTF8-encoded bytes
        req2 = Request("http://example.org", cookies={"a": "á".encode("latin1")})
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(req2.headers["Cookie"], b"a=\xc3\xa1")

        # 3) String
        req3 = Request("http://example.org", cookies={"a": "á"})
        assert self.mw.process_request(req3) is None
        self.assertCookieValEqual(req3.headers["Cookie"], b"a=\xc3\xa1")

    @pytest.mark.xfail(reason="Cookie header is not currently being processed")
    def test_request_headers_cookie_encoding(self):
        # 1) UTF8-encoded bytes
        req1 = Request("http://example.org", headers={"Cookie": "a=á".encode()})
        assert self.mw.process_request(req1) is None
        self.assertCookieValEqual(req1.headers["Cookie"], b"a=\xc3\xa1")

        # 2) Non UTF8-encoded bytes
        req2 = Request("http://example.org", headers={"Cookie": "a=á".encode("latin1")})
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(req2.headers["Cookie"], b"a=\xc3\xa1")

        # 3) String
        req3 = Request("http://example.org", headers={"Cookie": "a=á"})
        assert self.mw.process_request(req3) is None
        self.assertCookieValEqual(req3.headers["Cookie"], b"a=\xc3\xa1")

    def test_invalid_cookies(self):
        """
        Invalid cookies are logged as warnings and discarded
        """
        with LogCapture(
            "scrapy.downloadermiddlewares.cookies",
            propagate=False,
            level=logging.INFO,
        ) as lc:
            cookies1 = [{"value": "bar"}, {"name": "key", "value": "value1"}]
            req1 = Request("http://example.org/1", cookies=cookies1)
            assert self.mw.process_request(req1) is None
            cookies2 = [{"name": "foo"}, {"name": "key", "value": "value2"}]
            req2 = Request("http://example.org/2", cookies=cookies2)
            assert self.mw.process_request(req2) is None
            cookies3 = [{"name": "foo", "value": None}, {"name": "key", "value": ""}]
            req3 = Request("http://example.org/3", cookies=cookies3)
            assert self.mw.process_request(req3) is None
        lc.check(
            (
                "scrapy.downloadermiddlewares.cookies",
                "WARNING",
                "Invalid cookie found in request <GET http://example.org/1>:"
                " {'value': 'bar', 'secure': False} ('name' is missing)",
            ),
            (
                "scrapy.downloadermiddlewares.cookies",
                "WARNING",
                "Invalid cookie found in request <GET http://example.org/2>:"
                " {'name': 'foo', 'secure': False} ('value' is missing)",
            ),
            (
                "scrapy.downloadermiddlewares.cookies",
                "WARNING",
                "Invalid cookie found in request <GET http://example.org/3>:"
                " {'name': 'foo', 'value': None, 'secure': False} ('value' is missing)",
            ),
        )
        self.assertCookieValEqual(req1.headers["Cookie"], "key=value1")
        self.assertCookieValEqual(req2.headers["Cookie"], "key=value2")
        self.assertCookieValEqual(req3.headers["Cookie"], "key=")

    def test_primitive_type_cookies(self):
        # Boolean
        req1 = Request("http://example.org", cookies={"a": True})
        assert self.mw.process_request(req1) is None
        self.assertCookieValEqual(req1.headers["Cookie"], b"a=True")

        # Float
        req2 = Request("http://example.org", cookies={"a": 9.5})
        assert self.mw.process_request(req2) is None
        self.assertCookieValEqual(req2.headers["Cookie"], b"a=9.5")

        # Integer
        req3 = Request("http://example.org", cookies={"a": 10})
        assert self.mw.process_request(req3) is None
        self.assertCookieValEqual(req3.headers["Cookie"], b"a=10")

        # String
        req4 = Request("http://example.org", cookies={"a": "b"})
        assert self.mw.process_request(req4) is None
        self.assertCookieValEqual(req4.headers["Cookie"], b"a=b")

    def _test_cookie_redirect(
        self,
        source,
        target,
        *,
        cookies1,
        cookies2,
    ):
        input_cookies = {"a": "b"}

        if not isinstance(source, dict):
            source = {"url": source}
        if not isinstance(target, dict):
            target = {"url": target}
        target.setdefault("status", 301)

        request1 = Request(cookies=input_cookies, **source)
        self.mw.process_request(request1)
        cookies = request1.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies1 else None)

        response = Response(
            headers={
                "Location": target["url"],
            },
            **target,
        )
        assert self.mw.process_response(request1, response) == response

        request2 = self.redirect_middleware.process_response(request1, response)
        assert isinstance(request2, Request)

        self.mw.process_request(request2)
        cookies = request2.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies2 else None)

    def test_cookie_redirect_same_domain(self):
        self._test_cookie_redirect(
            "https://toscrape.com",
            "https://toscrape.com",
            cookies1=True,
            cookies2=True,
        )

    def test_cookie_redirect_same_domain_forcing_get(self):
        self._test_cookie_redirect(
            "https://toscrape.com",
            {"url": "https://toscrape.com", "status": 302},
            cookies1=True,
            cookies2=True,
        )

    def test_cookie_redirect_different_domain(self):
        self._test_cookie_redirect(
            "https://toscrape.com",
            "https://example.com",
            cookies1=True,
            cookies2=False,
        )

    def test_cookie_redirect_different_domain_forcing_get(self):
        self._test_cookie_redirect(
            "https://toscrape.com",
            {"url": "https://example.com", "status": 302},
            cookies1=True,
            cookies2=False,
        )

    def _test_cookie_header_redirect(
        self,
        source,
        target,
        *,
        cookies2,
    ):
        """Test the handling of a user-defined Cookie header when building a
        redirect follow-up request.

        We follow RFC 6265 for cookie handling. The Cookie header can only
        contain a list of key-value pairs (i.e. no additional cookie
        parameters like Domain or Path). Because of that, we follow the same
        rules that we would follow for the handling of the Set-Cookie response
        header when the Domain is not set: the cookies must be limited to the
        target URL domain (not even subdomains can receive those cookies).

        .. note:: This method tests the scenario where the cookie middleware
                  is disabled. Because of known issue #1992, when the cookies
                  middleware is enabled we do not need to be concerned about
                  the Cookie header getting leaked to unintended domains,
                  because the middleware empties the header from every
                  request.
        """
        if not isinstance(source, dict):
            source = {"url": source}
        if not isinstance(target, dict):
            target = {"url": target}
        target.setdefault("status", 301)

        request1 = Request(headers={"Cookie": b"a=b"}, **source)
        response = Response(
            headers={
                "Location": target["url"],
            },
            **target,
        )

        request2 = self.redirect_middleware.process_response(request1, response)
        assert isinstance(request2, Request)

        cookies = request2.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies2 else None)

    def test_cookie_header_redirect_same_domain(self):
        self._test_cookie_header_redirect(
            "https://toscrape.com",
            "https://toscrape.com",
            cookies2=True,
        )

    def test_cookie_header_redirect_same_domain_forcing_get(self):
        self._test_cookie_header_redirect(
            "https://toscrape.com",
            {"url": "https://toscrape.com", "status": 302},
            cookies2=True,
        )

    def test_cookie_header_redirect_different_domain(self):
        self._test_cookie_header_redirect(
            "https://toscrape.com",
            "https://example.com",
            cookies2=False,
        )

    def test_cookie_header_redirect_different_domain_forcing_get(self):
        self._test_cookie_header_redirect(
            "https://toscrape.com",
            {"url": "https://example.com", "status": 302},
            cookies2=False,
        )

    def _test_user_set_cookie_domain_followup(
        self,
        url1,
        url2,
        domain,
        *,
        cookies1,
        cookies2,
    ):
        input_cookies = [
            {
                "name": "a",
                "value": "b",
                "domain": domain,
            }
        ]

        request1 = Request(url1, cookies=input_cookies)
        self.mw.process_request(request1)
        cookies = request1.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies1 else None)

        request2 = Request(url2)
        self.mw.process_request(request2)
        cookies = request2.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies2 else None)

    def test_user_set_cookie_domain_suffix_private(self):
        self._test_user_set_cookie_domain_followup(
            "https://books.toscrape.com",
            "https://quotes.toscrape.com",
            "toscrape.com",
            cookies1=True,
            cookies2=True,
        )

    def test_user_set_cookie_domain_suffix_public_period(self):
        self._test_user_set_cookie_domain_followup(
            "https://foo.co.uk",
            "https://bar.co.uk",
            "co.uk",
            cookies1=False,
            cookies2=False,
        )

    def test_user_set_cookie_domain_suffix_public_private(self):
        self._test_user_set_cookie_domain_followup(
            "https://foo.blogspot.com",
            "https://bar.blogspot.com",
            "blogspot.com",
            cookies1=False,
            cookies2=False,
        )

    def test_user_set_cookie_domain_public_period(self):
        self._test_user_set_cookie_domain_followup(
            "https://co.uk",
            "https://co.uk",
            "co.uk",
            cookies1=True,
            cookies2=True,
        )

    def _test_server_set_cookie_domain_followup(
        self,
        url1,
        url2,
        domain,
        *,
        cookies,
    ):
        request1 = Request(url1)
        self.mw.process_request(request1)

        input_cookies = [
            {
                "name": "a",
                "value": "b",
                "domain": domain,
            }
        ]
        headers = {
            "Set-Cookie": _cookies_to_set_cookie_list(input_cookies),
        }
        response = Response(url1, status=200, headers=headers)
        assert self.mw.process_response(request1, response) == response

        request2 = Request(url2)
        self.mw.process_request(request2)
        actual_cookies = request2.headers.get("Cookie")
        assert actual_cookies == (b"a=b" if cookies else None)

    def test_server_set_cookie_domain_suffix_private(self):
        self._test_server_set_cookie_domain_followup(
            "https://books.toscrape.com",
            "https://quotes.toscrape.com",
            "toscrape.com",
            cookies=True,
        )

    def test_server_set_cookie_domain_suffix_public_period(self):
        self._test_server_set_cookie_domain_followup(
            "https://foo.co.uk",
            "https://bar.co.uk",
            "co.uk",
            cookies=False,
        )

    def test_server_set_cookie_domain_suffix_public_private(self):
        self._test_server_set_cookie_domain_followup(
            "https://foo.blogspot.com",
            "https://bar.blogspot.com",
            "blogspot.com",
            cookies=False,
        )

    def test_server_set_cookie_domain_public_period(self):
        self._test_server_set_cookie_domain_followup(
            "https://co.uk",
            "https://co.uk",
            "co.uk",
            cookies=True,
        )

    def _test_cookie_redirect_scheme_change(
        self, secure, from_scheme, to_scheme, cookies1, cookies2, cookies3
    ):
        """When a redirect causes the URL scheme to change from *from_scheme*
        to *to_scheme*, while domain and port remain the same, and given a
        cookie on the initial request with its secure attribute set to
        *secure*, check if the cookie should be set on the Cookie header of
        the initial request (*cookies1*), if it should be kept by the redirect
        middleware (*cookies2*), and if it should be present on the Cookie
        header in the redirected request (*cookies3*)."""
        cookie_kwargs = {}
        if secure is not UNSET:
            cookie_kwargs["secure"] = secure
        input_cookies = [{"name": "a", "value": "b", **cookie_kwargs}]

        request1 = Request(f"{from_scheme}://a.example", cookies=input_cookies)
        self.mw.process_request(request1)
        cookies = request1.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies1 else None)

        response = Response(
            f"{from_scheme}://a.example",
            headers={"Location": f"{to_scheme}://a.example"},
            status=301,
        )
        assert self.mw.process_response(request1, response) == response

        request2 = self.redirect_middleware.process_response(request1, response)
        assert isinstance(request2, Request)
        cookies = request2.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies2 else None)

        self.mw.process_request(request2)
        cookies = request2.headers.get("Cookie")
        assert cookies == (b"a=b" if cookies3 else None)

    def test_cookie_redirect_secure_undefined_downgrade(self):
        self._test_cookie_redirect_scheme_change(
            secure=UNSET,
            from_scheme="https",
            to_scheme="http",
            cookies1=True,
            cookies2=False,
            cookies3=False,
        )

    def test_cookie_redirect_secure_undefined_upgrade(self):
        self._test_cookie_redirect_scheme_change(
            secure=UNSET,
            from_scheme="http",
            to_scheme="https",
            cookies1=True,
            cookies2=True,
            cookies3=True,
        )

    def test_cookie_redirect_secure_false_downgrade(self):
        self._test_cookie_redirect_scheme_change(
            secure=False,
            from_scheme="https",
            to_scheme="http",
            cookies1=True,
            cookies2=False,
            cookies3=True,
        )

    def test_cookie_redirect_secure_false_upgrade(self):
        self._test_cookie_redirect_scheme_change(
            secure=False,
            from_scheme="http",
            to_scheme="https",
            cookies1=True,
            cookies2=True,
            cookies3=True,
        )

    def test_cookie_redirect_secure_true_downgrade(self):
        self._test_cookie_redirect_scheme_change(
            secure=True,
            from_scheme="https",
            to_scheme="http",
            cookies1=True,
            cookies2=False,
            cookies3=False,
        )

    def test_cookie_redirect_secure_true_upgrade(self):
        self._test_cookie_redirect_scheme_change(
            secure=True,
            from_scheme="http",
            to_scheme="https",
            cookies1=False,
            cookies2=False,
            cookies3=True,
        )
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_genspider.py
tests/test_command_genspider.py
from __future__ import annotations

import re
from pathlib import Path

import pytest

from tests.test_commands import TestProjectBase
from tests.utils.cmdline import call, proc


def find_in_file(filename: Path, regex: str) -> re.Match | None:
    """Find the first occurrence of a pattern in a file."""
    pattern = re.compile(regex)
    with filename.open("r", encoding="utf-8") as f:
        for line in f:
            match = pattern.search(line)
            if match is not None:
                return match
    return None


class TestGenspiderCommand(TestProjectBase):
    def test_arguments(self, proj_path: Path) -> None:
        spider = proj_path / self.project_name / "spiders" / "test_name.py"
        # only pass one argument. spider script shouldn't be created
        assert call("genspider", "test_name", cwd=proj_path) == 2
        assert not spider.exists()
        # pass two arguments <name> <domain>. spider script should be created
        assert call("genspider", "test_name", "test.com", cwd=proj_path) == 0
        assert spider.exists()

    @pytest.mark.parametrize(
        "tplname",
        [
            "basic",
            "crawl",
            "xmlfeed",
            "csvfeed",
        ],
    )
    def test_template(self, tplname: str, proj_path: Path) -> None:
        args = [f"--template={tplname}"] if tplname else []
        spname = "test_spider"
        spmodule = f"{self.project_name}.spiders.{spname}"
        spfile = proj_path / self.project_name / "spiders" / f"{spname}.py"
        _, out, _ = proc("genspider", spname, "test.com", *args, cwd=proj_path)
        assert (
            f"Created spider {spname!r} using template {tplname!r} in module:\n {spmodule}"
            in out
        )
        assert spfile.exists()
        modify_time_before = spfile.stat().st_mtime
        _, out, _ = proc("genspider", spname, "test.com", *args, cwd=proj_path)
        assert f"Spider {spname!r} already exists in module" in out
        modify_time_after = spfile.stat().st_mtime
        assert modify_time_after == modify_time_before

    def test_list(self, proj_path: Path) -> None:
        assert call("genspider", "--list", cwd=proj_path) == 0

    def test_dump(self, proj_path: Path) -> None:
        assert call("genspider", "--dump=basic", cwd=proj_path) == 0
        assert call("genspider", "-d", "basic", cwd=proj_path) == 0

    def test_same_name_as_project(self, proj_path: Path) -> None:
        assert call("genspider", self.project_name, cwd=proj_path) == 2
        assert not (
            proj_path / self.project_name / "spiders" / f"{self.project_name}.py"
        ).exists()

    @pytest.mark.parametrize("force", [True, False])
    def test_same_filename_as_existing_spider(
        self, force: bool, proj_path: Path
    ) -> None:
        file_name = "example"
        file_path = proj_path / self.project_name / "spiders" / f"{file_name}.py"
        assert call("genspider", file_name, "example.com", cwd=proj_path) == 0
        assert file_path.exists()

        # change the name of the spider but not its file name
        with file_path.open("r+", encoding="utf-8") as spider_file:
            file_data = spider_file.read()
            file_data = file_data.replace('name = "example"', 'name = "renamed"')
            spider_file.seek(0)
            spider_file.write(file_data)
            spider_file.truncate()
        modify_time_before = file_path.stat().st_mtime
        file_contents_before = file_data

        if force:
            _, out, _ = proc(
                "genspider", "--force", file_name, "example.com", cwd=proj_path
            )
            assert (
                f"Created spider {file_name!r} using template 'basic' in module" in out
            )
            modify_time_after = file_path.stat().st_mtime
            assert modify_time_after != modify_time_before
            file_contents_after = file_path.read_text(encoding="utf-8")
            assert file_contents_after != file_contents_before
        else:
            _, out, _ = proc("genspider", file_name, "example.com", cwd=proj_path)
            assert f"{file_path.resolve()} already exists" in out
            modify_time_after = file_path.stat().st_mtime
            assert modify_time_after == modify_time_before
            file_contents_after = file_path.read_text(encoding="utf-8")
            assert file_contents_after == file_contents_before

    @pytest.mark.parametrize(
        ("url", "domain"),
        [
            ("test.com", "test.com"),
            ("https://test.com", "test.com"),
        ],
    )
    def test_url(self, url: str, domain: str, proj_path: Path) -> None:
        assert call("genspider", "--force", "test_name", url, cwd=proj_path) == 0
        spider = proj_path / self.project_name / "spiders" / "test_name.py"
        m = find_in_file(spider, r"allowed_domains\s*=\s*\[['\"](.+)['\"]\]")
        assert m is not None
        assert m.group(1) == domain
        m = find_in_file(spider, r"start_urls\s*=\s*\[['\"](.+)['\"]\]")
        assert m is not None
        assert m.group(1) == f"https://{domain}"

    @pytest.mark.parametrize(
        ("url", "expected", "template"),
        [
            # basic
            ("https://test.com", "https://test.com", "basic"),
            ("http://test.com", "http://test.com", "basic"),
            ("http://test.com/other/path", "http://test.com/other/path", "basic"),
            ("test.com/other/path", "https://test.com/other/path", "basic"),
            # crawl
            ("https://test.com", "https://test.com", "crawl"),
            ("http://test.com", "http://test.com", "crawl"),
            ("http://test.com/other/path", "http://test.com/other/path", "crawl"),
            ("test.com/other/path", "https://test.com/other/path", "crawl"),
            ("test.com", "https://test.com", "crawl"),
            # xmlfeed
            ("https://test.com/feed.xml", "https://test.com/feed.xml", "xmlfeed"),
            ("http://test.com/feed.xml", "http://test.com/feed.xml", "xmlfeed"),
            ("test.com/feed.xml", "https://test.com/feed.xml", "xmlfeed"),
            # csvfeed
            ("https://test.com/feed.csv", "https://test.com/feed.csv", "csvfeed"),
            ("http://test.com/feed.xml", "http://test.com/feed.xml", "csvfeed"),
            ("test.com/feed.csv", "https://test.com/feed.csv", "csvfeed"),
        ],
    )
    def test_template_start_urls(
        self, url: str, expected: str, template: str, proj_path: Path
    ) -> None:
        assert (
            call(
                "genspider", "-t", template, "--force", "test_name", url, cwd=proj_path
            )
            == 0
        )
        spider = proj_path / self.project_name / "spiders" / "test_name.py"
        m = find_in_file(spider, r"start_urls\s*=\s*\[['\"](.+)['\"]\]")
        assert m is not None
        assert m.group(1) == expected


class TestGenspiderStandaloneCommand:
    def test_generate_standalone_spider(self, tmp_path: Path) -> None:
        call("genspider", "example", "example.com", cwd=tmp_path)
        assert Path(tmp_path, "example.py").exists()

    @pytest.mark.parametrize("force", [True, False])
    def test_same_name_as_existing_file(self, force: bool, tmp_path: Path) -> None:
        file_name = "example"
        file_path = Path(tmp_path, file_name + ".py")
        _, out, _ = proc("genspider", file_name, "example.com", cwd=tmp_path)
        assert f"Created spider {file_name!r} using template 'basic' " in out
        assert file_path.exists()
        modify_time_before = file_path.stat().st_mtime
        file_contents_before = file_path.read_text(encoding="utf-8")

        if force:
            # use different template to ensure contents were changed
            _, out, _ = proc(
                "genspider",
                "--force",
                "-t",
                "crawl",
                file_name,
                "example.com",
                cwd=tmp_path,
            )
            assert f"Created spider {file_name!r} using template 'crawl' " in out
            modify_time_after = file_path.stat().st_mtime
            assert modify_time_after != modify_time_before
            file_contents_after = file_path.read_text(encoding="utf-8")
            assert file_contents_after != file_contents_before
        else:
            _, out, _ = proc("genspider", file_name, "example.com", cwd=tmp_path)
            assert (
                f"{Path(tmp_path, file_name + '.py').resolve()} already exists" in out
            )
            modify_time_after = file_path.stat().st_mtime
            assert modify_time_after == modify_time_before
            file_contents_after = file_path.read_text(encoding="utf-8")
            assert file_contents_after == file_contents_before
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_commands.py
tests/test_commands.py
from __future__ import annotations

import argparse
import json
from io import StringIO
from shutil import copytree
from typing import TYPE_CHECKING
from unittest import mock

import pytest

import scrapy
from scrapy.cmdline import _pop_command_name, _print_unknown_command_msg
from scrapy.commands import ScrapyCommand, ScrapyHelpFormatter, view
from scrapy.settings import Settings
from scrapy.utils.reactor import _asyncio_reactor_path
from tests.utils.cmdline import call, proc

if TYPE_CHECKING:
    from pathlib import Path


class EmptyCommand(ScrapyCommand):
    def short_desc(self) -> str:
        return ""

    def run(self, args: list[str], opts: argparse.Namespace) -> None:
        pass


class TestCommandSettings:
    def setup_method(self):
        self.command = EmptyCommand()
        self.command.settings = Settings()
        self.parser = argparse.ArgumentParser(
            formatter_class=ScrapyHelpFormatter, conflict_handler="resolve"
        )
        self.command.add_options(self.parser)

    def test_settings_json_string(self):
        feeds_json = '{"data.json": {"format": "json"}, "data.xml": {"format": "xml"}}'
        opts, args = self.parser.parse_known_args(
            args=["-s", f"FEEDS={feeds_json}", "spider.py"]
        )
        self.command.process_options(args, opts)
        assert isinstance(self.command.settings["FEEDS"], scrapy.settings.BaseSettings)
        assert dict(self.command.settings["FEEDS"]) == json.loads(feeds_json)

    def test_help_formatter(self):
        formatter = ScrapyHelpFormatter(prog="scrapy")
        part_strings = [
            "usage: scrapy genspider [options] <name> <domain>\n\n",
            "\n",
            "optional arguments:\n",
            "\n",
            "Global Options:\n",
        ]
        assert formatter._join_parts(part_strings) == (
            "Usage\n=====\n scrapy genspider [options] <name> <domain>\n\n\n"
            "Optional Arguments\n==================\n\n"
            "Global Options\n--------------\n"
        )


class TestProjectBase:
    """A base class for tests that may need a Scrapy project."""

    project_name = "testproject"

    @pytest.fixture(scope="session")
    def _proj_path_cached(self, tmp_path_factory: pytest.TempPathFactory) -> Path:
        """Create a Scrapy project in a temporary directory and return its path.

        Used as a cache for ``proj_path``.
        """
        tmp_path = tmp_path_factory.mktemp("proj")
        call("startproject", self.project_name, cwd=tmp_path)
        return tmp_path / self.project_name

    @pytest.fixture
    def proj_path(self, tmp_path: Path, _proj_path_cached: Path) -> Path:
        """Copy a pre-generated Scrapy project into a temporary directory and
        return its path."""
        proj_path = tmp_path / self.project_name
        copytree(_proj_path_cached, proj_path)
        return proj_path


class TestCommandCrawlerProcess(TestProjectBase):
    """Test that the command uses the expected kind of *CrawlerProcess
    and produces the expected errors when needed."""

    name = "crawl"

    NORMAL_MSG = "Using CrawlerProcess"
    ASYNC_MSG = "Using AsyncCrawlerProcess"

    @pytest.fixture(autouse=True)
    def create_files(self, proj_path: Path) -> None:
        proj_mod_path = proj_path / self.project_name
        (proj_mod_path / "spiders" / "sp.py").write_text("""
import scrapy

class MySpider(scrapy.Spider):
    name = 'sp'
    custom_settings = {}

    async def start(self):
        self.logger.debug('It works!')
        return
        yield
""")
        (proj_mod_path / "spiders" / "aiosp.py").write_text("""
import asyncio

import scrapy

class MySpider(scrapy.Spider):
    name = 'aiosp'
    custom_settings = {}

    async def start(self):
        await asyncio.sleep(0.01)
        self.logger.debug('It works!')
        return
        yield
""")
        self._append_settings(proj_mod_path, "LOG_LEVEL = 'DEBUG'\n")

    @staticmethod
    def _append_settings(proj_mod_path: Path, text: str) -> None:
        """Add text to the end of the project settings.py."""
        with (proj_mod_path / "settings.py").open("a", encoding="utf-8") as f:
            f.write(text)

    @staticmethod
    def _replace_custom_settings(
        proj_mod_path: Path, spider_name: str, text: str
    ) -> None:
        """Replace custom_settings in the given spider file with the given text."""
        spider_path = proj_mod_path / "spiders" / f"{spider_name}.py"
        with spider_path.open("r+", encoding="utf-8") as f:
            content = f.read()
            content = content.replace(
                "custom_settings = {}", f"custom_settings = {text}"
            )
            f.seek(0)
            f.write(content)
            f.truncate()

    def _assert_spider_works(self, msg: str, proj_path: Path, *args: str) -> None:
        """The command uses the expected *CrawlerProcess, the spider works."""
        _, _, err = proc(self.name, *args, cwd=proj_path)
        assert msg in err
        assert "It works!" in err
        assert "Spider closed (finished)" in err

    def _assert_spider_asyncio_fail(
        self, msg: str, proj_path: Path, *args: str
    ) -> None:
        """The command uses the expected *CrawlerProcess, the spider fails to
        use asyncio."""
        _, _, err = proc(self.name, *args, cwd=proj_path)
        assert msg in err
        assert "no running event loop" in err

    def test_project_settings(self, proj_path: Path) -> None:
        """The reactor is set via the project default settings (to the asyncio value).

        AsyncCrawlerProcess, the asyncio reactor, both spiders work."""
        for spider in ["sp", "aiosp"]:
            self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)

    def test_cmdline_asyncio(self, proj_path: Path) -> None:
        """The reactor is set via the command line to the asyncio value.

        AsyncCrawlerProcess, the asyncio reactor, both spiders work."""
        for spider in ["sp", "aiosp"]:
            self._assert_spider_works(
                self.ASYNC_MSG,
                proj_path,
                spider,
                "-s",
                f"TWISTED_REACTOR={_asyncio_reactor_path}",
            )

    def test_project_settings_explicit_asyncio(self, proj_path: Path) -> None:
        """The reactor is explicitly set via the project settings to the
        asyncio value.

        AsyncCrawlerProcess, the asyncio reactor, both spiders work."""
        self._append_settings(
            proj_path / self.project_name,
            f"TWISTED_REACTOR = '{_asyncio_reactor_path}'\n",
        )
        for spider in ["sp", "aiosp"]:
            self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)

    def test_cmdline_empty(self, proj_path: Path) -> None:
        """The reactor is set via the command line to the empty value.

        CrawlerProcess, the default reactor, only the normal spider works."""
        self._assert_spider_works(
            self.NORMAL_MSG, proj_path, "sp", "-s", "TWISTED_REACTOR="
        )
        self._assert_spider_asyncio_fail(
            self.NORMAL_MSG, proj_path, "aiosp", "-s", "TWISTED_REACTOR="
        )

    def test_project_settings_empty(self, proj_path: Path) -> None:
        """The reactor is set via the project settings to the empty value.

        CrawlerProcess, the default reactor, only the normal spider works."""
        self._append_settings(proj_path / self.project_name, "TWISTED_REACTOR = None\n")
        self._assert_spider_works(self.NORMAL_MSG, proj_path, "sp")
        self._assert_spider_asyncio_fail(
            self.NORMAL_MSG, proj_path, "aiosp", "-s", "TWISTED_REACTOR="
        )

    def test_spider_settings_asyncio(self, proj_path: Path) -> None:
        """The reactor is set via the spider settings to the asyncio value.

        AsyncCrawlerProcess, the asyncio reactor, both spiders work."""
        for spider in ["sp", "aiosp"]:
            self._replace_custom_settings(
                proj_path / self.project_name,
                spider,
                f"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}",
            )
            self._assert_spider_works(self.ASYNC_MSG, proj_path, spider)

    def test_spider_settings_asyncio_cmdline_empty(self, proj_path: Path) -> None:
        """The reactor is set via the spider settings to the asyncio value and
        via the command line to the empty value. The command line value takes
        precedence, so the spider settings don't matter.

        CrawlerProcess, the default reactor, only the normal spider works."""
        for spider in ["sp", "aiosp"]:
            self._replace_custom_settings(
                proj_path / self.project_name,
                spider,
                f"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}",
            )
        self._assert_spider_works(
            self.NORMAL_MSG, proj_path, "sp", "-s", "TWISTED_REACTOR="
        )
        self._assert_spider_asyncio_fail(
            self.NORMAL_MSG, proj_path, "aiosp", "-s", "TWISTED_REACTOR="
        )

    def test_project_empty_spider_settings_asyncio(self, proj_path: Path) -> None:
        """The reactor is set via the project settings to the empty value and
        via the spider settings to the asyncio value. CrawlerProcess is chosen
        based on the project settings, but the asyncio reactor is chosen based
        on the spider settings.

        CrawlerProcess, the asyncio reactor, both spiders work."""
        self._append_settings(proj_path / self.project_name, "TWISTED_REACTOR = None\n")
        for spider in ["sp", "aiosp"]:
            self._replace_custom_settings(
                proj_path / self.project_name,
                spider,
                f"{{'TWISTED_REACTOR': '{_asyncio_reactor_path}'}}",
            )
            self._assert_spider_works(self.NORMAL_MSG, proj_path, spider)

    def test_project_asyncio_spider_settings_select(self, proj_path: Path) -> None:
        """The reactor is set via the project settings to the asyncio value
        and via the spider settings to the select value. AsyncCrawlerProcess
        is chosen based on the project settings, and the conflicting reactor
        setting in the spider settings causes an exception.

        AsyncCrawlerProcess, the asyncio reactor, both spiders produce a
        mismatched reactor exception."""
        self._append_settings(
            proj_path / self.project_name,
            f"TWISTED_REACTOR = '{_asyncio_reactor_path}'\n",
        )
        for spider in ["sp", "aiosp"]:
            self._replace_custom_settings(
                proj_path / self.project_name,
                spider,
                "{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}",
            )
            _, _, err = proc(self.name, spider, cwd=proj_path)
            assert self.ASYNC_MSG in err
            assert (
                "The installed reactor (twisted.internet.asyncioreactor.AsyncioSelectorReactor)"
                " does not match the requested one"
                " (twisted.internet.selectreactor.SelectReactor)"
            ) in err

    def test_project_asyncio_spider_settings_select_forced(
        self, proj_path: Path
    ) -> None:
        """The reactor is set via the project settings to the asyncio value
        and via the spider settings to the select value, CrawlerProcess is
        forced via the project settings. The reactor is chosen based on the
        spider settings.

        CrawlerProcess, the select reactor, only the normal spider works."""
        self._append_settings(
            proj_path / self.project_name, "FORCE_CRAWLER_PROCESS = True\n"
        )
        for spider in ["sp", "aiosp"]:
            self._replace_custom_settings(
                proj_path / self.project_name,
                spider,
                "{'TWISTED_REACTOR': 'twisted.internet.selectreactor.SelectReactor'}",
            )
        self._assert_spider_works(self.NORMAL_MSG, proj_path, "sp")
        self._assert_spider_asyncio_fail(self.NORMAL_MSG, proj_path, "aiosp")


class TestMiscCommands(TestProjectBase):
    def test_list(self, proj_path: Path) -> None:
        assert call("list", cwd=proj_path) == 0

    def test_list_subdir(self, proj_path: Path) -> None:
        """Test that commands work in a subdirectory of the project."""
        subdir = proj_path / "subdir"
        subdir.mkdir(exist_ok=True)
        assert call("list", cwd=subdir) == 0

    def test_command_not_found(self) -> None:
        na_msg = """
The list command is not available from this location.
These commands are only available from within a project: check, crawl, edit, list, parse.
"""
        not_found_msg = """
Unknown command: abc
"""
        params = [
            ("list", False, na_msg),
            ("abc", False, not_found_msg),
            ("abc", True, not_found_msg),
        ]
        for cmdname, inproject, message in params:
            with mock.patch("sys.stdout", new=StringIO()) as out:
                _print_unknown_command_msg(Settings(), cmdname, inproject)
                assert out.getvalue().strip() == message.strip()


class TestBenchCommand:
    def test_run(self) -> None:
        _, _, err = proc(
            "bench",
            "-s",
            "LOGSTATS_INTERVAL=0.001",
            "-s",
            "CLOSESPIDER_TIMEOUT=0.01",
        )
        assert "INFO: Crawled" in err
        assert "Unhandled Error" not in err
        assert "log_count/ERROR" not in err


class TestViewCommand:
    def test_methods(self) -> None:
        command = view.Command()
        command.settings = Settings()
        parser = argparse.ArgumentParser(
            prog="scrapy",
            prefix_chars="-",
            formatter_class=ScrapyHelpFormatter,
            conflict_handler="resolve",
        )
        command.add_options(parser)
        assert command.short_desc() == "Open URL in browser, as seen by Scrapy"
        assert "URL using the Scrapy downloader and show its" in command.long_desc()


class TestHelpMessage(TestProjectBase):
    COMMANDS = [
        "parse",
        "startproject",
        "view",
        "crawl",
        "edit",
        "list",
        "fetch",
        "settings",
        "shell",
        "runspider",
        "version",
        "genspider",
        "check",
        "bench",
    ]

    def test_help_messages(self, proj_path: Path) -> None:
        for command in self.COMMANDS:
            _, out, _ = proc(command, "-h", cwd=proj_path)
            assert "Usage" in out


class TestPopCommandName:
    def test_valid_command(self) -> None:
        argv = ["scrapy", "crawl", "my_spider"]
        command = _pop_command_name(argv)
        assert command == "crawl"
        assert argv == ["scrapy", "my_spider"]

    def test_no_command(self) -> None:
        argv = ["scrapy"]
        command = _pop_command_name(argv)
        assert command is None
        assert argv == ["scrapy"]

    def test_option_before_command(self) -> None:
        argv = ["scrapy", "-h", "crawl"]
        command = _pop_command_name(argv)
        assert command == "crawl"
        assert argv == ["scrapy", "-h"]

    def test_option_after_command(self) -> None:
        argv = ["scrapy", "crawl", "-h"]
        command = _pop_command_name(argv)
        assert command == "crawl"
        assert argv == ["scrapy", "-h"]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
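The TestPopCommandName cases above pin down a small argv contract: the first non-option token after the program name is removed from argv and returned, while option tokens stay in place. A minimal sketch consistent with those assertions (not Scrapy's actual `_pop_command_name` implementation; the helper name here is illustrative):

from __future__ import annotations


def pop_command_name(argv: list[str]) -> str | None:
    # Skip argv[0] (the program name) and any "-"-prefixed option tokens;
    # remove and return the first plain token, or None if there is none.
    for i, token in enumerate(argv[1:], start=1):
        if not token.startswith("-"):
            del argv[i]
            return token
    return None


argv = ["scrapy", "-h", "crawl"]
assert pop_command_name(argv) == "crawl"
assert argv == ["scrapy", "-h"]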
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_conf.py
tests/test_utils_conf.py
import pytest

from scrapy.exceptions import UsageError
from scrapy.settings import BaseSettings, Settings
from scrapy.utils.conf import (
    arglist_to_dict,
    build_component_list,
    feed_complete_default_values_from_settings,
    feed_process_params_from_cli,
)


class TestBuildComponentList:
    def test_build_dict(self):
        d = {"one": 1, "two": None, "three": 8, "four": 4}
        assert build_component_list(d, convert=lambda x: x) == ["one", "four", "three"]

    def test_duplicate_components_in_basesettings(self):
        # Higher priority takes precedence
        duplicate_bs = BaseSettings({"one": 1, "two": 2}, priority=0)
        duplicate_bs.set("ONE", 4, priority=10)
        assert build_component_list(duplicate_bs, convert=lambda x: x.lower()) == [
            "two",
            "one",
        ]
        duplicate_bs.set("one", duplicate_bs["one"], priority=20)
        assert build_component_list(duplicate_bs, convert=lambda x: x.lower()) == [
            "one",
            "two",
        ]
        # Same priority raises ValueError
        duplicate_bs.set("ONE", duplicate_bs["ONE"], priority=20)
        with pytest.raises(
            ValueError, match=r"Some paths in .* convert to the same object"
        ):
            build_component_list(duplicate_bs, convert=lambda x: x.lower())

    def test_valid_numbers(self):
        # work well with None and numeric values
        d = {"a": 10, "b": None, "c": 15, "d": 5.0}
        assert build_component_list(d, convert=lambda x: x) == ["d", "a", "c"]
        d = {
            "a": 33333333333333333333,
            "b": 11111111111111111111,
            "c": 22222222222222222222,
        }
        assert build_component_list(d, convert=lambda x: x) == ["b", "c", "a"]


def test_arglist_to_dict():
    assert arglist_to_dict(["arg1=val1", "arg2=val2"]) == {
        "arg1": "val1",
        "arg2": "val2",
    }


class TestFeedExportConfig:
    def test_feed_export_config_invalid_format(self):
        settings = Settings()
        with pytest.raises(UsageError):
            feed_process_params_from_cli(settings, ["items.dat"])

    def test_feed_export_config_mismatch(self):
        settings = Settings()
        with pytest.raises(UsageError):
            feed_process_params_from_cli(settings, ["items1.dat", "items2.dat"])

    def test_feed_export_config_explicit_formats(self):
        settings = Settings()
        assert {
            "items_1.dat": {"format": "json"},
            "items_2.dat": {"format": "xml"},
            "items_3.dat": {"format": "csv"},
        } == feed_process_params_from_cli(
            settings, ["items_1.dat:json", "items_2.dat:xml", "items_3.dat:csv"]
        )

    def test_feed_export_config_implicit_formats(self):
        settings = Settings()
        assert {
            "items_1.json": {"format": "json"},
            "items_2.xml": {"format": "xml"},
            "items_3.csv": {"format": "csv"},
        } == feed_process_params_from_cli(
            settings, ["items_1.json", "items_2.xml", "items_3.csv"]
        )

    def test_feed_export_config_stdout(self):
        settings = Settings()
        assert {"stdout:": {"format": "pickle"}} == feed_process_params_from_cli(
            settings, ["-:pickle"]
        )

    def test_feed_export_config_overwrite(self):
        settings = Settings()
        assert {
            "output.json": {"format": "json", "overwrite": True}
        } == feed_process_params_from_cli(
            settings, [], overwrite_output=["output.json"]
        )

    def test_output_and_overwrite_output(self):
        with pytest.raises(UsageError):
            feed_process_params_from_cli(
                Settings(), ["output1.json"], overwrite_output=["output2.json"]
            )

    def test_feed_complete_default_values_from_settings_empty(self):
        feed = {}
        settings = Settings(
            {
                "FEED_EXPORT_ENCODING": "custom encoding",
                "FEED_EXPORT_FIELDS": ["f1", "f2", "f3"],
                "FEED_EXPORT_INDENT": 42,
                "FEED_STORE_EMPTY": True,
                "FEED_URI_PARAMS": (1, 2, 3, 4),
                "FEED_EXPORT_BATCH_ITEM_COUNT": 2,
            }
        )
        new_feed = feed_complete_default_values_from_settings(feed, settings)
        assert new_feed == {
            "encoding": "custom encoding",
            "fields": ["f1", "f2", "f3"],
            "indent": 42,
            "store_empty": True,
            "uri_params": (1, 2, 3, 4),
            "batch_item_count": 2,
            "item_export_kwargs": {},
        }

    def test_feed_complete_default_values_from_settings_non_empty(self):
        feed = {
            "encoding": "other encoding",
            "fields": None,
        }
        settings = Settings(
            {
                "FEED_EXPORT_ENCODING": "custom encoding",
                "FEED_EXPORT_FIELDS": ["f1", "f2", "f3"],
                "FEED_EXPORT_INDENT": 42,
                "FEED_STORE_EMPTY": True,
                "FEED_EXPORT_BATCH_ITEM_COUNT": 2,
            }
        )
        new_feed = feed_complete_default_values_from_settings(feed, settings)
        assert new_feed == {
            "encoding": "other encoding",
            "fields": None,
            "indent": 42,
            "store_empty": True,
            "uri_params": None,
            "batch_item_count": 2,
            "item_export_kwargs": {},
        }
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
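The TestBuildComponentList cases above assert an ordering contract: a value of None disables a component, and the remaining names come back sorted by ascending numeric priority. A minimal sketch of that contract (not Scrapy's implementation, and ignoring the duplicate-name handling the real function also does):

def order_components(compdict):
    # Drop disabled (None-valued) components, then sort by priority value.
    enabled = {name: prio for name, prio in compdict.items() if prio is not None}
    return sorted(enabled, key=enabled.get)


assert order_components({"one": 1, "two": None, "three": 8, "four": 4}) == [
    "one",
    "four",
    "three",
]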
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_scheduler.py
tests/test_scheduler.py
from __future__ import annotations

import shutil
import tempfile
import warnings
from abc import ABC, abstractmethod
from collections import deque
from typing import Any, NamedTuple

import pytest
from twisted.internet.defer import inlineCallbacks

from scrapy.core.downloader import Downloader
from scrapy.core.scheduler import BaseScheduler, Scheduler
from scrapy.crawler import Crawler
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.defer import _schedule_coro
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import load_object
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer


class MemoryScheduler(BaseScheduler):
    paused = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = deque(
            Request(value) if isinstance(value, str) else value
            for value in getattr(self, "queue", [])
        )

    def enqueue_request(self, request: Request) -> bool:
        self.queue.append(request)
        return True

    def has_pending_requests(self) -> bool:
        return self.paused or bool(self.queue)

    def next_request(self) -> Request | None:
        if self.paused:
            return None
        try:
            return self.queue.pop()
        except IndexError:
            return None

    def pause(self) -> None:
        self.paused = True

    def unpause(self) -> None:
        self.paused = False


class MockEngine(NamedTuple):
    downloader: MockDownloader


class MockSlot(NamedTuple):
    active: list[Any]


class MockDownloader:
    def __init__(self):
        self.slots = {}

    def get_slot_key(self, request):
        if Downloader.DOWNLOAD_SLOT in request.meta:
            return request.meta[Downloader.DOWNLOAD_SLOT]
        return urlparse_cached(request).hostname or ""

    def increment(self, slot_key):
        slot = self.slots.setdefault(slot_key, MockSlot(active=[]))
        slot.active.append(1)

    def decrement(self, slot_key):
        slot = self.slots.get(slot_key)
        slot.active.pop()

    def close(self):
        pass


class MockCrawler(Crawler):
    def __init__(self, priority_queue_cls, jobdir):
        settings = {
            "SCHEDULER_DEBUG": False,
            "SCHEDULER_DISK_QUEUE": "scrapy.squeues.PickleLifoDiskQueue",
            "SCHEDULER_MEMORY_QUEUE": "scrapy.squeues.LifoMemoryQueue",
            "SCHEDULER_PRIORITY_QUEUE": priority_queue_cls,
            "JOBDIR": jobdir,
            "DUPEFILTER_CLASS": "scrapy.dupefilters.BaseDupeFilter",
        }
        super().__init__(Spider, settings)
        self.engine = MockEngine(downloader=MockDownloader())
        self.stats = load_object(self.settings["STATS_CLASS"])(self)


class SchedulerHandler(ABC):
    jobdir = None

    @property
    @abstractmethod
    def priority_queue_cls(self) -> str:
        raise NotImplementedError

    def create_scheduler(self):
        self.mock_crawler = MockCrawler(self.priority_queue_cls, self.jobdir)
        self.scheduler = Scheduler.from_crawler(self.mock_crawler)
        self.spider = Spider(name="spider")
        self.scheduler.open(self.spider)

    def close_scheduler(self):
        self.scheduler.close("finished")
        _schedule_coro(self.mock_crawler.stop_async())
        self.mock_crawler.engine.downloader.close()

    def setup_method(self):
        self.create_scheduler()

    def teardown_method(self):
        self.close_scheduler()


_PRIORITIES = [
    ("http://foo.com/a", -2),
    ("http://foo.com/d", 1),
    ("http://foo.com/b", -1),
    ("http://foo.com/c", 0),
    ("http://foo.com/e", 2),
]


_URLS = {"http://foo.com/a", "http://foo.com/b", "http://foo.com/c"}


class TestSchedulerInMemoryBase(SchedulerHandler):
    def test_length(self):
        assert not self.scheduler.has_pending_requests()
        assert len(self.scheduler) == 0

        for url in _URLS:
            self.scheduler.enqueue_request(Request(url))

        assert self.scheduler.has_pending_requests()
        assert len(self.scheduler) == len(_URLS)

    def test_dequeue(self):
        for url in _URLS:
            self.scheduler.enqueue_request(Request(url))

        urls = set()
        while self.scheduler.has_pending_requests():
            urls.add(self.scheduler.next_request().url)

        assert urls == _URLS

    def test_dequeue_priorities(self):
        for url, priority in _PRIORITIES:
            self.scheduler.enqueue_request(Request(url, priority=priority))

        priorities = []
        while self.scheduler.has_pending_requests():
            priorities.append(self.scheduler.next_request().priority)

        assert priorities == sorted([x[1] for x in _PRIORITIES], key=lambda x: -x)


class TestSchedulerOnDiskBase(SchedulerHandler):
    def setup_method(self):
        self.jobdir = tempfile.mkdtemp()
        self.create_scheduler()

    def teardown_method(self):
        self.close_scheduler()
        shutil.rmtree(self.jobdir)
        self.jobdir = None

    def test_length(self):
        assert not self.scheduler.has_pending_requests()
        assert len(self.scheduler) == 0

        for url in _URLS:
            self.scheduler.enqueue_request(Request(url))

        self.close_scheduler()
        self.create_scheduler()

        assert self.scheduler.has_pending_requests()
        assert len(self.scheduler) == len(_URLS)

    def test_dequeue(self):
        for url in _URLS:
            self.scheduler.enqueue_request(Request(url))

        self.close_scheduler()
        self.create_scheduler()

        urls = set()
        while self.scheduler.has_pending_requests():
            urls.add(self.scheduler.next_request().url)

        assert urls == _URLS

    def test_dequeue_priorities(self):
        for url, priority in _PRIORITIES:
            self.scheduler.enqueue_request(Request(url, priority=priority))

        self.close_scheduler()
        self.create_scheduler()

        priorities = []
        while self.scheduler.has_pending_requests():
            priorities.append(self.scheduler.next_request().priority)

        assert priorities == sorted([x[1] for x in _PRIORITIES], key=lambda x: -x)


class TestSchedulerInMemory(TestSchedulerInMemoryBase):
    @property
    def priority_queue_cls(self) -> str:
        return "scrapy.pqueues.ScrapyPriorityQueue"


class TestSchedulerOnDisk(TestSchedulerOnDiskBase):
    @property
    def priority_queue_cls(self) -> str:
        return "scrapy.pqueues.ScrapyPriorityQueue"


_URLS_WITH_SLOTS = [
    ("http://foo.com/a", "a"),
    ("http://foo.com/b", "a"),
    ("http://foo.com/c", "b"),
    ("http://foo.com/d", "b"),
    ("http://foo.com/e", "c"),
    ("http://foo.com/f", "c"),
]


class TestMigration:
    def test_migration(self, tmpdir):
        class PrevSchedulerHandler(SchedulerHandler):
            jobdir = tmpdir

            @property
            def priority_queue_cls(self) -> str:
                return "scrapy.pqueues.ScrapyPriorityQueue"

        class NextSchedulerHandler(SchedulerHandler):
            jobdir = tmpdir

            @property
            def priority_queue_cls(self) -> str:
                return "scrapy.pqueues.DownloaderAwarePriorityQueue"

        prev_scheduler_handler = PrevSchedulerHandler()
        prev_scheduler_handler.create_scheduler()
        for url in _URLS:
            prev_scheduler_handler.scheduler.enqueue_request(Request(url))
        prev_scheduler_handler.close_scheduler()

        next_scheduler_handler = NextSchedulerHandler()
        with pytest.raises(
            ValueError,
            match="DownloaderAwarePriorityQueue accepts ``slot_startprios`` as a dict",
        ):
            next_scheduler_handler.create_scheduler()


def _is_scheduling_fair(enqueued_slots, dequeued_slots):
    """
    We enqueued the same number of requests for every slot.
    Assert correct order, e.g.

    >>> enqueued = ['a', 'b', 'c'] * 2
    >>> correct = ['a', 'c', 'b', 'b', 'a', 'c']
    >>> incorrect = ['a', 'a', 'b', 'c', 'c', 'b']
    >>> _is_scheduling_fair(enqueued, correct)
    True
    >>> _is_scheduling_fair(enqueued, incorrect)
    False
    """
    if len(dequeued_slots) != len(enqueued_slots):
        return False

    slots_number = len(set(enqueued_slots))
    for i in range(0, len(dequeued_slots), slots_number):
        part = dequeued_slots[i : i + slots_number]
        if len(part) != len(set(part)):
            return False

    return True


class DownloaderAwareSchedulerTestMixin:
    reopen = False

    @property
    def priority_queue_cls(self) -> str:
        return "scrapy.pqueues.DownloaderAwarePriorityQueue"

    def test_logic(self):
        for url, slot in _URLS_WITH_SLOTS:
            request = Request(url)
            request.meta[Downloader.DOWNLOAD_SLOT] = slot
            self.scheduler.enqueue_request(request)

        if self.reopen:
            self.close_scheduler()
            self.create_scheduler()

        dequeued_slots = []
        requests = []
        downloader = self.mock_crawler.engine.downloader
        while self.scheduler.has_pending_requests():
            request = self.scheduler.next_request()
            slot = downloader.get_slot_key(request)
            dequeued_slots.append(slot)
            downloader.increment(slot)
            requests.append(request)

        for request in requests:
            slot = downloader.get_slot_key(request)
            downloader.decrement(slot)

        assert _is_scheduling_fair([s for u, s in _URLS_WITH_SLOTS], dequeued_slots)
        assert sum(len(s.active) for s in downloader.slots.values()) == 0


class TestSchedulerWithDownloaderAwareInMemory(
    DownloaderAwareSchedulerTestMixin, TestSchedulerInMemoryBase
):
    pass


class TestSchedulerWithDownloaderAwareOnDisk(
    DownloaderAwareSchedulerTestMixin, TestSchedulerOnDiskBase
):
    reopen = True


class StartUrlsSpider(Spider):
    def __init__(self, start_urls):
        self.start_urls = start_urls
        super().__init__(name="StartUrlsSpider")

    def parse(self, response):
        pass


class TestIntegrationWithDownloaderAwareInMemory:
    def setup_method(self):
        self.crawler = get_crawler(
            spidercls=StartUrlsSpider,
            settings_dict={
                "SCHEDULER_PRIORITY_QUEUE": "scrapy.pqueues.DownloaderAwarePriorityQueue",
                "DUPEFILTER_CLASS": "scrapy.dupefilters.BaseDupeFilter",
            },
        )

    @inlineCallbacks
    def test_integration_downloader_aware_priority_queue(self):
        with MockServer() as mockserver:
            url = mockserver.url("/status?n=200", is_secure=False)
            start_urls = [url] * 6
            yield self.crawler.crawl(start_urls)
            assert self.crawler.stats.get_value("downloader/response_count") == len(
                start_urls
            )


class TestIncompatibility:
    def _incompatible(self):
        settings = {
            "SCHEDULER_PRIORITY_QUEUE": "scrapy.pqueues.DownloaderAwarePriorityQueue",
            "CONCURRENT_REQUESTS_PER_IP": 1,
        }
        crawler = get_crawler(Spider, settings)
        scheduler = Scheduler.from_crawler(crawler)
        spider = Spider(name="spider")
        scheduler.open(spider)

    def test_incompatibility(self):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            with pytest.raises(
                ValueError, match="does not support CONCURRENT_REQUESTS_PER_IP"
            ):
                self._incompatible()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
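The fairness property that _is_scheduling_fair checks above has a simple constructive counterpart: with an equal number of requests per slot, plain round-robin interleaving always satisfies it. A small illustrative sketch (the helper name is hypothetical, not part of the test file):

from itertools import chain


def round_robin(per_slot):
    # Interleave the per-slot queues so that every window of len(per_slot)
    # dequeued entries contains each slot exactly once.
    return list(chain.from_iterable(zip(*per_slot.values())))


order = round_robin({"a": ["a", "a"], "b": ["b", "b"], "c": ["c", "c"]})
assert order == ["a", "b", "c", "a", "b", "c"]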
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_crawler.py
tests/test_crawler.py
import asyncio
import logging
import platform
import re
import signal
import subprocess
import sys
import warnings
from abc import ABC, abstractmethod
from collections.abc import Generator
from pathlib import Path
from typing import Any

import pytest
from packaging.version import parse as parse_version
from pexpect.popen_spawn import PopenSpawn
from twisted.internet.defer import Deferred, inlineCallbacks
from w3lib import __version__ as w3lib_version
from zope.interface.exceptions import MultipleInvalid

import scrapy
from scrapy import Spider
from scrapy.crawler import (
    AsyncCrawlerProcess,
    AsyncCrawlerRunner,
    Crawler,
    CrawlerProcess,
    CrawlerRunner,
)
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.extensions.throttle import AutoThrottle
from scrapy.settings import Settings, default_settings
from scrapy.utils.asyncio import call_later
from scrapy.utils.defer import (
    deferred_f_from_coro_f,
    deferred_from_coro,
    maybe_deferred_to_future,
)
from scrapy.utils.log import (
    _uninstall_scrapy_root_handler,
    configure_logging,
    get_scrapy_root_handler,
)
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler, get_reactor_settings
from tests.mockserver.http import MockServer
from tests.utils import get_script_run_env

BASE_SETTINGS: dict[str, Any] = {}


def get_raw_crawler(spidercls=None, settings_dict=None):
    """get_crawler alternative that only calls the __init__ method of the crawler."""
    settings = Settings()
    settings.setdict(get_reactor_settings())
    settings.setdict(settings_dict or {})
    return Crawler(spidercls or DefaultSpider, settings)


class TestBaseCrawler:
    def assertOptionIsDefault(self, settings, key):
        assert isinstance(settings, Settings)
        assert settings[key] == getattr(default_settings, key)


class TestCrawler(TestBaseCrawler):
    def test_populate_spidercls_settings(self):
        spider_settings = {"TEST1": "spider", "TEST2": "spider"}
        project_settings = {
            **BASE_SETTINGS,
            "TEST1": "project",
            "TEST3": "project",
            **get_reactor_settings(),
        }

        class CustomSettingsSpider(DefaultSpider):
            custom_settings = spider_settings

        settings = Settings()
        settings.setdict(project_settings, priority="project")
        crawler = Crawler(CustomSettingsSpider, settings)
        crawler._apply_settings()
        assert crawler.settings.get("TEST1") == "spider"
        assert crawler.settings.get("TEST2") == "spider"
        assert crawler.settings.get("TEST3") == "project"
        assert not settings.frozen
        assert crawler.settings.frozen

    def test_crawler_accepts_dict(self):
        crawler = get_crawler(DefaultSpider, {"foo": "bar"})
        assert crawler.settings["foo"] == "bar"
        self.assertOptionIsDefault(crawler.settings, "RETRY_ENABLED")

    def test_crawler_accepts_None(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ScrapyDeprecationWarning)
            crawler = Crawler(DefaultSpider)
        self.assertOptionIsDefault(crawler.settings, "RETRY_ENABLED")

    def test_crawler_rejects_spider_objects(self):
        with pytest.raises(ValueError, match="spidercls argument must be a class"):
            Crawler(DefaultSpider())

    @inlineCallbacks
    def test_crawler_crawl_twice_seq_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        yield crawler.crawl()
        with pytest.raises(RuntimeError, match="more than once on the same instance"):
            yield crawler.crawl()

    @deferred_f_from_coro_f
    async def test_crawler_crawl_async_twice_seq_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        await crawler.crawl_async()
        with pytest.raises(RuntimeError, match="more than once on the same instance"):
            await crawler.crawl_async()

    @inlineCallbacks
    def test_crawler_crawl_twice_parallel_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        d1 = crawler.crawl()
        d2 = crawler.crawl()
        yield d1
        with pytest.raises(RuntimeError, match="Crawling already taking place"):
            yield d2

    @pytest.mark.only_asyncio
    @deferred_f_from_coro_f
    async def test_crawler_crawl_async_twice_parallel_unsupported(self):
        crawler = get_raw_crawler(NoRequestsSpider, BASE_SETTINGS)
        t1 = asyncio.create_task(crawler.crawl_async())
        t2 = asyncio.create_task(crawler.crawl_async())
        await t1
        with pytest.raises(RuntimeError, match="Crawling already taking place"):
            await t2

    def test_get_addon(self):
        class ParentAddon:
            pass

        class TrackingAddon(ParentAddon):
            instances = []

            def __init__(self):
                TrackingAddon.instances.append(self)

            def update_settings(self, settings):
                pass

        settings = {
            **BASE_SETTINGS,
            "ADDONS": {
                TrackingAddon: 0,
            },
        }
        crawler = get_crawler(settings_dict=settings)
        assert len(TrackingAddon.instances) == 1
        expected = TrackingAddon.instances[-1]

        addon = crawler.get_addon(TrackingAddon)
        assert addon == expected

        addon = crawler.get_addon(DefaultSpider)
        assert addon is None

        addon = crawler.get_addon(ParentAddon)
        assert addon == expected

        class ChildAddon(TrackingAddon):
            pass

        addon = crawler.get_addon(ChildAddon)
        assert addon is None

    @inlineCallbacks
    def test_get_downloader_middleware(self):
        class ParentDownloaderMiddleware:
            pass

        class TrackingDownloaderMiddleware(ParentDownloaderMiddleware):
            instances = []

            def __init__(self):
                TrackingDownloaderMiddleware.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_downloader_middleware(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "DOWNLOADER_MIDDLEWARES": {
                TrackingDownloaderMiddleware: 0,
            },
        }

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingDownloaderMiddleware
        yield crawler.crawl()
        assert len(TrackingDownloaderMiddleware.instances) == 1
        assert MySpider.result == TrackingDownloaderMiddleware.instances[-1]

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentDownloaderMiddleware
        yield crawler.crawl()
        assert MySpider.result == TrackingDownloaderMiddleware.instances[-1]

        class ChildDownloaderMiddleware(TrackingDownloaderMiddleware):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildDownloaderMiddleware
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_downloader_middleware_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_downloader_middleware(DefaultSpider)

    @inlineCallbacks
    def test_get_downloader_middleware_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_downloader_middleware(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()

    @inlineCallbacks
    def test_get_extension(self):
        class ParentExtension:
            pass

        class TrackingExtension(ParentExtension):
            instances = []

            def __init__(self):
                TrackingExtension.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_extension(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "EXTENSIONS": {
                TrackingExtension: 0,
            },
        }

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingExtension
        yield crawler.crawl()
        assert len(TrackingExtension.instances) == 1
        assert MySpider.result == TrackingExtension.instances[-1]

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentExtension
        yield crawler.crawl()
        assert MySpider.result == TrackingExtension.instances[-1]

        class ChildExtension(TrackingExtension):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildExtension
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_extension_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_extension(DefaultSpider)

    @inlineCallbacks
    def test_get_extension_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_extension(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()

    @inlineCallbacks
    def test_get_item_pipeline(self):
        class ParentItemPipeline:
            pass

        class TrackingItemPipeline(ParentItemPipeline):
            instances = []

            def __init__(self):
                TrackingItemPipeline.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_item_pipeline(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "ITEM_PIPELINES": {
                TrackingItemPipeline: 0,
            },
        }

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingItemPipeline
        yield crawler.crawl()
        assert len(TrackingItemPipeline.instances) == 1
        assert MySpider.result == TrackingItemPipeline.instances[-1]

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentItemPipeline
        yield crawler.crawl()
        assert MySpider.result == TrackingItemPipeline.instances[-1]

        class ChildItemPipeline(TrackingItemPipeline):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildItemPipeline
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_item_pipeline_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_item_pipeline(DefaultSpider)

    @inlineCallbacks
    def test_get_item_pipeline_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_item_pipeline(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()

    @inlineCallbacks
    def test_get_spider_middleware(self):
        class ParentSpiderMiddleware:
            pass

        class TrackingSpiderMiddleware(ParentSpiderMiddleware):
            instances = []

            def __init__(self):
                TrackingSpiderMiddleware.instances.append(self)

        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                return cls(crawler=crawler)

            def __init__(self, crawler, **kwargs: Any):
                super().__init__(**kwargs)
                self.crawler = crawler

            async def start(self):
                MySpider.result = crawler.get_spider_middleware(MySpider.cls)
                return
                yield

        settings = {
            **BASE_SETTINGS,
            "SPIDER_MIDDLEWARES": {
                TrackingSpiderMiddleware: 0,
            },
        }

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = TrackingSpiderMiddleware
        yield crawler.crawl()
        assert len(TrackingSpiderMiddleware.instances) == 1
        assert MySpider.result == TrackingSpiderMiddleware.instances[-1]

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = DefaultSpider
        yield crawler.crawl()
        assert MySpider.result is None

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ParentSpiderMiddleware
        yield crawler.crawl()
        assert MySpider.result == TrackingSpiderMiddleware.instances[-1]

        class ChildSpiderMiddleware(TrackingSpiderMiddleware):
            pass

        crawler = get_raw_crawler(MySpider, settings)
        MySpider.cls = ChildSpiderMiddleware
        yield crawler.crawl()
        assert MySpider.result is None

    def test_get_spider_middleware_not_crawling(self):
        crawler = get_raw_crawler(settings_dict=BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            crawler.get_spider_middleware(DefaultSpider)

    @inlineCallbacks
    def test_get_spider_middleware_no_engine(self):
        class MySpider(Spider):
            name = "myspider"

            @classmethod
            def from_crawler(cls, crawler):
                try:
                    crawler.get_spider_middleware(DefaultSpider)
                except Exception as e:
                    MySpider.result = e
                    raise

        crawler = get_raw_crawler(MySpider, BASE_SETTINGS)
        with pytest.raises(RuntimeError):
            yield crawler.crawl()


class TestSpiderSettings:
    def test_spider_custom_settings(self):
        class MySpider(scrapy.Spider):
            name = "spider"
            custom_settings = {"AUTOTHROTTLE_ENABLED": True}

        crawler = get_crawler(MySpider)
        enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
        assert AutoThrottle in enabled_exts


class TestCrawlerLogging:
    def test_no_root_handler_installed(self):
        handler = get_scrapy_root_handler()
        if handler is not None:
            logging.root.removeHandler(handler)

        class MySpider(scrapy.Spider):
            name = "spider"

        get_crawler(MySpider)
        assert get_scrapy_root_handler() is None

    @deferred_f_from_coro_f
    async def test_spider_custom_settings_log_level(self, tmp_path):
        log_file = Path(tmp_path, "log.txt")
        log_file.write_text("previous message\n", encoding="utf-8")

        info_count = None

        class MySpider(scrapy.Spider):
            name = "spider"
            custom_settings = {
                "LOG_LEVEL": "INFO",
                "LOG_FILE": str(log_file),
            }

            async def start(self):
                info_count_start = crawler.stats.get_value("log_count/INFO")
                logging.debug("debug message")  # noqa: LOG015
                logging.info("info message")  # noqa: LOG015
                logging.warning("warning message")  # noqa: LOG015
                logging.error("error message")  # noqa: LOG015
                nonlocal info_count
                info_count = (
                    crawler.stats.get_value("log_count/INFO") - info_count_start
                )
                return
                yield

        try:
            configure_logging()
            assert get_scrapy_root_handler().level == logging.DEBUG
            crawler = get_crawler(MySpider)
            assert get_scrapy_root_handler().level == logging.INFO
            await crawler.crawl_async()
        finally:
            _uninstall_scrapy_root_handler()

        logged = log_file.read_text(encoding="utf-8")
        assert "previous message" in logged
        assert "debug message" not in logged
        assert "info message" in logged
        assert "warning message" in logged
        assert "error message" in logged
        assert crawler.stats.get_value("log_count/ERROR") == 1
        assert crawler.stats.get_value("log_count/WARNING") == 1
        assert info_count == 1
        assert crawler.stats.get_value("log_count/DEBUG", 0) == 0

    def test_spider_custom_settings_log_append(self, tmp_path):
        log_file = Path(tmp_path, "log.txt")
        log_file.write_text("previous message\n", encoding="utf-8")

        class MySpider(scrapy.Spider):
            name = "spider"
            custom_settings = {
                "LOG_FILE": str(log_file),
                "LOG_FILE_APPEND": False,
            }

        try:
            configure_logging()
            get_crawler(MySpider)
            logging.debug("debug message")  # noqa: LOG015
        finally:
            _uninstall_scrapy_root_handler()

        logged = log_file.read_text(encoding="utf-8")
        assert "previous message" not in logged
        assert "debug message" in logged


class SpiderLoaderWithWrongInterface:
    def unneeded_method(self):
        pass


class TestCrawlerRunner(TestBaseCrawler):
    def test_spider_manager_verify_interface(self):
        settings = Settings(
            {
                "SPIDER_LOADER_CLASS": SpiderLoaderWithWrongInterface,
            }
        )
        with pytest.raises(MultipleInvalid):
            CrawlerRunner(settings)

    def test_crawler_runner_accepts_dict(self):
        runner = CrawlerRunner({"foo": "bar"})
        assert runner.settings["foo"] == "bar"
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")

    def test_crawler_runner_accepts_None(self):
        runner = CrawlerRunner()
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")


class TestAsyncCrawlerRunner(TestBaseCrawler):
    def test_spider_manager_verify_interface(self):
        settings = Settings(
            {
                "SPIDER_LOADER_CLASS": SpiderLoaderWithWrongInterface,
            }
        )
        with pytest.raises(MultipleInvalid):
            AsyncCrawlerRunner(settings)

    def test_crawler_runner_accepts_dict(self):
        runner = AsyncCrawlerRunner({"foo": "bar"})
        assert runner.settings["foo"] == "bar"
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")

    def test_crawler_runner_accepts_None(self):
        runner = AsyncCrawlerRunner()
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")


class TestCrawlerProcess(TestBaseCrawler):
    def test_crawler_process_accepts_dict(self):
        runner = CrawlerProcess({"foo": "bar"}, install_root_handler=False)
        assert runner.settings["foo"] == "bar"
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")

    def test_crawler_process_accepts_None(self):
        runner = CrawlerProcess(install_root_handler=False)
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")


@pytest.mark.only_asyncio
class TestAsyncCrawlerProcess(TestBaseCrawler):
    def test_crawler_process_accepts_dict(self):
        runner = AsyncCrawlerProcess({"foo": "bar"}, install_root_handler=False)
        assert runner.settings["foo"] == "bar"
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")

    def test_crawler_process_accepts_None(self):
        runner = AsyncCrawlerProcess(install_root_handler=False)
        self.assertOptionIsDefault(runner.settings, "RETRY_ENABLED")


class ExceptionSpider(scrapy.Spider):
    name = "exception"

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        raise ValueError("Exception in from_crawler method")


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


class TestCrawlerRunnerHasSpider:
    @staticmethod
    def _runner():
        return CrawlerRunner(get_reactor_settings())

    @staticmethod
    def _crawl(runner, spider):
        return runner.crawl(spider)

    @inlineCallbacks
    def test_crawler_runner_bootstrap_successful(self):
        runner = self._runner()
        yield self._crawl(runner, NoRequestsSpider)
        assert not runner.bootstrap_failed

    @inlineCallbacks
    def test_crawler_runner_bootstrap_successful_for_several(self):
        runner = self._runner()
        yield self._crawl(runner, NoRequestsSpider)
        yield self._crawl(runner, NoRequestsSpider)
        assert not runner.bootstrap_failed

    @inlineCallbacks
    def test_crawler_runner_bootstrap_failed(self):
        runner = self._runner()
        try:
            yield self._crawl(runner, ExceptionSpider)
        except ValueError:
            pass
        else:
            pytest.fail("Exception should be raised from spider")

        assert runner.bootstrap_failed

    @inlineCallbacks
    def test_crawler_runner_bootstrap_failed_for_several(self):
        runner = self._runner()
        try:
            yield self._crawl(runner, ExceptionSpider)
        except ValueError:
            pass
        else:
            pytest.fail("Exception should be raised from spider")

        yield self._crawl(runner, NoRequestsSpider)

        assert runner.bootstrap_failed

    @inlineCallbacks
    def test_crawler_runner_asyncio_enabled_true(
        self, reactor_pytest: str
    ) -> Generator[Deferred[Any], Any, None]:
        if reactor_pytest != "asyncio":
            runner = CrawlerRunner(
                settings={
                    "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
                }
            )
            with pytest.raises(
                Exception,
                match=r"The installed reactor \(.*?\) does not match the requested one \(.*?\)",
            ):
                yield self._crawl(runner, NoRequestsSpider)
        else:
            CrawlerRunner(
                settings={
                    "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
                }
            )


@pytest.mark.only_asyncio
class TestAsyncCrawlerRunnerHasSpider(TestCrawlerRunnerHasSpider):
    @staticmethod
    def _runner():
        return AsyncCrawlerRunner(get_reactor_settings())

    @staticmethod
    def _crawl(runner, spider):
        return deferred_from_coro(runner.crawl(spider))

    def test_crawler_runner_asyncio_enabled_true(self):
        pytest.skip("This test is only for CrawlerRunner")


class ScriptRunnerMixin(ABC):
    @property
    @abstractmethod
    def script_dir(self) -> Path:
        raise NotImplementedError

    @staticmethod
    def get_script_dir(name: str) -> Path:
        return Path(__file__).parent.resolve() / name

    def get_script_args(self, script_name: str, *script_args: str) -> list[str]:
        script_path = self.script_dir / script_name
        return [sys.executable, str(script_path), *script_args]

    def run_script(self, script_name: str, *script_args: str) -> str:
        args = self.get_script_args(script_name, *script_args)
        p = subprocess.Popen(
            args,
            env=get_script_run_env(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        _, stderr = p.communicate()
        return stderr.decode("utf-8")


class TestCrawlerProcessSubprocessBase(ScriptRunnerMixin):
    """Common tests between CrawlerProcess and AsyncCrawlerProcess, with the
    same file names and expectations.
    """

    def test_simple(self):
        log = self.run_script("simple.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    def test_multi(self):
        log = self.run_script("multi.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "ReactorAlreadyInstalledError" not in log

    def test_reactor_default(self):
        log = self.run_script("reactor_default.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the requested one "
            "(twisted.internet.asyncioreactor.AsyncioSelectorReactor)"
        ) in log

    def test_asyncio_enabled_no_reactor(self):
        log = self.run_script("asyncio_enabled_no_reactor.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "RuntimeError" not in log

    def test_asyncio_enabled_reactor(self):
        log = self.run_script("asyncio_enabled_reactor.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "RuntimeError" not in log

    @pytest.mark.skipif(
        parse_version(w3lib_version) >= parse_version("2.0.0"),
        reason="w3lib 2.0.0 and later do not allow invalid domains.",
    )
    def test_ipv6_default_name_resolver(self):
        log = self.run_script("default_name_resolver.py")
        assert "Spider closed (finished)" in log
        assert (
            "'downloader/exception_type_count/twisted.internet.error.DNSLookupError': 1,"
            in log
        )
        assert (
            "twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: ::1."
            in log
        )

    def test_caching_hostname_resolver_ipv6(self):
        log = self.run_script("caching_hostname_resolver_ipv6.py")
        assert "Spider closed (finished)" in log
        assert "twisted.internet.error.DNSLookupError" not in log

    def test_caching_hostname_resolver_finite_execution(
        self, mockserver: MockServer
    ) -> None:
        log = self.run_script("caching_hostname_resolver.py", mockserver.url("/"))
        assert "Spider closed (finished)" in log
        assert "ERROR: Error downloading" not in log
        assert "TimeoutError" not in log
        assert "twisted.internet.error.DNSLookupError" not in log

    def test_twisted_reactor_asyncio(self):
        log = self.run_script("twisted_reactor_asyncio.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    def test_twisted_reactor_asyncio_custom_settings(self):
        log = self.run_script("twisted_reactor_custom_settings.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    def test_twisted_reactor_asyncio_custom_settings_same(self):
        log = self.run_script("twisted_reactor_custom_settings_same.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )

    @pytest.mark.requires_uvloop
    def test_custom_loop_asyncio(self):
        log = self.run_script("asyncio_custom_loop.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log

    @pytest.mark.requires_uvloop
    def test_custom_loop_asyncio_deferred_signal(self):
        log = self.run_script("asyncio_deferred_signal.py", "uvloop.Loop")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log
        assert "async pipeline opened!" in log

    @pytest.mark.requires_uvloop
    def test_asyncio_enabled_reactor_same_loop(self):
        log = self.run_script("asyncio_enabled_reactor_same_loop.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" in log

    @pytest.mark.requires_uvloop
    def test_asyncio_enabled_reactor_different_loop(self):
        log = self.run_script("asyncio_enabled_reactor_different_loop.py")
        assert "Spider closed (finished)" not in log
        assert (
            "does not match the one specified in the ASYNCIO_EVENT_LOOP "
            "setting (uvloop.Loop)"
        ) in log

    def test_default_loop_asyncio_deferred_signal(self):
        log = self.run_script("asyncio_deferred_signal.py")
        assert "Spider closed (finished)" in log
        assert (
            "Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
            in log
        )
        assert "Using asyncio event loop: uvloop.Loop" not in log
        assert "async pipeline opened!" in log

    def test_args_change_settings(self):
        log = self.run_script("args_settings.py")
        assert "Spider closed (finished)" in log
        assert "The value of FOO is 42" in log

    def test_shutdown_graceful(self):
        sig = signal.SIGINT if sys.platform != "win32" else signal.SIGBREAK
        args = self.get_script_args("sleeping.py", "3")
        p = PopenSpawn(args, timeout=5)
        p.expect_exact("Spider opened")
        p.expect_exact("Crawled (200)")
        p.kill(sig)
        p.expect_exact("shutting down gracefully")
        p.expect_exact("Spider closed (shutdown)")
        p.wait()

    @inlineCallbacks
    def test_shutdown_forced(self):
        sig = signal.SIGINT if sys.platform != "win32" else signal.SIGBREAK
        args = self.get_script_args("sleeping.py", "10")
        p = PopenSpawn(args, timeout=5)
        p.expect_exact("Spider opened")
        p.expect_exact("Crawled (200)")
        p.kill(sig)
        p.expect_exact("shutting down gracefully")
        # sending the second signal too fast often causes problems
        d = Deferred()
        call_later(0.01, d.callback, None)
        yield d
        p.kill(sig)
        p.expect_exact("forcing unclean shutdown")
        p.wait()


class TestCrawlerProcessSubprocess(TestCrawlerProcessSubprocessBase):
    @property
    def script_dir(self) -> Path:
        return self.get_script_dir("CrawlerProcess")

    def test_reactor_default_twisted_reactor_select(self):
        log = self.run_script("reactor_default_twisted_reactor_select.py")
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
true
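The subprocess tests above execute small standalone scripts (simple.py, multi.py, and so on) that are not included in this dump. A minimal sketch in the spirit of such a script, assuming only documented Scrapy APIs; the spider here is hypothetical and makes no requests:

import scrapy
from scrapy.crawler import CrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_requests"

    async def start(self):
        # An empty async generator: the crawl opens and closes immediately.
        return
        yield


if __name__ == "__main__":
    process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
    process.crawl(NoRequestsSpider)
    process.start()  # blocks until all scheduled crawls finish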
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_request_cb_kwargs.py
tests/test_request_cb_kwargs.py
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks

from scrapy.http import Request
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import MockServerSpider


class InjectArgumentsDownloaderMiddleware:
    """
    Make sure downloader middlewares are able to update the keyword arguments
    """

    def process_request(self, request):
        if request.callback.__name__ == "parse_downloader_mw":
            request.cb_kwargs["from_process_request"] = True

    def process_response(self, request, response):
        if request.callback.__name__ == "parse_downloader_mw":
            request.cb_kwargs["from_process_response"] = True
        return response


class InjectArgumentsSpiderMiddleware:
    """
    Make sure spider middlewares are able to update the keyword arguments
    """

    async def process_start(self, start):
        async for request in start:
            if request.callback.__name__ == "parse_spider_mw":
                request.cb_kwargs["from_process_start"] = True
            yield request

    def process_spider_input(self, response):
        request = response.request
        if request.callback.__name__ == "parse_spider_mw":
            request.cb_kwargs["from_process_spider_input"] = True

    def process_spider_output(self, response, result):
        for element in result:
            if (
                isinstance(element, Request)
                and element.callback.__name__ == "parse_spider_mw_2"
            ):
                element.cb_kwargs["from_process_spider_output"] = True
            yield element


class KeywordArgumentsSpider(MockServerSpider):
    name = "kwargs"
    custom_settings = {
        "DOWNLOADER_MIDDLEWARES": {
            InjectArgumentsDownloaderMiddleware: 750,
        },
        "SPIDER_MIDDLEWARES": {
            InjectArgumentsSpiderMiddleware: 750,
        },
    }

    checks: list[bool] = []

    async def start(self):
        data = {"key": "value", "number": 123, "callback": "some_callback"}
        yield Request(self.mockserver.url("/first"), self.parse_first, cb_kwargs=data)
        yield Request(
            self.mockserver.url("/general_with"), self.parse_general, cb_kwargs=data
        )
        yield Request(self.mockserver.url("/general_without"), self.parse_general)
        yield Request(self.mockserver.url("/no_kwargs"), self.parse_no_kwargs)
        yield Request(
            self.mockserver.url("/default"), self.parse_default, cb_kwargs=data
        )
        yield Request(
            self.mockserver.url("/takes_less"), self.parse_takes_less, cb_kwargs=data
        )
        yield Request(
            self.mockserver.url("/takes_more"), self.parse_takes_more, cb_kwargs=data
        )
        yield Request(self.mockserver.url("/downloader_mw"), self.parse_downloader_mw)
        yield Request(self.mockserver.url("/spider_mw"), self.parse_spider_mw)

    def parse_first(self, response, key, number):
        self.checks.append(key == "value")
        self.checks.append(number == 123)
        self.crawler.stats.inc_value("boolean_checks", 2)
        yield response.follow(
            self.mockserver.url("/two"),
            self.parse_second,
            cb_kwargs={"new_key": "new_value"},
        )

    def parse_second(self, response, new_key):
        self.checks.append(new_key == "new_value")
        self.crawler.stats.inc_value("boolean_checks")

    def parse_general(self, response, **kwargs):
        if response.url.endswith("/general_with"):
            self.checks.append(kwargs["key"] == "value")
            self.checks.append(kwargs["number"] == 123)
            self.checks.append(kwargs["callback"] == "some_callback")
            self.crawler.stats.inc_value("boolean_checks", 3)
        elif response.url.endswith("/general_without"):
            self.checks.append(
                kwargs == {}  # pylint: disable=use-implicit-booleaness-not-comparison
            )
            self.crawler.stats.inc_value("boolean_checks")

    def parse_no_kwargs(self, response):
        self.checks.append(response.url.endswith("/no_kwargs"))
        self.crawler.stats.inc_value("boolean_checks")

    def parse_default(self, response, key, number=None, default=99):
        self.checks.append(response.url.endswith("/default"))
        self.checks.append(key == "value")
        self.checks.append(number == 123)
        self.checks.append(default == 99)
        self.crawler.stats.inc_value("boolean_checks", 4)

    def parse_takes_less(self, response, key, callback):
        """
        Should raise
        TypeError: parse_takes_less() got an unexpected keyword argument 'number'
        """

    def parse_takes_more(self, response, key, number, callback, other):
        """
        Should raise
        TypeError: parse_takes_more() missing 1 required positional argument: 'other'
        """

    def parse_downloader_mw(
        self, response, from_process_request, from_process_response
    ):
        self.checks.append(bool(from_process_request))
        self.checks.append(bool(from_process_response))
        self.crawler.stats.inc_value("boolean_checks", 2)

    def parse_spider_mw(self, response, from_process_spider_input, from_process_start):
        self.checks.append(bool(from_process_spider_input))
        self.checks.append(bool(from_process_start))
        self.crawler.stats.inc_value("boolean_checks", 2)
        return Request(self.mockserver.url("/spider_mw_2"), self.parse_spider_mw_2)

    def parse_spider_mw_2(self, response, from_process_spider_output):
        self.checks.append(bool(from_process_spider_output))
        self.crawler.stats.inc_value("boolean_checks", 1)


class TestCallbackKeywordArguments:
    @classmethod
    def setup_class(cls):
        cls.mockserver = MockServer()
        cls.mockserver.__enter__()

    @classmethod
    def teardown_class(cls):
        cls.mockserver.__exit__(None, None, None)

    @inlineCallbacks
    def test_callback_kwargs(self):
        crawler = get_crawler(KeywordArgumentsSpider)
        with LogCapture() as log:
            yield crawler.crawl(mockserver=self.mockserver)
        assert all(crawler.spider.checks)
        assert len(crawler.spider.checks) == crawler.stats.get_value("boolean_checks")
        # check exceptions for argument mismatch
        exceptions = {}
        for line in log.records:
            for key in ("takes_less", "takes_more"):
                if key in line.getMessage():
                    exceptions[key] = line
        assert exceptions["takes_less"].exc_info[0] is TypeError
        assert str(exceptions["takes_less"].exc_info[1]).endswith(
            "parse_takes_less() got an unexpected keyword argument 'number'"
        ), "Exception message: " + str(exceptions["takes_less"].exc_info[1])
        assert exceptions["takes_more"].exc_info[0] is TypeError
        assert str(exceptions["takes_more"].exc_info[1]).endswith(
            "parse_takes_more() missing 1 required positional argument: 'other'"
        ), "Exception message: " + str(exceptions["takes_more"].exc_info[1])
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
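The core contract these tests verify is that values placed in a Request's cb_kwargs arrive in the callback as plain keyword arguments. A minimal spider illustrating just that contract; the spider name and URLs are hypothetical:

import scrapy


class CbKwargsDemoSpider(scrapy.Spider):
    name = "cb_kwargs_demo"
    start_urls = ["data:,start"]

    def parse(self, response):
        # Values in cb_kwargs are passed to the callback by name.
        yield scrapy.Request(
            "data:,page2", callback=self.parse_page, cb_kwargs={"page": 2}
        )

    def parse_page(self, response, page):
        self.logger.info("parsed page %d", page)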
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_base.py
tests/test_spidermiddleware_base.py
from __future__ import annotations

from typing import TYPE_CHECKING, Any

import pytest

from scrapy import Request, Spider
from scrapy.http import Response
from scrapy.spidermiddlewares.base import BaseSpiderMiddleware
from scrapy.utils.test import get_crawler

if TYPE_CHECKING:
    from scrapy.crawler import Crawler


@pytest.fixture
def crawler() -> Crawler:
    return get_crawler(Spider)


def test_trivial(crawler: Crawler) -> None:
    class TrivialSpiderMiddleware(BaseSpiderMiddleware):
        pass

    mw = TrivialSpiderMiddleware.from_crawler(crawler)
    assert hasattr(mw, "crawler")
    assert mw.crawler is crawler
    test_req = Request("data:,")
    spider_output = [test_req, {"foo": "bar"}]
    for processed in [
        list(mw.process_spider_output(Response("data:,"), spider_output)),
        list(mw.process_start_requests(spider_output, None)),  # type: ignore[arg-type]
    ]:
        assert processed == [test_req, {"foo": "bar"}]


def test_processed_request(crawler: Crawler) -> None:
    class ProcessReqSpiderMiddleware(BaseSpiderMiddleware):
        def get_processed_request(
            self, request: Request, response: Response | None
        ) -> Request | None:
            if request.url == "data:2,":
                return None
            if request.url == "data:3,":
                return Request("data:30,")
            return request

    mw = ProcessReqSpiderMiddleware.from_crawler(crawler)
    test_req1 = Request("data:1,")
    test_req2 = Request("data:2,")
    test_req3 = Request("data:3,")
    spider_output = [test_req1, {"foo": "bar"}, test_req2, test_req3]
    for processed in [
        list(mw.process_spider_output(Response("data:,"), spider_output)),
        list(mw.process_start_requests(spider_output, None)),  # type: ignore[arg-type]
    ]:
        assert len(processed) == 3
        assert isinstance(processed[0], Request)
        assert processed[0].url == "data:1,"
        assert processed[1] == {"foo": "bar"}
        assert isinstance(processed[2], Request)
        assert processed[2].url == "data:30,"


def test_processed_item(crawler: Crawler) -> None:
    class ProcessItemSpiderMiddleware(BaseSpiderMiddleware):
        def get_processed_item(self, item: Any, response: Response | None) -> Any:
            if item["foo"] == 2:
                return None
            if item["foo"] == 3:
                item["foo"] = 30
            return item

    mw = ProcessItemSpiderMiddleware.from_crawler(crawler)
    test_req = Request("data:,")
    spider_output = [{"foo": 1}, {"foo": 2}, test_req, {"foo": 3}]
    for processed in [
        list(mw.process_spider_output(Response("data:,"), spider_output)),
        list(mw.process_start_requests(spider_output, None)),  # type: ignore[arg-type]
    ]:
        assert processed == [{"foo": 1}, test_req, {"foo": 30}]


def test_processed_both(crawler: Crawler) -> None:
    class ProcessBothSpiderMiddleware(BaseSpiderMiddleware):
        def get_processed_request(
            self, request: Request, response: Response | None
        ) -> Request | None:
            if request.url == "data:2,":
                return None
            if request.url == "data:3,":
                return Request("data:30,")
            return request

        def get_processed_item(self, item: Any, response: Response | None) -> Any:
            if item["foo"] == 2:
                return None
            if item["foo"] == 3:
                item["foo"] = 30
            return item

    mw = ProcessBothSpiderMiddleware.from_crawler(crawler)
    test_req1 = Request("data:1,")
    test_req2 = Request("data:2,")
    test_req3 = Request("data:3,")
    spider_output = [
        test_req1,
        {"foo": 1},
        {"foo": 2},
        test_req2,
        {"foo": 3},
        test_req3,
    ]
    for processed in [
        list(mw.process_spider_output(Response("data:,"), spider_output)),
        list(mw.process_start_requests(spider_output, None)),  # type: ignore[arg-type]
    ]:
        assert len(processed) == 4
        assert isinstance(processed[0], Request)
        assert processed[0].url == "data:1,"
        assert processed[1] == {"foo": 1}
        assert processed[2] == {"foo": 30}
        assert isinstance(processed[3], Request)
        assert processed[3].url == "data:30,"
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
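As the tests above show, BaseSpiderMiddleware lets a subclass override the per-request and per-item hooks instead of reimplementing process_spider_output itself: returning None drops an entry, returning a different object replaces it. A sketch of that subclassing pattern, with a hypothetical middleware name:

from __future__ import annotations

from typing import Any

from scrapy import Request
from scrapy.http import Response
from scrapy.spidermiddlewares.base import BaseSpiderMiddleware


class DropDataRequestsMiddleware(BaseSpiderMiddleware):
    def get_processed_request(
        self, request: Request, response: Response | None
    ) -> Request | None:
        # Returning None drops the request; returning a new Request replaces it.
        if request.url.startswith("data:"):
            return None
        return request

    def get_processed_item(self, item: Any, response: Response | None) -> Any:
        # Items pass through unchanged here; returning None would drop them.
        return item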
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_httpcache.py
tests/test_downloadermiddleware_httpcache.py
from __future__ import annotations

import email.utils
import shutil
import tempfile
import time
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any

import pytest

from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
from scrapy.exceptions import IgnoreRequest
from scrapy.http import HtmlResponse, Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler

if TYPE_CHECKING:
    from collections.abc import Generator

    from scrapy.crawler import Crawler


class TestBase:
    """Base class with common setup and helper methods."""

    policy_class: str
    storage_class: str

    def setup_method(self):
        self.yesterday = email.utils.formatdate(time.time() - 86400)
        self.today = email.utils.formatdate()
        self.tomorrow = email.utils.formatdate(time.time() + 86400)
        self.tmpdir = tempfile.mkdtemp()
        self.request = Request("http://www.example.com", headers={"User-Agent": "test"})
        self.response = Response(
            "http://www.example.com",
            headers={"Content-Type": "text/html"},
            body=b"test body",
            status=202,
        )

    def teardown_method(self):
        shutil.rmtree(self.tmpdir)

    def _get_settings(self, **new_settings: Any) -> dict[str, Any]:
        settings = {
            "HTTPCACHE_ENABLED": True,
            "HTTPCACHE_DIR": self.tmpdir,
            "HTTPCACHE_EXPIRATION_SECS": 1,
            "HTTPCACHE_IGNORE_HTTP_CODES": [],
            "HTTPCACHE_POLICY": self.policy_class,
            "HTTPCACHE_STORAGE": self.storage_class,
        }
        settings.update(new_settings)
        return settings

    @contextmanager
    def _get_crawler(self, **new_settings: Any) -> Generator[Crawler]:
        settings = self._get_settings(**new_settings)
        crawler = get_crawler(Spider, settings)
        crawler.spider = crawler._create_spider("example.com")
        assert crawler.stats
        crawler.stats.open_spider()
        try:
            yield crawler
        finally:
            crawler.stats.close_spider()

    @contextmanager
    def _storage(self, **new_settings: Any):
        with self._middleware(**new_settings) as mw:
            yield mw.storage, mw.crawler

    @contextmanager
    def _middleware(self, **new_settings: Any) -> Generator[HttpCacheMiddleware]:
        with self._get_crawler(**new_settings) as crawler:
            assert crawler.spider
            mw = HttpCacheMiddleware.from_crawler(crawler)
            mw.spider_opened(crawler.spider)
            try:
                yield mw
            finally:
                mw.spider_closed(crawler.spider)

    def assertEqualResponse(self, response1, response2):
        assert response1.url == response2.url
        assert response1.status == response2.status
        assert response1.headers == response2.headers
        assert response1.body == response2.body

    def assertEqualRequest(self, request1, request2):
        assert request1.url == request2.url
        assert request1.headers == request2.headers
        assert request1.body == request2.body

    def assertEqualRequestButWithCacheValidators(self, request1, request2):
        assert request1.url == request2.url
        assert b"If-None-Match" not in request1.headers
        assert b"If-Modified-Since" not in request1.headers
        assert any(
            h in request2.headers for h in (b"If-None-Match", b"If-Modified-Since")
        )
        assert request1.body == request2.body


class StorageTestMixin:
    """Mixin containing storage-specific test methods."""

    def test_storage(self):
        with self._storage() as (storage, crawler):
            request2 = self.request.copy()
            assert storage.retrieve_response(crawler.spider, request2) is None

            storage.store_response(crawler.spider, self.request, self.response)
            response2 = storage.retrieve_response(crawler.spider, request2)
            assert isinstance(response2, HtmlResponse)  # content-type header
            self.assertEqualResponse(self.response, response2)

            time.sleep(2)  # wait for cache to expire
            assert storage.retrieve_response(crawler.spider, request2) is None

    def test_storage_never_expire(self):
        with self._storage(HTTPCACHE_EXPIRATION_SECS=0) as (storage, crawler):
            assert storage.retrieve_response(crawler.spider, self.request) is None
            storage.store_response(crawler.spider, self.request, self.response)
            time.sleep(0.5)  # give the chance to expire
            assert storage.retrieve_response(crawler.spider, self.request)

    def test_storage_no_content_type_header(self):
        """Test that the response body is used to get the right response class
        even if there is no Content-Type header"""
        with self._storage() as (storage, crawler):
            assert storage.retrieve_response(crawler.spider, self.request) is None
            response = Response(
                "http://www.example.com",
                body=b"<!DOCTYPE html>\n<title>.</title>",
                status=202,
            )
            storage.store_response(crawler.spider, self.request, response)
            cached_response = storage.retrieve_response(crawler.spider, self.request)
            assert isinstance(cached_response, HtmlResponse)
            self.assertEqualResponse(response, cached_response)


class PolicyTestMixin:
    """Mixin containing policy-specific test methods."""

    def test_dont_cache(self):
        with self._middleware() as mw:
            self.request.meta["dont_cache"] = True
            mw.process_response(self.request, self.response)
            assert mw.storage.retrieve_response(mw.crawler.spider, self.request) is None

        with self._middleware() as mw:
            self.request.meta["dont_cache"] = False
            mw.process_response(self.request, self.response)
            if mw.policy.should_cache_response(self.response, self.request):
                assert isinstance(
                    mw.storage.retrieve_response(mw.crawler.spider, self.request),
                    self.response.__class__,
                )


class DummyPolicyTestMixin(PolicyTestMixin):
    """Mixin containing dummy policy specific test methods."""

    def test_middleware(self):
        with self._middleware() as mw:
            assert mw.process_request(self.request) is None
            mw.process_response(self.request, self.response)

            response = mw.process_request(self.request)
            assert isinstance(response, HtmlResponse)
            self.assertEqualResponse(self.response, response)
            assert "cached" in response.flags

    def test_different_request_response_urls(self):
        with self._middleware() as mw:
            req = Request("http://host.com/path")
            res = Response("http://host2.net/test.html")

            assert mw.process_request(req) is None
            mw.process_response(req, res)

            cached = mw.process_request(req)
            assert isinstance(cached, Response)
            self.assertEqualResponse(res, cached)
            assert "cached" in cached.flags

    def test_middleware_ignore_missing(self):
        with self._middleware(HTTPCACHE_IGNORE_MISSING=True) as mw:
            with pytest.raises(IgnoreRequest):
                mw.process_request(self.request)
            mw.process_response(self.request, self.response)
            response = mw.process_request(self.request)
            assert isinstance(response, HtmlResponse)
            self.assertEqualResponse(self.response, response)
            assert "cached" in response.flags

    def test_middleware_ignore_schemes(self):
        # http responses are cached by default
        req, res = Request("http://test.com/"), Response("http://test.com/")
        with self._middleware() as mw:
            assert mw.process_request(req) is None
            mw.process_response(req, res)

            cached = mw.process_request(req)
            assert isinstance(cached, Response), type(cached)
            self.assertEqualResponse(res, cached)
            assert "cached" in cached.flags

        # file response is not cached by default
        req, res = Request("file:///tmp/t.txt"), Response("file:///tmp/t.txt")
        with self._middleware() as mw:
            assert mw.process_request(req) is None
            mw.process_response(req, res)

            assert mw.storage.retrieve_response(mw.crawler.spider, req) is None
            assert mw.process_request(req) is None

        # s3 scheme response is cached by default
        req, res = Request("s3://bucket/key"), Response("http://bucket/key")
        with self._middleware() as mw:
            assert mw.process_request(req) is None
            mw.process_response(req, res)

            cached = mw.process_request(req)
            assert isinstance(cached, Response), type(cached)
            self.assertEqualResponse(res, cached)
            assert "cached" in cached.flags

        # ignore s3 scheme
        req, res = Request("s3://bucket/key2"), Response("http://bucket/key2")
        with self._middleware(HTTPCACHE_IGNORE_SCHEMES=["s3"]) as mw:
            assert mw.process_request(req) is None
            mw.process_response(req, res)

            assert mw.storage.retrieve_response(mw.crawler.spider, req) is None
            assert mw.process_request(req) is None

    def test_middleware_ignore_http_codes(self):
        # test response is not cached
        with self._middleware(HTTPCACHE_IGNORE_HTTP_CODES=[202]) as mw:
            assert mw.process_request(self.request) is None
            mw.process_response(self.request, self.response)

            assert mw.storage.retrieve_response(mw.crawler.spider, self.request) is None
            assert mw.process_request(self.request) is None

        # test response is cached
        with self._middleware(HTTPCACHE_IGNORE_HTTP_CODES=[203]) as mw:
            mw.process_response(self.request, self.response)
            response = mw.process_request(self.request)
            assert isinstance(response, HtmlResponse)
            self.assertEqualResponse(self.response, response)
            assert "cached" in response.flags


class RFC2616PolicyTestMixin(PolicyTestMixin):
    """Mixin containing RFC2616 policy specific test methods."""

    @staticmethod
    def _process_requestresponse(
        mw: HttpCacheMiddleware, request: Request, response: Response | None
    ) -> Response | Request:
        result = None
        try:
            result = mw.process_request(request)
            if result:
                assert isinstance(result, (Request, Response))
                return result
            assert response is not None
            result = mw.process_response(request, response)
            assert isinstance(result, Response)
            return result
        except Exception:
            print("Request", request)
            print("Response", response)
            print("Result", result)
            raise

    def test_request_cacheability(self):
        res0 = Response(
            self.request.url, status=200, headers={"Expires": self.tomorrow}
        )
        req0 = Request("http://example.com")
        req1 = req0.replace(headers={"Cache-Control": "no-store"})
        req2 = req0.replace(headers={"Cache-Control": "no-cache"})
        with self._middleware() as mw:
            # response for a request with no-store must not be cached
            res1 = self._process_requestresponse(mw, req1, res0)
            self.assertEqualResponse(res1, res0)
            assert mw.storage.retrieve_response(mw.crawler.spider, req1) is None
            # Re-do request without no-store and expect it to be cached
            res2 = self._process_requestresponse(mw, req0, res0)
            assert "cached" not in res2.flags
            res3 = mw.process_request(req0)
            assert "cached" in res3.flags
            self.assertEqualResponse(res2, res3)
            # request with no-cache directive must not return cached response
            # but it allows new response to be stored
            res0b = res0.replace(body=b"foo")
            res4 = self._process_requestresponse(mw, req2, res0b)
            self.assertEqualResponse(res4, res0b)
            assert "cached" not in res4.flags
            res5 = self._process_requestresponse(mw, req0, None)
            self.assertEqualResponse(res5, res0b)
            assert "cached" in res5.flags

    def test_response_cacheability(self):
        responses = [
            # 304 is not cacheable no matter what servers sends
            (False, 304, {}),
            (False, 304, {"Last-Modified": self.yesterday}),
            (False, 304, {"Expires": self.tomorrow}),
            (False, 304, {"Etag": "bar"}),
            (False, 304, {"Cache-Control": "max-age=3600"}),
            # Always obey no-store cache control
            (False, 200, {"Cache-Control": "no-store"}),
            (False, 200, {"Cache-Control": "no-store, max-age=300"}),  # invalid
            (
                False,
                200,
                {"Cache-Control": "no-store", "Expires":
self.tomorrow}, ), # invalid # Ignore responses missing expiration and/or validation headers (False, 200, {}), (False, 302, {}), (False, 307, {}), (False, 404, {}), # Cache responses with expiration and/or validation headers (True, 200, {"Last-Modified": self.yesterday}), (True, 203, {"Last-Modified": self.yesterday}), (True, 300, {"Last-Modified": self.yesterday}), (True, 301, {"Last-Modified": self.yesterday}), (True, 308, {"Last-Modified": self.yesterday}), (True, 401, {"Last-Modified": self.yesterday}), (True, 404, {"Cache-Control": "public, max-age=600"}), (True, 302, {"Expires": self.tomorrow}), (True, 200, {"Etag": "foo"}), ] with self._middleware() as mw: for idx, (shouldcache, status, headers) in enumerate(responses): req0 = Request(f"http://example-{idx}.com") res0 = Response(req0.url, status=status, headers=headers) res1 = self._process_requestresponse(mw, req0, res0) res304 = res0.replace(status=304) res2 = self._process_requestresponse( mw, req0, res304 if shouldcache else res0 ) self.assertEqualResponse(res1, res0) self.assertEqualResponse(res2, res0) resc = mw.storage.retrieve_response(mw.crawler.spider, req0) if shouldcache: self.assertEqualResponse(resc, res1) assert "cached" in res2.flags assert res2.status != 304 else: assert not resc assert "cached" not in res2.flags # cache unconditionally unless response contains no-store or is a 304 with self._middleware(HTTPCACHE_ALWAYS_STORE=True) as mw: for idx, (_, status, headers) in enumerate(responses): shouldcache = ( "no-store" not in headers.get("Cache-Control", "") and status != 304 ) req0 = Request(f"http://example2-{idx}.com") res0 = Response(req0.url, status=status, headers=headers) res1 = self._process_requestresponse(mw, req0, res0) res304 = res0.replace(status=304) res2 = self._process_requestresponse( mw, req0, res304 if shouldcache else res0 ) self.assertEqualResponse(res1, res0) self.assertEqualResponse(res2, res0) resc = mw.storage.retrieve_response(mw.crawler.spider, req0) if shouldcache: self.assertEqualResponse(resc, res1) assert "cached" in res2.flags assert res2.status != 304 else: assert not resc assert "cached" not in res2.flags def test_cached_and_fresh(self): sampledata = [ (200, {"Date": self.yesterday, "Expires": self.tomorrow}), (200, {"Date": self.yesterday, "Cache-Control": "max-age=86405"}), (200, {"Age": "299", "Cache-Control": "max-age=300"}), # Obey max-age if present over any others ( 200, { "Date": self.today, "Age": "86405", "Cache-Control": "max-age=" + str(86400 * 3), "Expires": self.yesterday, "Last-Modified": self.yesterday, }, ), # obey Expires if max-age is not present ( 200, { "Date": self.yesterday, "Age": "86400", "Cache-Control": "public", "Expires": self.tomorrow, "Last-Modified": self.yesterday, }, ), # Default missing Date header to right now (200, {"Expires": self.tomorrow}), # Firefox - Expires if age is greater than 10% of (Date - Last-Modified) ( 200, { "Date": self.today, "Last-Modified": self.yesterday, "Age": str(86400 / 10 - 1), }, ), # Firefox - Set one year maxage to permanent redirects missing expiration info (300, {}), (301, {}), (308, {}), ] with self._middleware() as mw: for idx, (status, headers) in enumerate(sampledata): req0 = Request(f"http://example-{idx}.com") res0 = Response(req0.url, status=status, headers=headers) # cache fresh response res1 = self._process_requestresponse(mw, req0, res0) self.assertEqualResponse(res1, res0) assert "cached" not in res1.flags # return fresh cached response without network interaction res2 = self._process_requestresponse(mw, 
req0, None) self.assertEqualResponse(res1, res2) assert "cached" in res2.flags # validate cached response if request max-age set as 0 req1 = req0.replace(headers={"Cache-Control": "max-age=0"}) res304 = res0.replace(status=304) assert mw.process_request(req1) is None res3 = self._process_requestresponse(mw, req1, res304) self.assertEqualResponse(res1, res3) assert "cached" in res3.flags def test_cached_and_stale(self): sampledata = [ (200, {"Date": self.today, "Expires": self.yesterday}), ( 200, { "Date": self.today, "Expires": self.yesterday, "Last-Modified": self.yesterday, }, ), (200, {"Expires": self.yesterday}), (200, {"Expires": self.yesterday, "ETag": "foo"}), (200, {"Expires": self.yesterday, "Last-Modified": self.yesterday}), (200, {"Expires": self.tomorrow, "Age": "86405"}), (200, {"Cache-Control": "max-age=86400", "Age": "86405"}), # no-cache forces expiration, also revalidation if validators exists (200, {"Cache-Control": "no-cache"}), (200, {"Cache-Control": "no-cache", "ETag": "foo"}), (200, {"Cache-Control": "no-cache", "Last-Modified": self.yesterday}), ( 200, { "Cache-Control": "no-cache,must-revalidate", "Last-Modified": self.yesterday, }, ), ( 200, { "Cache-Control": "must-revalidate", "Expires": self.yesterday, "Last-Modified": self.yesterday, }, ), (200, {"Cache-Control": "max-age=86400,must-revalidate", "Age": "86405"}), ] with self._middleware() as mw: for idx, (status, headers) in enumerate(sampledata): req0 = Request(f"http://example-{idx}.com") res0a = Response(req0.url, status=status, headers=headers) # cache expired response res1 = self._process_requestresponse(mw, req0, res0a) self.assertEqualResponse(res1, res0a) assert "cached" not in res1.flags # Same request but as cached response is stale a new response must # be returned res0b = res0a.replace(body=b"bar") res2 = self._process_requestresponse(mw, req0, res0b) self.assertEqualResponse(res2, res0b) assert "cached" not in res2.flags cc = headers.get("Cache-Control", "") # Previous response expired too, subsequent request to same # resource must revalidate and succeed on 304 if validators # are present if "ETag" in headers or "Last-Modified" in headers: res0c = res0b.replace(status=304) res3 = self._process_requestresponse(mw, req0, res0c) self.assertEqualResponse(res3, res0b) assert "cached" in res3.flags # get cached response on server errors unless must-revalidate # in cached response res0d = res0b.replace(status=500) res4 = self._process_requestresponse(mw, req0, res0d) if "must-revalidate" in cc: assert "cached" not in res4.flags self.assertEqualResponse(res4, res0d) else: assert "cached" in res4.flags self.assertEqualResponse(res4, res0b) # Requests with max-stale can fetch expired cached responses # unless cached response has must-revalidate req1 = req0.replace(headers={"Cache-Control": "max-stale"}) res5 = self._process_requestresponse(mw, req1, res0b) self.assertEqualResponse(res5, res0b) if "no-cache" in cc or "must-revalidate" in cc: assert "cached" not in res5.flags else: assert "cached" in res5.flags def test_process_exception(self): with self._middleware() as mw: res0 = Response(self.request.url, headers={"Expires": self.yesterday}) req0 = Request(self.request.url) self._process_requestresponse(mw, req0, res0) for e in mw.DOWNLOAD_EXCEPTIONS: # Simulate encountering an error on download attempts assert mw.process_request(req0) is None res1 = mw.process_exception(req0, e("foo")) # Use cached response as recovery assert "cached" in res1.flags self.assertEqualResponse(res0, res1) # Do not use cached 
response for unhandled exceptions mw.process_request(req0) assert mw.process_exception(req0, Exception("foo")) is None def test_ignore_response_cache_controls(self): sampledata = [ (200, {"Date": self.yesterday, "Expires": self.tomorrow}), (200, {"Date": self.yesterday, "Cache-Control": "no-store,max-age=86405"}), (200, {"Age": "299", "Cache-Control": "max-age=300,no-cache"}), (300, {"Cache-Control": "no-cache"}), (200, {"Expires": self.tomorrow, "Cache-Control": "no-store"}), ] with self._middleware( HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS=["no-cache", "no-store"] ) as mw: for idx, (status, headers) in enumerate(sampledata): req0 = Request(f"http://example-{idx}.com") res0 = Response(req0.url, status=status, headers=headers) # cache fresh response res1 = self._process_requestresponse(mw, req0, res0) self.assertEqualResponse(res1, res0) assert "cached" not in res1.flags # return fresh cached response without network interaction res2 = self._process_requestresponse(mw, req0, None) self.assertEqualResponse(res1, res2) assert "cached" in res2.flags # Concrete test classes that combine storage and policy mixins class TestFilesystemStorageWithDummyPolicy( TestBase, StorageTestMixin, DummyPolicyTestMixin ): storage_class = "scrapy.extensions.httpcache.FilesystemCacheStorage" policy_class = "scrapy.extensions.httpcache.DummyPolicy" class TestFilesystemStorageWithRFC2616Policy( TestBase, StorageTestMixin, RFC2616PolicyTestMixin ): storage_class = "scrapy.extensions.httpcache.FilesystemCacheStorage" policy_class = "scrapy.extensions.httpcache.RFC2616Policy" class TestDbmStorageWithDummyPolicy(TestBase, StorageTestMixin, DummyPolicyTestMixin): storage_class = "scrapy.extensions.httpcache.DbmCacheStorage" policy_class = "scrapy.extensions.httpcache.DummyPolicy" class TestDbmStorageWithRFC2616Policy( TestBase, StorageTestMixin, RFC2616PolicyTestMixin ): storage_class = "scrapy.extensions.httpcache.DbmCacheStorage" policy_class = "scrapy.extensions.httpcache.RFC2616Policy" class TestDbmStorageWithCustomDbmModule(TestDbmStorageWithDummyPolicy): dbm_module = "tests.mocks.dummydbm" def _get_settings(self, **new_settings) -> dict[str, Any]: new_settings.setdefault("HTTPCACHE_DBM_MODULE", self.dbm_module) return super()._get_settings(**new_settings) def test_custom_dbm_module_loaded(self): # make sure our dbm module has been loaded with self._storage() as (storage, _): assert storage.dbmodule.__name__ == self.dbm_module class TestFilesystemStorageGzipWithDummyPolicy(TestFilesystemStorageWithDummyPolicy): def _get_settings(self, **new_settings) -> dict[str, Any]: new_settings.setdefault("HTTPCACHE_GZIP", True) return super()._get_settings(**new_settings)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
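The suite above drives HttpCacheMiddleware through every policy/storage pairing. As a minimal sketch (not part of the test file), these are the settings it varies, here pinned to the RFC 2616 policy with filesystem storage; the directory name is an arbitrary example:

settings = {
    "HTTPCACHE_ENABLED": True,
    "HTTPCACHE_DIR": "httpcache",  # arbitrary example directory
    "HTTPCACHE_EXPIRATION_SECS": 0,  # 0 means never expire (see test_storage_never_expire)
    "HTTPCACHE_POLICY": "scrapy.extensions.httpcache.RFC2616Policy",
    "HTTPCACHE_STORAGE": "scrapy.extensions.httpcache.FilesystemCacheStorage",
}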
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_addons.py
tests/test_addons.py
import itertools from typing import Any from unittest.mock import patch from twisted.internet.defer import inlineCallbacks from scrapy import Spider from scrapy.crawler import Crawler, CrawlerRunner from scrapy.exceptions import NotConfigured from scrapy.settings import BaseSettings, Settings from scrapy.utils.test import get_crawler, get_reactor_settings class SimpleAddon: def update_settings(self, settings): pass def get_addon_cls(config: dict[str, Any]) -> type: class AddonWithConfig: def update_settings(self, settings: BaseSettings): settings.update(config, priority="addon") return AddonWithConfig class CreateInstanceAddon: def __init__(self, crawler: Crawler) -> None: super().__init__() self.crawler = crawler self.config = crawler.settings.getdict("MYADDON") @classmethod def from_crawler(cls, crawler: Crawler): return cls(crawler) def update_settings(self, settings): settings.update(self.config, "addon") class TestAddon: def test_update_settings(self): settings = BaseSettings() settings.set("KEY1", "default", priority="default") settings.set("KEY2", "project", priority="project") addon_config = {"KEY1": "addon", "KEY2": "addon", "KEY3": "addon"} testaddon = get_addon_cls(addon_config)() testaddon.update_settings(settings) assert settings["KEY1"] == "addon" assert settings["KEY2"] == "project" assert settings["KEY3"] == "addon" class TestAddonManager: def test_load_settings(self): settings_dict = { "ADDONS": {"tests.test_addons.SimpleAddon": 0}, } crawler = get_crawler(settings_dict=settings_dict) manager = crawler.addons assert isinstance(manager.addons[0], SimpleAddon) def test_notconfigured(self): class NotConfiguredAddon: def update_settings(self, settings): raise NotConfigured settings_dict = { "ADDONS": {NotConfiguredAddon: 0}, } crawler = get_crawler(settings_dict=settings_dict) manager = crawler.addons assert not manager.addons def test_load_settings_order(self): # Get three addons with different settings addonlist = [] for i in range(3): addon = get_addon_cls({"KEY1": i}) addon.number = i addonlist.append(addon) # Test for every possible ordering for ordered_addons in itertools.permutations(addonlist): expected_order = [a.number for a in ordered_addons] settings = {"ADDONS": {a: i for i, a in enumerate(ordered_addons)}} crawler = get_crawler(settings_dict=settings) manager = crawler.addons assert [a.number for a in manager.addons] == expected_order assert crawler.settings.getint("KEY1") == expected_order[-1] def test_build_from_crawler(self): settings_dict = { "ADDONS": {"tests.test_addons.CreateInstanceAddon": 0}, "MYADDON": {"MYADDON_KEY": "val"}, } crawler = get_crawler(settings_dict=settings_dict) manager = crawler.addons assert isinstance(manager.addons[0], CreateInstanceAddon) assert crawler.settings.get("MYADDON_KEY") == "val" def test_settings_priority(self): config = { "KEY": 15, # priority=addon } settings_dict = { "ADDONS": {get_addon_cls(config): 1}, **get_reactor_settings(), } crawler = get_crawler(settings_dict=settings_dict) assert crawler.settings.getint("KEY") == 15 settings = Settings(settings_dict) settings.set("KEY", 0, priority="default") runner = CrawlerRunner(settings) crawler = runner.create_crawler(Spider) crawler._apply_settings() assert crawler.settings.getint("KEY") == 15 settings_dict = { "KEY": 20, # priority=project "ADDONS": {get_addon_cls(config): 1}, **get_reactor_settings(), } settings = Settings(settings_dict) settings.set("KEY", 0, priority="default") runner = CrawlerRunner(settings) crawler = runner.create_crawler(Spider) assert 
crawler.settings.getint("KEY") == 20 def test_fallback_workflow(self): FALLBACK_SETTING = "MY_FALLBACK_DOWNLOAD_HANDLER" class AddonWithFallback: def update_settings(self, settings): if not settings.get(FALLBACK_SETTING): settings.set( FALLBACK_SETTING, settings.getwithbase("DOWNLOAD_HANDLERS")["https"], "addon", ) settings["DOWNLOAD_HANDLERS"]["https"] = "AddonHandler" settings_dict = { "ADDONS": {AddonWithFallback: 1}, } crawler = get_crawler(settings_dict=settings_dict) assert ( crawler.settings.getwithbase("DOWNLOAD_HANDLERS")["https"] == "AddonHandler" ) assert ( crawler.settings.get(FALLBACK_SETTING) == "scrapy.core.downloader.handlers.http11.HTTP11DownloadHandler" ) settings_dict = { "ADDONS": {AddonWithFallback: 1}, "DOWNLOAD_HANDLERS": {"https": "UserHandler"}, } crawler = get_crawler(settings_dict=settings_dict) assert ( crawler.settings.getwithbase("DOWNLOAD_HANDLERS")["https"] == "AddonHandler" ) assert crawler.settings.get(FALLBACK_SETTING) == "UserHandler" def test_logging_message(self): class LoggedAddon: def update_settings(self, settings): pass with ( patch("scrapy.addons.logger") as logger_mock, patch("scrapy.addons.build_from_crawler") as build_from_crawler_mock, ): settings_dict = { "ADDONS": {LoggedAddon: 1}, } addon = LoggedAddon() build_from_crawler_mock.return_value = addon crawler = get_crawler(settings_dict=settings_dict) logger_mock.info.assert_called_once_with( "Enabled addons:\n%(addons)s", {"addons": [addon]}, extra={"crawler": crawler}, ) @inlineCallbacks def test_enable_addon_in_spider(self): class MySpider(Spider): name = "myspider" @classmethod def from_crawler(cls, crawler, *args, **kwargs): spider = super().from_crawler(crawler, *args, **kwargs) addon_config = {"KEY": "addon"} addon_cls = get_addon_cls(addon_config) spider.settings.set("ADDONS", {addon_cls: 1}, priority="spider") return spider settings = Settings() settings.setdict(get_reactor_settings()) settings.set("KEY", "default", priority="default") runner = CrawlerRunner(settings) crawler = runner.create_crawler(MySpider) assert crawler.settings.get("KEY") == "default" yield crawler.crawl() assert crawler.settings.get("KEY") == "addon"
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
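A minimal sketch of the add-on protocol these tests exercise, assuming only what the file itself shows: an add-on is any object with an update_settings() method, enabled by mapping it to a priority in the ADDONS setting. The class name and setting value below are illustrative:

from scrapy.crawler import CrawlerProcess

class MyAddon:  # hypothetical add-on, mirroring get_addon_cls() above
    def update_settings(self, settings):
        # "addon" is the dedicated settings priority used throughout the tests above
        settings.set("CONCURRENT_REQUESTS", 4, priority="addon")

process = CrawlerProcess(settings={"ADDONS": {MyAddon: 0}})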
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_downloadtimeout.py
tests/test_downloadermiddleware_downloadtimeout.py
from scrapy.downloadermiddlewares.downloadtimeout import DownloadTimeoutMiddleware from scrapy.http import Request from scrapy.spiders import Spider from scrapy.utils.test import get_crawler class TestDownloadTimeoutMiddleware: def get_request_spider_mw(self, settings=None): crawler = get_crawler(Spider, settings) spider = crawler._create_spider("foo") request = Request("http://scrapytest.org/") return request, spider, DownloadTimeoutMiddleware.from_crawler(crawler) def test_default_download_timeout(self): req, spider, mw = self.get_request_spider_mw() mw.spider_opened(spider) assert mw.process_request(req) is None assert req.meta.get("download_timeout") == 180 def test_string_download_timeout(self): req, spider, mw = self.get_request_spider_mw({"DOWNLOAD_TIMEOUT": "20.1"}) mw.spider_opened(spider) assert mw.process_request(req) is None assert req.meta.get("download_timeout") == 20.1 def test_setting_has_download_timeout(self): req, spider, mw = self.get_request_spider_mw({"DOWNLOAD_TIMEOUT": 2}) mw.spider_opened(spider) assert mw.process_request(req) is None assert req.meta.get("download_timeout") == 2 def test_request_has_download_timeout(self): req, spider, mw = self.get_request_spider_mw({"DOWNLOAD_TIMEOUT": 2}) mw.spider_opened(spider) req.meta["download_timeout"] = 1 assert mw.process_request(req) is None assert req.meta.get("download_timeout") == 1
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
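A short sketch of the behaviour asserted above: the middleware copies DOWNLOAD_TIMEOUT into request.meta["download_timeout"], and a value already present in meta wins (the URL matches the test):

from scrapy import Request

# Per-request override, as in test_request_has_download_timeout:
# this request keeps its 1-second timeout even if DOWNLOAD_TIMEOUT is larger.
req = Request("http://scrapytest.org/", meta={"download_timeout": 1})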
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_httpobj.py
tests/test_utils_httpobj.py
from urllib.parse import urlparse from scrapy.http import Request from scrapy.utils.httpobj import urlparse_cached def test_urlparse_cached(): url = "http://www.example.com/index.html" request1 = Request(url) request2 = Request(url) req1a = urlparse_cached(request1) req1b = urlparse_cached(request1) req2 = urlparse_cached(request2) urlp = urlparse(url) assert req1a == req2 assert req1a == urlp assert req1a is req1b assert req1a is not req2
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
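A usage sketch of what this test pins down: urlparse_cached() returns the same parse result as a plain urlparse(), but memoizes it per Request object:

from urllib.parse import urlparse

from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached

request = Request("http://www.example.com/index.html")
parsed = urlparse_cached(request)
assert parsed == urlparse(request.url)     # same result as plain urlparse()
assert urlparse_cached(request) is parsed  # but cached per request object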
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/reactor_default.py
tests/CrawlerProcess/reactor_default.py
from twisted.internet import reactor # noqa: F401,TID253 from twisted.python import log import scrapy from scrapy.crawler import CrawlerProcess class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess(settings={}) d = process.crawl(NoRequestsSpider) d.addErrback(log.err) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/twisted_reactor_asyncio.py
tests/CrawlerProcess/twisted_reactor_asyncio.py
import scrapy from scrapy.crawler import CrawlerProcess class AsyncioReactorSpider(scrapy.Spider): name = "asyncio_reactor" process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", } ) process.crawl(AsyncioReactorSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/asyncio_deferred_signal.py
tests/CrawlerProcess/asyncio_deferred_signal.py
from __future__ import annotations import asyncio import sys from scrapy import Spider from scrapy.crawler import CrawlerProcess from scrapy.utils.defer import deferred_from_coro class UppercasePipeline: async def _open_spider(self, spider): spider.logger.info("async pipeline opened!") await asyncio.sleep(0.1) def open_spider(self, spider): return deferred_from_coro(self._open_spider(spider)) def process_item(self, item): return {"url": item["url"].upper()} class UrlSpider(Spider): name = "url_spider" start_urls = ["data:,"] custom_settings = { "ITEM_PIPELINES": {UppercasePipeline: 100}, } def parse(self, response): yield {"url": response.url} if __name__ == "__main__": ASYNCIO_EVENT_LOOP: str | None try: ASYNCIO_EVENT_LOOP = sys.argv[1] except IndexError: ASYNCIO_EVENT_LOOP = None process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP, } ) process.crawl(UrlSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
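A sketch of the two async bridges this script (and sleeping.py below) rely on, assuming the asyncio reactor is installed; the helper names here are illustrative:

import asyncio

from twisted.internet.defer import Deferred

from scrapy.utils.defer import deferred_from_coro, maybe_deferred_to_future

async def _setup() -> None:
    await asyncio.sleep(0.1)

def open_spider_hook() -> Deferred:
    # coroutine -> Deferred, as UppercasePipeline.open_spider() does above
    return deferred_from_coro(_setup())

async def await_deferred(d: Deferred) -> None:
    # Deferred -> awaitable, the inverse bridge
    await maybe_deferred_to_future(d)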
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/asyncio_enabled_no_reactor.py
tests/CrawlerProcess/asyncio_enabled_no_reactor.py
import scrapy from scrapy.crawler import CrawlerProcess from scrapy.utils.reactor import is_asyncio_reactor_installed class ReactorCheckExtension: def __init__(self): if not is_asyncio_reactor_installed(): raise RuntimeError("ReactorCheckExtension requires the asyncio reactor.") class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "EXTENSIONS": {ReactorCheckExtension: 0}, } ) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/asyncio_enabled_reactor.py
tests/CrawlerProcess/asyncio_enabled_reactor.py
import scrapy from scrapy.crawler import CrawlerProcess from scrapy.utils.asyncio import is_asyncio_available from scrapy.utils.reactor import ( install_reactor, is_asyncio_reactor_installed, is_reactor_installed, ) if is_reactor_installed(): raise RuntimeError( "Reactor already installed before is_asyncio_reactor_installed()." ) try: is_asyncio_reactor_installed() except RuntimeError: pass else: raise RuntimeError("is_asyncio_reactor_installed() did not raise RuntimeError.") try: is_asyncio_available() except RuntimeError: pass else: raise RuntimeError("is_asyncio_available() did not raise RuntimeError.") if is_reactor_installed(): raise RuntimeError( "Reactor already installed after is_asyncio_reactor_installed()." ) install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor") if not is_asyncio_reactor_installed(): raise RuntimeError("Wrong reactor installed after install_reactor().") class ReactorCheckExtension: def __init__(self): if not is_asyncio_reactor_installed(): raise RuntimeError("ReactorCheckExtension requires the asyncio reactor.") if not is_asyncio_available(): raise RuntimeError("ReactorCheckExtension requires asyncio support.") class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "EXTENSIONS": {ReactorCheckExtension: 0}, } ) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
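The check-then-install pattern the script verifies, reduced to a sketch (installing a reactor is process-global, so this belongs at the top of an entry-point script):

from scrapy.utils.reactor import (
    install_reactor,
    is_asyncio_reactor_installed,
    is_reactor_installed,
)

# Install the asyncio reactor only while no reactor is installed yet,
# then confirm it took effect.
if not is_reactor_installed():
    install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
assert is_asyncio_reactor_installed()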
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/twisted_reactor_select.py
tests/CrawlerProcess/twisted_reactor_select.py
import scrapy from scrapy.crawler import CrawlerProcess class SelectReactorSpider(scrapy.Spider): name = "select_reactor" process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor", } ) process.crawl(SelectReactorSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/caching_hostname_resolver.py
tests/CrawlerProcess/caching_hostname_resolver.py
import sys import scrapy from scrapy.crawler import CrawlerProcess class CachingHostnameResolverSpider(scrapy.Spider): """ Finishes in a finite amount of time (does not hang indefinitely in the DNS resolution) """ name = "caching_hostname_resolver_spider" async def start(self): yield scrapy.Request(self.url) def parse(self, response): for _ in range(10): yield scrapy.Request( response.url, dont_filter=True, callback=self.ignore_response ) def ignore_response(self, response): self.logger.info(repr(response.ip_address)) if __name__ == "__main__": process = CrawlerProcess( settings={ "RETRY_ENABLED": False, "DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver", } ) process.crawl(CachingHostnameResolverSpider, url=sys.argv[1]) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
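The settings change at the heart of the resolver scripts, shown in isolation as a sketch; CachingHostnameResolver also resolves IPv6 hosts, per caching_hostname_resolver_ipv6.py below:

from scrapy.crawler import CrawlerProcess

process = CrawlerProcess(
    settings={
        "RETRY_ENABLED": False,  # fail fast instead of retrying lookups
        "DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
    }
)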
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/multi.py
tests/CrawlerProcess/multi.py
import scrapy from scrapy.crawler import CrawlerProcess class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess(settings={}) process.crawl(NoRequestsSpider) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/twisted_reactor_poll.py
tests/CrawlerProcess/twisted_reactor_poll.py
import scrapy from scrapy.crawler import CrawlerProcess class PollReactorSpider(scrapy.Spider): name = "poll_reactor" process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.pollreactor.PollReactor", } ) process.crawl(PollReactorSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/reactor_select_twisted_reactor_select.py
tests/CrawlerProcess/reactor_select_twisted_reactor_select.py
from twisted.internet import selectreactor import scrapy from scrapy.crawler import CrawlerProcess selectreactor.install() class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor", } ) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/sleeping.py
tests/CrawlerProcess/sleeping.py
import sys from twisted.internet.defer import Deferred import scrapy from scrapy.crawler import CrawlerProcess from scrapy.utils.defer import maybe_deferred_to_future class SleepingSpider(scrapy.Spider): name = "sleeping" start_urls = ["data:,;"] async def parse(self, response): from twisted.internet import reactor d = Deferred() reactor.callLater(int(sys.argv[1]), d.callback, None) await maybe_deferred_to_future(d) process = CrawlerProcess(settings={}) process.crawl(SleepingSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/twisted_reactor_custom_settings_conflict.py
tests/CrawlerProcess/twisted_reactor_custom_settings_conflict.py
from twisted.python import log import scrapy from scrapy.crawler import CrawlerProcess class SelectReactorSpider(scrapy.Spider): name = "select_reactor" custom_settings = { "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor", } class AsyncioReactorSpider(scrapy.Spider): name = "asyncio_reactor" custom_settings = { "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", } process = CrawlerProcess() d1 = process.crawl(SelectReactorSpider) d1.addErrback(log.err) d2 = process.crawl(AsyncioReactorSpider) d2.addErrback(log.err) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
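A sketch of per-spider reactor pinning as used above (the spider name is illustrative): each spider may pin TWISTED_REACTOR via custom_settings, and the errbacks in the script capture the failure raised when two spiders in one CrawlerProcess pin different reactors.

import scrapy

class PinnedReactorSpider(scrapy.Spider):  # hypothetical spider
    name = "pinned_reactor"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }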
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/args_settings.py
tests/CrawlerProcess/args_settings.py
from typing import Any import scrapy from scrapy.crawler import Crawler, CrawlerProcess class NoRequestsSpider(scrapy.Spider): name = "no_request" @classmethod def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any): spider = super().from_crawler(crawler, *args, **kwargs) spider.settings.set("FOO", kwargs.get("foo")) return spider async def start(self): self.logger.info(f"The value of FOO is {self.settings.getint('FOO')}") return yield process = CrawlerProcess(settings={}) process.crawl(NoRequestsSpider, foo=42) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/simple.py
tests/CrawlerProcess/simple.py
import scrapy from scrapy.crawler import CrawlerProcess class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess(settings={}) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/twisted_reactor_custom_settings_same.py
tests/CrawlerProcess/twisted_reactor_custom_settings_same.py
import scrapy from scrapy.crawler import CrawlerProcess class AsyncioReactorSpider1(scrapy.Spider): name = "asyncio_reactor1" custom_settings = { "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", } class AsyncioReactorSpider2(scrapy.Spider): name = "asyncio_reactor2" custom_settings = { "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", } process = CrawlerProcess() process.crawl(AsyncioReactorSpider1) process.crawl(AsyncioReactorSpider2) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/twisted_reactor_custom_settings.py
tests/CrawlerProcess/twisted_reactor_custom_settings.py
import scrapy from scrapy.crawler import CrawlerProcess class AsyncioReactorSpider(scrapy.Spider): name = "asyncio_reactor" custom_settings = { "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", } process = CrawlerProcess() process.crawl(AsyncioReactorSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/asyncio_enabled_reactor_different_loop.py
tests/CrawlerProcess/asyncio_enabled_reactor_different_loop.py
import asyncio import sys from twisted.internet import asyncioreactor from twisted.python import log import scrapy from scrapy.crawler import CrawlerProcess if sys.platform == "win32": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncioreactor.install() class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "ASYNCIO_EVENT_LOOP": "uvloop.Loop", } ) d = process.crawl(NoRequestsSpider) d.addErrback(log.err) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/reactor_select_subclass_twisted_reactor_select.py
tests/CrawlerProcess/reactor_select_subclass_twisted_reactor_select.py
from twisted.internet.main import installReactor from twisted.internet.selectreactor import SelectReactor from twisted.python import log import scrapy from scrapy.crawler import CrawlerProcess class SelectReactorSubclass(SelectReactor): pass reactor = SelectReactorSubclass() installReactor(reactor) class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor", } ) d = process.crawl(NoRequestsSpider) d.addErrback(log.err) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/reactor_default_twisted_reactor_select.py
tests/CrawlerProcess/reactor_default_twisted_reactor_select.py
from twisted.internet import reactor # noqa: F401,TID253 from twisted.python import log import scrapy from scrapy.crawler import CrawlerProcess class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor", } ) d = process.crawl(NoRequestsSpider) d.addErrback(log.err) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/default_name_resolver.py
tests/CrawlerProcess/default_name_resolver.py
import scrapy from scrapy.crawler import CrawlerProcess class IPv6Spider(scrapy.Spider): """ Raises a twisted.internet.error.DNSLookupError: the default name resolver does not handle IPv6 addresses. """ name = "ipv6_spider" start_urls = ["http://[::1]"] if __name__ == "__main__": process = CrawlerProcess(settings={"RETRY_ENABLED": False}) process.crawl(IPv6Spider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/caching_hostname_resolver_ipv6.py
tests/CrawlerProcess/caching_hostname_resolver_ipv6.py
import scrapy from scrapy.crawler import CrawlerProcess class CachingHostnameResolverSpider(scrapy.Spider): """ Finishes without a twisted.internet.error.DNSLookupError exception """ name = "caching_hostname_resolver_spider" start_urls = ["http://[::1]"] if __name__ == "__main__": process = CrawlerProcess( settings={ "RETRY_ENABLED": False, "DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver", } ) process.crawl(CachingHostnameResolverSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/reactor_select.py
tests/CrawlerProcess/reactor_select.py
from twisted.internet import selectreactor from twisted.python import log import scrapy from scrapy.crawler import CrawlerProcess selectreactor.install() class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess(settings={}) d = process.crawl(NoRequestsSpider) d.addErrback(log.err) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/asyncio_custom_loop.py
tests/CrawlerProcess/asyncio_custom_loop.py
import scrapy from scrapy.crawler import CrawlerProcess class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "ASYNCIO_EVENT_LOOP": "uvloop.Loop", } ) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerProcess/asyncio_enabled_reactor_same_loop.py
tests/CrawlerProcess/asyncio_enabled_reactor_same_loop.py
import asyncio import sys from twisted.internet import asyncioreactor from uvloop import Loop import scrapy from scrapy.crawler import CrawlerProcess if sys.platform == "win32": asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) asyncio.set_event_loop(Loop()) asyncioreactor.install() class NoRequestsSpider(scrapy.Spider): name = "no_request" async def start(self): return yield process = CrawlerProcess( settings={ "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "ASYNCIO_EVENT_LOOP": "uvloop.Loop", } ) process.crawl(NoRequestsSpider) process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/__init__.py
tests/test_spiderloader/__init__.py
import contextlib import shutil import sys import warnings from pathlib import Path from unittest import mock import pytest from zope.interface.verify import verifyObject # ugly hack to avoid cyclic imports of scrapy.spiders when running this test # alone import scrapy from scrapy.crawler import CrawlerRunner from scrapy.http import Request from scrapy.interfaces import ISpiderLoader from scrapy.settings import Settings from scrapy.spiderloader import DummySpiderLoader, SpiderLoader, get_spider_loader module_dir = Path(__file__).resolve().parent def _copytree(source: Path, target: Path): with contextlib.suppress(shutil.Error): shutil.copytree(source, target) @pytest.fixture def spider_loader_env(tmp_path): orig_spiders_dir = module_dir / "test_spiders" spiders_dir = tmp_path / "test_spiders_xxx" _copytree(orig_spiders_dir, spiders_dir) sys.path.append(str(tmp_path)) settings = Settings({"SPIDER_MODULES": ["test_spiders_xxx"]}) yield settings, spiders_dir sys.modules.pop("test_spiders_xxx", None) sys.path.remove(str(tmp_path)) @pytest.fixture def spider_loader(spider_loader_env): settings, _ = spider_loader_env return SpiderLoader.from_settings(settings) class TestSpiderLoader: def test_interface(self, spider_loader): verifyObject(ISpiderLoader, spider_loader) def test_list(self, spider_loader): assert set(spider_loader.list()) == { "spider1", "spider2", "spider3", "spider4", } def test_load(self, spider_loader): spider1 = spider_loader.load("spider1") assert spider1.__name__ == "Spider1" def test_find_by_request(self, spider_loader): assert spider_loader.find_by_request(Request("http://scrapy1.org/test")) == [ "spider1" ] assert spider_loader.find_by_request(Request("http://scrapy2.org/test")) == [ "spider2" ] assert set( spider_loader.find_by_request(Request("http://scrapy3.org/test")) ) == {"spider1", "spider2"} assert spider_loader.find_by_request(Request("http://scrapy999.org/test")) == [] assert spider_loader.find_by_request(Request("http://spider3.com")) == [] assert spider_loader.find_by_request( Request("http://spider3.com/onlythis") ) == ["spider3"] def test_load_spider_module(self): module = "tests.test_spiderloader.test_spiders.spider1" settings = Settings({"SPIDER_MODULES": [module]}) spider_loader = SpiderLoader.from_settings(settings) assert len(spider_loader._spiders) == 1 def test_load_spider_module_multiple(self): prefix = "tests.test_spiderloader.test_spiders." 
module = ",".join(prefix + s for s in ("spider1", "spider2")) settings = Settings({"SPIDER_MODULES": module}) spider_loader = SpiderLoader.from_settings(settings) assert len(spider_loader._spiders) == 2 def test_load_base_spider(self): module = "tests.test_spiderloader.test_spiders.spider0" settings = Settings({"SPIDER_MODULES": [module]}) spider_loader = SpiderLoader.from_settings(settings) assert len(spider_loader._spiders) == 0 def test_load_spider_module_from_addons(self): module = "tests.test_spiderloader.spiders_from_addons.spider0" class SpiderModuleAddon: @classmethod def update_pre_crawler_settings(cls, settings): settings.set( "SPIDER_MODULES", [module], "project", ) runner = CrawlerRunner({"ADDONS": {SpiderModuleAddon: 1}}) crawler = runner.create_crawler("spider_from_addon") assert issubclass(crawler.spidercls, scrapy.Spider) assert crawler.spidercls.name == "spider_from_addon" assert len(crawler.settings["SPIDER_MODULES"]) == 1 def test_crawler_runner_loading(self): module = "tests.test_spiderloader.test_spiders.spider1" runner = CrawlerRunner( { "SPIDER_MODULES": [module], } ) with pytest.raises(KeyError, match="Spider not found"): runner.create_crawler("spider2") crawler = runner.create_crawler("spider1") assert issubclass(crawler.spidercls, scrapy.Spider) assert crawler.spidercls.name == "spider1" def test_bad_spider_modules_exception(self): module = "tests.test_spiderloader.test_spiders.doesnotexist" settings = Settings({"SPIDER_MODULES": [module]}) with pytest.raises(ImportError): SpiderLoader.from_settings(settings) def test_bad_spider_modules_warning(self): with warnings.catch_warnings(record=True) as w: module = "tests.test_spiderloader.test_spiders.doesnotexist" settings = Settings( {"SPIDER_MODULES": [module], "SPIDER_LOADER_WARN_ONLY": True} ) spider_loader = SpiderLoader.from_settings(settings) if str(w[0].message).startswith("_SixMetaPathImporter"): # needed on 3.10 because of https://github.com/benjaminp/six/issues/349, # at least until all six versions we can import (including botocore.vendored.six) # are updated to 1.16.0+ w.pop(0) assert "Could not load spiders from module" in str(w[0].message) spiders = spider_loader.list() assert not spiders def test_syntax_error_exception(self): module = "tests.test_spiderloader.test_spiders.spider1" with mock.patch.object(SpiderLoader, "_load_spiders") as m: m.side_effect = SyntaxError settings = Settings({"SPIDER_MODULES": [module]}) with pytest.raises(SyntaxError): SpiderLoader.from_settings(settings) def test_syntax_error_warning(self): with ( warnings.catch_warnings(record=True) as w, mock.patch.object(SpiderLoader, "_load_spiders") as m, ): m.side_effect = SyntaxError module = "tests.test_spiderloader.test_spiders.spider1" settings = Settings( {"SPIDER_MODULES": [module], "SPIDER_LOADER_WARN_ONLY": True} ) spider_loader = SpiderLoader.from_settings(settings) if str(w[0].message).startswith("_SixMetaPathImporter"): # needed on 3.10 because of https://github.com/benjaminp/six/issues/349, # at least until all six versions we can import (including botocore.vendored.six) # are updated to 1.16.0+ w.pop(0) assert "Could not load spiders from module" in str(w[0].message) spiders = spider_loader.list() assert not spiders class TestDuplicateSpiderNameLoader: def test_dupename_warning(self, spider_loader_env): settings, spiders_dir = spider_loader_env # copy 1 spider module so as to have duplicate spider name shutil.copyfile(spiders_dir / "spider3.py", spiders_dir / "spider3dupe.py") with warnings.catch_warnings(record=True) as 
w: spider_loader = SpiderLoader.from_settings(settings) assert len(w) == 1 msg = str(w[0].message) assert "several spiders with the same name" in msg assert "'spider3'" in msg assert msg.count("'spider3'") == 2 assert "'spider1'" not in msg assert "'spider2'" not in msg assert "'spider4'" not in msg spiders = set(spider_loader.list()) assert spiders == {"spider1", "spider2", "spider3", "spider4"} def test_multiple_dupename_warning(self, spider_loader_env): settings, spiders_dir = spider_loader_env # copy 2 spider modules so as to have duplicate spider name # This should issue 2 warning, 1 for each duplicate spider name shutil.copyfile(spiders_dir / "spider1.py", spiders_dir / "spider1dupe.py") shutil.copyfile(spiders_dir / "spider2.py", spiders_dir / "spider2dupe.py") with warnings.catch_warnings(record=True) as w: spider_loader = SpiderLoader.from_settings(settings) assert len(w) == 1 msg = str(w[0].message) assert "several spiders with the same name" in msg assert "'spider1'" in msg assert msg.count("'spider1'") == 2 assert "'spider2'" in msg assert msg.count("'spider2'") == 2 assert "'spider3'" not in msg assert "'spider4'" not in msg spiders = set(spider_loader.list()) assert spiders == {"spider1", "spider2", "spider3", "spider4"} class CustomSpiderLoader(SpiderLoader): pass def test_custom_spider_loader(): settings = Settings( { "SPIDER_LOADER_CLASS": CustomSpiderLoader, } ) spider_loader = get_spider_loader(settings) assert isinstance(spider_loader, CustomSpiderLoader) def test_dummy_spider_loader(spider_loader_env): settings, _ = spider_loader_env spider_loader = DummySpiderLoader.from_settings(settings) assert not spider_loader.list() with pytest.raises(KeyError): spider_loader.load("spider1")
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
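A sketch of the SPIDER_LOADER_CLASS hook covered by test_custom_spider_loader above; the subclass name is illustrative:

from scrapy.settings import Settings
from scrapy.spiderloader import SpiderLoader, get_spider_loader

class VerboseSpiderLoader(SpiderLoader):  # hypothetical subclass
    pass

# Passing the class object directly, as the test does.
settings = Settings({"SPIDER_LOADER_CLASS": VerboseSpiderLoader})
loader = get_spider_loader(settings)
assert isinstance(loader, VerboseSpiderLoader)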
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/spider2.py
tests/test_spiderloader/test_spiders/spider2.py
from scrapy.spiders import Spider class Spider2(Spider): name = "spider2" allowed_domains = ["scrapy2.org", "scrapy3.org"]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/spider1.py
tests/test_spiderloader/test_spiders/spider1.py
from scrapy.spiders import Spider class Spider1(Spider): name = "spider1" allowed_domains = ["scrapy1.org", "scrapy3.org"]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/__init__.py
tests/test_spiderloader/test_spiders/__init__.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/spider3.py
tests/test_spiderloader/test_spiders/spider3.py
from scrapy.spiders import Spider class Spider3(Spider): name = "spider3" allowed_domains = ["spider3.com"] @classmethod def handles_request(cls, request): return request.url == "http://spider3.com/onlythis"
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/spider0.py
tests/test_spiderloader/test_spiders/spider0.py
from scrapy.spiders import Spider class Spider0(Spider): allowed_domains = ["scrapy1.org", "scrapy3.org"]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/nested/spider4.py
tests/test_spiderloader/test_spiders/nested/spider4.py
from scrapy.spiders import Spider class Spider4(Spider): name = "spider4" allowed_domains = ["spider4.com"] @classmethod def handles_request(cls, request): return request.url == "http://spider4.com/onlythis"
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/test_spiders/nested/__init__.py
tests/test_spiderloader/test_spiders/nested/__init__.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/spiders_from_addons/__init__.py
tests/test_spiderloader/spiders_from_addons/__init__.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spiderloader/spiders_from_addons/spider0.py
tests/test_spiderloader/spiders_from_addons/spider0.py
from scrapy.spiders import Spider class SpiderFromAddon(Spider): name = "spider_from_addon" allowed_domains = ["scrapy1.org", "scrapy3.org"]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/mocks/dummydbm.py
tests/mocks/dummydbm.py
"""DBM-like dummy module""" from collections import defaultdict from typing import Any class DummyDB(dict): """Provide dummy DBM-like interface.""" def close(self): pass error = KeyError _DATABASES: defaultdict[Any, DummyDB] = defaultdict(DummyDB) def open(file, flag="r", mode=0o666): # noqa: A001 """Open or create a dummy database compatible. Arguments ``flag`` and ``mode`` are ignored. """ # return same instance for same file argument return _DATABASES[file]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
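How this mock gets wired in, per TestDbmStorageWithCustomDbmModule in tests/test_downloadermiddleware_httpcache.py above (a settings sketch, not part of this module):

settings = {
    "HTTPCACHE_ENABLED": True,
    "HTTPCACHE_STORAGE": "scrapy.extensions.httpcache.DbmCacheStorage",
    "HTTPCACHE_DBM_MODULE": "tests.mocks.dummydbm",  # use this dummy instead of stdlib dbm
}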
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/mocks/__init__.py
tests/mocks/__init__.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_settings/default_settings.py
tests/test_settings/default_settings.py
TEST_DEFAULT = "defvalue" TEST_DICT = {"key": "val"}
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_settings/__init__.py
tests/test_settings/__init__.py
# pylint: disable=unsubscriptable-object,unsupported-membership-test,use-implicit-booleaness-not-comparison
# (too many false positives)

import warnings
from unittest import mock

import pytest

from scrapy.core.downloader.handlers.file import FileDownloadHandler
from scrapy.settings import (
    SETTINGS_PRIORITIES,
    BaseSettings,
    Settings,
    SettingsAttribute,
    get_settings_priority,
)
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.test import get_crawler

from . import default_settings


class TestSettingsGlobalFuncs:
    def test_get_settings_priority(self):
        for prio_str, prio_num in SETTINGS_PRIORITIES.items():
            assert get_settings_priority(prio_str) == prio_num
        assert get_settings_priority(99) == 99


class TestSettingsAttribute:
    def setup_method(self):
        self.attribute = SettingsAttribute("value", 10)

    def test_set_greater_priority(self):
        self.attribute.set("value2", 20)
        assert self.attribute.value == "value2"
        assert self.attribute.priority == 20

    def test_set_equal_priority(self):
        self.attribute.set("value2", 10)
        assert self.attribute.value == "value2"
        assert self.attribute.priority == 10

    def test_set_less_priority(self):
        self.attribute.set("value2", 0)
        assert self.attribute.value == "value"
        assert self.attribute.priority == 10

    def test_overwrite_basesettings(self):
        original_dict = {"one": 10, "two": 20}
        original_settings = BaseSettings(original_dict, 0)
        attribute = SettingsAttribute(original_settings, 0)

        new_dict = {"three": 11, "four": 21}
        attribute.set(new_dict, 10)
        assert isinstance(attribute.value, BaseSettings)
        assert set(attribute.value) == set(new_dict)
        assert set(original_settings) == set(original_dict)

        new_settings = BaseSettings({"five": 12}, 0)
        attribute.set(new_settings, 0)  # Insufficient priority
        assert set(attribute.value) == set(new_dict)
        attribute.set(new_settings, 10)
        assert set(attribute.value) == set(new_settings)

    def test_repr(self):
        assert repr(self.attribute) == "<SettingsAttribute value='value' priority=10>"


class TestBaseSettings:
    def setup_method(self):
        self.settings = BaseSettings()

    def test_setdefault_not_existing_value(self):
        settings = BaseSettings()
        value = settings.setdefault("TEST_OPTION", "value")
        assert settings["TEST_OPTION"] == "value"
        assert value == "value"
        assert value is not None

    def test_setdefault_existing_value(self):
        settings = BaseSettings({"TEST_OPTION": "value"})
        value = settings.setdefault("TEST_OPTION", None)
        assert settings["TEST_OPTION"] == "value"
        assert value == "value"

    def test_set_new_attribute(self):
        self.settings.set("TEST_OPTION", "value", 0)
        assert "TEST_OPTION" in self.settings.attributes

        attr = self.settings.attributes["TEST_OPTION"]
        assert isinstance(attr, SettingsAttribute)
        assert attr.value == "value"
        assert attr.priority == 0

    def test_set_settingsattribute(self):
        myattr = SettingsAttribute(0, 30)  # Note priority 30
        self.settings.set("TEST_ATTR", myattr, 10)
        assert self.settings.get("TEST_ATTR") == 0
        assert self.settings.getpriority("TEST_ATTR") == 30

    def test_set_instance_identity_on_update(self):
        attr = SettingsAttribute("value", 0)
        self.settings.attributes = {"TEST_OPTION": attr}
        self.settings.set("TEST_OPTION", "othervalue", 10)

        assert "TEST_OPTION" in self.settings.attributes
        assert attr is self.settings.attributes["TEST_OPTION"]

    def test_set_calls_settings_attributes_methods_on_update(self):
        attr = SettingsAttribute("value", 10)
        with (
            mock.patch.object(attr, "__setattr__") as mock_setattr,
            mock.patch.object(attr, "set") as mock_set,
        ):
            self.settings.attributes = {"TEST_OPTION": attr}

            for priority in (0, 10, 20):
                self.settings.set("TEST_OPTION", "othervalue", priority)
                mock_set.assert_called_once_with("othervalue", priority)
                assert not mock_setattr.called
                mock_set.reset_mock()
                mock_setattr.reset_mock()

    def test_setitem(self):
        settings = BaseSettings()
        settings.set("key", "a", "default")
        settings["key"] = "b"
        assert settings["key"] == "b"
        assert settings.getpriority("key") == 20
        settings["key"] = "c"
        assert settings["key"] == "c"
        settings["key2"] = "x"
        assert "key2" in settings
        assert settings["key2"] == "x"
        assert settings.getpriority("key2") == 20

    def test_setdict_alias(self):
        with mock.patch.object(self.settings, "set") as mock_set:
            self.settings.setdict({"TEST_1": "value1", "TEST_2": "value2"}, 10)
            assert mock_set.call_count == 2
            calls = [
                mock.call("TEST_1", "value1", 10),
                mock.call("TEST_2", "value2", 10),
            ]
            mock_set.assert_has_calls(calls, any_order=True)

    def test_setmodule_only_load_uppercase_vars(self):
        class ModuleMock:
            UPPERCASE_VAR = "value"
            MIXEDcase_VAR = "othervalue"
            lowercase_var = "anothervalue"

        self.settings.attributes = {}
        self.settings.setmodule(ModuleMock(), 10)
        assert "UPPERCASE_VAR" in self.settings.attributes
        assert "MIXEDcase_VAR" not in self.settings.attributes
        assert "lowercase_var" not in self.settings.attributes
        assert len(self.settings.attributes) == 1

    def test_setmodule_alias(self):
        with mock.patch.object(self.settings, "set") as mock_set:
            self.settings.setmodule(default_settings, 10)
            mock_set.assert_any_call("TEST_DEFAULT", "defvalue", 10)
            mock_set.assert_any_call("TEST_DICT", {"key": "val"}, 10)

    def test_setmodule_by_path(self):
        self.settings.attributes = {}
        self.settings.setmodule(default_settings, 10)
        ctrl_attributes = self.settings.attributes.copy()

        self.settings.attributes = {}
        self.settings.setmodule("tests.test_settings.default_settings", 10)

        assert set(self.settings.attributes) == set(ctrl_attributes)

        for key in ctrl_attributes:
            attr = self.settings.attributes[key]
            ctrl_attr = ctrl_attributes[key]
            assert attr.value == ctrl_attr.value
            assert attr.priority == ctrl_attr.priority

    def test_update(self):
        settings = BaseSettings({"key_lowprio": 0}, priority=0)
        settings.set("key_highprio", 10, priority=50)
        custom_settings = BaseSettings(
            {"key_lowprio": 1, "key_highprio": 11}, priority=30
        )
        custom_settings.set("newkey_one", None, priority=50)
        custom_dict = {"key_lowprio": 2, "key_highprio": 12, "newkey_two": None}

        settings.update(custom_dict, priority=20)
        assert settings["key_lowprio"] == 2
        assert settings.getpriority("key_lowprio") == 20
        assert settings["key_highprio"] == 10
        assert "newkey_two" in settings
        assert settings.getpriority("newkey_two") == 20

        settings.update(custom_settings)
        assert settings["key_lowprio"] == 1
        assert settings.getpriority("key_lowprio") == 30
        assert settings["key_highprio"] == 10
        assert "newkey_one" in settings
        assert settings.getpriority("newkey_one") == 50

        settings.update({"key_lowprio": 3}, priority=20)
        assert settings["key_lowprio"] == 1

    @pytest.mark.xfail(
        raises=TypeError, reason="BaseSettings.update doesn't support kwargs input"
    )
    def test_update_kwargs(self):
        settings = BaseSettings({"key": 0})
        settings.update(key=1)  # pylint: disable=unexpected-keyword-arg

    @pytest.mark.xfail(
        raises=AttributeError,
        reason="BaseSettings.update doesn't support iterable input",
    )
    def test_update_iterable(self):
        settings = BaseSettings({"key": 0})
        settings.update([("key", 1)])

    def test_update_jsonstring(self):
        settings = BaseSettings({"number": 0, "dict": BaseSettings({"key": "val"})})
        settings.update('{"number": 1, "newnumber": 2}')
        assert settings["number"] == 1
        assert settings["newnumber"] == 2
        settings.set("dict", '{"key": "newval", "newkey": "newval2"}')
        assert settings["dict"]["key"] == "newval"
        assert settings["dict"]["newkey"] == "newval2"

    def test_delete(self):
        settings = BaseSettings({"key": None})
        settings.set("key_highprio", None, priority=50)
        settings.delete("key")
        settings.delete("key_highprio")
        assert "key" not in settings
        assert "key_highprio" in settings
        del settings["key_highprio"]
        assert "key_highprio" not in settings
        with pytest.raises(KeyError):
            settings.delete("notkey")
        with pytest.raises(KeyError):
            del settings["notkey"]

    def test_get(self):
        test_configuration = {
            "TEST_ENABLED1": "1",
            "TEST_ENABLED2": True,
            "TEST_ENABLED3": 1,
            "TEST_ENABLED4": "True",
            "TEST_ENABLED5": "true",
            "TEST_ENABLED_WRONG": "on",
            "TEST_DISABLED1": "0",
            "TEST_DISABLED2": False,
            "TEST_DISABLED3": 0,
            "TEST_DISABLED4": "False",
            "TEST_DISABLED5": "false",
            "TEST_DISABLED_WRONG": "off",
            "TEST_INT1": 123,
            "TEST_INT2": "123",
            "TEST_FLOAT1": 123.45,
            "TEST_FLOAT2": "123.45",
            "TEST_LIST1": ["one", "two"],
            "TEST_LIST2": "one,two",
            "TEST_LIST3": "",
            "TEST_STR": "value",
            "TEST_DICT1": {"key1": "val1", "ke2": 3},
            "TEST_DICT2": '{"key1": "val1", "ke2": 3}',
        }
        settings = self.settings
        settings.attributes = {
            key: SettingsAttribute(value, 0)
            for key, value in test_configuration.items()
        }

        assert settings.getbool("TEST_ENABLED1")
        assert settings.getbool("TEST_ENABLED2")
        assert settings.getbool("TEST_ENABLED3")
        assert settings.getbool("TEST_ENABLED4")
        assert settings.getbool("TEST_ENABLED5")
        assert not settings.getbool("TEST_ENABLEDx")
        assert settings.getbool("TEST_ENABLEDx", True)
        assert not settings.getbool("TEST_DISABLED1")
        assert not settings.getbool("TEST_DISABLED2")
        assert not settings.getbool("TEST_DISABLED3")
        assert not settings.getbool("TEST_DISABLED4")
        assert not settings.getbool("TEST_DISABLED5")
        assert settings.getint("TEST_INT1") == 123
        assert settings.getint("TEST_INT2") == 123
        assert settings.getint("TEST_INTx") == 0
        assert settings.getint("TEST_INTx", 45) == 45
        assert settings.getfloat("TEST_FLOAT1") == 123.45
        assert settings.getfloat("TEST_FLOAT2") == 123.45
        assert settings.getfloat("TEST_FLOATx") == 0.0
        assert settings.getfloat("TEST_FLOATx", 55.0) == 55.0
        assert settings.getlist("TEST_LIST1") == ["one", "two"]
        assert settings.getlist("TEST_LIST2") == ["one", "two"]
        assert settings.getlist("TEST_LIST3") == []
        assert settings.getlist("TEST_LISTx") == []
        assert settings.getlist("TEST_LISTx", ["default"]) == ["default"]
        assert settings["TEST_STR"] == "value"
        assert settings.get("TEST_STR") == "value"
        assert settings["TEST_STRx"] is None
        assert settings.get("TEST_STRx") is None
        assert settings.get("TEST_STRx", "default") == "default"
        assert settings.getdict("TEST_DICT1") == {"key1": "val1", "ke2": 3}
        assert settings.getdict("TEST_DICT2") == {"key1": "val1", "ke2": 3}
        assert settings.getdict("TEST_DICT3") == {}
        assert settings.getdict("TEST_DICT3", {"key1": 5}) == {"key1": 5}
        with pytest.raises(
            ValueError,
            match=r"dictionary update sequence element #0 has length 3; 2 is required|sequence of pairs expected",
        ):
            settings.getdict("TEST_LIST1")
        with pytest.raises(
            ValueError, match="Supported values for boolean settings are"
        ):
            settings.getbool("TEST_ENABLED_WRONG")
        with pytest.raises(
            ValueError, match="Supported values for boolean settings are"
        ):
            settings.getbool("TEST_DISABLED_WRONG")

    def test_getpriority(self):
        settings = BaseSettings({"key": "value"}, priority=99)
        assert settings.getpriority("key") == 99
        assert settings.getpriority("nonexistentkey") is None

    def test_getwithbase(self):
        s = BaseSettings(
            {
                "TEST_BASE": BaseSettings({1: 1, 2: 2}, "project"),
                "TEST": BaseSettings({1: 10, 3: 30}, "default"),
                "HASNOBASE": BaseSettings({3: 3000}, "default"),
            }
        )
        s["TEST"].set(2, 200, "cmdline")
        assert set(s.getwithbase("TEST")) == {1, 2, 3}
        assert set(s.getwithbase("HASNOBASE")) == set(s["HASNOBASE"])
        assert s.getwithbase("NONEXISTENT") == {}

    def test_maxpriority(self):
        # Empty settings should return 'default'
        assert self.settings.maxpriority() == 0
        self.settings.set("A", 0, 10)
        self.settings.set("B", 0, 30)
        assert self.settings.maxpriority() == 30

    def test_copy(self):
        values = {
            "TEST_BOOL": True,
            "TEST_LIST": ["one", "two"],
            "TEST_LIST_OF_LISTS": [
                ["first_one", "first_two"],
                ["second_one", "second_two"],
            ],
        }
        self.settings.setdict(values)
        copy = self.settings.copy()
        self.settings.set("TEST_BOOL", False)
        assert copy.get("TEST_BOOL")

        test_list = self.settings.get("TEST_LIST")
        test_list.append("three")
        assert copy.get("TEST_LIST") == ["one", "two"]

        test_list_of_lists = self.settings.get("TEST_LIST_OF_LISTS")
        test_list_of_lists[0].append("first_three")
        assert copy.get("TEST_LIST_OF_LISTS")[0] == ["first_one", "first_two"]

    def test_copy_to_dict(self):
        s = BaseSettings(
            {
                "TEST_STRING": "a string",
                "TEST_LIST": [1, 2],
                "TEST_BOOLEAN": False,
                "TEST_BASE": BaseSettings({1: 1, 2: 2}, "project"),
                "TEST": BaseSettings({1: 10, 3: 30}, "default"),
                "HASNOBASE": BaseSettings({3: 3000}, "default"),
            }
        )
        assert s.copy_to_dict() == {
            "HASNOBASE": {3: 3000},
            "TEST": {1: 10, 3: 30},
            "TEST_BASE": {1: 1, 2: 2},
            "TEST_LIST": [1, 2],
            "TEST_BOOLEAN": False,
            "TEST_STRING": "a string",
        }

    def test_freeze(self):
        self.settings.freeze()
        with pytest.raises(
            TypeError, match="Trying to modify an immutable Settings object"
        ):
            self.settings.set("TEST_BOOL", False)

    def test_frozencopy(self):
        frozencopy = self.settings.frozencopy()
        assert frozencopy.frozen
        assert frozencopy is not self.settings


class TestSettings:
    def setup_method(self):
        self.settings = Settings()

    @mock.patch.dict("scrapy.settings.SETTINGS_PRIORITIES", {"default": 10})
    @mock.patch("scrapy.settings.default_settings", default_settings)
    def test_initial_defaults(self):
        settings = Settings()
        assert len(settings.attributes) == 2
        assert "TEST_DEFAULT" in settings.attributes

        attr = settings.attributes["TEST_DEFAULT"]
        assert isinstance(attr, SettingsAttribute)
        assert attr.value == "defvalue"
        assert attr.priority == 10

    @mock.patch.dict("scrapy.settings.SETTINGS_PRIORITIES", {})
    @mock.patch("scrapy.settings.default_settings", {})
    def test_initial_values(self):
        settings = Settings({"TEST_OPTION": "value"}, 10)
        assert len(settings.attributes) == 1
        assert "TEST_OPTION" in settings.attributes

        attr = settings.attributes["TEST_OPTION"]
        assert isinstance(attr, SettingsAttribute)
        assert attr.value == "value"
        assert attr.priority == 10

    @mock.patch("scrapy.settings.default_settings", default_settings)
    def test_autopromote_dicts(self):
        settings = Settings()
        mydict = settings.get("TEST_DICT")
        assert isinstance(mydict, BaseSettings)
        assert "key" in mydict
        assert mydict["key"] == "val"
        assert mydict.getpriority("key") == 0

    @mock.patch("scrapy.settings.default_settings", default_settings)
    def test_getdict_autodegrade_basesettings(self):
        settings = Settings()
        mydict = settings.getdict("TEST_DICT")
        assert isinstance(mydict, dict)
        assert len(mydict) == 1
        assert "key" in mydict
        assert mydict["key"] == "val"

    def test_passing_objects_as_values(self):
        class TestPipeline:
            def process_item(self, i):
                return i

        settings = Settings(
            {
                "ITEM_PIPELINES": {
                    TestPipeline: 800,
                },
                "DOWNLOAD_HANDLERS": {
                    "ftp": FileDownloadHandler,
                },
            }
        )

        assert "ITEM_PIPELINES" in settings.attributes

        mypipeline, priority = settings.getdict("ITEM_PIPELINES").popitem()
        assert priority == 800
        assert mypipeline == TestPipeline
        assert isinstance(mypipeline(), TestPipeline)
        assert mypipeline().process_item("item") == "item"

        myhandler = settings.getdict("DOWNLOAD_HANDLERS").pop("ftp")
        assert myhandler == FileDownloadHandler
        myhandler_instance = build_from_crawler(myhandler, get_crawler())
        assert isinstance(myhandler_instance, FileDownloadHandler)
        assert hasattr(myhandler_instance, "download_request")

    def test_pop_item_with_default_value(self):
        settings = Settings()

        with pytest.raises(KeyError):
            settings.pop("DUMMY_CONFIG")

        dummy_config_value = settings.pop("DUMMY_CONFIG", "dummy_value")
        assert dummy_config_value == "dummy_value"

    def test_pop_item_with_immutable_settings(self):
        settings = Settings(
            {"DUMMY_CONFIG": "dummy_value", "OTHER_DUMMY_CONFIG": "other_dummy_value"}
        )

        assert settings.pop("DUMMY_CONFIG") == "dummy_value"

        settings.freeze()

        with pytest.raises(
            TypeError, match="Trying to modify an immutable Settings object"
        ):
            settings.pop("OTHER_DUMMY_CONFIG")


@pytest.mark.parametrize(
    ("before", "name", "item", "after"),
    [
        ({}, "FOO", "BAR", {"FOO": ["BAR"]}),
        ({"FOO": []}, "FOO", "BAR", {"FOO": ["BAR"]}),
        ({"FOO": ["BAR"]}, "FOO", "BAZ", {"FOO": ["BAR", "BAZ"]}),
        ({"FOO": ["BAR"]}, "FOO", "BAR", {"FOO": ["BAR"]}),
        ({"FOO": ""}, "FOO", "BAR", {"FOO": ["BAR"]}),
        ({"FOO": "BAR"}, "FOO", "BAR", {"FOO": "BAR"}),
        ({"FOO": "BAR"}, "FOO", "BAZ", {"FOO": ["BAR", "BAZ"]}),
        ({"FOO": "BAR,BAZ"}, "FOO", "BAZ", {"FOO": "BAR,BAZ"}),
        ({"FOO": "BAR,BAZ"}, "FOO", "QUX", {"FOO": ["BAR", "BAZ", "QUX"]}),
    ],
)
def test_add_to_list(before, name, item, after):
    settings = BaseSettings(before, priority=0)
    settings.add_to_list(name, item)
    expected_priority = settings.getpriority(name) or 0
    expected_settings = BaseSettings(after, priority=expected_priority)
    assert settings == expected_settings, (
        f"{settings[name]=} != {expected_settings[name]=}"
    )
    assert settings.getpriority(name) == expected_settings.getpriority(name)


@pytest.mark.parametrize(
    ("before", "name", "item", "after"),
    [
        ({}, "FOO", "BAR", ValueError),
        ({"FOO": ["BAR"]}, "FOO", "BAR", {"FOO": []}),
        ({"FOO": ["BAR"]}, "FOO", "BAZ", ValueError),
        ({"FOO": ["BAR", "BAZ"]}, "FOO", "BAR", {"FOO": ["BAZ"]}),
        ({"FOO": ""}, "FOO", "BAR", ValueError),
        ({"FOO": "[]"}, "FOO", "BAR", ValueError),
        ({"FOO": "BAR"}, "FOO", "BAR", {"FOO": []}),
        ({"FOO": "BAR"}, "FOO", "BAZ", ValueError),
        ({"FOO": "BAR,BAZ"}, "FOO", "BAR", {"FOO": ["BAZ"]}),
    ],
)
def test_remove_from_list(before, name, item, after):
    settings = BaseSettings(before, priority=0)

    if isinstance(after, type) and issubclass(after, Exception):
        with pytest.raises(after):
            settings.remove_from_list(name, item)
        return

    settings.remove_from_list(name, item)
    expected_priority = settings.getpriority(name) or 0
    expected_settings = BaseSettings(after, priority=expected_priority)
    assert settings == expected_settings, (
        f"{settings[name]=} != {expected_settings[name]=}"
    )
    assert settings.getpriority(name) == expected_settings.getpriority(name)


def test_deprecated_concurrent_requests_per_ip_setting():
    with warnings.catch_warnings(record=True) as warns:
        settings = Settings({"CONCURRENT_REQUESTS_PER_IP": 1})
        settings.get("CONCURRENT_REQUESTS_PER_IP")
        assert (
            str(warns[0].message)
            == "The CONCURRENT_REQUESTS_PER_IP setting is deprecated, use CONCURRENT_REQUESTS_PER_DOMAIN instead."
        )


class Component1:
    pass


Component1Alias = Component1


class Component1Subclass(Component1):
    pass


Component1SubclassAlias = Component1Subclass


class Component2:
    pass


class Component3:
    pass


class Component4:
    pass


@pytest.mark.parametrize(
    ("before", "name", "old_cls", "new_cls", "priority", "after"),
    [
        ({}, "FOO", Component1, Component2, None, KeyError),
        ({"FOO": {Component1: 1}}, "FOO", Component1, Component2, None, {"FOO": {Component2: 1}}),
        ({"FOO": {Component1: 1}}, "FOO", Component1, Component2, 2, {"FOO": {Component2: 2}}),
        ({"FOO": {"tests.test_settings.Component1": 1}}, "FOO", Component1, Component2, None, {"FOO": {Component2: 1}}),
        ({"FOO": {Component1Alias: 1}}, "FOO", Component1, Component2, None, {"FOO": {Component2: 1}}),
        ({"FOO": {Component1Alias: 1}}, "FOO", Component1, Component2, 2, {"FOO": {Component2: 2}}),
        ({"FOO": {"tests.test_settings.Component1Alias": 1}}, "FOO", Component1, Component2, None, {"FOO": {Component2: 1}}),
        ({"FOO": {"tests.test_settings.Component1Alias": 1}}, "FOO", Component1, Component2, 2, {"FOO": {Component2: 2}}),
        (
            {
                "FOO": {
                    "tests.test_settings.Component1": 1,
                    "tests.test_settings.Component1Alias": 2,
                }
            },
            "FOO",
            Component1,
            Component2,
            None,
            {"FOO": {Component2: 2}},
        ),
        (
            {
                "FOO": {
                    "tests.test_settings.Component1": 1,
                    "tests.test_settings.Component1Alias": 2,
                }
            },
            "FOO",
            Component1,
            Component2,
            3,
            {"FOO": {Component2: 3}},
        ),
        ({"FOO": '{"tests.test_settings.Component1": 1}'}, "FOO", Component1, Component2, None, {"FOO": {Component2: 1}}),
        ({"FOO": '{"tests.test_settings.Component1": 1}'}, "FOO", Component1, Component2, 2, {"FOO": {Component2: 2}}),
        ({"FOO": '{"tests.test_settings.Component1Alias": 1}'}, "FOO", Component1, Component2, None, {"FOO": {Component2: 1}}),
        ({"FOO": '{"tests.test_settings.Component1Alias": 1}'}, "FOO", Component1, Component2, 2, {"FOO": {Component2: 2}}),
        (
            {
                "FOO": '{"tests.test_settings.Component1": 1, "tests.test_settings.Component1Alias": 2}'
            },
            "FOO",
            Component1,
            Component2,
            None,
            {"FOO": {Component2: 2}},
        ),
        (
            {
                "FOO": '{"tests.test_settings.Component1": 1, "tests.test_settings.Component1Alias": 2}'
            },
            "FOO",
            Component1,
            Component2,
            3,
            {"FOO": {Component2: 3}},
        ),
        # If old_cls has None as value, raise KeyError.
        ({"FOO": {Component1: None}}, "FOO", Component1, Component2, None, KeyError),
        ({"FOO": '{"tests.test_settings.Component1": null}'}, "FOO", Component1, Component2, None, KeyError),
        ({"FOO": {Component1: None, "tests.test_settings.Component1": None}}, "FOO", Component1, Component2, None, KeyError),
        ({"FOO": {Component1: 1, "tests.test_settings.Component1": None}}, "FOO", Component1, Component2, None, KeyError),
        ({"FOO": {Component1: None, "tests.test_settings.Component1": 1}}, "FOO", Component1, Component2, None, KeyError),
        # Unrelated components are kept as is, as expected.
        (
            {
                "FOO": {
                    Component1: 1,
                    "tests.test_settings.Component2": 2,
                    Component3: 3,
                }
            },
            "FOO",
            Component3,
            Component4,
            None,
            {
                "FOO": {
                    Component1: 1,
                    "tests.test_settings.Component2": 2,
                    Component4: 3,
                }
            },
        ),
    ],
)
def test_replace_in_component_priority_dict(
    before, name, old_cls, new_cls, priority, after
):
    settings = BaseSettings(before, priority=0)
    if isinstance(after, type) and issubclass(after, Exception):
        with pytest.raises(after):
            settings.replace_in_component_priority_dict(
                name, old_cls, new_cls, priority
            )
        return
    expected_priority = settings.getpriority(name) or 0
    settings.replace_in_component_priority_dict(name, old_cls, new_cls, priority)
    expected_settings = BaseSettings(after, priority=expected_priority)
    assert settings == expected_settings
    assert settings.getpriority(name) == expected_settings.getpriority(name)


@pytest.mark.parametrize(
    ("before", "name", "cls", "priority", "after"),
    [
        # Set
        ({}, "FOO", Component1, None, {"FOO": {Component1: None}}),
        ({}, "FOO", Component1, 0, {"FOO": {Component1: 0}}),
        ({}, "FOO", Component1, 1, {"FOO": {Component1: 1}}),
        # Add
        (
            {"FOO": {Component1: 0}},
            "FOO",
            Component2,
            None,
            {"FOO": {Component1: 0, Component2: None}},
        ),
        (
            {"FOO": {Component1: 0}},
            "FOO",
            Component2,
            0,
            {"FOO": {Component1: 0, Component2: 0}},
        ),
        (
            {"FOO": {Component1: 0}},
            "FOO",
            Component2,
            1,
            {"FOO": {Component1: 0, Component2: 1}},
        ),
        # Replace
        (
            {
                "FOO": {
                    Component1: None,
                    "tests.test_settings.Component1": 0,
                    "tests.test_settings.Component1Alias": 1,
                    Component1Subclass: None,
                    "tests.test_settings.Component1Subclass": 0,
                    "tests.test_settings.Component1SubclassAlias": 1,
                }
            },
            "FOO",
            Component1,
            None,
            {
                "FOO": {
                    Component1: None,
                    Component1Subclass: None,
                    "tests.test_settings.Component1Subclass": 0,
                    "tests.test_settings.Component1SubclassAlias": 1,
                }
            },
        ),
        (
            {
                "FOO": {
                    Component1: 0,
                    "tests.test_settings.Component1": 1,
                    "tests.test_settings.Component1Alias": None,
                    Component1Subclass: 0,
                    "tests.test_settings.Component1Subclass": 1,
                    "tests.test_settings.Component1SubclassAlias": None,
                }
            },
            "FOO",
            Component1,
            0,
            {
                "FOO": {
                    Component1: 0,
                    Component1Subclass: 0,
                    "tests.test_settings.Component1Subclass": 1,
                    "tests.test_settings.Component1SubclassAlias": None,
                }
            },
        ),
        (
            {
                "FOO": {
                    Component1: 1,
                    "tests.test_settings.Component1": None,
                    "tests.test_settings.Component1Alias": 0,
                    Component1Subclass: 1,
                    "tests.test_settings.Component1Subclass": None,
                    "tests.test_settings.Component1SubclassAlias": 0,
                }
            },
            "FOO",
            Component1,
            1,
            {
                "FOO": {
                    Component1: 1,
                    Component1Subclass: 1,
                    "tests.test_settings.Component1Subclass": None,
                    "tests.test_settings.Component1SubclassAlias": 0,
                }
            },
        ),
        # String-based setting values
        (
            {"FOO": '{"tests.test_settings.Component1": 0}'},
            "FOO",
            Component2,
            None,
            {"FOO": {"tests.test_settings.Component1": 0, Component2: None}},
        ),
        (
            {
                "FOO": """{
                "tests.test_settings.Component1": 0,
                "tests.test_settings.Component1Alias": 1,
                "tests.test_settings.Component1Subclass": 0,
                "tests.test_settings.Component1SubclassAlias": 1
            }"""
            },
            "FOO",
            Component1,
            None,
            {
                "FOO": {
                    Component1: None,
                    "tests.test_settings.Component1Subclass": 0,
                    "tests.test_settings.Component1SubclassAlias": 1,
                }
            },
        ),
    ],
)
def test_set_in_component_priority_dict(before, name, cls, priority, after):
    settings = BaseSettings(before, priority=0)
    expected_priority = settings.getpriority(name) or 0
    settings.set_in_component_priority_dict(name, cls, priority)
    expected_settings = BaseSettings(after, priority=expected_priority)
    assert settings == expected_settings
    assert settings.getpriority(name) == expected_settings.getpriority(name), (
        f"{settings.getpriority(name)=} != {expected_settings.getpriority(name)=}"
    )


@pytest.mark.parametrize(
    ("before", "name", "cls", "priority", "after"),
    [
        # Set
        ({}, "FOO", Component1, None, {"FOO": {Component1: None}}),
        ({}, "FOO", Component1, 0, {"FOO": {Component1: 0}}),
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
true
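The tests above revolve around one rule: a value stored in BaseSettings is only replaced when the incoming priority is greater than or equal to the stored one. A self-contained sketch of that behavior (an illustration, not taken from the test file):

from scrapy.settings import BaseSettings

settings = BaseSettings({"KEY": "a"}, priority=10)

settings.set("KEY", "b", priority=0)  # lower priority: existing value is kept
assert settings["KEY"] == "a"

settings.set("KEY", "c", priority=20)  # higher priority: value is replaced
assert settings["KEY"] == "c"
assert settings.getpriority("KEY") == 20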
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerRunner/custom_loop_different.py
tests/AsyncCrawlerRunner/custom_loop_different.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerRunner
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }

    async def start(self):
        return
        yield


@deferred_f_from_coro_f
async def main(reactor):
    configure_logging()
    runner = AsyncCrawlerRunner()
    await runner.crawl(NoRequestsSpider)


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerRunner/multi_parallel.py
tests/AsyncCrawlerRunner/multi_parallel.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerRunner
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


@deferred_f_from_coro_f
async def main(reactor):
    configure_logging()
    runner = AsyncCrawlerRunner()
    runner.crawl(NoRequestsSpider)
    runner.crawl(NoRequestsSpider)
    await runner.join()


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerRunner/multi_seq.py
tests/AsyncCrawlerRunner/multi_seq.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerRunner
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


@deferred_f_from_coro_f
async def main(reactor):
    configure_logging()
    runner = AsyncCrawlerRunner()
    await runner.crawl(NoRequestsSpider)
    await runner.crawl(NoRequestsSpider)


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerRunner/simple_default_reactor.py
tests/AsyncCrawlerRunner/simple_default_reactor.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerRunner
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.log import configure_logging


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


@deferred_f_from_coro_f
async def main(reactor):
    configure_logging()
    runner = AsyncCrawlerRunner()
    await runner.crawl(NoRequestsSpider)


react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerRunner/custom_loop_same.py
tests/AsyncCrawlerRunner/custom_loop_same.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerRunner
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }

    async def start(self):
        return
        yield


@deferred_f_from_coro_f
async def main(reactor):
    configure_logging()
    runner = AsyncCrawlerRunner()
    await runner.crawl(NoRequestsSpider)


install_reactor(
    "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "uvloop.Loop"
)
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerRunner/simple.py
tests/AsyncCrawlerRunner/simple.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerRunner
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


@deferred_f_from_coro_f
async def main(reactor):
    configure_logging()
    runner = AsyncCrawlerRunner()
    await runner.crawl(NoRequestsSpider)


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_cmdline/extensions.py
tests/test_cmdline/extensions.py
"""A test extension used to check the settings loading order""" class TestExtension: def __init__(self, settings): settings.set("TEST1", f"{settings['TEST1']} + started") @classmethod def from_crawler(cls, crawler): return cls(crawler.settings) class DummyExtension: pass
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_cmdline/settings.py
tests/test_cmdline/settings.py
from pathlib import Path

EXTENSIONS = {
    "tests.test_cmdline.extensions.TestExtension": 0,
}

TEST1 = "default"

FEEDS = {
    Path("items.csv"): {
        "format": "csv",
        "fields": ["price", "name"],
    },
}
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_cmdline/__init__.py
tests/test_cmdline/__init__.py
import json
import os
import pstats
import shutil
import sys
import tempfile
from io import StringIO
from pathlib import Path
from subprocess import PIPE, Popen

from scrapy.utils.test import get_testenv


class TestCmdline:
    def setup_method(self):
        self.env = get_testenv()
        tests_path = Path(__file__).parent.parent
        self.env["PYTHONPATH"] += os.pathsep + str(tests_path.parent)
        self.env["SCRAPY_SETTINGS_MODULE"] = "tests.test_cmdline.settings"

    def _execute(self, *new_args, **kwargs):
        encoding = sys.stdout.encoding or "utf-8"
        args = (sys.executable, "-m", "scrapy.cmdline", *new_args)
        proc = Popen(args, stdout=PIPE, stderr=PIPE, env=self.env, **kwargs)
        comm = proc.communicate()[0].strip()
        return comm.decode(encoding)

    def test_default_settings(self):
        assert self._execute("settings", "--get", "TEST1") == "default"

    def test_override_settings_using_set_arg(self):
        assert (
            self._execute("settings", "--get", "TEST1", "-s", "TEST1=override")
            == "override"
        )

    def test_profiling(self):
        path = Path(tempfile.mkdtemp())
        filename = path / "res.prof"
        try:
            self._execute("version", "--profile", str(filename))
            assert filename.exists()
            out = StringIO()
            stats = pstats.Stats(str(filename), stream=out)
            stats.print_stats()
            out.seek(0)
            stats = out.read()
            assert str(Path("scrapy", "commands", "version.py")) in stats
            assert "tottime" in stats
        finally:
            shutil.rmtree(path)

    def test_override_dict_settings(self):
        EXT_PATH = "tests.test_cmdline.extensions.DummyExtension"
        EXTENSIONS = {EXT_PATH: 200}
        settingsstr = self._execute(
            "settings",
            "--get",
            "EXTENSIONS",
            "-s",
            "EXTENSIONS=" + json.dumps(EXTENSIONS),
        )
        # XXX: There's gotta be a smarter way to do this...
        assert "..." not in settingsstr
        for char in ("'", "<", ">"):
            settingsstr = settingsstr.replace(char, '"')
        settingsdict = json.loads(settingsstr)
        assert set(settingsdict.keys()) == set(EXTENSIONS.keys())
        assert settingsdict[EXT_PATH] == 200

    def test_pathlib_path_as_feeds_key(self):
        assert self._execute("settings", "--get", "FEEDS") == json.dumps(
            {"items.csv": {"format": "csv", "fields": ["price", "name"]}}
        )
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
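The _execute() helper in the test above boils down to invoking the scrapy.cmdline module in a subprocess with SCRAPY_SETTINGS_MODULE pointing at the settings module shown earlier. An equivalent standalone sketch — the module path and expected output follow the test, but this is an illustration, not part of the dataset:

import os
import subprocess
import sys

# Run "scrapy settings --get TEST1" against the test settings module.
env = dict(os.environ, SCRAPY_SETTINGS_MODULE="tests.test_cmdline.settings")
result = subprocess.run(
    [sys.executable, "-m", "scrapy.cmdline", "settings", "--get", "TEST1"],
    capture_output=True,
    text=True,
    env=env,
    check=True,
)
print(result.stdout.strip())  # expected: default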
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/reactor_default.py
tests/AsyncCrawlerProcess/reactor_default.py
from twisted.internet import reactor  # noqa: F401,TID253

import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(settings={})
d = process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_custom_loop_custom_settings_different.py
tests/AsyncCrawlerProcess/asyncio_custom_loop_custom_settings_different.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"
    custom_settings = {
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": None,
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/twisted_reactor_asyncio.py
tests/AsyncCrawlerProcess/twisted_reactor_asyncio.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class AsyncioReactorSpider(scrapy.Spider):
    name = "asyncio_reactor"


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }
)
process.crawl(AsyncioReactorSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_deferred_signal.py
tests/AsyncCrawlerProcess/asyncio_deferred_signal.py
from __future__ import annotations

import asyncio
import sys

from scrapy import Spider
from scrapy.crawler import AsyncCrawlerProcess
from scrapy.utils.defer import deferred_from_coro


class UppercasePipeline:
    async def _open_spider(self, spider):
        spider.logger.info("async pipeline opened!")
        await asyncio.sleep(0.1)

    def open_spider(self, spider):
        return deferred_from_coro(self._open_spider(spider))

    def process_item(self, item):
        return {"url": item["url"].upper()}


class UrlSpider(Spider):
    name = "url_spider"
    start_urls = ["data:,"]
    custom_settings = {
        "ITEM_PIPELINES": {UppercasePipeline: 100},
    }

    def parse(self, response):
        yield {"url": response.url}


if __name__ == "__main__":
    ASYNCIO_EVENT_LOOP: str | None
    try:
        ASYNCIO_EVENT_LOOP = sys.argv[1]
    except IndexError:
        ASYNCIO_EVENT_LOOP = None

    process = AsyncCrawlerProcess(
        settings={
            "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
            "ASYNCIO_EVENT_LOOP": ASYNCIO_EVENT_LOOP,
        }
    )
    process.crawl(UrlSpider)
    process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_enabled_no_reactor.py
tests/AsyncCrawlerProcess/asyncio_enabled_no_reactor.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess
from scrapy.utils.reactor import is_asyncio_reactor_installed


class ReactorCheckExtension:
    def __init__(self):
        if not is_asyncio_reactor_installed():
            raise RuntimeError("ReactorCheckExtension requires the asyncio reactor.")


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "EXTENSIONS": {ReactorCheckExtension: 0},
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_enabled_reactor.py
tests/AsyncCrawlerProcess/asyncio_enabled_reactor.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess
from scrapy.utils.reactor import (
    install_reactor,
    is_asyncio_reactor_installed,
    is_reactor_installed,
)

if is_reactor_installed():
    raise RuntimeError(
        "Reactor already installed before is_asyncio_reactor_installed()."
    )

try:
    is_asyncio_reactor_installed()
except RuntimeError:
    pass
else:
    raise RuntimeError("is_asyncio_reactor_installed() did not raise RuntimeError.")

if is_reactor_installed():
    raise RuntimeError(
        "Reactor already installed after is_asyncio_reactor_installed()."
    )

install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")

if not is_asyncio_reactor_installed():
    raise RuntimeError("Wrong reactor installed after install_reactor().")


class ReactorCheckExtension:
    def __init__(self):
        if not is_asyncio_reactor_installed():
            raise RuntimeError("ReactorCheckExtension requires the asyncio reactor.")


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "EXTENSIONS": {ReactorCheckExtension: 0},
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/caching_hostname_resolver.py
tests/AsyncCrawlerProcess/caching_hostname_resolver.py
import sys

import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class CachingHostnameResolverSpider(scrapy.Spider):
    """
    Finishes in a finite amount of time (does not hang indefinitely in the DNS
    resolution)
    """

    name = "caching_hostname_resolver_spider"

    async def start(self):
        yield scrapy.Request(self.url)

    def parse(self, response):
        for _ in range(10):
            yield scrapy.Request(
                response.url, dont_filter=True, callback=self.ignore_response
            )

    def ignore_response(self, response):
        self.logger.info(repr(response.ip_address))


if __name__ == "__main__":
    process = AsyncCrawlerProcess(
        settings={
            "RETRY_ENABLED": False,
            "DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
        }
    )
    process.crawl(CachingHostnameResolverSpider, url=sys.argv[1])
    process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/multi.py
tests/AsyncCrawlerProcess/multi.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(settings={})
process.crawl(NoRequestsSpider)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/sleeping.py
tests/AsyncCrawlerProcess/sleeping.py
import asyncio
import sys

import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class SleepingSpider(scrapy.Spider):
    name = "sleeping"
    start_urls = ["data:,;"]

    async def parse(self, response):
        await asyncio.sleep(int(sys.argv[1]))


process = AsyncCrawlerProcess(settings={})
process.crawl(SleepingSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_custom_loop_custom_settings_same.py
tests/AsyncCrawlerProcess/asyncio_custom_loop_custom_settings_same.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"
    custom_settings = {
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/args_settings.py
tests/AsyncCrawlerProcess/args_settings.py
from typing import Any

import scrapy
from scrapy.crawler import AsyncCrawlerProcess, Crawler


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    @classmethod
    def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any):
        spider = super().from_crawler(crawler, *args, **kwargs)
        spider.settings.set("FOO", kwargs.get("foo"))
        return spider

    async def start(self):
        self.logger.info(f"The value of FOO is {self.settings.getint('FOO')}")
        return
        yield


process = AsyncCrawlerProcess(settings={})
process.crawl(NoRequestsSpider, foo=42)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/twisted_reactor_custom_settings_select.py
tests/AsyncCrawlerProcess/twisted_reactor_custom_settings_select.py
from __future__ import annotations

import logging
from typing import TYPE_CHECKING

import scrapy
from scrapy.crawler import AsyncCrawlerProcess

if TYPE_CHECKING:
    from asyncio import Task


class AsyncioReactorSpider(scrapy.Spider):
    name = "asyncio_reactor"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.selectreactor.SelectReactor",
    }


def log_task_exception(task: Task) -> None:
    try:
        task.result()
    except Exception:
        logging.exception("Crawl task failed")  # noqa: LOG015


process = AsyncCrawlerProcess()
task = process.crawl(AsyncioReactorSpider)
task.add_done_callback(log_task_exception)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/simple.py
tests/AsyncCrawlerProcess/simple.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(settings={})
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/twisted_reactor_custom_settings_same.py
tests/AsyncCrawlerProcess/twisted_reactor_custom_settings_same.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class AsyncioReactorSpider1(scrapy.Spider):
    name = "asyncio_reactor1"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }


class AsyncioReactorSpider2(scrapy.Spider):
    name = "asyncio_reactor2"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }


process = AsyncCrawlerProcess()
process.crawl(AsyncioReactorSpider1)
process.crawl(AsyncioReactorSpider2)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/twisted_reactor_custom_settings.py
tests/AsyncCrawlerProcess/twisted_reactor_custom_settings.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class AsyncioReactorSpider(scrapy.Spider):
    name = "asyncio_reactor"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }


process = AsyncCrawlerProcess()
process.crawl(AsyncioReactorSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_enabled_reactor_different_loop.py
tests/AsyncCrawlerProcess/asyncio_enabled_reactor_different_loop.py
import asyncio
import sys

from twisted.internet import asyncioreactor

import scrapy
from scrapy.crawler import AsyncCrawlerProcess

if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncioreactor.install(asyncio.get_event_loop())


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/default_name_resolver.py
tests/AsyncCrawlerProcess/default_name_resolver.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class IPv6Spider(scrapy.Spider):
    """
    Raises a twisted.internet.error.DNSLookupError:
    the default name resolver does not handle IPv6 addresses.
    """

    name = "ipv6_spider"
    start_urls = ["http://[::1]"]


if __name__ == "__main__":
    process = AsyncCrawlerProcess(settings={"RETRY_ENABLED": False})
    process.crawl(IPv6Spider)
    process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/caching_hostname_resolver_ipv6.py
tests/AsyncCrawlerProcess/caching_hostname_resolver_ipv6.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class CachingHostnameResolverSpider(scrapy.Spider):
    """
    Finishes without a twisted.internet.error.DNSLookupError exception
    """

    name = "caching_hostname_resolver_spider"
    start_urls = ["http://[::1]"]


if __name__ == "__main__":
    process = AsyncCrawlerProcess(
        settings={
            "RETRY_ENABLED": False,
            "DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
        }
    )
    process.crawl(CachingHostnameResolverSpider)
    process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_custom_loop.py
tests/AsyncCrawlerProcess/asyncio_custom_loop.py
import scrapy
from scrapy.crawler import AsyncCrawlerProcess


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/AsyncCrawlerProcess/asyncio_enabled_reactor_same_loop.py
tests/AsyncCrawlerProcess/asyncio_enabled_reactor_same_loop.py
import asyncio
import sys

from twisted.internet import asyncioreactor
from uvloop import Loop

import scrapy
from scrapy.crawler import AsyncCrawlerProcess

if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
asyncio.set_event_loop(Loop())
asyncioreactor.install(asyncio.get_event_loop())


class NoRequestsSpider(scrapy.Spider):
    name = "no_request"

    async def start(self):
        return
        yield


process = AsyncCrawlerProcess(
    settings={
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }
)
process.crawl(NoRequestsSpider)
process.start()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/explicit_default_reactor.py
tests/CrawlerRunner/explicit_default_reactor.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging


class NoRequestsSpider(Spider):
    name = "no_request"
    custom_settings = {
        "TWISTED_REACTOR": None,
    }

    async def start(self):
        return
        yield


def main(reactor):
    configure_logging(
        {"LOG_FORMAT": "%(levelname)s: %(message)s", "LOG_LEVEL": "DEBUG"}
    )
    runner = CrawlerRunner()
    return runner.crawl(NoRequestsSpider)


react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/custom_loop_different.py
tests/CrawlerRunner/custom_loop_different.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }

    async def start(self):
        return
        yield


def main(reactor):
    configure_logging()
    runner = CrawlerRunner()
    return runner.crawl(NoRequestsSpider)


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/multi_parallel.py
tests/CrawlerRunner/multi_parallel.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


def main(reactor):
    configure_logging()
    runner = CrawlerRunner()
    runner.crawl(NoRequestsSpider)
    runner.crawl(NoRequestsSpider)
    return runner.join()


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/ip_address.py
tests/CrawlerRunner/ip_address.py
# ruff: noqa: E402

from scrapy.utils.reactor import install_reactor

from tests.mockserver.dns import MockDNSServer
from tests.mockserver.http import MockServer

install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")

from twisted.names import cache, resolve
from twisted.names import hosts as hostsModule
from twisted.names.client import Resolver
from twisted.python.runtime import platform

from scrapy import Request, Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.log import configure_logging


# https://stackoverflow.com/a/32784190
def createResolver(servers=None, resolvconf=None, hosts=None):
    if hosts is None:
        hosts = b"/etc/hosts" if platform.getType() == "posix" else r"c:\windows\hosts"
    theResolver = Resolver(resolvconf, servers)
    hostResolver = hostsModule.Resolver(hosts)
    chain = [hostResolver, cache.CacheResolver(), theResolver]
    return resolve.ResolverChain(chain)


class LocalhostSpider(Spider):
    name = "localhost_spider"

    async def start(self):
        yield Request(self.url)

    def parse(self, response):
        netloc = urlparse_cached(response).netloc
        host = netloc.split(":")[0]
        self.logger.info(f"Host: {host}")
        self.logger.info(f"Type: {type(response.ip_address)}")
        self.logger.info(f"IP address: {response.ip_address}")


if __name__ == "__main__":
    from twisted.internet import reactor

    with MockServer() as mock_http_server, MockDNSServer() as mock_dns_server:
        port = mock_http_server.http_port
        url = f"http://not.a.real.domain:{port}/echo"
        servers = [(mock_dns_server.host, mock_dns_server.port)]
        reactor.installResolver(createResolver(servers=servers))

        configure_logging()
        runner = CrawlerRunner()
        d = runner.crawl(LocalhostSpider, url=url)
        d.addBoth(lambda _: reactor.stop())
        reactor.run()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/multi_seq.py
tests/CrawlerRunner/multi_seq.py
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


@inlineCallbacks
def main(reactor):
    configure_logging()
    runner = CrawlerRunner()
    yield runner.crawl(NoRequestsSpider)
    yield runner.crawl(NoRequestsSpider)


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/custom_loop_same.py
tests/CrawlerRunner/custom_loop_same.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
        "ASYNCIO_EVENT_LOOP": "uvloop.Loop",
    }

    async def start(self):
        return
        yield


def main(reactor):
    configure_logging()
    runner = CrawlerRunner()
    return runner.crawl(NoRequestsSpider)


install_reactor(
    "twisted.internet.asyncioreactor.AsyncioSelectorReactor", "uvloop.Loop"
)
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/change_reactor.py
tests/CrawlerRunner/change_reactor.py
from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging


class NoRequestsSpider(Spider):
    name = "no_request"
    custom_settings = {
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }

    async def start(self):
        return
        yield


configure_logging({"LOG_FORMAT": "%(levelname)s: %(message)s", "LOG_LEVEL": "DEBUG"})

from scrapy.utils.reactor import install_reactor  # noqa: E402

install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")

runner = CrawlerRunner()
d = runner.crawl(NoRequestsSpider)

from twisted.internet import reactor  # noqa: E402,TID253

d.addBoth(callback=lambda _: reactor.stop())
reactor.run()
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/CrawlerRunner/simple.py
tests/CrawlerRunner/simple.py
from twisted.internet.task import react

from scrapy import Spider
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.reactor import install_reactor


class NoRequestsSpider(Spider):
    name = "no_request"

    async def start(self):
        return
        yield


def main(reactor):
    configure_logging()
    runner = CrawlerRunner()
    return runner.crawl(NoRequestsSpider)


install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_misc/test_return_with_argument_inside_generator.py
tests/test_utils_misc/test_return_with_argument_inside_generator.py
import warnings
from functools import partial
from unittest import mock

import pytest

from scrapy.utils.misc import (
    is_generator_with_return_value,
    warn_on_generator_with_return_value,
)


def _indentation_error(*args, **kwargs):
    raise IndentationError


def top_level_return_something():
    """
    docstring
    """
    url = """
    https://example.org
    """
    yield url
    return 1


def top_level_return_none():
    """
    docstring
    """
    url = """
    https://example.org
    """
    yield url


def generator_that_returns_stuff():
    yield 1
    yield 2
    return 3


class TestUtilsMisc:
    @pytest.fixture
    def mock_spider(self):
        class MockSettings:
            def __init__(self, settings_dict=None):
                self.settings_dict = settings_dict or {
                    "WARN_ON_GENERATOR_RETURN_VALUE": True
                }

            def getbool(self, name, default=False):
                return self.settings_dict.get(name, default)

        class MockSpider:
            def __init__(self):
                self.settings = MockSettings()

        return MockSpider()

    def test_generators_return_something(self, mock_spider):
        def f1():
            yield 1
            return 2

        def g1():
            yield 1
            return "asdf"

        def h1():
            yield 1

            def helper():
                return 0

            yield helper()
            return 2

        def i1():
            """
            docstring
            """
            url = """
            https://example.org
            """
            yield url
            return 1

        assert is_generator_with_return_value(top_level_return_something)
        assert is_generator_with_return_value(f1)
        assert is_generator_with_return_value(g1)
        assert is_generator_with_return_value(h1)
        assert is_generator_with_return_value(i1)

        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(
                mock_spider, top_level_return_something
            )
            assert len(w) == 1
            assert (
                'The "MockSpider.top_level_return_something" method is a generator'
                in str(w[0].message)
            )
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, f1)
            assert len(w) == 1
            assert 'The "MockSpider.f1" method is a generator' in str(w[0].message)
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, g1)
            assert len(w) == 1
            assert 'The "MockSpider.g1" method is a generator' in str(w[0].message)
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, h1)
            assert len(w) == 1
            assert 'The "MockSpider.h1" method is a generator' in str(w[0].message)
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, i1)
            assert len(w) == 1
            assert 'The "MockSpider.i1" method is a generator' in str(w[0].message)

    def test_generators_return_none(self, mock_spider):
        def f2():
            yield 1

        def g2():
            yield 1

        def h2():
            yield 1

        def i2():
            yield 1
            yield from generator_that_returns_stuff()

        def j2():
            yield 1

            def helper():
                return 0

            yield helper()

        def k2():
            """
            docstring
            """
            url = """
            https://example.org
            """
            yield url

        def l2():
            return

        assert not is_generator_with_return_value(top_level_return_none)
        assert not is_generator_with_return_value(f2)
        assert not is_generator_with_return_value(g2)
        assert not is_generator_with_return_value(h2)
        assert not is_generator_with_return_value(i2)
        assert not is_generator_with_return_value(j2)  # not recursive
        assert not is_generator_with_return_value(k2)  # not recursive
        assert not is_generator_with_return_value(l2)

        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, top_level_return_none)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, f2)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, g2)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, h2)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, i2)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, j2)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, k2)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, l2)
            assert len(w) == 0

    def test_generators_return_none_with_decorator(self, mock_spider):
        def decorator(func):
            def inner_func():
                func()

            return inner_func

        @decorator
        def f3():
            yield 1

        @decorator
        def g3():
            yield 1

        @decorator
        def h3():
            yield 1

        @decorator
        def i3():
            yield 1
            yield from generator_that_returns_stuff()

        @decorator
        def j3():
            yield 1

            def helper():
                return 0

            yield helper()

        @decorator
        def k3():
            """
            docstring
            """
            url = """
            https://example.org
            """
            yield url

        @decorator
        def l3():
            return

        assert not is_generator_with_return_value(top_level_return_none)
        assert not is_generator_with_return_value(f3)
        assert not is_generator_with_return_value(g3)
        assert not is_generator_with_return_value(h3)
        assert not is_generator_with_return_value(i3)
        assert not is_generator_with_return_value(j3)  # not recursive
        assert not is_generator_with_return_value(k3)  # not recursive
        assert not is_generator_with_return_value(l3)

        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, top_level_return_none)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, f3)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, g3)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, h3)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, i3)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, j3)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, k3)
            assert len(w) == 0
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, l3)
            assert len(w) == 0

    @mock.patch(
        "scrapy.utils.misc.is_generator_with_return_value", new=_indentation_error
    )
    def test_indentation_error(self, mock_spider):
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(mock_spider, top_level_return_none)
            assert len(w) == 1
            assert "Unable to determine" in str(w[0].message)

    def test_partial(self):
        def cb(arg1, arg2):
            yield {}

        partial_cb = partial(cb, arg1=42)
        assert not is_generator_with_return_value(partial_cb)

    def test_warn_on_generator_with_return_value_settings_disabled(self):
        class MockSettings:
            def __init__(self, settings_dict=None):
                self.settings_dict = settings_dict or {}

            def getbool(self, name, default=False):
                return self.settings_dict.get(name, default)

        class MockSpider:
            def __init__(self):
                self.settings = MockSettings({"WARN_ON_GENERATOR_RETURN_VALUE": False})

        spider = MockSpider()

        def gen_with_return():
            yield 1
            return "value"

        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(spider, gen_with_return)
            assert len(w) == 0

        spider.settings.settings_dict["WARN_ON_GENERATOR_RETURN_VALUE"] = True
        with warnings.catch_warnings(record=True) as w:
            warn_on_generator_with_return_value(spider, gen_with_return)
            assert len(w) == 1
            assert "is a generator" in str(w[0].message)
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_misc/__init__.py
tests/test_utils_misc/__init__.py
import os
import sys
from pathlib import Path
from unittest import mock

import pytest

from scrapy.item import Field, Item
from scrapy.utils.misc import (
    arg_to_iter,
    build_from_crawler,
    load_object,
    rel_has_nofollow,
    set_environ,
    walk_modules,
)


class TestUtilsMisc:
    def test_load_object_class(self):
        obj = load_object(Field)
        assert obj is Field
        obj = load_object("scrapy.item.Field")
        assert obj is Field

    def test_load_object_function(self):
        obj = load_object(load_object)
        assert obj is load_object
        obj = load_object("scrapy.utils.misc.load_object")
        assert obj is load_object

    def test_load_object_exceptions(self):
        with pytest.raises(ImportError):
            load_object("nomodule999.mod.function")
        with pytest.raises(NameError):
            load_object("scrapy.utils.misc.load_object999")
        with pytest.raises(TypeError):
            load_object({})

    def test_walk_modules(self):
        mods = walk_modules("tests.test_utils_misc.test_walk_modules")
        expected = [
            "tests.test_utils_misc.test_walk_modules",
            "tests.test_utils_misc.test_walk_modules.mod",
            "tests.test_utils_misc.test_walk_modules.mod.mod0",
            "tests.test_utils_misc.test_walk_modules.mod1",
        ]
        assert {m.__name__ for m in mods} == set(expected)

        mods = walk_modules("tests.test_utils_misc.test_walk_modules.mod")
        expected = [
            "tests.test_utils_misc.test_walk_modules.mod",
            "tests.test_utils_misc.test_walk_modules.mod.mod0",
        ]
        assert {m.__name__ for m in mods} == set(expected)

        mods = walk_modules("tests.test_utils_misc.test_walk_modules.mod1")
        expected = [
            "tests.test_utils_misc.test_walk_modules.mod1",
        ]
        assert {m.__name__ for m in mods} == set(expected)

        with pytest.raises(ImportError):
            walk_modules("nomodule999")

    def test_walk_modules_egg(self):
        egg = str(Path(__file__).parent / "test.egg")
        sys.path.append(egg)
        try:
            mods = walk_modules("testegg")
            expected = [
                "testegg.spiders",
                "testegg.spiders.a",
                "testegg.spiders.b",
                "testegg",
            ]
            assert {m.__name__ for m in mods} == set(expected)
        finally:
            sys.path.remove(egg)

    def test_arg_to_iter(self):
        class TestItem(Item):
            name = Field()

        assert hasattr(arg_to_iter(None), "__iter__")
        assert hasattr(arg_to_iter(100), "__iter__")
        assert hasattr(arg_to_iter("lala"), "__iter__")
        assert hasattr(arg_to_iter([1, 2, 3]), "__iter__")
        assert hasattr(arg_to_iter(c for c in "abcd"), "__iter__")

        assert not list(arg_to_iter(None))
        assert list(arg_to_iter("lala")) == ["lala"]
        assert list(arg_to_iter(100)) == [100]
        assert list(arg_to_iter(c for c in "abc")) == ["a", "b", "c"]
        assert list(arg_to_iter([1, 2, 3])) == [1, 2, 3]
        assert list(arg_to_iter({"a": 1})) == [{"a": 1}]
        assert list(arg_to_iter(TestItem(name="john"))) == [TestItem(name="john")]

    def test_build_from_crawler(self):
        crawler = mock.MagicMock(spec_set=["settings"])
        args = (True, 100.0)
        kwargs = {"key": "val"}

        def _test_with_crawler(mock, crawler):
            build_from_crawler(mock, crawler, *args, **kwargs)
            if hasattr(mock, "from_crawler"):
                mock.from_crawler.assert_called_once_with(crawler, *args, **kwargs)
                assert mock.call_count == 0
            else:
                mock.assert_called_once_with(*args, **kwargs)

        # Check usage of correct constructor using 2 mocks:
        # 1. with no alternative constructors
        # 2. with from_crawler() constructor
        spec_sets = (
            ["__qualname__"],
            ["__qualname__", "from_crawler"],
        )
        for specs in spec_sets:
            m = mock.MagicMock(spec_set=specs)
            _test_with_crawler(m, crawler)
            m.reset_mock()

        # Check adoption of crawler
        m = mock.MagicMock(spec_set=["__qualname__", "from_crawler"])
        m.from_crawler.return_value = None
        with pytest.raises(TypeError):
            build_from_crawler(m, crawler, *args, **kwargs)

    def test_set_environ(self):
        assert os.environ.get("some_test_environ") is None
        with set_environ(some_test_environ="test_value"):
            assert os.environ.get("some_test_environ") == "test_value"
        assert os.environ.get("some_test_environ") is None

        os.environ["some_test_environ"] = "test"
        assert os.environ.get("some_test_environ") == "test"
        with set_environ(some_test_environ="test_value"):
            assert os.environ.get("some_test_environ") == "test_value"
        assert os.environ.get("some_test_environ") == "test"

    def test_rel_has_nofollow(self):
        assert rel_has_nofollow("ugc nofollow") is True
        assert rel_has_nofollow("ugc,nofollow") is True
        assert rel_has_nofollow("ugc") is False
        assert rel_has_nofollow("nofollow") is True
        assert rel_has_nofollow("nofollowfoo") is False
        assert rel_has_nofollow("foonofollow") is False
        assert rel_has_nofollow("ugc, , nofollow") is True
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_misc/test_walk_modules/mod1.py
tests/test_utils_misc/test_walk_modules/mod1.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_misc/test_walk_modules/__init__.py
tests/test_utils_misc/test_walk_modules/__init__.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_misc/test_walk_modules/mod/__init__.py
tests/test_utils_misc/test_walk_modules/mod/__init__.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_misc/test_walk_modules/mod/mod0.py
tests/test_utils_misc/test_walk_modules/mod/mod0.py
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/utils/__init__.py
tests/utils/__init__.py
import os
from pathlib import Path

from twisted.internet.defer import Deferred


def twisted_sleep(seconds):
    from twisted.internet import reactor

    d = Deferred()
    reactor.callLater(seconds, d.callback, None)
    return d


def get_script_run_env() -> dict[str, str]:
    """Return an OS environment dict suitable to run scripts shipped with tests."""
    tests_path = Path(__file__).parent.parent
    pythonpath = str(tests_path) + os.pathsep + os.environ.get("PYTHONPATH", "")
    env = os.environ.copy()
    env["PYTHONPATH"] = pythonpath
    return env
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/utils/cmdline.py
tests/utils/cmdline.py
from __future__ import annotations

import subprocess
import sys
from typing import Any

import pytest

from scrapy.utils.test import get_testenv


def call(*args: str, **popen_kwargs: Any) -> int:
    args = (sys.executable, "-m", "scrapy.cmdline", *args)
    return subprocess.call(
        args,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        env=get_testenv(),
        **popen_kwargs,
    )


def proc(*args: str, **popen_kwargs: Any) -> tuple[int, str, str]:
    args = (sys.executable, "-m", "scrapy.cmdline", *args)
    try:
        p = subprocess.run(
            args,
            check=False,
            capture_output=True,
            encoding="utf-8",
            timeout=15,
            env=get_testenv(),
            **popen_kwargs,
        )
    except subprocess.TimeoutExpired:
        pytest.fail("Command took too much time to complete")
    return p.returncode, p.stdout, p.stderr
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_cmdline_crawl_with_pipeline/__init__.py
tests/test_cmdline_crawl_with_pipeline/__init__.py
import sys
from pathlib import Path
from subprocess import PIPE, Popen

from tests import TWISTED_KEEPS_TRACEBACKS


class TestCmdlineCrawlPipeline:
    def _execute(self, spname):
        args = (sys.executable, "-m", "scrapy.cmdline", "crawl", spname)
        cwd = Path(__file__).resolve().parent
        proc = Popen(args, stdout=PIPE, stderr=PIPE, cwd=cwd)
        _, stderr = proc.communicate()
        return proc.returncode, stderr

    def test_open_spider_normally_in_pipeline(self):
        returncode, _ = self._execute("normal")
        assert returncode == 0

    def test_exception_at_open_spider_in_pipeline(self):
        returncode, stderr = self._execute("exception")
        # An unhandled exception in a pipeline should not stop the crawl
        assert returncode == 0
        if TWISTED_KEEPS_TRACEBACKS:
            assert b'RuntimeError("exception")' in stderr
        else:
            assert b"RuntimeError: exception" in stderr
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_cmdline_crawl_with_pipeline/test_spider/pipelines.py
tests/test_cmdline_crawl_with_pipeline/test_spider/pipelines.py
class TestSpiderPipeline:
    def open_spider(self, spider):
        pass

    def process_item(self, item):
        return item


class TestSpiderExceptionPipeline:
    def open_spider(self, spider):
        raise RuntimeError("exception")

    def process_item(self, item):
        return item
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false
scrapy/scrapy
https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_cmdline_crawl_with_pipeline/test_spider/settings.py
tests/test_cmdline_crawl_with_pipeline/test_spider/settings.py
BOT_NAME = "test_spider"

SPIDER_MODULES = ["test_spider.spiders"]
python
BSD-3-Clause
d1bd8eb49f7aba9289e4ff692006cead8bcd9080
2026-01-04T14:38:41.023839Z
false