| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_output_chain.py | tests/test_spidermiddleware_output_chain.py | from testfixtures import LogCapture
from scrapy import Request, Spider
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
class _BaseSpiderMiddleware:
def __init__(self, crawler):
self.crawler = crawler
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
class LogExceptionMiddleware(_BaseSpiderMiddleware):
def process_spider_exception(self, response, exception):
self.crawler.spider.logger.info(
"Middleware: %s exception caught", exception.__class__.__name__
)
# ================================================================================
# (0) recover from an exception on a spider callback
class RecoveryMiddleware(_BaseSpiderMiddleware):
def process_spider_exception(self, response, exception):
self.crawler.spider.logger.info(
"Middleware: %s exception caught", exception.__class__.__name__
)
return [
{"from": "process_spider_exception"},
Request(response.url, meta={"dont_fail": True}, dont_filter=True),
]
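# Note: anything returned from process_spider_exception is treated as fresh
# callback output and is passed through the remaining process_spider_output
# chain, so the dict above is scraped as an item and the Request is scheduled.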
class RecoverySpider(Spider):
name = "RecoverySpider"
custom_settings = {
"SPIDER_MIDDLEWARES_BASE": {},
"SPIDER_MIDDLEWARES": {
RecoveryMiddleware: 10,
},
}
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
yield {"test": 1}
self.logger.info("DONT_FAIL: %s", response.meta.get("dont_fail"))
if not response.meta.get("dont_fail"):
raise TabError
class RecoveryAsyncGenSpider(RecoverySpider):
name = "RecoveryAsyncGenSpider"
async def parse(self, response):
for r in super().parse(response):
yield r
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
class FailProcessSpiderInputMiddleware(_BaseSpiderMiddleware):
def process_spider_input(self, response):
self.crawler.spider.logger.info("Middleware: will raise IndexError")
raise IndexError
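# If process_spider_input raises, Scrapy skips the remaining
# process_spider_input methods and either calls the request errback (when one
# is set) or starts the process_spider_exception chain; that split is exactly
# what the two spiders below exercise.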
class ProcessSpiderInputSpiderWithoutErrback(Spider):
name = "ProcessSpiderInputSpiderWithoutErrback"
custom_settings = {
"SPIDER_MIDDLEWARES": {
# spider
FailProcessSpiderInputMiddleware: 8,
LogExceptionMiddleware: 6,
# engine
}
}
async def start(self):
yield Request(url=self.mockserver.url("/status?n=200"), callback=self.parse)
def parse(self, response):
return {"from": "callback"}
class ProcessSpiderInputSpiderWithErrback(ProcessSpiderInputSpiderWithoutErrback):
name = "ProcessSpiderInputSpiderWithErrback"
async def start(self):
yield Request(
self.mockserver.url("/status?n=200"), self.parse, errback=self.errback
)
def errback(self, failure):
self.logger.info("Got a Failure on the Request errback")
return {"from": "errback"}
# ================================================================================
# (2) exceptions from a spider callback (generator)
class GeneratorCallbackSpider(Spider):
name = "GeneratorCallbackSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
LogExceptionMiddleware: 10,
},
}
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
yield {"test": 1}
yield {"test": 2}
raise ImportError
class AsyncGeneratorCallbackSpider(GeneratorCallbackSpider):
async def parse(self, response):
yield {"test": 1}
yield {"test": 2}
raise ImportError
# ================================================================================
# (2.1) exceptions from a spider callback (generator, middleware right after callback)
class GeneratorCallbackSpiderMiddlewareRightAfterSpider(GeneratorCallbackSpider):
name = "GeneratorCallbackSpiderMiddlewareRightAfterSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
LogExceptionMiddleware: 100000,
},
}
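# In SPIDER_MIDDLEWARES, lower order values sit closer to the engine and
# higher values closer to the spider, so an order of 100000 places
# LogExceptionMiddleware immediately after the spider in the chain.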
# ================================================================================
# (3) exceptions from a spider callback (not a generator)
class NotGeneratorCallbackSpider(Spider):
name = "NotGeneratorCallbackSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
LogExceptionMiddleware: 10,
},
}
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
return [{"test": 1}, {"test": 1 / 0}]
# ================================================================================
# (3.1) exceptions from a spider callback (not a generator, middleware right after callback)
class NotGeneratorCallbackSpiderMiddlewareRightAfterSpider(NotGeneratorCallbackSpider):
name = "NotGeneratorCallbackSpiderMiddlewareRightAfterSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
LogExceptionMiddleware: 100000,
},
}
# ================================================================================
# (4) exceptions from a middleware process_spider_output method (generator)
class _GeneratorDoNothingMiddleware(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
yield r
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
class GeneratorFailMiddleware(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
yield r
raise LookupError
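# Because this method is a generator, the LookupError only surfaces when a
# downstream consumer iterates past the last item, so items yielded before
# the raise have already been processed by the rest of the chain.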
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
yield {"processed": [method]}
class GeneratorDoNothingAfterFailureMiddleware(_GeneratorDoNothingMiddleware):
pass
class GeneratorRecoverMiddleware(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
yield r
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
yield {"processed": [method]}
class GeneratorDoNothingAfterRecoveryMiddleware(_GeneratorDoNothingMiddleware):
pass
class GeneratorOutputChainSpider(Spider):
name = "GeneratorOutputChainSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
GeneratorFailMiddleware: 10,
GeneratorDoNothingAfterFailureMiddleware: 8,
GeneratorRecoverMiddleware: 5,
GeneratorDoNothingAfterRecoveryMiddleware: 3,
},
}
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
yield {"processed": ["parse-first-item"]}
yield {"processed": ["parse-second-item"]}
# ================================================================================
# (5) exceptions from a middleware process_spider_output method (not generator)
class _NotGeneratorDoNothingMiddleware(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
out = []
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
out.append(r)
return out
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
class NotGeneratorFailMiddleware(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
out = []
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
out.append(r)
raise ReferenceError
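# Unlike the generator variant above, this raises during the method call
# itself, before any list is returned, so every item accumulated from the
# callback is lost and only the recovery item survives.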
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
return [{"processed": [method]}]
class NotGeneratorDoNothingAfterFailureMiddleware(_NotGeneratorDoNothingMiddleware):
pass
class NotGeneratorRecoverMiddleware(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
out = []
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
out.append(r)
return out
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
return [{"processed": [method]}]
class NotGeneratorDoNothingAfterRecoveryMiddleware(_NotGeneratorDoNothingMiddleware):
pass
class NotGeneratorOutputChainSpider(Spider):
name = "NotGeneratorOutputChainSpider"
custom_settings = {
"SPIDER_MIDDLEWARES": {
NotGeneratorFailMiddleware: 10,
NotGeneratorDoNothingAfterFailureMiddleware: 8,
NotGeneratorRecoverMiddleware: 5,
NotGeneratorDoNothingAfterRecoveryMiddleware: 3,
},
}
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
return [
{"processed": ["parse-first-item"]},
{"processed": ["parse-second-item"]},
]
# ================================================================================
class TestSpiderMiddleware:
mockserver: MockServer
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
async def crawl_log(self, spider: type[Spider]) -> LogCapture:
crawler = get_crawler(spider)
with LogCapture() as log:
await crawler.crawl_async(mockserver=self.mockserver)
return log
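# testfixtures.LogCapture records everything logged while the crawl runs;
# str(log) flattens the captured records, which keeps the assertions below as
# simple substring checks against messages and the final stats dump.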
@deferred_f_from_coro_f
async def test_recovery(self):
"""
(0) Recover from an exception in a spider callback. The final item count should be 3
(one yielded from the callback method before the exception is raised, one directly
from the recovery middleware and one from the spider when processing the request that
was enqueued from the recovery middleware)
"""
log = await self.crawl_log(RecoverySpider)
assert "Middleware: TabError exception caught" in str(log)
assert str(log).count("Middleware: TabError exception caught") == 1
assert "'item_scraped_count': 3" in str(log)
@deferred_f_from_coro_f
async def test_recovery_asyncgen(self):
"""
Same as test_recovery but with an async callback.
"""
log = await self.crawl_log(RecoveryAsyncGenSpider)
assert "Middleware: TabError exception caught" in str(log)
assert str(log).count("Middleware: TabError exception caught") == 1
assert "'item_scraped_count': 3" in str(log)
@deferred_f_from_coro_f
async def test_process_spider_input_without_errback(self):
"""
(1.1) An exception from the process_spider_input chain should be caught by the
process_spider_exception chain from the start if the Request has no errback
"""
log1 = await self.crawl_log(ProcessSpiderInputSpiderWithoutErrback)
assert "Middleware: will raise IndexError" in str(log1)
assert "Middleware: IndexError exception caught" in str(log1)
@deferred_f_from_coro_f
async def test_process_spider_input_with_errback(self):
"""
(1.2) An exception from the process_spider_input chain should not be caught by the
process_spider_exception chain if the Request has an errback
"""
log1 = await self.crawl_log(ProcessSpiderInputSpiderWithErrback)
assert "Middleware: IndexError exception caught" not in str(log1)
assert "Middleware: will raise IndexError" in str(log1)
assert "Got a Failure on the Request errback" in str(log1)
assert "{'from': 'errback'}" in str(log1)
assert "{'from': 'callback'}" not in str(log1)
assert "'item_scraped_count': 1" in str(log1)
@deferred_f_from_coro_f
async def test_generator_callback(self):
"""
(2) An exception from a spider callback (returning a generator) should
be caught by the process_spider_exception chain. Items yielded before the
exception is raised should be processed normally.
"""
log2 = await self.crawl_log(GeneratorCallbackSpider)
assert "Middleware: ImportError exception caught" in str(log2)
assert "'item_scraped_count': 2" in str(log2)
@deferred_f_from_coro_f
async def test_async_generator_callback(self):
"""
Same as test_generator_callback but with an async callback.
"""
log2 = await self.crawl_log(AsyncGeneratorCallbackSpider)
assert "Middleware: ImportError exception caught" in str(log2)
assert "'item_scraped_count': 2" in str(log2)
@deferred_f_from_coro_f
async def test_generator_callback_right_after_callback(self):
"""
(2.1) Special case of (2): Exceptions should be caught
even if the middleware is placed right after the spider
"""
log21 = await self.crawl_log(GeneratorCallbackSpiderMiddlewareRightAfterSpider)
assert "Middleware: ImportError exception caught" in str(log21)
assert "'item_scraped_count': 2" in str(log21)
@deferred_f_from_coro_f
async def test_not_a_generator_callback(self):
"""
(3) An exception from a spider callback (returning a list) should
be caught by the process_spider_exception chain. No items should be processed.
"""
log3 = await self.crawl_log(NotGeneratorCallbackSpider)
assert "Middleware: ZeroDivisionError exception caught" in str(log3)
assert "item_scraped_count" not in str(log3)
@deferred_f_from_coro_f
async def test_not_a_generator_callback_right_after_callback(self):
"""
(3.1) Special case of (3): Exceptions should be caught
even if the middleware is placed right after the spider
"""
log31 = await self.crawl_log(
NotGeneratorCallbackSpiderMiddlewareRightAfterSpider
)
assert "Middleware: ZeroDivisionError exception caught" in str(log31)
assert "item_scraped_count" not in str(log31)
@deferred_f_from_coro_f
async def test_generator_output_chain(self):
"""
(4) An exception from a middleware's process_spider_output method should be sent
to the process_spider_exception method from the next middleware in the chain.
The result of the recovery by the process_spider_exception method should be handled
by the process_spider_output method from the next middleware.
The final item count should be 2 (one from the spider callback and one from the
process_spider_exception chain)
"""
log4 = await self.crawl_log(GeneratorOutputChainSpider)
assert "'item_scraped_count': 2" in str(log4)
assert (
"GeneratorRecoverMiddleware.process_spider_exception: LookupError caught"
in str(log4)
)
assert (
"GeneratorDoNothingAfterFailureMiddleware.process_spider_exception: LookupError caught"
in str(log4)
)
assert (
"GeneratorFailMiddleware.process_spider_exception: LookupError caught"
not in str(log4)
)
assert (
"GeneratorDoNothingAfterRecoveryMiddleware.process_spider_exception: LookupError caught"
not in str(log4)
)
item_from_callback = {
"processed": [
"parse-first-item",
"GeneratorFailMiddleware.process_spider_output",
"GeneratorDoNothingAfterFailureMiddleware.process_spider_output",
"GeneratorRecoverMiddleware.process_spider_output",
"GeneratorDoNothingAfterRecoveryMiddleware.process_spider_output",
]
}
item_recovered = {
"processed": [
"GeneratorRecoverMiddleware.process_spider_exception",
"GeneratorDoNothingAfterRecoveryMiddleware.process_spider_output",
]
}
assert str(item_from_callback) in str(log4)
assert str(item_recovered) in str(log4)
assert "parse-second-item" not in str(log4)
@deferred_f_from_coro_f
async def test_not_a_generator_output_chain(self):
"""
(5) An exception from a middleware's process_spider_output method should be sent
to the process_spider_exception method from the next middleware in the chain.
The result of the recovery by the process_spider_exception method should be handled
by the process_spider_output method from the next middleware.
The final item count should be 1 (from the process_spider_exception chain, the items
from the spider callback are lost)
"""
log5 = await self.crawl_log(NotGeneratorOutputChainSpider)
assert "'item_scraped_count': 1" in str(log5)
assert (
"GeneratorRecoverMiddleware.process_spider_exception: ReferenceError caught"
in str(log5)
)
assert (
"GeneratorDoNothingAfterFailureMiddleware.process_spider_exception: ReferenceError caught"
in str(log5)
)
assert (
"GeneratorFailMiddleware.process_spider_exception: ReferenceError caught"
not in str(log5)
)
assert (
"GeneratorDoNothingAfterRecoveryMiddleware.process_spider_exception: ReferenceError caught"
not in str(log5)
)
item_recovered = {
"processed": [
"NotGeneratorRecoverMiddleware.process_spider_exception",
"NotGeneratorDoNothingAfterRecoveryMiddleware.process_spider_output",
]
}
assert str(item_recovered) in str(log5)
assert "parse-first-item" not in str(log5)
assert "parse-second-item" not in str(log5)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_fetch.py | tests/test_command_fetch.py | from __future__ import annotations
from typing import TYPE_CHECKING
from tests.utils.cmdline import proc
if TYPE_CHECKING:
from tests.mockserver.http import MockServer
class TestFetchCommand:
def test_output(self, mockserver: MockServer) -> None:
_, out, _ = proc("fetch", mockserver.url("/text"))
assert out.strip() == "Works"
def test_redirect_default(self, mockserver: MockServer) -> None:
_, out, _ = proc("fetch", mockserver.url("/redirect"))
assert out.strip() == "Redirected here"
def test_redirect_disabled(self, mockserver: MockServer) -> None:
_, _, err = proc(
"fetch", "--no-redirect", mockserver.url("/redirect-no-meta-refresh")
)
err = err.strip()
assert "downloader/response_status_count/302" in err
assert "downloader/response_status_count/200" not in err
def test_headers(self, mockserver: MockServer) -> None:
_, out, _ = proc("fetch", mockserver.url("/text"), "--headers")
out = out.replace("\r", "") # required on win32
assert "Server: TwistedWeb" in out
assert "Content-Type: text/plain" in out
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_pipeline_files.py | tests/test_pipeline_files.py | import dataclasses
import os
import random
import time
import warnings
from abc import ABC, abstractmethod
from datetime import datetime
from ftplib import FTP
from io import BytesIO
from pathlib import Path
from posixpath import split
from shutil import rmtree
from tempfile import mkdtemp
from typing import Any
from unittest import mock
from unittest.mock import MagicMock
from urllib.parse import urlparse
import attr
import pytest
from itemadapter import ItemAdapter
from twisted.internet.defer import inlineCallbacks
from scrapy.exceptions import NotConfigured
from scrapy.http import Request, Response
from scrapy.item import Field, Item
from scrapy.pipelines.files import (
FilesPipeline,
FSFilesStore,
FTPFilesStore,
GCSFilesStore,
S3FilesStore,
)
from scrapy.settings import Settings
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests.mockserver.ftp import MockFTPServer
from .test_pipeline_media import _mocked_download_func
def get_gcs_content_and_delete(
bucket: Any, path: str
) -> tuple[bytes, list[dict[str, str]], Any]:
from google.cloud import storage # noqa: PLC0415
client = storage.Client(project=os.environ.get("GCS_PROJECT_ID"))
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(path)
content = blob.download_as_string()
acl = list(blob.acl) # loads acl before it will be deleted
bucket.delete_blob(path)
return content, acl, blob
def get_ftp_content_and_delete(
path: str,
host: str,
port: int,
username: str,
password: str,
use_active_mode: bool = False,
) -> bytes:
ftp = FTP()
ftp.connect(host, port)
ftp.login(username, password)
if use_active_mode:
ftp.set_pasv(False)
ftp_data: list[bytes] = []
def buffer_data(data: bytes) -> None:
ftp_data.append(data)
ftp.retrbinary(f"RETR {path}", buffer_data)
dirname, filename = split(path)
ftp.cwd(dirname)
ftp.delete(filename)
return b"".join(ftp_data)
class TestFilesPipeline:
def setup_method(self):
self.tempdir = mkdtemp()
settings_dict = {"FILES_STORE": self.tempdir}
crawler = get_crawler(DefaultSpider, settings_dict=settings_dict)
crawler.spider = crawler._create_spider()
crawler.engine = MagicMock(download_async=_mocked_download_func)
self.pipeline = FilesPipeline.from_crawler(crawler)
self.pipeline.open_spider()
def teardown_method(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
assert (
file_path(Request("https://dev.mydeco.com/mydeco.pdf"))
== "full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf"
)
assert (
file_path(
Request(
"http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt"
)
)
== "full/4ce274dd83db0368bafd7e406f382ae088e39219.txt"
)
assert (
file_path(
Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")
)
== "full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc"
)
assert (
file_path(
Request(
"http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg"
)
)
== "full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg"
)
assert (
file_path(Request("http://www.dorma.co.uk/images/product_details/2532/"))
== "full/97ee6f8a46cbbb418ea91502fd24176865cf39b2"
)
assert (
file_path(Request("http://www.dorma.co.uk/images/product_details/2532"))
== "full/244e0dd7d96a3b7b01f54eded250c9e272577aa1"
)
assert (
file_path(
Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object(),
)
== "full/244e0dd7d96a3b7b01f54eded250c9e272577aa1"
)
assert (
file_path(
Request(
"http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg.bohaha"
)
)
== "full/76c00cef2ef669ae65052661f68d451162829507"
)
assert (
file_path(
Request(
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAR0AAACxCAMAAADOHZloAAACClBMVEX/\
//+F0tzCwMK76ZKQ21AMqr7oAAC96JvD5aWM2kvZ78J0N7fmAAC46Y4Ap7y"
)
)
== "full/178059cbeba2e34120a67f2dc1afc3ecc09b61cb.png"
)
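# The expected values above follow the default FilesPipeline naming scheme:
# a SHA-1 hex digest of the request URL stored under "full/", keeping the
# URL's extension when it maps to a known mime type and otherwise guessing
# one (which is why the directory-like URLs get no extension and the
# ".jpg.bohaha" URL loses its suffix). Roughly, and only as a sketch of the
# idea rather than the exact implementation:
#     "full/" + hashlib.sha1(url.encode()).hexdigest() + media_ext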
def test_fs_store(self):
assert isinstance(self.pipeline.store, FSFilesStore)
assert self.pipeline.store.basedir == self.tempdir
path = "some/image/key.jpg"
fullpath = Path(self.tempdir, "some", "image", "key.jpg")
assert self.pipeline.store._get_filesystem_path(path) == fullpath
@deferred_f_from_coro_f
async def test_file_not_expired(self):
item_url = "http://example.com/file.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, "inc_stats", return_value=True),
mock.patch.object(
FSFilesStore,
"stat_file",
return_value={"checksum": "abc", "last_modified": time.time()},
),
mock.patch.object(
FilesPipeline,
"get_media_requests",
return_value=[_prepare_request_object(item_url)],
),
]
for p in patchers:
p.start()
result = await self.pipeline.process_item(item)
assert result["files"][0]["checksum"] == "abc"
assert result["files"][0]["status"] == "uptodate"
for p in patchers:
p.stop()
@deferred_f_from_coro_f
async def test_file_expired(self):
item_url = "http://example.com/file2.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(
FSFilesStore,
"stat_file",
return_value={
"checksum": "abc",
"last_modified": time.time()
- (self.pipeline.expires * 60 * 60 * 24 * 2),
},
),
mock.patch.object(
FilesPipeline,
"get_media_requests",
return_value=[_prepare_request_object(item_url)],
),
mock.patch.object(FilesPipeline, "inc_stats", return_value=True),
]
for p in patchers:
p.start()
result = await self.pipeline.process_item(item)
assert result["files"][0]["checksum"] != "abc"
assert result["files"][0]["status"] == "downloaded"
for p in patchers:
p.stop()
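# FILES_EXPIRES is expressed in days; 60 * 60 * 24 converts days to seconds,
# and the mocked last_modified above is pushed two full expiration windows
# into the past, guaranteeing the file is treated as expired and re-downloaded.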
@deferred_f_from_coro_f
async def test_file_cached(self):
item_url = "http://example.com/file3.pdf"
item = _create_item_with_files(item_url)
patchers = [
mock.patch.object(FilesPipeline, "inc_stats", return_value=True),
mock.patch.object(
FSFilesStore,
"stat_file",
return_value={
"checksum": "abc",
"last_modified": time.time()
- (self.pipeline.expires * 60 * 60 * 24 * 2),
},
),
mock.patch.object(
FilesPipeline,
"get_media_requests",
return_value=[_prepare_request_object(item_url, flags=["cached"])],
),
]
for p in patchers:
p.start()
result = await self.pipeline.process_item(item)
assert result["files"][0]["checksum"] != "abc"
assert result["files"][0]["status"] == "cached"
for p in patchers:
p.stop()
def test_file_path_from_item(self):
"""
Custom file path based on item data, overriding default implementation
"""
class CustomFilesPipeline(FilesPipeline):
def file_path(self, request, response=None, info=None, item=None):
return f"full/{item.get('path')}"
file_path = CustomFilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": self.tempdir})
).file_path
item = {"path": "path-to-store-file"}
request = Request("http://example.com")
assert file_path(request, item=item) == "full/path-to-store-file"
@pytest.mark.parametrize(
"bad_type",
[
"http://example.com/file.pdf",
("http://example.com/file.pdf",),
{"url": "http://example.com/file.pdf"},
123,
None,
],
)
def test_rejects_non_list_file_urls(self, tmp_path, bad_type):
pipeline = FilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": str(tmp_path)})
)
item = ItemWithFiles()
item["file_urls"] = bad_type
with pytest.raises(TypeError, match="file_urls must be a list of URLs"):
list(pipeline.get_media_requests(item, None))
class TestFilesPipelineFieldsMixin(ABC):
@property
@abstractmethod
def item_class(self) -> Any:
raise NotImplementedError
def test_item_fields_default(self, tmp_path):
url = "http://www.example.com/files/1.txt"
item = self.item_class(name="item1", file_urls=[url])
pipeline = FilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": tmp_path})
)
requests = list(pipeline.get_media_requests(item, None))
assert requests[0].url == url
results = [(True, {"url": url})]
item = pipeline.item_completed(results, item, None)
files = ItemAdapter(item).get("files")
assert files == [results[0][1]]
assert isinstance(item, self.item_class)
def test_item_fields_override_settings(self, tmp_path):
url = "http://www.example.com/files/1.txt"
item = self.item_class(name="item1", custom_file_urls=[url])
pipeline = FilesPipeline.from_crawler(
get_crawler(
None,
{
"FILES_STORE": tmp_path,
"FILES_URLS_FIELD": "custom_file_urls",
"FILES_RESULT_FIELD": "custom_files",
},
)
)
requests = list(pipeline.get_media_requests(item, None))
assert requests[0].url == url
results = [(True, {"url": url})]
item = pipeline.item_completed(results, item, None)
custom_files = ItemAdapter(item).get("custom_files")
assert custom_files == [results[0][1]]
assert isinstance(item, self.item_class)
class TestFilesPipelineFieldsDict(TestFilesPipelineFieldsMixin):
item_class = dict
class FilesPipelineTestItem(Item):
name = Field()
# default fields
file_urls = Field()
files = Field()
# overridden fields
custom_file_urls = Field()
custom_files = Field()
class TestFilesPipelineFieldsItem(TestFilesPipelineFieldsMixin):
item_class = FilesPipelineTestItem
@dataclasses.dataclass
class FilesPipelineTestDataClass:
name: str
# default fields
file_urls: list = dataclasses.field(default_factory=list)
files: list = dataclasses.field(default_factory=list)
# overridden fields
custom_file_urls: list = dataclasses.field(default_factory=list)
custom_files: list = dataclasses.field(default_factory=list)
class TestFilesPipelineFieldsDataClass(TestFilesPipelineFieldsMixin):
item_class = FilesPipelineTestDataClass
@attr.s
class FilesPipelineTestAttrsItem:
name = attr.ib(default="")
# default fields
file_urls: list[str] = attr.ib(factory=list)
files: list[dict[str, str]] = attr.ib(factory=list)
# overridden fields
custom_file_urls: list[str] = attr.ib(factory=list)
custom_files: list[dict[str, str]] = attr.ib(factory=list)
class TestFilesPipelineFieldsAttrsItem(TestFilesPipelineFieldsMixin):
item_class = FilesPipelineTestAttrsItem
class TestFilesPipelineCustomSettings:
default_cls_settings = {
"EXPIRES": 90,
"FILES_URLS_FIELD": "file_urls",
"FILES_RESULT_FIELD": "files",
}
file_cls_attr_settings_map = {
("EXPIRES", "FILES_EXPIRES", "expires"),
("FILES_URLS_FIELD", "FILES_URLS_FIELD", "files_urls_field"),
("FILES_RESULT_FIELD", "FILES_RESULT_FIELD", "files_result_field"),
}
def _generate_fake_settings(self, tmp_path, prefix=None):
def random_string():
return "".join([chr(random.randint(97, 123)) for _ in range(10)])
settings = {
"FILES_EXPIRES": random.randint(100, 1000),
"FILES_URLS_FIELD": random_string(),
"FILES_RESULT_FIELD": random_string(),
"FILES_STORE": tmp_path,
}
if not prefix:
return settings
return {
prefix.upper() + "_" + k if k != "FILES_STORE" else k: v
for k, v in settings.items()
}
def _generate_fake_pipeline(self):
class UserDefinedFilePipeline(FilesPipeline):
EXPIRES = 1001
FILES_URLS_FIELD = "alfa"
FILES_RESULT_FIELD = "beta"
return UserDefinedFilePipeline
def test_different_settings_for_different_instances(self, tmp_path):
"""
If there are different instances with different settings, each should keep
its own settings.
"""
custom_settings = self._generate_fake_settings(tmp_path)
another_pipeline = FilesPipeline.from_crawler(
get_crawler(None, custom_settings)
)
one_pipeline = FilesPipeline(tmp_path, crawler=get_crawler(None))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
default_value = self.default_cls_settings[pipe_attr]
assert getattr(one_pipeline, pipe_attr) == default_value
custom_value = custom_settings[settings_attr]
assert default_value != custom_value
assert getattr(another_pipeline, pipe_ins_attr) == custom_value
def test_subclass_attributes_preserved_if_no_settings(self, tmp_path):
"""
If subclasses override class attributes and there are no special settings, those values should be kept.
"""
pipe_cls = self._generate_fake_pipeline()
pipe = pipe_cls.from_crawler(get_crawler(None, {"FILES_STORE": tmp_path}))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
custom_value = getattr(pipe, pipe_ins_attr)
assert custom_value != self.default_cls_settings[pipe_attr]
assert getattr(pipe, pipe_ins_attr) == getattr(pipe, pipe_attr)
def test_subclass_attrs_preserved_custom_settings(self, tmp_path):
"""
If file settings are defined without the subclass prefix, they should still
take effect and override the subclass class attributes.
"""
pipeline_cls = self._generate_fake_pipeline()
settings = self._generate_fake_settings(tmp_path)
pipeline = pipeline_cls.from_crawler(get_crawler(None, settings))
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
value = getattr(pipeline, pipe_ins_attr)
setting_value = settings.get(settings_attr)
assert value != self.default_cls_settings[pipe_attr]
assert value == setting_value
def test_no_custom_settings_for_subclasses(self, tmp_path):
"""
If there are no settings for the subclass and no subclass attributes, the
pipeline should use the attributes of the base class.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
user_pipeline = UserDefinedFilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": tmp_path})
)
for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
# With no custom settings and no subclass attributes, the base class defaults apply.
custom_value = self.default_cls_settings.get(pipe_attr.upper())
assert getattr(user_pipeline, pipe_ins_attr) == custom_value
def test_custom_settings_for_subclasses(self, tmp_path):
"""
If there are custom settings for the subclass and no class attributes, the
pipeline should use the custom settings.
"""
class UserDefinedFilesPipeline(FilesPipeline):
pass
prefix = UserDefinedFilesPipeline.__name__.upper()
settings = self._generate_fake_settings(tmp_path, prefix=prefix)
user_pipeline = UserDefinedFilesPipeline.from_crawler(
get_crawler(None, settings)
)
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = settings.get(prefix + "_" + settings_attr)
assert custom_value != self.default_cls_settings[pipe_attr]
assert getattr(user_pipeline, pipe_inst_attr) == custom_value
def test_custom_settings_and_class_attrs_for_subclasses(self, tmp_path):
"""
If there are both custom settings for the subclass and class attributes,
the setting keys are preferred and override the attributes.
"""
pipeline_cls = self._generate_fake_pipeline()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(tmp_path, prefix=prefix)
user_pipeline = pipeline_cls.from_crawler(get_crawler(None, settings))
for (
pipe_cls_attr,
settings_attr,
pipe_inst_attr,
) in self.file_cls_attr_settings_map:
custom_value = settings.get(prefix + "_" + settings_attr)
assert custom_value != self.default_cls_settings[pipe_cls_attr]
assert getattr(user_pipeline, pipe_inst_attr) == custom_value
def test_cls_attrs_with_DEFAULT_prefix(self, tmp_path):
class UserDefinedFilesPipeline(FilesPipeline):
DEFAULT_FILES_RESULT_FIELD = "this"
DEFAULT_FILES_URLS_FIELD = "that"
pipeline = UserDefinedFilesPipeline.from_crawler(
get_crawler(None, {"FILES_STORE": tmp_path})
)
assert (
pipeline.files_result_field
== UserDefinedFilesPipeline.DEFAULT_FILES_RESULT_FIELD
)
assert (
pipeline.files_urls_field
== UserDefinedFilesPipeline.DEFAULT_FILES_URLS_FIELD
)
def test_user_defined_subclass_default_key_names(self, tmp_path):
"""Test situation when user defines subclass of FilesPipeline,
but uses attribute names for default pipeline (without prefixing
them with pipeline class name).
"""
settings = self._generate_fake_settings(tmp_path)
class UserPipe(FilesPipeline):
pass
pipeline = UserPipe.from_crawler(get_crawler(None, settings))
for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
expected_value = settings.get(settings_attr)
assert getattr(pipeline, pipe_inst_attr) == expected_value
def test_file_pipeline_using_pathlike_objects(self, tmp_path):
class CustomFilesPipelineWithPathLikeDir(FilesPipeline):
def file_path(self, request, response=None, info=None, *, item=None):
return Path("subdir") / Path(request.url).name
pipeline = CustomFilesPipelineWithPathLikeDir.from_crawler(
get_crawler(None, {"FILES_STORE": tmp_path})
)
request = Request("http://example.com/image01.jpg")
assert pipeline.file_path(request) == Path("subdir/image01.jpg")
def test_files_store_constructor_with_pathlike_object(self, tmp_path):
fs_store = FSFilesStore(tmp_path)
assert fs_store.basedir == str(tmp_path)
@pytest.mark.requires_botocore
class TestS3FilesStore:
@inlineCallbacks
def test_persist(self):
bucket = "mybucket"
key = "export.csv"
uri = f"s3://{bucket}/{key}"
buffer = mock.MagicMock()
meta = {"foo": "bar"}
path = ""
content_type = "image/png"
store = S3FilesStore(uri)
from botocore.stub import Stubber # noqa: PLC0415
with Stubber(store.s3_client) as stub:
stub.add_response(
"put_object",
expected_params={
"ACL": S3FilesStore.POLICY,
"Body": buffer,
"Bucket": bucket,
"CacheControl": S3FilesStore.HEADERS["Cache-Control"],
"ContentType": content_type,
"Key": key,
"Metadata": meta,
},
service_response={},
)
yield store.persist_file(
path,
buffer,
info=None,
meta=meta,
headers={"Content-Type": content_type},
)
stub.assert_no_pending_responses()
# The call to read does not happen with Stubber
assert buffer.method_calls == [mock.call.seek(0)]
@inlineCallbacks
def test_stat(self):
bucket = "mybucket"
key = "export.csv"
uri = f"s3://{bucket}/{key}"
checksum = "3187896a9657a28163abb31667df64c8"
last_modified = datetime(2019, 12, 1)
store = S3FilesStore(uri)
from botocore.stub import Stubber # noqa: PLC0415
with Stubber(store.s3_client) as stub:
stub.add_response(
"head_object",
expected_params={
"Bucket": bucket,
"Key": key,
},
service_response={
"ETag": f'"{checksum}"',
"LastModified": last_modified,
},
)
file_stats = yield store.stat_file("", info=None)
assert file_stats == {
"checksum": checksum,
"last_modified": last_modified.timestamp(),
}
stub.assert_no_pending_responses()
@pytest.mark.skipif(
"GCS_PROJECT_ID" not in os.environ, reason="GCS_PROJECT_ID not found"
)
class TestGCSFilesStore:
@inlineCallbacks
def test_persist(self):
uri = os.environ.get("GCS_TEST_FILE_URI")
if not uri:
pytest.skip("No GCS URI available for testing")
data = b"TestGCSFilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {"foo": "bar"}
path = "full/filename"
store = GCSFilesStore(uri)
store.POLICY = "authenticatedRead"
expected_policy = {"role": "READER", "entity": "allAuthenticatedUsers"}
yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
s = yield store.stat_file(path, info=None)
assert "last_modified" in s
assert "checksum" in s
assert s["checksum"] == "cdcda85605e46d0af6110752770dce3c"
u = urlparse(uri)
content, acl, blob = get_gcs_content_and_delete(u.hostname, u.path[1:] + path)
assert content == data
assert blob.metadata == {"foo": "bar"}
assert blob.cache_control == GCSFilesStore.CACHE_CONTROL
assert blob.content_type == "application/octet-stream"
assert expected_policy in acl
@inlineCallbacks
def test_blob_path_consistency(self):
"""Test to make sure that paths used to store files is the same as the one used to get
already uploaded files.
"""
try:
import google.cloud.storage # noqa: F401,PLC0415
except ModuleNotFoundError:
pytest.skip("google-cloud-storage is not installed")
with (
mock.patch("google.cloud.storage"),
mock.patch("scrapy.pipelines.files.time"),
):
uri = "gs://my_bucket/my_prefix/"
store = GCSFilesStore(uri)
store.bucket = mock.Mock()
path = "full/my_data.txt"
yield store.persist_file(
path, mock.Mock(), info=None, meta=None, headers=None
)
yield store.stat_file(path, info=None)
expected_blob_path = store.prefix + path
store.bucket.blob.assert_called_with(expected_blob_path)
store.bucket.get_blob.assert_called_with(expected_blob_path)
class TestFTPFileStore:
@inlineCallbacks
def test_persist(self):
data = b"TestFTPFilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {"foo": "bar"}
path = "full/filename"
with MockFTPServer() as ftp_server:
store = FTPFilesStore(ftp_server.url("/"))
empty_dict = yield store.stat_file(path, info=None)
assert empty_dict == {}
yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
stat = yield store.stat_file(path, info=None)
assert "last_modified" in stat
assert "checksum" in stat
assert stat["checksum"] == "d113d66b2ec7258724a268bd88eef6b6"
path = f"{store.basedir}/{path}"
content = get_ftp_content_and_delete(
path,
store.host,
store.port,
store.username,
store.password,
store.USE_ACTIVE_MODE,
)
assert data == content
class ItemWithFiles(Item):
file_urls = Field()
files = Field()
def _create_item_with_files(*files):
item = ItemWithFiles()
item["file_urls"] = files
return item
def _prepare_request_object(item_url, flags=None):
return Request(
item_url,
meta={"response": Response(item_url, status=200, body=b"data", flags=flags)},
)
# this is separate from the one in test_pipeline_media.py to specifically test FilesPipeline subclasses
class TestBuildFromCrawler:
def setup_method(self):
self.tempdir = mkdtemp()
self.crawler = get_crawler(None, {"FILES_STORE": self.tempdir})
def teardown_method(self):
rmtree(self.tempdir)
def test_simple(self):
class Pipeline(FilesPipeline):
pass
with warnings.catch_warnings(record=True) as w:
pipe = Pipeline.from_crawler(self.crawler)
assert pipe.crawler == self.crawler
assert pipe._fingerprinter
assert len(w) == 0
assert pipe.store
def test_has_from_crawler_and_init(self):
class Pipeline(FilesPipeline):
_from_crawler_called = False
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
store_uri = settings["FILES_STORE"]
o = cls(store_uri, crawler=crawler)
o._from_crawler_called = True
return o
with warnings.catch_warnings(record=True) as w:
pipe = Pipeline.from_crawler(self.crawler)
assert pipe.crawler == self.crawler
assert pipe._fingerprinter
assert len(w) == 0
assert pipe.store
assert pipe._from_crawler_called
@pytest.mark.parametrize("store", [None, ""])
def test_files_pipeline_raises_notconfigured_when_files_store_invalid(store):
settings = Settings()
settings.clear()
settings.set("FILES_STORE", store, priority="cmdline")
crawler = get_crawler(settings_dict=settings)
with pytest.raises(NotConfigured):
FilesPipeline.from_crawler(crawler)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_mail.py | tests/test_mail.py | from email.charset import Charset
from io import BytesIO
from twisted.internet import defer
from twisted.internet._sslverify import ClientTLSOptions
from scrapy.mail import MailSender
class TestMailSender:
def test_send(self):
mailsender = MailSender(debug=True)
mailsender.send(
to=["test@scrapy.org"],
subject="subject",
body="body",
_callback=self._catch_mail_sent,
)
assert self.catched_msg
assert self.catched_msg["to"] == ["test@scrapy.org"]
assert self.catched_msg["subject"] == "subject"
assert self.catched_msg["body"] == "body"
msg = self.catched_msg["msg"]
assert msg["to"] == "test@scrapy.org"
assert msg["subject"] == "subject"
assert msg.get_payload() == "body"
assert msg.get("Content-Type") == "text/plain"
def test_send_single_values_to_and_cc(self):
mailsender = MailSender(debug=True)
mailsender.send(
to="test@scrapy.org",
subject="subject",
body="body",
cc="test@scrapy.org",
_callback=self._catch_mail_sent,
)
def test_send_html(self):
mailsender = MailSender(debug=True)
mailsender.send(
to=["test@scrapy.org"],
subject="subject",
body="<p>body</p>",
mimetype="text/html",
_callback=self._catch_mail_sent,
)
msg = self.catched_msg["msg"]
assert msg.get_payload() == "<p>body</p>"
assert msg.get("Content-Type") == "text/html"
def test_send_attach(self):
attach = BytesIO()
attach.write(b"content")
attach.seek(0)
attachs = [("attachment", "text/plain", attach)]
mailsender = MailSender(debug=True)
mailsender.send(
to=["test@scrapy.org"],
subject="subject",
body="body",
attachs=attachs,
_callback=self._catch_mail_sent,
)
assert self.catched_msg
assert self.catched_msg["to"] == ["test@scrapy.org"]
assert self.catched_msg["subject"] == "subject"
assert self.catched_msg["body"] == "body"
msg = self.catched_msg["msg"]
assert msg["to"] == "test@scrapy.org"
assert msg["subject"] == "subject"
payload = msg.get_payload()
assert isinstance(payload, list)
assert len(payload) == 2
text, attach = payload
assert text.get_payload(decode=True) == b"body"
assert text.get_charset() == Charset("us-ascii")
assert attach.get_payload(decode=True) == b"content"
def _catch_mail_sent(self, **kwargs):
self.catched_msg = {**kwargs}
def test_send_utf8(self):
subject = "sübjèçt"
body = "bödÿ-àéïöñß"
mailsender = MailSender(debug=True)
mailsender.send(
to=["test@scrapy.org"],
subject=subject,
body=body,
charset="utf-8",
_callback=self._catch_mail_sent,
)
assert self.catched_msg
assert self.catched_msg["subject"] == subject
assert self.catched_msg["body"] == body
msg = self.catched_msg["msg"]
assert msg["subject"] == subject
assert msg.get_payload(decode=True).decode("utf-8") == body
assert msg.get_charset() == Charset("utf-8")
assert msg.get("Content-Type") == 'text/plain; charset="utf-8"'
def test_send_attach_utf8(self):
subject = "sübjèçt"
body = "bödÿ-àéïöñß"
attach = BytesIO()
attach.write(body.encode("utf-8"))
attach.seek(0)
attachs = [("attachment", "text/plain", attach)]
mailsender = MailSender(debug=True)
mailsender.send(
to=["test@scrapy.org"],
subject=subject,
body=body,
attachs=attachs,
charset="utf-8",
_callback=self._catch_mail_sent,
)
assert self.catched_msg
assert self.catched_msg["subject"] == subject
assert self.catched_msg["body"] == body
msg = self.catched_msg["msg"]
assert msg["subject"] == subject
assert msg.get_charset() == Charset("utf-8")
assert msg.get("Content-Type") == 'multipart/mixed; charset="utf-8"'
payload = msg.get_payload()
assert isinstance(payload, list)
assert len(payload) == 2
text, attach = payload
assert text.get_payload(decode=True).decode("utf-8") == body
assert text.get_charset() == Charset("utf-8")
assert attach.get_payload(decode=True).decode("utf-8") == body
def test_create_sender_factory_with_host(self):
mailsender = MailSender(debug=False, smtphost="smtp.testhost.com")
factory = mailsender._create_sender_factory(
to_addrs=["test@scrapy.org"], msg="test", d=defer.Deferred()
)
context = factory.buildProtocol("test@scrapy.org").context
assert isinstance(context, ClientTLSOptions)
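# When an smtphost is configured, MailSender appears to wire a
# hostname-aware TLS context into the sender factory; the ClientTLSOptions
# assertion above is what confirms that, so this reading is inferred from
# the test rather than from the implementation.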
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_poet.py | tests/test_poet.py | """Tests that make sure parts needed for the scrapy-poet stack work."""
from typing import get_type_hints
from scrapy import Spider
from scrapy.spiders import CrawlSpider, CSVFeedSpider, SitemapSpider, XMLFeedSpider
def test_callbacks():
"""Making sure annotations on all non-abstract callbacks can be resolved."""
for cb in [
Spider._parse,
CrawlSpider._parse,
CrawlSpider._callback,
XMLFeedSpider._parse,
CSVFeedSpider._parse,
SitemapSpider._parse_sitemap,
]:
get_type_hints(cb)
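# get_type_hints() raises (e.g. NameError) if an annotation cannot be
# resolved, such as a missing import under "from __future__ import
# annotations", so merely calling it without an exception is the assertion.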
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_engine_stop_download_headers.py | tests/test_engine_stop_download_headers.py | from __future__ import annotations
from typing import TYPE_CHECKING
from testfixtures import LogCapture
from scrapy.exceptions import StopDownload
from scrapy.utils.defer import deferred_f_from_coro_f
from tests.test_engine import (
AttrsItemsSpider,
CrawlerRun,
DataClassItemsSpider,
DictItemsSpider,
MySpider,
TestEngineBase,
)
if TYPE_CHECKING:
from tests.mockserver.http import MockServer
class HeadersReceivedCrawlerRun(CrawlerRun):
def headers_received(self, headers, body_length, request, spider):
super().headers_received(headers, body_length, request, spider)
raise StopDownload(fail=False)
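# Raising StopDownload from a headers_received handler halts the download
# right after the headers arrive; fail=False routes the (empty-bodied)
# response to the request callback instead of the errback, which is why the
# runs below still produce responses but no bytes_received signals.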
class TestHeadersReceivedEngine(TestEngineBase):
@deferred_f_from_coro_f
async def test_crawler(self, mockserver: MockServer) -> None:
for spider in (
MySpider,
DictItemsSpider,
AttrsItemsSpider,
DataClassItemsSpider,
):
run = HeadersReceivedCrawlerRun(spider)
with LogCapture() as log:
await run.run(mockserver)
log.check_present(
(
"scrapy.core.downloader.handlers.http11",
"DEBUG",
f"Download stopped for <GET {mockserver.url('/redirected')}> from"
" signal handler HeadersReceivedCrawlerRun.headers_received",
)
)
log.check_present(
(
"scrapy.core.downloader.handlers.http11",
"DEBUG",
f"Download stopped for <GET {mockserver.url('/static/')}> from signal"
" handler HeadersReceivedCrawlerRun.headers_received",
)
)
log.check_present(
(
"scrapy.core.downloader.handlers.http11",
"DEBUG",
f"Download stopped for <GET {mockserver.url('/numbers')}> from"
" signal handler HeadersReceivedCrawlerRun.headers_received",
)
)
self._assert_visited_urls(run)
self._assert_downloaded_responses(run, count=6)
self._assert_signals_caught(run)
self._assert_bytes_received(run)
self._assert_headers_received(run)
@staticmethod
def _assert_bytes_received(run: CrawlerRun) -> None:
assert len(run.bytes) == 0
@staticmethod
def _assert_visited_urls(run: CrawlerRun) -> None:
must_be_visited = ["/static/", "/redirect", "/redirected"]
urls_visited = {rp[0].url for rp in run.respplug}
urls_expected = {run.geturl(p) for p in must_be_visited}
assert urls_expected <= urls_visited, (
f"URLs not visited: {list(urls_expected - urls_visited)}"
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_stats.py | tests/test_stats.py | from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING
from unittest import mock
import pytest
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.extensions.corestats import CoreStats
from scrapy.spiders import Spider
from scrapy.statscollectors import DummyStatsCollector, StatsCollector
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from scrapy.crawler import Crawler
@pytest.fixture
def crawler() -> Crawler:
return get_crawler(Spider)
@pytest.fixture
def spider(crawler: Crawler) -> Spider:
return crawler._create_spider("foo")
class TestCoreStatsExtension:
@mock.patch("scrapy.extensions.corestats.datetime")
def test_core_stats_default_stats_collector(
self, mock_datetime: mock.Mock, crawler: Crawler, spider: Spider
) -> None:
fixed_datetime = datetime(2019, 12, 1, 11, 38)
mock_datetime.now = mock.Mock(return_value=fixed_datetime)
crawler.stats = StatsCollector(crawler)
ext = CoreStats.from_crawler(crawler)
ext.spider_opened(spider)
ext.item_scraped({}, spider)
ext.response_received(spider)
ext.item_dropped({}, spider, ZeroDivisionError())
ext.spider_closed(spider, "finished")
assert ext.stats._stats == {
"start_time": fixed_datetime,
"finish_time": fixed_datetime,
"item_scraped_count": 1,
"response_received_count": 1,
"item_dropped_count": 1,
"item_dropped_reasons_count/ZeroDivisionError": 1,
"finish_reason": "finished",
"elapsed_time_seconds": 0.0,
}
def test_core_stats_dummy_stats_collector(
self, crawler: Crawler, spider: Spider
) -> None:
crawler.stats = DummyStatsCollector(crawler)
ext = CoreStats.from_crawler(crawler)
ext.spider_opened(spider)
ext.item_scraped({}, spider)
ext.response_received(spider)
ext.item_dropped({}, spider, ZeroDivisionError())
ext.spider_closed(spider, "finished")
assert ext.stats._stats == {}
class TestStatsCollector:
def test_collector(self, crawler: Crawler) -> None:
stats = StatsCollector(crawler)
assert stats.get_stats() == {}
assert stats.get_value("anything") is None
assert stats.get_value("anything", "default") == "default"
stats.set_value("test", "value")
assert stats.get_stats() == {"test": "value"}
stats.set_value("test2", 23)
assert stats.get_stats() == {"test": "value", "test2": 23}
assert stats.get_value("test2") == 23
stats.inc_value("test2")
assert stats.get_value("test2") == 24
stats.inc_value("test2", 6)
assert stats.get_value("test2") == 30
stats.max_value("test2", 6)
assert stats.get_value("test2") == 30
stats.max_value("test2", 40)
assert stats.get_value("test2") == 40
stats.max_value("test3", 1)
assert stats.get_value("test3") == 1
stats.min_value("test2", 60)
assert stats.get_value("test2") == 40
stats.min_value("test2", 35)
assert stats.get_value("test2") == 35
stats.min_value("test4", 7)
assert stats.get_value("test4") == 7
def test_dummy_collector(self, crawler: Crawler) -> None:
stats = DummyStatsCollector(crawler)
assert stats.get_stats() == {}
assert stats.get_value("anything") is None
assert stats.get_value("anything", "default") == "default"
stats.set_value("test", "value")
stats.inc_value("v1")
stats.max_value("v2", 100)
stats.min_value("v3", 100)
stats.open_spider()
stats.set_value("test", "value")
assert stats.get_stats() == {}
def test_deprecated_spider_arg(self, crawler: Crawler, spider: Spider) -> None:
stats = StatsCollector(crawler)
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a 'spider' argument to StatsCollector.set_value\(\) is deprecated",
):
stats.set_value("test", "value", spider=spider)
assert stats.get_stats() == {"test": "value"}
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a 'spider' argument to StatsCollector.get_stats\(\) is deprecated",
):
assert stats.get_stats(spider) == {"test": "value"}
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_start.py | tests/test_spidermiddleware_start.py | from scrapy.http import Request
from scrapy.spidermiddlewares.start import StartSpiderMiddleware
from scrapy.spiders import Spider
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.test import get_crawler
class TestMiddleware:
@deferred_f_from_coro_f
async def test_async(self):
crawler = get_crawler(Spider)
mw = build_from_crawler(StartSpiderMiddleware, crawler)
async def start():
yield Request("data:,1")
yield Request("data:,2", meta={"is_start_request": True})
yield Request("data:,2", meta={"is_start_request": False})
yield Request("data:,2", meta={"is_start_request": "foo"})
result = [
request.meta["is_start_request"]
async for request in mw.process_start(start())
]
assert result == [True, True, False, "foo"]
@deferred_f_from_coro_f
async def test_sync(self):
crawler = get_crawler(Spider)
mw = build_from_crawler(StartSpiderMiddleware, crawler)
def start():
yield Request("data:,1")
yield Request("data:,2", meta={"is_start_request": True})
yield Request("data:,2", meta={"is_start_request": False})
yield Request("data:,2", meta={"is_start_request": "foo"})
result = [
request.meta["is_start_request"]
for request in mw.process_start_requests(start(), Spider("test"))
]
assert result == [True, True, False, "foo"]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_robotstxt.py | tests/test_downloadermiddleware_robotstxt.py | from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING
from unittest import mock
import pytest
from twisted.internet import error
from twisted.internet.defer import Deferred, DeferredList
from twisted.python import failure
from scrapy.downloadermiddlewares.robotstxt import RobotsTxtMiddleware
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import Request, Response, TextResponse
from scrapy.http.request import NO_CALLBACK
from scrapy.settings import Settings
from scrapy.utils.asyncio import call_later
from scrapy.utils.defer import (
deferred_f_from_coro_f,
deferred_from_coro,
maybe_deferred_to_future,
)
from tests.test_robotstxt_interface import rerp_available
if TYPE_CHECKING:
from scrapy.crawler import Crawler
class TestRobotsTxtMiddleware:
def setup_method(self):
self.crawler = mock.MagicMock()
self.crawler.settings = Settings()
self.crawler.engine.download_async = mock.AsyncMock()
def teardown_method(self):
del self.crawler
def test_robotstxt_settings(self):
self.crawler.settings = Settings()
self.crawler.settings.set("USER_AGENT", "CustomAgent")
with pytest.raises(NotConfigured):
RobotsTxtMiddleware(self.crawler)
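# The middleware raises NotConfigured unless ROBOTSTXT_OBEY is enabled, so
# robots.txt enforcement is strictly opt-in.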
def _get_successful_crawler(self) -> Crawler:
crawler = self.crawler
crawler.settings.set("ROBOTSTXT_OBEY", True)
ROBOTS = """
User-Agent: *
Disallow: /admin/
Disallow: /static/
# taken from https://en.wikipedia.org/robots.txt
Disallow: /wiki/K%C3%A4ytt%C3%A4j%C3%A4:
Disallow: /wiki/Käyttäjä:
User-Agent: UnicödeBöt
Disallow: /some/randome/page.html
""".encode()
response = TextResponse("http://site.local/robots.txt", body=ROBOTS)
async def return_response(request):
deferred = Deferred()
call_later(0, deferred.callback, response)
return await maybe_deferred_to_future(deferred)
crawler.engine.download_async.side_effect = return_response
return crawler
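# The mocked download_async resolves on a later reactor turn via
# call_later(0, ...), so the robots.txt fetch is genuinely asynchronous and
# concurrent requests for the same domain must share the one pending parser
# (exercised by the multiple-request tests below).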
@deferred_f_from_coro_f
async def test_robotstxt(self):
middleware = RobotsTxtMiddleware(self._get_successful_crawler())
await self.assertNotIgnored(Request("http://site.local/allowed"), middleware)
self.assertRobotsTxtRequested("http://site.local")
await self.assertIgnored(Request("http://site.local/admin/main"), middleware)
await self.assertIgnored(Request("http://site.local/static/"), middleware)
await self.assertIgnored(
Request("http://site.local/wiki/K%C3%A4ytt%C3%A4j%C3%A4:"), middleware
)
await self.assertIgnored(
Request("http://site.local/wiki/Käyttäjä:"), middleware
)
@deferred_f_from_coro_f
async def test_robotstxt_multiple_reqs(self) -> None:
middleware = RobotsTxtMiddleware(self._get_successful_crawler())
d1 = deferred_from_coro(
middleware.process_request(Request("http://site.local/allowed1"))
)
d2 = deferred_from_coro(
middleware.process_request(Request("http://site.local/allowed2"))
)
await maybe_deferred_to_future(DeferredList([d1, d2], fireOnOneErrback=True))
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_robotstxt_multiple_reqs_asyncio(self) -> None:
middleware = RobotsTxtMiddleware(self._get_successful_crawler())
c1 = middleware.process_request(Request("http://site.local/allowed1"))
c2 = middleware.process_request(Request("http://site.local/allowed2"))
await asyncio.gather(c1, c2)
@deferred_f_from_coro_f
async def test_robotstxt_ready_parser(self):
middleware = RobotsTxtMiddleware(self._get_successful_crawler())
await self.assertNotIgnored(Request("http://site.local/allowed"), middleware)
await self.assertNotIgnored(Request("http://site.local/allowed"), middleware)
@deferred_f_from_coro_f
async def test_robotstxt_meta(self):
middleware = RobotsTxtMiddleware(self._get_successful_crawler())
meta = {"dont_obey_robotstxt": True}
await self.assertNotIgnored(
Request("http://site.local/allowed", meta=meta), middleware
)
await self.assertNotIgnored(
Request("http://site.local/admin/main", meta=meta), middleware
)
await self.assertNotIgnored(
Request("http://site.local/static/", meta=meta), middleware
)
def _get_garbage_crawler(self) -> Crawler:
crawler = self.crawler
crawler.settings.set("ROBOTSTXT_OBEY", True)
response = Response(
"http://site.local/robots.txt", body=b"GIF89a\xd3\x00\xfe\x00\xa2"
)
async def return_response(request):
deferred = Deferred()
call_later(0, deferred.callback, response)
return await maybe_deferred_to_future(deferred)
crawler.engine.download_async.side_effect = return_response
return crawler
@deferred_f_from_coro_f
async def test_robotstxt_garbage(self):
# a garbage response should be discarded and treated as 'allow all'
middleware = RobotsTxtMiddleware(self._get_garbage_crawler())
await self.assertNotIgnored(Request("http://site.local"), middleware)
await self.assertNotIgnored(Request("http://site.local/allowed"), middleware)
await self.assertNotIgnored(Request("http://site.local/admin/main"), middleware)
await self.assertNotIgnored(Request("http://site.local/static/"), middleware)
def _get_emptybody_crawler(self) -> Crawler:
crawler = self.crawler
crawler.settings.set("ROBOTSTXT_OBEY", True)
response = Response("http://site.local/robots.txt")
async def return_response(request):
deferred = Deferred()
call_later(0, deferred.callback, response)
return await maybe_deferred_to_future(deferred)
crawler.engine.download_async.side_effect = return_response
return crawler
@deferred_f_from_coro_f
async def test_robotstxt_empty_response(self):
# an empty response should be treated as 'allow all'
middleware = RobotsTxtMiddleware(self._get_emptybody_crawler())
await self.assertNotIgnored(Request("http://site.local/allowed"), middleware)
await self.assertNotIgnored(Request("http://site.local/admin/main"), middleware)
await self.assertNotIgnored(Request("http://site.local/static/"), middleware)
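# NOTE: hypothetical illustration of the "allow all" fallback asserted in the
# garbage and empty-body tests above, using the stdlib parser:
#   import urllib.robotparser
#   rp = urllib.robotparser.RobotFileParser()
#   rp.parse([])  # nothing parseable -> no rules -> everything allowed
#   assert rp.can_fetch("*", "http://site.local/admin/main")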
@deferred_f_from_coro_f
async def test_robotstxt_error(self, caplog: pytest.LogCaptureFixture) -> None:
self.crawler.settings.set("ROBOTSTXT_OBEY", True)
err = error.DNSLookupError("Robotstxt address not found")
async def return_failure(request):
deferred = Deferred()
call_later(0, deferred.errback, failure.Failure(err))
return await maybe_deferred_to_future(deferred)
self.crawler.engine.download_async.side_effect = return_failure
middleware = RobotsTxtMiddleware(self.crawler)
await middleware.process_request(Request("http://site.local"))
assert "DNS lookup failed: Robotstxt address not found" in caplog.text
@deferred_f_from_coro_f
async def test_robotstxt_immediate_error(self):
self.crawler.settings.set("ROBOTSTXT_OBEY", True)
err = error.DNSLookupError("Robotstxt address not found")
async def immediate_failure(request):
raise err
self.crawler.engine.download_async.side_effect = immediate_failure
middleware = RobotsTxtMiddleware(self.crawler)
await self.assertNotIgnored(Request("http://site.local"), middleware)
@deferred_f_from_coro_f
async def test_ignore_robotstxt_request(self):
self.crawler.settings.set("ROBOTSTXT_OBEY", True)
async def ignore_request(request):
deferred = Deferred()
call_later(0, deferred.errback, failure.Failure(IgnoreRequest()))
return await maybe_deferred_to_future(deferred)
self.crawler.engine.download_async.side_effect = ignore_request
middleware = RobotsTxtMiddleware(self.crawler)
with mock.patch(
"scrapy.downloadermiddlewares.robotstxt.logger"
) as mw_module_logger:
await self.assertNotIgnored(
Request("http://site.local/allowed"), middleware
)
assert not mw_module_logger.error.called
def test_robotstxt_user_agent_setting(self):
crawler = self._get_successful_crawler()
crawler.settings.set("ROBOTSTXT_USER_AGENT", "Examplebot")
crawler.settings.set("USER_AGENT", "Mozilla/5.0 (X11; Linux x86_64)")
middleware = RobotsTxtMiddleware(crawler)
rp = mock.MagicMock(return_value=True)
middleware.process_request_2(rp, Request("http://site.local/allowed"))
rp.allowed.assert_called_once_with("http://site.local/allowed", "Examplebot")
@deferred_f_from_coro_f
async def test_robotstxt_local_file(self):
middleware = RobotsTxtMiddleware(self._get_emptybody_crawler())
middleware.process_request_2 = mock.MagicMock()
await middleware.process_request(Request("data:text/plain,Hello World data"))
assert not middleware.process_request_2.called
await middleware.process_request(
Request("file:///tests/sample_data/test_site/nothinghere.html")
)
assert not middleware.process_request_2.called
await middleware.process_request(Request("http://site.local/allowed"))
assert middleware.process_request_2.called
async def assertNotIgnored(
self, request: Request, middleware: RobotsTxtMiddleware
) -> None:
try:
await middleware.process_request(request)
except IgnoreRequest:
pytest.fail("IgnoreRequest was raised unexpectedly")
async def assertIgnored(
self, request: Request, middleware: RobotsTxtMiddleware
) -> None:
with pytest.raises(IgnoreRequest):
await middleware.process_request(request)
def assertRobotsTxtRequested(self, base_url: str) -> None:
calls = self.crawler.engine.download_async.call_args_list
request = calls[0][0][0]
assert request.url == f"{base_url}/robots.txt"
assert request.callback == NO_CALLBACK
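# NOTE: NO_CALLBACK marks requests that Scrapy components issue internally
# (here: the robots.txt fetch) and that are never meant to reach a spider
# callback.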
@pytest.mark.skipif(not rerp_available(), reason="Rerp parser is not installed")
class TestRobotsTxtMiddlewareWithRerp(TestRobotsTxtMiddleware):
def setup_method(self):
super().setup_method()
self.crawler.settings.set(
"ROBOTSTXT_PARSER", "scrapy.robotstxt.RerpRobotParser"
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_asyncgen.py | tests/test_utils_asyncgen.py | from scrapy.utils.asyncgen import as_async_generator, collect_asyncgen
from scrapy.utils.defer import deferred_f_from_coro_f
class TestAsyncgenUtils:
@deferred_f_from_coro_f
async def test_as_async_generator(self):
ag = as_async_generator(range(42))
results = [i async for i in ag]
assert results == list(range(42))
@deferred_f_from_coro_f
async def test_collect_asyncgen(self):
ag = as_async_generator(range(42))
results = await collect_asyncgen(ag)
assert results == list(range(42))
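# NOTE: a hypothetical reference sketch (not the real implementation) of the
# semantics these two tests exercise: as_async_generator() lifts a sync
# iterable into an async generator, and collect_asyncgen() drains an async
# iterable into a list.
from collections.abc import AsyncIterable, AsyncIterator, Iterable
from typing import Any
async def _as_async_generator_sketch(it: Iterable[Any]) -> AsyncIterator[Any]:
    for x in it:
        yield x
async def _collect_asyncgen_sketch(result: AsyncIterable[Any]) -> list[Any]:
    return [x async for x in result]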
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_reactor.py | tests/test_utils_reactor.py | import asyncio
import warnings
import pytest
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.reactor import (
_asyncio_reactor_path,
install_reactor,
is_asyncio_reactor_installed,
set_asyncio_event_loop,
)
class TestAsyncio:
def test_is_asyncio_reactor_installed(self, reactor_pytest: str) -> None:
# the result should depend only on the pytest --reactor argument
assert is_asyncio_reactor_installed() == (reactor_pytest == "asyncio")
def test_install_asyncio_reactor(self):
from twisted.internet import reactor as original_reactor
with warnings.catch_warnings(record=True) as w:
install_reactor(_asyncio_reactor_path)
assert len(w) == 0, [str(warning) for warning in w]
from twisted.internet import reactor # pylint: disable=reimported
assert original_reactor == reactor
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_set_asyncio_event_loop(self):
install_reactor(_asyncio_reactor_path)
assert set_asyncio_event_loop(None) is asyncio.get_running_loop()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloader_handler_twisted_http2.py | tests/test_downloader_handler_twisted_http2.py | """Tests for scrapy.core.downloader.handlers.http2.H2DownloadHandler."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any
from unittest import mock
import pytest
from testfixtures import LogCapture
from twisted.internet import defer, error
from twisted.web.error import SchemeNotSupported
from twisted.web.http import H2_ENABLED
from scrapy.http import Request
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from tests.test_downloader_handlers_http_base import (
TestHttpProxyBase,
TestHttps11Base,
TestHttpsCustomCiphersBase,
TestHttpsInvalidDNSIdBase,
TestHttpsInvalidDNSPatternBase,
TestHttpsWrongHostnameBase,
TestHttpWithCrawlerBase,
)
if TYPE_CHECKING:
from scrapy.core.downloader.handlers import DownloadHandlerProtocol
from tests.mockserver.http import MockServer
from tests.mockserver.proxy_echo import ProxyEchoMockServer
pytestmark = pytest.mark.skipif(
not H2_ENABLED, reason="HTTP/2 support in Twisted is not enabled"
)
class H2DownloadHandlerMixin:
@property
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
# the import can fail when H2_ENABLED is False
from scrapy.core.downloader.handlers.http2 import ( # noqa: PLC0415
H2DownloadHandler,
)
return H2DownloadHandler
class TestHttps2(H2DownloadHandlerMixin, TestHttps11Base):
HTTP2_DATALOSS_SKIP_REASON = "Content-Length mismatch raises InvalidBodyLengthError"
@deferred_f_from_coro_f
async def test_protocol(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/host", is_secure=self.is_secure), method="GET"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.protocol == "h2"
@deferred_f_from_coro_f
async def test_download_with_maxsize_very_large_file(
self, mockserver: MockServer
) -> None:
from twisted.internet import reactor
with mock.patch("scrapy.core.http2.stream.logger") as logger:
request = Request(
mockserver.url("/largechunkedfile", is_secure=self.is_secure)
)
def check(logger: mock.Mock) -> None:
logger.error.assert_called_once_with(mock.ANY)
async with self.get_dh({"DOWNLOAD_MAXSIZE": 1_500}) as download_handler:
with pytest.raises((defer.CancelledError, error.ConnectionAborted)):
await download_handler.download_request(request)
# As the error message is logged in the dataReceived callback, we
# have to give a bit of time to the reactor to process the queue
# after closing the connection.
d: defer.Deferred[mock.Mock] = defer.Deferred()
d.addCallback(check)
reactor.callLater(0.1, d.callback, logger)
await maybe_deferred_to_future(d)
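# NOTE: a hypothetical equivalent of the callLater/Deferred dance above, in
# one call via twisted.internet.task.deferLater (same reactor-turn delay):
#   from twisted.internet import task
#   await maybe_deferred_to_future(task.deferLater(reactor, 0.1, check, logger))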
@deferred_f_from_coro_f
async def test_unsupported_scheme(self) -> None:
request = Request("ftp://unsupported.scheme")
async with self.get_dh() as download_handler:
with pytest.raises(SchemeNotSupported):
await download_handler.download_request(request)
def test_download_cause_data_loss(self) -> None: # type: ignore[override]
pytest.skip(self.HTTP2_DATALOSS_SKIP_REASON)
def test_download_allow_data_loss(self) -> None: # type: ignore[override]
pytest.skip(self.HTTP2_DATALOSS_SKIP_REASON)
def test_download_allow_data_loss_via_setting(self) -> None: # type: ignore[override]
pytest.skip(self.HTTP2_DATALOSS_SKIP_REASON)
@deferred_f_from_coro_f
async def test_concurrent_requests_same_domain(
self, mockserver: MockServer
) -> None:
request1 = Request(mockserver.url("/text", is_secure=self.is_secure))
request2 = Request(
mockserver.url("/echo", is_secure=self.is_secure), method="POST"
)
async with self.get_dh() as download_handler:
response1 = await download_handler.download_request(request1)
assert response1.body == b"Works"
response2 = await download_handler.download_request(request2)
assert response2.headers["Content-Length"] == b"79"
@pytest.mark.xfail(reason="https://github.com/python-hyper/h2/issues/1247")
@deferred_f_from_coro_f
async def test_connect_request(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/file", is_secure=self.is_secure), method="CONNECT"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b""
@deferred_f_from_coro_f
async def test_custom_content_length_good(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/contentlength", is_secure=self.is_secure))
custom_content_length = str(len(request.body))
request.headers["Content-Length"] = custom_content_length
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.text == custom_content_length
@deferred_f_from_coro_f
async def test_custom_content_length_bad(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/contentlength", is_secure=self.is_secure))
actual_content_length = str(len(request.body))
bad_content_length = str(len(request.body) + 1)
request.headers["Content-Length"] = bad_content_length
async with self.get_dh() as download_handler:
with LogCapture() as log:
response = await download_handler.download_request(request)
assert response.text == actual_content_length
log.check_present(
(
"scrapy.core.http2.stream",
"WARNING",
f"Ignoring bad Content-Length header "
f"{bad_content_length!r} of request {request}, sending "
f"{actual_content_length!r} instead",
)
)
@deferred_f_from_coro_f
async def test_duplicate_header(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/echo", is_secure=self.is_secure))
header, value1, value2 = "Custom-Header", "foo", "bar"
request.headers.appendlist(header, value1)
request.headers.appendlist(header, value2)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert json.loads(response.text)["headers"][header] == [value1, value2]
class TestHttps2WrongHostname(H2DownloadHandlerMixin, TestHttpsWrongHostnameBase):
pass
class TestHttps2InvalidDNSId(H2DownloadHandlerMixin, TestHttpsInvalidDNSIdBase):
pass
class TestHttps2InvalidDNSPattern(
H2DownloadHandlerMixin, TestHttpsInvalidDNSPatternBase
):
pass
class TestHttps2CustomCiphers(H2DownloadHandlerMixin, TestHttpsCustomCiphersBase):
pass
class TestHttp2WithCrawler(TestHttpWithCrawlerBase):
"""HTTP 2.0 test case with MockServer"""
@property
def settings_dict(self) -> dict[str, Any] | None:
return {
"DOWNLOAD_HANDLERS": {
"https": "scrapy.core.downloader.handlers.http2.H2DownloadHandler"
}
}
is_secure = True
class TestHttps2Proxy(H2DownloadHandlerMixin, TestHttpProxyBase):
is_secure = True
expected_http_proxy_request_body = b"/"
@deferred_f_from_coro_f
async def test_download_with_proxy_https_timeout(
self, proxy_mockserver: ProxyEchoMockServer
) -> None:
with pytest.raises(NotImplementedError):
await maybe_deferred_to_future(
super().test_download_with_proxy_https_timeout(proxy_mockserver)
)
@deferred_f_from_coro_f
async def test_download_with_proxy_without_http_scheme(
self, proxy_mockserver: ProxyEchoMockServer
) -> None:
with pytest.raises(SchemeNotSupported):
await maybe_deferred_to_future(
super().test_download_with_proxy_without_http_scheme(proxy_mockserver)
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_startproject.py | tests/test_command_startproject.py | from __future__ import annotations
import os
import subprocess
import sys
from contextlib import contextmanager
from itertools import chain
from pathlib import Path
from shutil import copytree
from stat import S_IWRITE as ANYONE_WRITE_PERMISSION
import scrapy
from scrapy.commands.startproject import IGNORE
from scrapy.utils.test import get_testenv
from tests.utils.cmdline import call, proc
class TestStartprojectCommand:
project_name = "testproject"
@staticmethod
def _assert_files_exist(project_dir: Path, project_name: str) -> None:
assert (project_dir / "scrapy.cfg").exists()
assert (project_dir / project_name).exists()
assert (project_dir / project_name / "__init__.py").exists()
assert (project_dir / project_name / "items.py").exists()
assert (project_dir / project_name / "pipelines.py").exists()
assert (project_dir / project_name / "settings.py").exists()
assert (project_dir / project_name / "spiders" / "__init__.py").exists()
def test_startproject(self, tmp_path: Path) -> None:
# with no dir argument, the project is created in the "self.project_name" subdir of the cwd
assert call("startproject", self.project_name, cwd=tmp_path) == 0
self._assert_files_exist(tmp_path / self.project_name, self.project_name)
assert call("startproject", self.project_name, cwd=tmp_path) == 1
assert call("startproject", "wrong---project---name") == 1
assert call("startproject", "sys") == 1
def test_startproject_with_project_dir(self, tmp_path: Path) -> None:
# with a dir argument, the project is created in the specified dir
project_dir = tmp_path / "project"
assert (
call("startproject", self.project_name, str(project_dir), cwd=tmp_path) == 0
)
self._assert_files_exist(project_dir, self.project_name)
assert (
call(
"startproject", self.project_name, str(project_dir) + "2", cwd=tmp_path
)
== 0
)
assert (
call("startproject", self.project_name, str(project_dir), cwd=tmp_path) == 1
)
assert (
call(
"startproject", self.project_name + "2", str(project_dir), cwd=tmp_path
)
== 1
)
assert call("startproject", "wrong---project---name") == 1
assert call("startproject", "sys") == 1
assert call("startproject") == 2
assert (
call("startproject", self.project_name, str(project_dir), "another_params")
== 2
)
def test_existing_project_dir(self, tmp_path: Path) -> None:
project_name = self.project_name + "_existing"
project_path = tmp_path / project_name
project_path.mkdir()
assert call("startproject", project_name, cwd=tmp_path) == 0
self._assert_files_exist(project_path, project_name)
def get_permissions_dict(
path: str | os.PathLike, renamings=None, ignore=None
) -> dict[str, str]:
def get_permissions(path: Path) -> str:
return oct(path.stat().st_mode)
path_obj = Path(path)
renamings = renamings or ()
permissions_dict = {
".": get_permissions(path_obj),
}
for root, dirs, files in os.walk(path_obj):
nodes = list(chain(dirs, files))
if ignore:
ignored_names = ignore(root, nodes)
nodes = [node for node in nodes if node not in ignored_names]
for node in nodes:
absolute_path = Path(root, node)
relative_path = str(absolute_path.relative_to(path))
for search_string, replacement in renamings:
relative_path = relative_path.replace(search_string, replacement)
permissions = get_permissions(absolute_path)
permissions_dict[relative_path] = permissions
return permissions_dict
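# NOTE: a hypothetical usage sketch (not part of the suite): the tests below
# snapshot the template tree's permissions, using renamings such as
# (".tmpl", "") to map template file names onto generated ones, and compare
# against a freshly generated project.
def _permissions_match(template_dir, project_dir, project_name):  # illustration only
    expected = get_permissions_dict(
        template_dir, renamings=(("module", project_name), (".tmpl", ""))
    )
    return get_permissions_dict(project_dir) == expected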
class TestStartprojectTemplates:
def test_startproject_template_override(self, tmp_path: Path) -> None:
tmpl = tmp_path / "templates"
tmpl_proj = tmpl / "project"
project_name = "testproject"
copytree(Path(scrapy.__path__[0], "templates"), tmpl)
(tmpl_proj / "root_template").write_bytes(b"")
args = ["--set", f"TEMPLATES_DIR={tmpl}"]
_, out, _ = proc("startproject", project_name, *args, cwd=tmp_path)
assert f"New Scrapy project '{project_name}', using template directory" in out
assert str(tmpl_proj) in out
assert (tmp_path / project_name / "root_template").exists()
def test_startproject_permissions_from_writable(self, tmp_path: Path) -> None:
"""Check that generated files have the right permissions when the
template folder has the same permissions as in the project, i.e.
everything is writable."""
scrapy_path = scrapy.__path__[0]
project_template = Path(scrapy_path, "templates", "project")
project_name = "startproject1"
renamings = (
("module", project_name),
(".tmpl", ""),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
destination = tmp_path / "proj"
destination.mkdir()
process = subprocess.Popen(
(
sys.executable,
"-m",
"scrapy.cmdline",
"startproject",
project_name,
),
cwd=destination,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
env=get_testenv(),
)
process.wait()
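# NOTE: a hypothetical equivalent using the higher-level subprocess API:
#   subprocess.run(
#       [sys.executable, "-m", "scrapy.cmdline", "startproject", project_name],
#       cwd=destination, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
#       env=get_testenv(), check=False,
#   )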
project_dir = destination / project_name
actual_permissions = get_permissions_dict(project_dir)
assert actual_permissions == expected_permissions
def test_startproject_permissions_from_read_only(self, tmp_path: Path) -> None:
"""Check that generated files have the right permissions when the
template folder has been made read-only, as some systems do.
See https://github.com/scrapy/scrapy/pull/4604
"""
scrapy_path = scrapy.__path__[0]
templates_dir = Path(scrapy_path, "templates")
project_template = Path(templates_dir, "project")
project_name = "startproject2"
renamings = (
("module", project_name),
(".tmpl", ""),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
def _make_read_only(path: Path):
current_permissions = path.stat().st_mode
path.chmod(current_permissions & ~ANYONE_WRITE_PERMISSION)
read_only_templates_dir = tmp_path / "templates"
copytree(templates_dir, read_only_templates_dir)
for root, dirs, files in os.walk(read_only_templates_dir):
for node in chain(dirs, files):
_make_read_only(Path(root, node))
destination = tmp_path / "proj"
destination.mkdir()
assert (
call(
"startproject",
project_name,
"--set",
f"TEMPLATES_DIR={read_only_templates_dir}",
cwd=destination,
)
== 0
)
project_dir = destination / project_name
actual_permissions = get_permissions_dict(project_dir)
assert actual_permissions == expected_permissions
def test_startproject_permissions_unchanged_in_destination(
self, tmp_path: Path
) -> None:
"""Check that preexisting folders and files in the destination folder
do not see their permissions modified."""
scrapy_path = scrapy.__path__[0]
project_template = Path(scrapy_path, "templates", "project")
project_name = "startproject3"
renamings = (
("module", project_name),
(".tmpl", ""),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
destination = tmp_path / "proj"
project_dir = destination / project_name
project_dir.mkdir(parents=True)
existing_nodes = {
f"{permissions:o}{extension}": permissions
for extension in ("", ".d")
for permissions in (
0o444,
0o555,
0o644,
0o666,
0o755,
0o777,
)
}
for node, permissions in existing_nodes.items():
path = project_dir / node
if node.endswith(".d"):
path.mkdir(mode=permissions)
else:
path.touch(mode=permissions)
expected_permissions[node] = oct(path.stat().st_mode)
assert call("startproject", project_name, ".", cwd=project_dir) == 0
actual_permissions = get_permissions_dict(project_dir)
assert actual_permissions == expected_permissions
def test_startproject_permissions_umask_022(self, tmp_path: Path) -> None:
"""Check that generated files have the right permissions when the
system uses a umask value that causes new files to have different
permissions than those from the template folder."""
@contextmanager
def umask(new_mask):
    cur_mask = os.umask(new_mask)
    try:
        yield
    finally:
        os.umask(cur_mask)  # restore even if the body raises
scrapy_path = scrapy.__path__[0]
project_template = Path(scrapy_path, "templates", "project")
project_name = "umaskproject"
renamings = (
("module", project_name),
(".tmpl", ""),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
with umask(0o002):
destination = tmp_path / "proj"
destination.mkdir()
assert call("startproject", project_name, cwd=destination) == 0
project_dir = destination / project_name
actual_permissions = get_permissions_dict(project_dir)
assert actual_permissions == expected_permissions
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spider_start.py | tests/test_spider_start.py | from __future__ import annotations
import warnings
from asyncio import sleep
from typing import Any
import pytest
from testfixtures import LogCapture
from scrapy import Spider, signals
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.test import get_crawler
from .utils import twisted_sleep
SLEEP_SECONDS = 0.1
ITEM_A = {"id": "a"}
ITEM_B = {"id": "b"}
class TestMain:
async def _test_spider(
self, spider: type[Spider], expected_items: list[Any] | None = None
) -> None:
actual_items = []
expected_items = [] if expected_items is None else expected_items
def track_item(item, response, spider):
actual_items.append(item)
crawler = get_crawler(spider)
crawler.signals.connect(track_item, signals.item_scraped)
await crawler.crawl_async()
assert crawler.stats
assert crawler.stats.get_value("finish_reason") == "finished"
assert actual_items == expected_items
@deferred_f_from_coro_f
async def test_start_urls(self):
class TestSpider(Spider):
name = "test"
start_urls = ["data:,"]
async def parse(self, response):
yield ITEM_A
with warnings.catch_warnings():
warnings.simplefilter("error")
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_start(self):
class TestSpider(Spider):
name = "test"
async def start(self):
yield ITEM_A
with warnings.catch_warnings():
warnings.simplefilter("error")
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_start_subclass(self):
class BaseSpider(Spider):
async def start(self):
yield ITEM_A
class TestSpider(BaseSpider):
name = "test"
with warnings.catch_warnings():
warnings.simplefilter("error")
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_deprecated(self):
class TestSpider(Spider):
name = "test"
def start_requests(self):
yield ITEM_A
with pytest.warns(ScrapyDeprecationWarning):
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_deprecated_subclass(self):
class BaseSpider(Spider):
def start_requests(self):
yield ITEM_A
class TestSpider(BaseSpider):
name = "test"
# The warning must be about the base class and not the subclass.
with pytest.warns(ScrapyDeprecationWarning, match="BaseSpider"):
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_universal(self):
class TestSpider(Spider):
name = "test"
async def start(self):
yield ITEM_A
def start_requests(self):
yield ITEM_B
with warnings.catch_warnings():
warnings.simplefilter("error")
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_universal_subclass(self):
class BaseSpider(Spider):
async def start(self):
yield ITEM_A
def start_requests(self):
yield ITEM_B
class TestSpider(BaseSpider):
name = "test"
with warnings.catch_warnings():
warnings.simplefilter("error")
await self._test_spider(TestSpider, [ITEM_A])
@deferred_f_from_coro_f
async def test_start_deprecated_super(self):
class TestSpider(Spider):
name = "test"
async def start(self):
for item_or_request in super().start_requests():
yield item_or_request
with pytest.warns(
ScrapyDeprecationWarning, match=r"use Spider\.start\(\) instead"
) as messages:
await self._test_spider(TestSpider, [])
assert messages[0].filename.endswith("test_spider_start.py")
async def _test_start(self, start_, expected_items=None):
class TestSpider(Spider):
name = "test"
start = start_
await self._test_spider(TestSpider, expected_items)
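# NOTE: in _test_start above, assigning the plain "start_" function as a class
# attribute turns it into a method, which is why the start(spider) callables
# passed below receive the spider instance as their first positional argument.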
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_asyncio_delayed(self):
async def start(spider):
await sleep(SLEEP_SECONDS)
yield ITEM_A
await self._test_start(start, [ITEM_A])
@deferred_f_from_coro_f
async def test_twisted_delayed(self):
async def start(spider):
await maybe_deferred_to_future(twisted_sleep(SLEEP_SECONDS))
yield ITEM_A
await self._test_start(start, [ITEM_A])
# Exceptions
@deferred_f_from_coro_f
async def test_deprecated_non_generator_exception(self):
class TestSpider(Spider):
name = "test"
def start_requests(self):
raise RuntimeError
with (
LogCapture() as log,
pytest.warns(
ScrapyDeprecationWarning,
match=r"defines the deprecated start_requests\(\) method",
),
):
await self._test_spider(TestSpider, [])
assert "in start_requests\n raise RuntimeError" in str(log)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_useragent.py | tests/test_downloadermiddleware_useragent.py | from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class TestUserAgentMiddleware:
def get_spider_and_mw(self, default_useragent):
crawler = get_crawler(Spider, {"USER_AGENT": default_useragent})
spider = crawler._create_spider("foo")
return spider, UserAgentMiddleware.from_crawler(crawler)
def test_default_agent(self):
_, mw = self.get_spider_and_mw("default_useragent")
req = Request("http://scrapytest.org/")
assert mw.process_request(req) is None
assert req.headers["User-Agent"] == b"default_useragent"
def test_header_agent(self):
spider, mw = self.get_spider_and_mw("default_useragent")
mw.spider_opened(spider)
req = Request(
"http://scrapytest.org/", headers={"User-Agent": "header_useragent"}
)
assert mw.process_request(req) is None
assert req.headers["User-Agent"] == b"header_useragent"
def test_no_agent(self):
spider, mw = self.get_spider_and_mw(None)
mw.spider_opened(spider)
req = Request("http://scrapytest.org/")
assert mw.process_request(req) is None
assert "User-Agent" not in req.headers
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_selector.py | tests/test_selector.py | import weakref
import parsel
import pytest
from packaging import version
from scrapy.http import HtmlResponse, TextResponse, XmlResponse
from scrapy.selector import Selector
PARSEL_VERSION = version.parse(getattr(parsel, "__version__", "0.0"))
PARSEL_18_PLUS = PARSEL_VERSION >= version.parse("1.8.0")
class TestSelector:
def test_simple_selection(self):
"""Simple selector tests"""
body = b"<p><input name='a'value='1'/><input name='b'value='2'/></p>"
response = TextResponse(url="http://example.com", body=body, encoding="utf-8")
sel = Selector(response)
xl = sel.xpath("//input")
assert len(xl) == 2
for x in xl:
assert isinstance(x, Selector)
assert sel.xpath("//input").getall() == [x.get() for x in sel.xpath("//input")]
assert [x.get() for x in sel.xpath("//input[@name='a']/@name")] == ["a"]
assert [
x.get()
for x in sel.xpath(
"number(concat(//input[@name='a']/@value, //input[@name='b']/@value))"
)
] == ["12.0"]
assert sel.xpath("concat('xpath', 'rules')").getall() == ["xpathrules"]
assert [
x.get()
for x in sel.xpath(
"concat(//input[@name='a']/@value, //input[@name='b']/@value)"
)
] == ["12"]
def test_root_base_url(self):
body = b'<html><form action="/path"><input name="a" /></form></html>'
url = "http://example.com"
response = TextResponse(url=url, body=body, encoding="utf-8")
sel = Selector(response)
assert url == sel.root.base
def test_flavor_detection(self):
text = b'<div><img src="a.jpg"><p>Hello</div>'
sel = Selector(XmlResponse("http://example.com", body=text, encoding="utf-8"))
assert sel.type == "xml"
assert sel.xpath("//div").getall() == [
'<div><img src="a.jpg"><p>Hello</p></img></div>'
]
sel = Selector(HtmlResponse("http://example.com", body=text, encoding="utf-8"))
assert sel.type == "html"
assert sel.xpath("//div").getall() == [
'<div><img src="a.jpg"><p>Hello</p></div>'
]
def test_http_header_encoding_precedence(self):
# '\xa3' = pound symbol in unicode
# '\xc2\xa3' = pound symbol in utf-8
# '\xa3' = pound symbol in latin-1 (iso-8859-1)
meta = (
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
)
head = f"<head>{meta}</head>"
body_content = '<span id="blank">\xa3</span>'
body = f"<body>{body_content}</body>"
html = f"<html>{head}{body}</html>"
encoding = "utf-8"
html_utf8 = html.encode(encoding)
headers = {"Content-Type": ["text/html; charset=utf-8"]}
response = HtmlResponse(
url="http://example.com", headers=headers, body=html_utf8
)
x = Selector(response)
assert x.xpath("//span[@id='blank']/text()").getall() == ["\xa3"]
def test_badly_encoded_body(self):
# \xe9 alone isn't a valid utf-8 byte sequence
r1 = TextResponse(
"http://www.example.com",
body=b"<html><p>an Jos\xe9 de</p><html>",
encoding="utf-8",
)
Selector(r1).xpath("//text()").getall()
def test_weakref_slots(self):
"""Check that classes are using slots and are weak-referenceable"""
x = Selector(text="")
weakref.ref(x)
assert not hasattr(x, "__dict__"), (
f"{x.__class__.__name__} does not use __slots__"
)
def test_selector_bad_args(self):
with pytest.raises(ValueError, match="received both response and text"):
Selector(TextResponse(url="http://example.com", body=b""), text="")
@pytest.mark.skipif(not PARSEL_18_PLUS, reason="parsel < 1.8 doesn't support jmespath")
class TestJMESPath:
def test_json_has_html(self) -> None:
"""Sometimes the information is returned in a json wrapper"""
body = """
{
"content": [
{
"name": "A",
"value": "a"
},
{
"name": {
"age": 18
},
"value": "b"
},
{
"name": "C",
"value": "c"
},
{
"name": "<a>D</a>",
"value": "<div>d</div>"
}
],
"html": "<div><a>a<br>b</a>c</div><div><a>d</a>e<b>f</b></div>"
}
"""
resp = TextResponse(url="http://example.com", body=body, encoding="utf-8")
assert (
resp.jmespath("html").get()
== "<div><a>a<br>b</a>c</div><div><a>d</a>e<b>f</b></div>"
)
assert resp.jmespath("html").xpath("//div/a/text()").getall() == ["a", "b", "d"]
assert resp.jmespath("html").css("div > b").getall() == ["<b>f</b>"]
assert resp.jmespath("content").jmespath("name.age").get() == "18"
def test_html_has_json(self) -> None:
body = """
<div>
<h1>Information</h1>
<content>
{
"user": [
{
"name": "A",
"age": 18
},
{
"name": "B",
"age": 32
},
{
"name": "C",
"age": 22
},
{
"name": "D",
"age": 25
}
],
"total": 4,
"status": "ok"
}
</content>
</div>
"""
resp = TextResponse(url="http://example.com", body=body, encoding="utf-8")
assert resp.xpath("//div/content/text()").jmespath("user[*].name").getall() == [
"A",
"B",
"C",
"D",
]
assert resp.xpath("//div/content").jmespath("user[*].name").getall() == [
"A",
"B",
"C",
"D",
]
assert resp.xpath("//div/content").jmespath("total").get() == "4"
def test_jmespath_with_re(self) -> None:
body = """
<div>
<h1>Information</h1>
<content>
{
"user": [
{
"name": "A",
"age": 18
},
{
"name": "B",
"age": 32
},
{
"name": "C",
"age": 22
},
{
"name": "D",
"age": 25
}
],
"total": 4,
"status": "ok"
}
</content>
</div>
"""
resp = TextResponse(url="http://example.com", body=body, encoding="utf-8")
assert resp.xpath("//div/content/text()").jmespath("user[*].name").re(
r"(\w+)"
) == ["A", "B", "C", "D"]
assert resp.xpath("//div/content").jmespath("user[*].name").re(r"(\w+)") == [
"A",
"B",
"C",
"D",
]
assert resp.xpath("//div/content").jmespath("unavailable").re(r"(\d+)") == []
assert (
resp.xpath("//div/content").jmespath("unavailable").re_first(r"(\d+)")
is None
)
assert resp.xpath("//div/content").jmespath("user[*].age.to_string(@)").re(
r"(\d+)"
) == ["18", "32", "22", "25"]
@pytest.mark.skipif(PARSEL_18_PLUS, reason="parsel >= 1.8 supports jmespath")
def test_jmespath_not_available() -> None:
body = """
{
"website": {"name": "Example"}
}
"""
resp = TextResponse(url="http://example.com", body=body, encoding="utf-8")
with pytest.raises(AttributeError):
resp.jmespath("website.name").get()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_request_left.py | tests/test_request_left.py | from twisted.internet.defer import inlineCallbacks
from scrapy.signals import request_left_downloader
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
class SignalCatcherSpider(Spider):
name = "signal_catcher"
def __init__(self, crawler, url, *args, **kwargs):
super().__init__(*args, **kwargs)
crawler.signals.connect(self.on_request_left, signal=request_left_downloader)
self.caught_times = 0
self.start_urls = [url]
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return cls(crawler, *args, **kwargs)
def on_request_left(self, request, spider):
self.caught_times += 1
class TestCatching:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
@inlineCallbacks
def test_success(self):
crawler = get_crawler(SignalCatcherSpider)
yield crawler.crawl(self.mockserver.url("/status?n=200"))
assert crawler.spider.caught_times == 1
@inlineCallbacks
def test_timeout(self):
crawler = get_crawler(SignalCatcherSpider, {"DOWNLOAD_TIMEOUT": 0.1})
yield crawler.crawl(self.mockserver.url("/delay?n=0.2"))
assert crawler.spider.caught_times == 1
@inlineCallbacks
def test_disconnect(self):
crawler = get_crawler(SignalCatcherSpider)
yield crawler.crawl(self.mockserver.url("/drop"))
assert crawler.spider.caught_times == 1
@inlineCallbacks
def test_noconnect(self):
crawler = get_crawler(SignalCatcherSpider)
yield crawler.crawl("http://thereisdefinetelynosuchdomain.com")
assert crawler.spider.caught_times == 1
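# NOTE: all four cases above (success, timeout, dropped connection, DNS
# failure) assert caught_times == 1: request_left_downloader is expected to
# fire exactly once per request, however the download ends.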
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spider.py | tests/test_spider.py | from __future__ import annotations
import gzip
import re
import warnings
from datetime import datetime
from io import BytesIO
from logging import ERROR, WARNING
from pathlib import Path
from typing import Any
from unittest import mock
import pytest
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from w3lib.url import safe_url_string
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.http import HtmlResponse, Request, Response, TextResponse, XmlResponse
from scrapy.linkextractors import LinkExtractor
from scrapy.settings import Settings
from scrapy.spiders import (
CrawlSpider,
CSVFeedSpider,
Rule,
SitemapSpider,
Spider,
XMLFeedSpider,
)
from scrapy.spiders.init import InitSpider
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.test import get_crawler, get_reactor_settings
from tests import get_testdata, tests_datadir
class TestSpider:
spider_class = Spider
def test_base_spider(self):
spider = self.spider_class("example.com")
assert spider.name == "example.com"
assert spider.start_urls == [] # pylint: disable=use-implicit-booleaness-not-comparison
def test_spider_args(self):
"""``__init__`` method arguments are assigned to spider attributes"""
spider = self.spider_class("example.com", foo="bar")
assert spider.foo == "bar"
def test_spider_without_name(self):
"""``__init__`` method arguments are assigned to spider attributes"""
msg = "must have a name"
with pytest.raises(ValueError, match=msg):
self.spider_class()
with pytest.raises(ValueError, match=msg):
self.spider_class(somearg="foo")
def test_from_crawler_crawler_and_settings_population(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
assert hasattr(spider, "crawler")
assert spider.crawler is crawler
assert hasattr(spider, "settings")
assert spider.settings is crawler.settings
def test_from_crawler_init_call(self):
with mock.patch.object(
self.spider_class, "__init__", return_value=None
) as mock_init:
self.spider_class.from_crawler(get_crawler(), "example.com", foo="bar")
mock_init.assert_called_once_with("example.com", foo="bar")
def test_closed_signal_call(self):
class TestSpider(self.spider_class):
closed_called = False
def closed(self, reason):
self.closed_called = True
crawler = get_crawler()
spider = TestSpider.from_crawler(crawler, "example.com")
crawler.signals.send_catch_log(signal=signals.spider_opened, spider=spider)
crawler.signals.send_catch_log(
signal=signals.spider_closed, spider=spider, reason=None
)
assert spider.closed_called
def test_update_settings(self):
spider_settings = {"TEST1": "spider", "TEST2": "spider"}
project_settings = {"TEST1": "project", "TEST3": "project"}
self.spider_class.custom_settings = spider_settings
settings = Settings(project_settings, priority="project")
self.spider_class.update_settings(settings)
assert settings.get("TEST1") == "spider"
assert settings.get("TEST2") == "spider"
assert settings.get("TEST3") == "project"
@inlineCallbacks
def test_settings_in_from_crawler(self):
spider_settings = {"TEST1": "spider", "TEST2": "spider"}
project_settings = {
"TEST1": "project",
"TEST3": "project",
**get_reactor_settings(),
}
class TestSpider(self.spider_class):
name = "test"
custom_settings = spider_settings
@classmethod
def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any):
spider = super().from_crawler(crawler, *args, **kwargs)
spider.settings.set("TEST1", "spider_instance", priority="spider")
return spider
crawler = Crawler(TestSpider, project_settings)
assert crawler.settings.get("TEST1") == "spider"
assert crawler.settings.get("TEST2") == "spider"
assert crawler.settings.get("TEST3") == "project"
yield crawler.crawl()
assert crawler.settings.get("TEST1") == "spider_instance"
def test_logger(self):
spider = self.spider_class("example.com")
with LogCapture() as lc:
spider.logger.info("test log msg")
lc.check(("example.com", "INFO", "test log msg"))
record = lc.records[0]
assert "spider" in record.__dict__
assert record.spider is spider
def test_log(self):
spider = self.spider_class("example.com")
with mock.patch("scrapy.spiders.Spider.logger") as mock_logger:
spider.log("test log msg", "INFO")
mock_logger.log.assert_called_once_with("INFO", "test log msg")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestInitSpider(TestSpider):
spider_class = InitSpider
@deferred_f_from_coro_f
async def test_start_urls(self):
responses = []
class TestSpider(self.spider_class):
name = "test"
start_urls = ["data:,"]
async def parse(self, response):
responses.append(response)
crawler = get_crawler(TestSpider)
await crawler.crawl_async()
assert len(responses) == 1
assert responses[0].url == "data:,"
class TestXMLFeedSpider(TestSpider):
spider_class = XMLFeedSpider
def test_register_namespace(self):
body = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns:x="http://www.google.com/schemas/sitemap/0.84"
xmlns:y="http://www.example.com/schemas/extras/1.0">
<url><x:loc>http://www.example.com/Special-Offers.html</x:loc><y:updated>2009-08-16</y:updated>
<other value="bar" y:custom="fuu"/>
</url>
<url><loc>http://www.example.com/</loc><y:updated>2009-08-16</y:updated><other value="foo"/></url>
</urlset>"""
response = XmlResponse(url="http://example.com/sitemap.xml", body=body)
class _XMLSpider(self.spider_class):
itertag = "url"
namespaces = (
("a", "http://www.google.com/schemas/sitemap/0.84"),
("b", "http://www.example.com/schemas/extras/1.0"),
)
def parse_node(self, response, selector):
yield {
"loc": selector.xpath("a:loc/text()").getall(),
"updated": selector.xpath("b:updated/text()").getall(),
"other": selector.xpath("other/@value").getall(),
"custom": selector.xpath("other/@b:custom").getall(),
}
for iterator in ("iternodes", "xml"):
spider = _XMLSpider("example", iterator=iterator)
output = list(spider._parse(response))
assert len(output) == 2, iterator
assert output == [
{
"loc": ["http://www.example.com/Special-Offers.html"],
"updated": ["2009-08-16"],
"custom": ["fuu"],
"other": ["bar"],
},
{
"loc": [],
"updated": ["2009-08-16"],
"other": ["foo"],
"custom": [],
},
], iterator
class TestCSVFeedSpider(TestSpider):
spider_class = CSVFeedSpider
def test_parse_rows(self):
body = get_testdata("feeds", "feed-sample6.csv")
response = Response("http://example.org/dummy.csv", body=body)
class _CrawlSpider(self.spider_class):
name = "test"
delimiter = ","
quotechar = "'"
def parse_row(self, response, row):
return row
spider = _CrawlSpider()
rows = list(spider.parse_rows(response))
assert rows[0] == {"id": "1", "name": "alpha", "value": "foobar"}
assert len(rows) == 4
class TestCrawlSpider(TestSpider):
test_body = b"""<html><head><title>Page title</title></head>
<body>
<p><a href="item/12.html">Item 12</a></p>
<div class='links'>
<p><a href="/about.html">About us</a></p>
</div>
<div>
<p><a href="/nofollow.html">This shouldn't be followed</a></p>
</div>
</body></html>"""
spider_class = CrawlSpider
def test_rule_without_link_extractor(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(),)
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
def test_process_links(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_links="dummy_process_links"),)
def dummy_process_links(self, links):
return links
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
def test_process_links_filter(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_links="filter_process_links"),)
_test_regex = re.compile("nofollow")
def filter_process_links(self, links):
return [link for link in links if not self._test_regex.search(link.url)]
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 2
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
]
def test_process_links_generator(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_links="dummy_process_links"),)
def dummy_process_links(self, links):
yield from links
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
def test_process_request(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
def process_request_change_domain(request, response):
return request.replace(url=request.url.replace(".org", ".com"))
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (
Rule(LinkExtractor(), process_request=process_request_change_domain),
)
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.com/somepage/item/12.html",
"http://example.com/about.html",
"http://example.com/nofollow.html",
]
def test_process_request_with_response(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
def process_request_meta_response_class(request, response):
request.meta["response_class"] = response.__class__.__name__
return request
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (
Rule(
LinkExtractor(), process_request=process_request_meta_response_class
),
)
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
assert [r.meta["response_class"] for r in output] == [
"HtmlResponse",
"HtmlResponse",
"HtmlResponse",
]
def test_process_request_instance_method(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (Rule(LinkExtractor(), process_request="process_request_upper"),)
def process_request_upper(self, request, response):
return request.replace(url=request.url.upper())
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
safe_url_string("http://EXAMPLE.ORG/SOMEPAGE/ITEM/12.HTML"),
safe_url_string("http://EXAMPLE.ORG/ABOUT.HTML"),
safe_url_string("http://EXAMPLE.ORG/NOFOLLOW.HTML"),
]
def test_process_request_instance_method_with_response(self):
response = HtmlResponse(
"http://example.org/somepage/index.html", body=self.test_body
)
class _CrawlSpider(self.spider_class):
name = "test"
allowed_domains = ["example.org"]
rules = (
Rule(
LinkExtractor(),
process_request="process_request_meta_response_class",
),
)
def process_request_meta_response_class(self, request, response):
request.meta["response_class"] = response.__class__.__name__
return request
spider = _CrawlSpider()
output = list(spider._requests_to_follow(response))
assert len(output) == 3
assert all(isinstance(r, Request) for r in output)
assert [r.url for r in output] == [
"http://example.org/somepage/item/12.html",
"http://example.org/about.html",
"http://example.org/nofollow.html",
]
assert [r.meta["response_class"] for r in output] == [
"HtmlResponse",
"HtmlResponse",
"HtmlResponse",
]
def test_follow_links_attribute_population(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
assert hasattr(spider, "_follow_links")
assert spider._follow_links
settings_dict = {"CRAWLSPIDER_FOLLOW_LINKS": False}
crawler = get_crawler(settings_dict=settings_dict)
spider = self.spider_class.from_crawler(crawler, "example.com")
assert hasattr(spider, "_follow_links")
assert not spider._follow_links
@inlineCallbacks
def test_start_url(self):
class TestSpider(self.spider_class):
name = "test"
start_url = "https://www.example.com"
crawler = get_crawler(TestSpider)
with LogCapture("scrapy.core.engine", propagate=False, level=ERROR) as log:
yield crawler.crawl()
assert "Error while reading start items and requests" in str(log)
assert "did you miss an 's'?" in str(log)
def test_parse_response_use(self):
class _CrawlSpider(CrawlSpider):
name = "test"
start_urls = "https://www.example.com"
_follow_links = False
with warnings.catch_warnings(record=True) as w:
spider = _CrawlSpider()
assert len(w) == 0
spider._parse_response(
TextResponse(spider.start_urls, body=b""), None, None
)
assert len(w) == 1
def test_parse_response_override(self):
class _CrawlSpider(CrawlSpider):
def _parse_response(self, response, callback, cb_kwargs, follow=True):
pass
name = "test"
start_urls = "https://www.example.com"
_follow_links = False
with warnings.catch_warnings(record=True) as w:
assert len(w) == 0
spider = _CrawlSpider()
assert len(w) == 1
spider._parse_response(
TextResponse(spider.start_urls, body=b""), None, None
)
assert len(w) == 1
def test_parse_with_rules(self):
class _CrawlSpider(CrawlSpider):
name = "test"
start_urls = "https://www.example.com"
with warnings.catch_warnings(record=True) as w:
spider = _CrawlSpider()
spider.parse_with_rules(
TextResponse(spider.start_urls, body=b""), None, None
)
assert len(w) == 0
class TestSitemapSpider(TestSpider):
spider_class = SitemapSpider
BODY = b"SITEMAP"
f = BytesIO()
g = gzip.GzipFile(fileobj=f, mode="w+b")
g.write(BODY)
g.close()
GZBODY = f.getvalue()
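# NOTE: a functionally equivalent one-liner for the BytesIO/GzipFile dance
# above would be gzip.compress(BODY); the resulting streams may differ only
# in gzip header metadata such as the timestamp.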
def assertSitemapBody(self, response: Response, body: bytes | None) -> None:
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
assert spider._get_sitemap_body(response) == body
def test_get_sitemap_body(self):
r = XmlResponse(url="http://www.example.com/", body=self.BODY)
self.assertSitemapBody(r, self.BODY)
r = HtmlResponse(url="http://www.example.com/", body=self.BODY)
self.assertSitemapBody(r, None)
r = Response(url="http://www.example.com/favicon.ico", body=self.BODY)
self.assertSitemapBody(r, None)
def test_get_sitemap_body_gzip_headers(self):
r = Response(
url="http://www.example.com/sitemap",
body=self.GZBODY,
headers={"content-type": "application/gzip"},
request=Request("http://www.example.com/sitemap"),
)
self.assertSitemapBody(r, self.BODY)
def test_get_sitemap_body_xml_url(self):
r = TextResponse(url="http://www.example.com/sitemap.xml", body=self.BODY)
self.assertSitemapBody(r, self.BODY)
def test_get_sitemap_body_xml_url_compressed(self):
r = Response(
url="http://www.example.com/sitemap.xml.gz",
body=self.GZBODY,
request=Request("http://www.example.com/sitemap"),
)
self.assertSitemapBody(r, self.BODY)
# .xml.gz, but the body was already decoded by the HttpCompression middleware
r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.BODY)
self.assertSitemapBody(r, self.BODY)
def test_get_sitemap_urls_from_robotstxt(self):
robots = b"""# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
Sitemap: HTTP://example.com/sitemap-uppercase.xml
Sitemap: /sitemap-relative-url.xml
"""
r = TextResponse(url="http://www.example.com/robots.txt", body=robots)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://example.com/sitemap.xml",
"http://example.com/sitemap-product-index.xml",
"http://example.com/sitemap-uppercase.xml",
"http://www.example.com/sitemap-relative-url.xml",
]
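# NOTE: the relative "Sitemap:" entry above is resolved against the
# robots.txt URL, behaving like urllib.parse.urljoin:
#   urljoin("http://www.example.com/robots.txt", "/sitemap-relative-url.xml")
#   # -> "http://www.example.com/sitemap-relative-url.xml"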
def test_alternate_url_locs(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/</loc>
<xhtml:link rel="alternate" hreflang="de"
href="http://www.example.com/deutsch/"/>
<xhtml:link rel="alternate" hreflang="de-ch"
href="http://www.example.com/schweiz-deutsch/"/>
<xhtml:link rel="alternate" hreflang="it"
href="http://www.example.com/italiano/"/>
<xhtml:link rel="alternate" hreflang="it"/><!-- wrong tag without href -->
</url>
</urlset>"""
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/"
]
spider.sitemap_alternate_links = True
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/",
"http://www.example.com/deutsch/",
"http://www.example.com/schweiz-deutsch/",
"http://www.example.com/italiano/",
]
def test_sitemap_filter(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/</loc>
<lastmod>2010-01-01</lastmod>
</url>
<url>
<loc>http://www.example.com/portuguese/</loc>
<lastmod>2005-01-01</lastmod>
</url>
</urlset>"""
class FilteredSitemapSpider(self.spider_class):
def sitemap_filter(self, entries):
for entry in entries:
date_time = datetime.strptime(entry["lastmod"], "%Y-%m-%d")
if date_time.year > 2008:
yield entry
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/",
"http://www.example.com/portuguese/",
]
spider = FilteredSitemapSpider("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/"
]
def test_sitemap_filter_with_alternate_links(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xhtml="http://www.w3.org/1999/xhtml">
<url>
<loc>http://www.example.com/english/article_1/</loc>
<lastmod>2010-01-01</lastmod>
<xhtml:link rel="alternate" hreflang="de"
href="http://www.example.com/deutsch/article_1/"/>
</url>
<url>
<loc>http://www.example.com/english/article_2/</loc>
<lastmod>2015-01-01</lastmod>
</url>
</urlset>"""
class FilteredSitemapSpider(self.spider_class):
def sitemap_filter(self, entries):
for entry in entries:
alternate_links = entry.get("alternate", ())
for link in alternate_links:
if "/deutsch/" in link:
entry["loc"] = link
yield entry
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/english/article_1/",
"http://www.example.com/english/article_2/",
]
spider = FilteredSitemapSpider("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/deutsch/article_1/"
]
def test_sitemapindex_filter(self):
sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap>
<loc>http://www.example.com/sitemap1.xml</loc>
<lastmod>2004-01-01T20:00:00+00:00</lastmod>
</sitemap>
<sitemap>
<loc>http://www.example.com/sitemap2.xml</loc>
<lastmod>2005-01-01</lastmod>
</sitemap>
</sitemapindex>"""
class FilteredSitemapSpider(self.spider_class):
def sitemap_filter(self, entries):
for entry in entries:
date_time = datetime.strptime(
entry["lastmod"].split("T")[0], "%Y-%m-%d"
)
if date_time.year > 2004:
yield entry
r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
spider = self.spider_class("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/sitemap1.xml",
"http://www.example.com/sitemap2.xml",
]
spider = FilteredSitemapSpider("example.com")
assert [req.url for req in spider._parse_sitemap(r)] == [
"http://www.example.com/sitemap2.xml"
]
def test_compression_bomb_setting(self):
settings = {"DOWNLOAD_MAXSIZE": 10_000_000}
crawler = get_crawler(settings_dict=settings)
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(url="https://example.com")
response = Response(url="https://example.com", body=body, request=request)
assert spider._get_sitemap_body(response) is None
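        # Grounding note: the bomb-gzip.bin fixture decompresses to 11511612
        # bytes (the size reported by the warning-size tests below), which
        # exceeds the 10_000_000-byte limit, so the sitemap body is discarded.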
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_compression_bomb_spider_attr(self):
class DownloadMaxSizeSpider(self.spider_class):
download_maxsize = 10_000_000
crawler = get_crawler()
spider = DownloadMaxSizeSpider.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(url="https://example.com")
response = Response(url="https://example.com", body=body, request=request)
assert spider._get_sitemap_body(response) is None
def test_compression_bomb_request_meta(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(
url="https://example.com", meta={"download_maxsize": 10_000_000}
)
response = Response(url="https://example.com", body=body, request=request)
assert spider._get_sitemap_body(response) is None
def test_download_warnsize_setting(self):
settings = {"DOWNLOAD_WARNSIZE": 10_000_000}
crawler = get_crawler(settings_dict=settings)
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(url="https://example.com")
response = Response(url="https://example.com", body=body, request=request)
with LogCapture(
"scrapy.spiders.sitemap", propagate=False, level=WARNING
) as log:
spider._get_sitemap_body(response)
log.check(
(
"scrapy.spiders.sitemap",
"WARNING",
(
"<200 https://example.com> body size after decompression "
"(11511612 B) is larger than the download warning size "
"(10000000 B)."
),
),
)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_download_warnsize_spider_attr(self):
class DownloadWarnSizeSpider(self.spider_class):
download_warnsize = 10_000_000
crawler = get_crawler()
spider = DownloadWarnSizeSpider.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(
url="https://example.com", meta={"download_warnsize": 10_000_000}
)
response = Response(url="https://example.com", body=body, request=request)
with LogCapture(
"scrapy.spiders.sitemap", propagate=False, level=WARNING
) as log:
spider._get_sitemap_body(response)
log.check(
(
"scrapy.spiders.sitemap",
"WARNING",
(
"<200 https://example.com> body size after decompression "
"(11511612 B) is larger than the download warning size "
"(10000000 B)."
),
),
)
def test_download_warnsize_request_meta(self):
crawler = get_crawler()
spider = self.spider_class.from_crawler(crawler, "example.com")
body_path = Path(tests_datadir, "compressed", "bomb-gzip.bin")
body = body_path.read_bytes()
request = Request(
url="https://example.com", meta={"download_warnsize": 10_000_000}
)
response = Response(url="https://example.com", body=body, request=request)
with LogCapture(
"scrapy.spiders.sitemap", propagate=False, level=WARNING
) as log:
spider._get_sitemap_body(response)
log.check(
(
"scrapy.spiders.sitemap",
"WARNING",
(
"<200 https://example.com> body size after decompression "
"(11511612 B) is larger than the download warning size "
"(10000000 B)."
),
),
)
@deferred_f_from_coro_f
async def test_sitemap_urls(self):
class TestSpider(self.spider_class):
name = "test"
sitemap_urls = ["https://toscrape.com/sitemap.xml"]
crawler = get_crawler(TestSpider)
spider = TestSpider.from_crawler(crawler)
with warnings.catch_warnings():
warnings.simplefilter("error")
requests = [request async for request in spider.start()]
assert len(requests) == 1
request = requests[0]
assert request.url == "https://toscrape.com/sitemap.xml"
assert request.dont_filter is False
assert request.callback == spider._parse_sitemap
class TestDeprecation:
def test_crawl_spider(self):
assert issubclass(CrawlSpider, Spider)
assert isinstance(CrawlSpider(name="foo"), Spider)
class TestNoParseMethodSpider:
spider_class = Spider
def test_undefined_parse_method(self):
spider = self.spider_class("example.com")
text = b"Random text"
resp = TextResponse(url="http://www.example.com/random_url", body=text)
exc_msg = "Spider.parse callback is not defined"
with pytest.raises(NotImplementedError, match=exc_msg):
spider.parse(resp)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_http2_client_protocol.py | tests/test_http2_client_protocol.py | from __future__ import annotations
import json
import random
import re
import string
from ipaddress import IPv4Address
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from unittest import mock
from urllib.parse import urlencode
import pytest
from pytest_twisted import async_yield_fixture
from twisted.internet.defer import (
CancelledError,
Deferred,
DeferredList,
inlineCallbacks,
)
from twisted.internet.endpoints import SSL4ClientEndpoint, SSL4ServerEndpoint
from twisted.internet.error import TimeoutError as TxTimeoutError
from twisted.internet.ssl import Certificate, PrivateCertificate, optionsForClientTLS
from twisted.web.client import URI, ResponseFailed
from twisted.web.http import H2_ENABLED
from twisted.web.http import Request as TxRequest
from twisted.web.server import NOT_DONE_YET, Site
from twisted.web.static import File
from scrapy.http import JsonRequest, Request, Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.utils.defer import (
deferred_f_from_coro_f,
deferred_from_coro,
maybe_deferred_to_future,
)
from tests.mockserver.http_resources import LeafResource, Status
from tests.mockserver.utils import ssl_context_factory
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Callable, Coroutine, Generator
from scrapy.core.http2.protocol import H2ClientProtocol
pytestmark = pytest.mark.skipif(
not H2_ENABLED, reason="HTTP/2 support in Twisted is not enabled"
)
def generate_random_string(size: int) -> str:
return "".join(random.choices(string.ascii_uppercase + string.digits, k=size))
def make_html_body(val: str) -> bytes:
response = f"""<html>
<h1>Hello from HTTP2</h1>
<p>{val}</p>
</html>"""
return bytes(response, "utf-8")
class DummySpider(Spider):
name = "dummy"
start_urls: list = []
def parse(self, response):
print(response)
class Data:
SMALL_SIZE = 1024 # 1 KB
LARGE_SIZE = 1024**2 # 1 MB
STR_SMALL = generate_random_string(SMALL_SIZE)
STR_LARGE = generate_random_string(LARGE_SIZE)
EXTRA_SMALL = generate_random_string(1024 * 15)
EXTRA_LARGE = generate_random_string((1024**2) * 15)
HTML_SMALL = make_html_body(STR_SMALL)
HTML_LARGE = make_html_body(STR_LARGE)
JSON_SMALL = {"data": STR_SMALL}
JSON_LARGE = {"data": STR_LARGE}
DATALOSS = b"Dataloss Content"
NO_CONTENT_LENGTH = b"This response do not have any content-length header"
class GetDataHtmlSmall(LeafResource):
def render_GET(self, request: TxRequest):
request.setHeader("Content-Type", "text/html; charset=UTF-8")
return Data.HTML_SMALL
class GetDataHtmlLarge(LeafResource):
def render_GET(self, request: TxRequest):
request.setHeader("Content-Type", "text/html; charset=UTF-8")
return Data.HTML_LARGE
class PostDataJsonMixin:
@staticmethod
def make_response(request: TxRequest, extra_data: str) -> bytes:
assert request.content is not None
response = {
"request-headers": {},
"request-body": json.loads(request.content.read()),
"extra-data": extra_data,
}
for k, v in request.requestHeaders.getAllRawHeaders():
response["request-headers"][str(k, "utf-8")] = str(v[0], "utf-8")
response_bytes = bytes(json.dumps(response), "utf-8")
request.setHeader("Content-Type", "application/json; charset=UTF-8")
request.setHeader("Content-Encoding", "UTF-8")
return response_bytes
class PostDataJsonSmall(LeafResource, PostDataJsonMixin):
def render_POST(self, request: TxRequest):
return self.make_response(request, Data.EXTRA_SMALL)
class PostDataJsonLarge(LeafResource, PostDataJsonMixin):
def render_POST(self, request: TxRequest):
return self.make_response(request, Data.EXTRA_LARGE)
class Dataloss(LeafResource):
def render_GET(self, request: TxRequest):
request.setHeader(b"Content-Length", b"1024")
self.deferRequest(request, 0, self._delayed_render, request)
return NOT_DONE_YET
@staticmethod
def _delayed_render(request: TxRequest):
request.write(Data.DATALOSS)
request.finish()
class NoContentLengthHeader(LeafResource):
def render_GET(self, request: TxRequest):
request.requestHeaders.removeHeader("Content-Length")
self.deferRequest(request, 0, self._delayed_render, request)
return NOT_DONE_YET
@staticmethod
def _delayed_render(request: TxRequest):
request.write(Data.NO_CONTENT_LENGTH)
request.finish()
class TimeoutResponse(LeafResource):
def render_GET(self, request: TxRequest):
return NOT_DONE_YET
class QueryParams(LeafResource):
def render_GET(self, request: TxRequest):
request.setHeader("Content-Type", "application/json; charset=UTF-8")
request.setHeader("Content-Encoding", "UTF-8")
query_params: dict[str, str] = {}
assert request.args is not None
for k, v in request.args.items():
query_params[str(k, "utf-8")] = str(v[0], "utf-8")
return bytes(json.dumps(query_params), "utf-8")
class RequestHeaders(LeafResource):
"""Sends all the headers received as a response"""
def render_GET(self, request: TxRequest):
request.setHeader("Content-Type", "application/json; charset=UTF-8")
request.setHeader("Content-Encoding", "UTF-8")
headers = {}
for k, v in request.requestHeaders.getAllRawHeaders():
headers[str(k, "utf-8")] = str(v[0], "utf-8")
return bytes(json.dumps(headers), "utf-8")
def make_request_dfd(client: H2ClientProtocol, request: Request) -> Deferred[Response]:
return client.request(request, DummySpider())
async def make_request(client: H2ClientProtocol, request: Request) -> Response:
return await maybe_deferred_to_future(make_request_dfd(client, request))
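# make_request_dfd returns a Twisted Deferred; make_request bridges it into
# coroutine land via maybe_deferred_to_future, so tests can simply await the
# response instead of attaching callbacks.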
class TestHttps2ClientProtocol:
scheme = "https"
host = "localhost"
key_file = Path(__file__).parent / "keys" / "localhost.key"
certificate_file = Path(__file__).parent / "keys" / "localhost.crt"
@pytest.fixture
def site(self, tmp_path):
r = File(str(tmp_path))
r.putChild(b"get-data-html-small", GetDataHtmlSmall())
r.putChild(b"get-data-html-large", GetDataHtmlLarge())
r.putChild(b"post-data-json-small", PostDataJsonSmall())
r.putChild(b"post-data-json-large", PostDataJsonLarge())
r.putChild(b"dataloss", Dataloss())
r.putChild(b"no-content-length-header", NoContentLengthHeader())
r.putChild(b"status", Status())
r.putChild(b"query-params", QueryParams())
r.putChild(b"timeout", TimeoutResponse())
r.putChild(b"request-headers", RequestHeaders())
return Site(r, timeout=None)
@async_yield_fixture
async def server_port(self, site: Site) -> AsyncGenerator[int]:
from twisted.internet import reactor
context_factory = ssl_context_factory(
str(self.key_file), str(self.certificate_file)
)
server_endpoint = SSL4ServerEndpoint(
reactor, 0, context_factory, interface=self.host
)
server = await server_endpoint.listen(site)
yield server.getHost().port
await server.stopListening()
@pytest.fixture
def client_certificate(self) -> PrivateCertificate:
pem = self.key_file.read_text(
encoding="utf-8"
) + self.certificate_file.read_text(encoding="utf-8")
return PrivateCertificate.loadPEM(pem)
@async_yield_fixture
async def client(
self, server_port: int, client_certificate: PrivateCertificate
) -> AsyncGenerator[H2ClientProtocol]:
from twisted.internet import reactor
from scrapy.core.http2.protocol import H2ClientFactory # noqa: PLC0415
client_options = optionsForClientTLS(
hostname=self.host,
trustRoot=client_certificate,
acceptableProtocols=[b"h2"],
)
uri = URI.fromBytes(bytes(self.get_url(server_port, "/"), "utf-8"))
h2_client_factory = H2ClientFactory(uri, Settings(), Deferred())
client_endpoint = SSL4ClientEndpoint(
reactor, self.host, server_port, client_options
)
client = await client_endpoint.connect(h2_client_factory)
yield client
if client.connected:
client.transport.loseConnection()
client.transport.abortConnection()
def get_url(self, portno: int, path: str) -> str:
"""
:param path: Should have / at the starting compulsorily if not empty
:return: Complete url
"""
assert len(path) > 0
assert path[0] == "/" or path[0] == "&"
return f"{self.scheme}://{self.host}:{portno}{path}"
@staticmethod
async def _check_repeat(
get_coro: Callable[[], Coroutine[Any, Any, None]], count: int
) -> None:
d_list = []
for _ in range(count):
d = deferred_from_coro(get_coro())
d_list.append(d)
await maybe_deferred_to_future(DeferredList(d_list, fireOnOneErrback=True))
async def _check_GET(
self,
client: H2ClientProtocol,
request: Request,
expected_body: bytes,
expected_status: int,
) -> None:
response = await make_request(client, request)
assert response.status == expected_status
assert response.body == expected_body
assert response.request == request
content_length_header = response.headers.get("Content-Length")
assert content_length_header is not None
content_length = int(content_length_header)
assert len(response.body) == content_length
@deferred_f_from_coro_f
async def test_GET_small_body(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = Request(self.get_url(server_port, "/get-data-html-small"))
await self._check_GET(client, request, Data.HTML_SMALL, 200)
@deferred_f_from_coro_f
async def test_GET_large_body(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = Request(self.get_url(server_port, "/get-data-html-large"))
await self._check_GET(client, request, Data.HTML_LARGE, 200)
async def _check_GET_x10(
self,
client: H2ClientProtocol,
request: Request,
expected_body: bytes,
expected_status: int,
) -> None:
async def get_coro() -> None:
await self._check_GET(client, request, expected_body, expected_status)
await self._check_repeat(get_coro, 10)
@deferred_f_from_coro_f
async def test_GET_small_body_x10(
self, server_port: int, client: H2ClientProtocol
) -> None:
await self._check_GET_x10(
client,
Request(self.get_url(server_port, "/get-data-html-small")),
Data.HTML_SMALL,
200,
)
@deferred_f_from_coro_f
async def test_GET_large_body_x10(
self, server_port: int, client: H2ClientProtocol
) -> None:
await self._check_GET_x10(
client,
Request(self.get_url(server_port, "/get-data-html-large")),
Data.HTML_LARGE,
200,
)
@staticmethod
async def _check_POST_json(
client: H2ClientProtocol,
request: Request,
expected_request_body: dict[str, str],
expected_extra_data: str,
expected_status: int,
) -> None:
response = await make_request(client, request)
assert response.status == expected_status
assert response.request == request
content_length_header = response.headers.get("Content-Length")
assert content_length_header is not None
content_length = int(content_length_header)
assert len(response.body) == content_length
# Parse the body
content_encoding_header = response.headers[b"Content-Encoding"]
assert content_encoding_header is not None
content_encoding = str(content_encoding_header, "utf-8")
body = json.loads(str(response.body, content_encoding))
assert "request-body" in body
assert "extra-data" in body
assert "request-headers" in body
request_body = body["request-body"]
assert request_body == expected_request_body
extra_data = body["extra-data"]
assert extra_data == expected_extra_data
# Check if headers were sent successfully
request_headers = body["request-headers"]
for k, v in request.headers.items():
k_str = str(k, "utf-8")
assert k_str in request_headers
assert request_headers[k_str] == str(v[0], "utf-8")
@deferred_f_from_coro_f
async def test_POST_small_json(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = JsonRequest(
url=self.get_url(server_port, "/post-data-json-small"),
method="POST",
data=Data.JSON_SMALL,
)
await self._check_POST_json(
client, request, Data.JSON_SMALL, Data.EXTRA_SMALL, 200
)
@deferred_f_from_coro_f
async def test_POST_large_json(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = JsonRequest(
url=self.get_url(server_port, "/post-data-json-large"),
method="POST",
data=Data.JSON_LARGE,
)
await self._check_POST_json(
client, request, Data.JSON_LARGE, Data.EXTRA_LARGE, 200
)
async def _check_POST_json_x10(self, *args, **kwargs):
async def get_coro() -> None:
await self._check_POST_json(*args, **kwargs)
await self._check_repeat(get_coro, 10)
@deferred_f_from_coro_f
async def test_POST_small_json_x10(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = JsonRequest(
url=self.get_url(server_port, "/post-data-json-small"),
method="POST",
data=Data.JSON_SMALL,
)
await self._check_POST_json_x10(
client, request, Data.JSON_SMALL, Data.EXTRA_SMALL, 200
)
@deferred_f_from_coro_f
async def test_POST_large_json_x10(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = JsonRequest(
url=self.get_url(server_port, "/post-data-json-large"),
method="POST",
data=Data.JSON_LARGE,
)
await self._check_POST_json_x10(
client, request, Data.JSON_LARGE, Data.EXTRA_LARGE, 200
)
@inlineCallbacks
def test_invalid_negotiated_protocol(
self, server_port: int, client: H2ClientProtocol
) -> Generator[Deferred[Any], Any, None]:
with mock.patch(
"scrapy.core.http2.protocol.PROTOCOL_NAME", return_value=b"not-h2"
):
request = Request(url=self.get_url(server_port, "/status?n=200"))
with pytest.raises(ResponseFailed):
yield make_request_dfd(client, request)
@inlineCallbacks
def test_cancel_request(
self, server_port: int, client: H2ClientProtocol
) -> Generator[Deferred[Any], Any, None]:
request = Request(url=self.get_url(server_port, "/get-data-html-large"))
d = make_request_dfd(client, request)
d.cancel()
response = cast("Response", (yield d))
assert response.status == 499
assert response.request == request
@deferred_f_from_coro_f
async def test_download_maxsize_exceeded(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = Request(
url=self.get_url(server_port, "/get-data-html-large"),
meta={"download_maxsize": 1000},
)
with pytest.raises(CancelledError) as exc_info:
await make_request(client, request)
error_pattern = re.compile(
rf"Cancelling download of {request.url}: received response "
rf"size \(\d*\) larger than download max size \(1000\)"
)
assert len(re.findall(error_pattern, str(exc_info.value))) == 1
@inlineCallbacks
def test_received_dataloss_response(
self, server_port: int, client: H2ClientProtocol
) -> Generator[Deferred[Any], Any, None]:
"""In case when value of Header Content-Length != len(Received Data)
ProtocolError is raised"""
from h2.exceptions import InvalidBodyLengthError # noqa: PLC0415
request = Request(url=self.get_url(server_port, "/dataloss"))
with pytest.raises(ResponseFailed) as exc_info:
yield make_request_dfd(client, request)
assert len(exc_info.value.reasons) > 0
assert any(
isinstance(error, InvalidBodyLengthError)
for error in exc_info.value.reasons
)
@deferred_f_from_coro_f
async def test_missing_content_length_header(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = Request(url=self.get_url(server_port, "/no-content-length-header"))
response = await make_request(client, request)
assert response.status == 200
assert response.body == Data.NO_CONTENT_LENGTH
assert response.request == request
assert "Content-Length" not in response.headers
async def _check_log_warnsize(
self,
client: H2ClientProtocol,
request: Request,
warn_pattern: re.Pattern[str],
expected_body: bytes,
caplog: pytest.LogCaptureFixture,
) -> None:
with caplog.at_level("WARNING", "scrapy.core.http2.stream"):
response = await make_request(client, request)
assert response.status == 200
assert response.request == request
assert response.body == expected_body
# Check the warning is raised only once for this request
assert len(re.findall(warn_pattern, caplog.text)) == 1
@deferred_f_from_coro_f
async def test_log_expected_warnsize(
self,
server_port: int,
client: H2ClientProtocol,
caplog: pytest.LogCaptureFixture,
) -> None:
request = Request(
url=self.get_url(server_port, "/get-data-html-large"),
meta={"download_warnsize": 1000},
)
warn_pattern = re.compile(
rf"Expected response size \(\d*\) larger than "
rf"download warn size \(1000\) in request {request}"
)
await self._check_log_warnsize(
client, request, warn_pattern, Data.HTML_LARGE, caplog
)
@deferred_f_from_coro_f
async def test_log_received_warnsize(
self,
server_port: int,
client: H2ClientProtocol,
caplog: pytest.LogCaptureFixture,
) -> None:
request = Request(
url=self.get_url(server_port, "/no-content-length-header"),
meta={"download_warnsize": 10},
)
warn_pattern = re.compile(
rf"Received more \(\d*\) bytes than download "
rf"warn size \(10\) in request {request}"
)
await self._check_log_warnsize(
client, request, warn_pattern, Data.NO_CONTENT_LENGTH, caplog
)
@deferred_f_from_coro_f
async def test_max_concurrent_streams(
self, server_port: int, client: H2ClientProtocol
) -> None:
"""Send 500 requests at one to check if we can handle
very large number of request.
"""
async def get_coro() -> None:
await self._check_GET(
client,
Request(self.get_url(server_port, "/get-data-html-small")),
Data.HTML_SMALL,
200,
)
await self._check_repeat(get_coro, 500)
@inlineCallbacks
def test_inactive_stream(
self, server_port: int, client: H2ClientProtocol
) -> Generator[Deferred[Any], Any, None]:
"""Here we send 110 requests considering the MAX_CONCURRENT_STREAMS
by default is 100. After sending the first 100 requests we close the
connection."""
d_list = []
def assert_inactive_stream(failure):
assert failure.check(ResponseFailed) is not None
from scrapy.core.http2.stream import InactiveStreamClosed # noqa: PLC0415
assert any(
isinstance(e, InactiveStreamClosed) for e in failure.value.reasons
)
        # Send 100 requests (we do not check the results)
for _ in range(100):
d = make_request_dfd(
client, Request(self.get_url(server_port, "/get-data-html-small"))
)
d.addBoth(lambda _: None)
d_list.append(d)
        # Now send 10 extra requests and save the response deferreds in a list
for _ in range(10):
d = make_request_dfd(
client, Request(self.get_url(server_port, "/get-data-html-small"))
)
d.addCallback(lambda _: pytest.fail("This request should have failed"))
d.addErrback(assert_inactive_stream)
d_list.append(d)
        # Close the connection now to fire the errbacks of the 10 extra
        # requests with InactiveStreamClosed
assert client.transport
client.transport.loseConnection()
yield DeferredList(d_list, consumeErrors=True, fireOnOneErrback=True)
@deferred_f_from_coro_f
async def test_invalid_request_type(self, client: H2ClientProtocol):
with pytest.raises(TypeError):
await make_request(client, "https://InvalidDataTypePassed.com") # type: ignore[arg-type]
@deferred_f_from_coro_f
async def test_query_parameters(
self, server_port: int, client: H2ClientProtocol
) -> None:
params = {
"a": generate_random_string(20),
"b": generate_random_string(20),
"c": generate_random_string(20),
"d": generate_random_string(20),
}
request = Request(
self.get_url(server_port, f"/query-params?{urlencode(params)}")
)
response = await make_request(client, request)
content_encoding_header = response.headers[b"Content-Encoding"]
assert content_encoding_header is not None
content_encoding = str(content_encoding_header, "utf-8")
data = json.loads(str(response.body, content_encoding))
assert data == params
@deferred_f_from_coro_f
async def test_status_codes(
self, server_port: int, client: H2ClientProtocol
) -> None:
for status in [200, 404]:
request = Request(self.get_url(server_port, f"/status?n={status}"))
response = await make_request(client, request)
assert response.status == status
@deferred_f_from_coro_f
async def test_response_has_correct_certificate_ip_address(
self,
server_port: int,
client: H2ClientProtocol,
client_certificate: PrivateCertificate,
) -> None:
request = Request(self.get_url(server_port, "/status?n=200"))
response = await make_request(client, request)
assert response.request == request
assert isinstance(response.certificate, Certificate)
assert response.certificate.original is not None
assert response.certificate.getIssuer() == client_certificate.getIssuer()
assert response.certificate.getPublicKey().matches(
client_certificate.getPublicKey()
)
assert isinstance(response.ip_address, IPv4Address)
assert str(response.ip_address) == "127.0.0.1"
@staticmethod
async def _check_invalid_netloc(client: H2ClientProtocol, url: str) -> None:
from scrapy.core.http2.stream import InvalidHostname # noqa: PLC0415
request = Request(url)
with pytest.raises(InvalidHostname) as exc_info:
await make_request(client, request)
error_msg = str(exc_info.value)
assert "localhost" in error_msg
assert "127.0.0.1" in error_msg
assert str(request) in error_msg
@deferred_f_from_coro_f
async def test_invalid_hostname(self, client: H2ClientProtocol) -> None:
await self._check_invalid_netloc(
client, "https://notlocalhost.notlocalhostdomain"
)
@deferred_f_from_coro_f
async def test_invalid_host_port(
self, server_port: int, client: H2ClientProtocol
) -> None:
port = server_port + 1
await self._check_invalid_netloc(client, f"https://127.0.0.1:{port}")
@deferred_f_from_coro_f
async def test_connection_stays_with_invalid_requests(
self, server_port: int, client: H2ClientProtocol
):
await maybe_deferred_to_future(self.test_invalid_hostname(client))
await maybe_deferred_to_future(self.test_invalid_host_port(server_port, client))
await maybe_deferred_to_future(self.test_GET_small_body(server_port, client))
await maybe_deferred_to_future(self.test_POST_small_json(server_port, client))
@inlineCallbacks
def test_connection_timeout(
self, server_port: int, client: H2ClientProtocol
) -> Generator[Deferred[Any], Any, None]:
request = Request(self.get_url(server_port, "/timeout"))
# Update the timer to 1s to test connection timeout
client.setTimeout(1)
with pytest.raises(ResponseFailed) as exc_info:
yield make_request_dfd(client, request)
for err in exc_info.value.reasons:
from scrapy.core.http2.protocol import H2ClientProtocol # noqa: PLC0415
if isinstance(err, TxTimeoutError):
assert (
f"Connection was IDLE for more than {H2ClientProtocol.IDLE_TIMEOUT}s"
in str(err)
)
break
else:
pytest.fail("No TimeoutError raised.")
@deferred_f_from_coro_f
async def test_request_headers_received(
self, server_port: int, client: H2ClientProtocol
) -> None:
request = Request(
self.get_url(server_port, "/request-headers"),
headers={"header-1": "header value 1", "header-2": "header value 2"},
)
response = await make_request(client, request)
assert response.status == 200
assert response.request == request
response_headers = json.loads(str(response.body, "utf-8"))
assert isinstance(response_headers, dict)
for k, v in request.headers.items():
k_decoded, v_decoded = str(k, "utf-8"), str(v[0], "utf-8")
assert k_decoded in response_headers
assert v_decoded == response_headers[k_decoded]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_dependencies.py | tests/test_dependencies.py | import os
import re
from configparser import ConfigParser
from pathlib import Path
import pytest
from twisted import version as twisted_version
class TestScrapyUtils:
def test_pinned_twisted_version(self):
"""When running tests within a Tox environment with pinned
dependencies, make sure that the version of Twisted is the pinned
version.
See https://github.com/scrapy/scrapy/pull/4814#issuecomment-706230011
"""
if not os.environ.get("_SCRAPY_PINNED", None):
pytest.skip("Not in a pinned environment")
tox_config_file_path = Path(__file__).parent / ".." / "tox.ini"
config_parser = ConfigParser()
config_parser.read(tox_config_file_path)
pattern = r"Twisted==([\d.]+)"
match = re.search(pattern, config_parser["pinned"]["deps"])
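        # For illustration only: with a hypothetical pinned line such as
        # "Twisted==23.10.0" in the [pinned] deps of tox.ini, match[1] would
        # capture "23.10.0"; the actual version is whatever tox.ini specifies.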
pinned_twisted_version_string = match[1]
assert twisted_version.short() == pinned_twisted_version_string
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloader_handler_twisted_http11.py | tests/test_downloader_handler_twisted_http11.py | """Tests for scrapy.core.downloader.handlers.http11.HTTP11DownloadHandler."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from tests.test_downloader_handlers_http_base import (
TestHttp11Base,
TestHttpProxyBase,
TestHttps11Base,
TestHttpsCustomCiphersBase,
TestHttpsInvalidDNSIdBase,
TestHttpsInvalidDNSPatternBase,
TestHttpsWrongHostnameBase,
TestHttpWithCrawlerBase,
TestSimpleHttpsBase,
)
if TYPE_CHECKING:
from scrapy.core.downloader.handlers import DownloadHandlerProtocol
class HTTP11DownloadHandlerMixin:
@property
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
return HTTP11DownloadHandler
class TestHttp11(HTTP11DownloadHandlerMixin, TestHttp11Base):
pass
class TestHttps11(HTTP11DownloadHandlerMixin, TestHttps11Base):
pass
class TestSimpleHttps(HTTP11DownloadHandlerMixin, TestSimpleHttpsBase):
pass
class TestHttps11WrongHostname(HTTP11DownloadHandlerMixin, TestHttpsWrongHostnameBase):
pass
class TestHttps11InvalidDNSId(HTTP11DownloadHandlerMixin, TestHttpsInvalidDNSIdBase):
pass
class TestHttps11InvalidDNSPattern(
HTTP11DownloadHandlerMixin, TestHttpsInvalidDNSPatternBase
):
pass
class TestHttps11CustomCiphers(HTTP11DownloadHandlerMixin, TestHttpsCustomCiphersBase):
pass
class TestHttp11WithCrawler(TestHttpWithCrawlerBase):
@property
def settings_dict(self) -> dict[str, Any] | None:
return None # default handler settings
class TestHttp11Proxy(HTTP11DownloadHandlerMixin, TestHttpProxyBase):
pass
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_linkextractors.py | tests/test_linkextractors.py | from __future__ import annotations
import pickle
import re
import pytest
from packaging.version import Version
from w3lib import __version__ as w3lib_version
from scrapy.http import HtmlResponse, XmlResponse
from scrapy.link import Link
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from tests import get_testdata
# a hack to skip base class tests in pytest
class Base:
class TestLinkExtractorBase:
extractor_cls: type | None = None
def setup_method(self):
body = get_testdata("link_extractor", "linkextractor.html")
self.response = HtmlResponse(url="http://example.com/index", body=body)
def test_urls_type(self):
"""Test that the resulting urls are str objects"""
lx = self.extractor_cls()
assert all(
isinstance(link.url, str) for link in lx.extract_links(self.response)
)
def test_extract_all_links(self):
lx = self.extractor_cls()
page4_url = "http://example.com/page%204.html"
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html#foo",
text="sample 3 repetition with fragment",
),
Link(url="http://www.google.com/something", text=""),
Link(url="http://example.com/innertag.html", text="inner tag"),
Link(url=page4_url, text="href with whitespaces"),
]
def test_extract_filter_allow(self):
lx = self.extractor_cls(allow=("sample",))
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html#foo",
text="sample 3 repetition with fragment",
),
]
def test_extract_filter_allow_with_duplicates(self):
lx = self.extractor_cls(allow=("sample",), unique=False)
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html",
text="sample 3 repetition",
),
Link(
url="http://example.com/sample3.html",
text="sample 3 repetition",
),
Link(
url="http://example.com/sample3.html#foo",
text="sample 3 repetition with fragment",
),
]
def test_extract_filter_allow_with_duplicates_canonicalize(self):
lx = self.extractor_cls(allow=("sample",), unique=False, canonicalize=True)
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html",
text="sample 3 repetition",
),
Link(
url="http://example.com/sample3.html",
text="sample 3 repetition",
),
Link(
url="http://example.com/sample3.html",
text="sample 3 repetition with fragment",
),
]
def test_extract_filter_allow_no_duplicates_canonicalize(self):
lx = self.extractor_cls(allow=("sample",), unique=True, canonicalize=True)
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
]
def test_extract_filter_allow_and_deny(self):
lx = self.extractor_cls(allow=("sample",), deny=("3",))
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
]
def test_extract_filter_allowed_domains(self):
lx = self.extractor_cls(allow_domains=("google.com",))
assert list(lx.extract_links(self.response)) == [
Link(url="http://www.google.com/something", text=""),
]
def test_extraction_using_single_values(self):
"""Test the extractor's behaviour among different situations"""
lx = self.extractor_cls(allow="sample")
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html#foo",
text="sample 3 repetition with fragment",
),
]
lx = self.extractor_cls(allow="sample", deny="3")
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
]
lx = self.extractor_cls(allow_domains="google.com")
assert list(lx.extract_links(self.response)) == [
Link(url="http://www.google.com/something", text=""),
]
lx = self.extractor_cls(deny_domains="example.com")
assert list(lx.extract_links(self.response)) == [
Link(url="http://www.google.com/something", text=""),
]
def test_nofollow(self):
"""Test the extractor's behaviour for links with rel='nofollow'"""
html = b"""<html><head><title>Page title</title></head>
<body>
<div class='links'>
<p><a href="/about.html">About us</a></p>
</div>
<div>
<p><a href="/follow.html">Follow this link</a></p>
</div>
<div>
<p><a href="/nofollow.html" rel="nofollow">Dont follow this one</a></p>
</div>
<div>
<p><a href="/nofollow2.html" rel="blah">Choose to follow or not</a></p>
</div>
<div>
<p><a href="http://google.com/something" rel="external nofollow">External link not to follow</a></p>
</div>
</body></html>"""
response = HtmlResponse("http://example.org/somepage/index.html", body=html)
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(url="http://example.org/about.html", text="About us"),
Link(url="http://example.org/follow.html", text="Follow this link"),
Link(
url="http://example.org/nofollow.html",
text="Dont follow this one",
nofollow=True,
),
Link(
url="http://example.org/nofollow2.html",
text="Choose to follow or not",
),
Link(
url="http://google.com/something",
text="External link not to follow",
nofollow=True,
),
]
def test_matches(self):
url1 = "http://lotsofstuff.com/stuff1/index"
url2 = "http://evenmorestuff.com/uglystuff/index"
lx = self.extractor_cls(allow=(r"stuff1",))
assert lx.matches(url1)
assert not lx.matches(url2)
lx = self.extractor_cls(deny=(r"uglystuff",))
assert lx.matches(url1)
assert not lx.matches(url2)
lx = self.extractor_cls(allow_domains=("evenmorestuff.com",))
assert not lx.matches(url1)
assert lx.matches(url2)
lx = self.extractor_cls(deny_domains=("lotsofstuff.com",))
assert not lx.matches(url1)
assert lx.matches(url2)
lx = self.extractor_cls(
allow=["blah1"],
deny=["blah2"],
allow_domains=["blah1.com"],
deny_domains=["blah2.com"],
)
assert lx.matches("http://blah1.com/blah1")
assert not lx.matches("http://blah1.com/blah2")
assert not lx.matches("http://blah2.com/blah1")
assert not lx.matches("http://blah2.com/blah2")
def test_restrict_xpaths(self):
lx = self.extractor_cls(restrict_xpaths=('//div[@id="subwrapper"]',))
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
]
def test_restrict_xpaths_encoding(self):
"""Test restrict_xpaths with encodings"""
html = b"""<html><head><title>Page title</title></head>
<body><p><a href="item/12.html">Item 12</a></p>
<div class='links'>
<p><a href="/about.html">About us\xa3</a></p>
</div>
<div>
<p><a href="/nofollow.html">This shouldn't be followed</a></p>
</div>
</body></html>"""
response = HtmlResponse(
"http://example.org/somepage/index.html",
body=html,
encoding="windows-1252",
)
lx = self.extractor_cls(restrict_xpaths="//div[@class='links']")
assert lx.extract_links(response) == [
Link(url="http://example.org/about.html", text="About us\xa3")
]
def test_restrict_xpaths_with_html_entities(self):
html = b'<html><body><p><a href="/♥/you?c=€">text</a></p></body></html>'
response = HtmlResponse(
"http://example.org/somepage/index.html",
body=html,
encoding="iso8859-15",
)
links = self.extractor_cls(restrict_xpaths="//p").extract_links(response)
assert links == [
Link(url="http://example.org/%E2%99%A5/you?c=%A4", text="text")
]
def test_restrict_xpaths_concat_in_handle_data(self):
"""html entities cause SGMLParser to call handle_data hook twice"""
body = b"""<html><body><div><a href="/foo">>\xbe\xa9<\xb6\xab</a></body></html>"""
response = HtmlResponse("http://example.org", body=body, encoding="gb18030")
lx = self.extractor_cls(restrict_xpaths="//div")
assert lx.extract_links(response) == [
Link(
url="http://example.org/foo",
text=">\u4eac<\u4e1c",
fragment="",
nofollow=False,
)
]
def test_restrict_css(self):
lx = self.extractor_cls(restrict_css=("#subwrapper a",))
assert lx.extract_links(self.response) == [
Link(url="http://example.com/sample2.html", text="sample 2")
]
def test_restrict_css_and_restrict_xpaths_together(self):
lx = self.extractor_cls(
restrict_xpaths=('//div[@id="subwrapper"]',),
restrict_css=("#subwrapper + a",),
)
assert list(lx.extract_links(self.response)) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
]
def test_area_tag_with_unicode_present(self):
body = b"""<html><body>\xbe\xa9<map><area href="http://example.org/foo" /></map></body></html>"""
response = HtmlResponse("http://example.org", body=body, encoding="utf-8")
lx = self.extractor_cls()
lx.extract_links(response)
lx.extract_links(response)
lx.extract_links(response)
assert lx.extract_links(response) == [
Link(
url="http://example.org/foo",
text="",
fragment="",
nofollow=False,
)
]
def test_encoded_url(self):
body = b"""<html><body><div><a href="?page=2">BinB</a></body></html>"""
response = HtmlResponse(
"http://known.fm/AC%2FDC/", body=body, encoding="utf8"
)
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(
url="http://known.fm/AC%2FDC/?page=2",
text="BinB",
fragment="",
nofollow=False,
),
]
def test_encoded_url_in_restricted_xpath(self):
body = b"""<html><body><div><a href="?page=2">BinB</a></body></html>"""
response = HtmlResponse(
"http://known.fm/AC%2FDC/", body=body, encoding="utf8"
)
lx = self.extractor_cls(restrict_xpaths="//div")
assert lx.extract_links(response) == [
Link(
url="http://known.fm/AC%2FDC/?page=2",
text="BinB",
fragment="",
nofollow=False,
),
]
def test_ignored_extensions(self):
# jpg is ignored by default
html = b"""<a href="page.html">asd</a> and <a href="photo.jpg">"""
response = HtmlResponse("http://example.org/", body=html)
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(url="http://example.org/page.html", text="asd"),
]
# override denied extensions
lx = self.extractor_cls(deny_extensions=["html"])
assert lx.extract_links(response) == [
Link(url="http://example.org/photo.jpg"),
]
def test_process_value(self):
"""Test restrict_xpaths with encodings"""
html = b"""
<a href="javascript:goToPage('../other/page.html','photo','width=600,height=540,scrollbars'); return false">Text</a>
<a href="/about.html">About us</a>
"""
response = HtmlResponse(
"http://example.org/somepage/index.html",
body=html,
encoding="windows-1252",
)
def process_value(value):
m = re.search(r"javascript:goToPage\('(.*?)'", value)
return m.group(1) if m else None
lx = self.extractor_cls(process_value=process_value)
assert lx.extract_links(response) == [
Link(url="http://example.org/other/page.html", text="Text")
]
def test_base_url_with_restrict_xpaths(self):
html = b"""<html><head><title>Page title</title><base href="http://otherdomain.com/base/" /></head>
<body><p><a href="item/12.html">Item 12</a></p>
</body></html>"""
response = HtmlResponse("http://example.org/somepage/index.html", body=html)
lx = self.extractor_cls(restrict_xpaths="//p")
assert lx.extract_links(response) == [
Link(url="http://otherdomain.com/base/item/12.html", text="Item 12")
]
def test_attrs(self):
lx = self.extractor_cls(attrs="href")
page4_url = "http://example.com/page%204.html"
assert lx.extract_links(self.response) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html#foo",
text="sample 3 repetition with fragment",
),
Link(url="http://www.google.com/something", text=""),
Link(url="http://example.com/innertag.html", text="inner tag"),
Link(url=page4_url, text="href with whitespaces"),
]
lx = self.extractor_cls(
attrs=("href", "src"), tags=("a", "area", "img"), deny_extensions=()
)
assert lx.extract_links(self.response) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample2.jpg", text=""),
Link(url="http://example.com/sample3.html", text="sample 3 text"),
Link(
url="http://example.com/sample3.html#foo",
text="sample 3 repetition with fragment",
),
Link(url="http://www.google.com/something", text=""),
Link(url="http://example.com/innertag.html", text="inner tag"),
Link(url=page4_url, text="href with whitespaces"),
]
lx = self.extractor_cls(attrs=None)
assert lx.extract_links(self.response) == []
def test_tags(self):
html = (
b'<html><area href="sample1.html"></area>'
b'<a href="sample2.html">sample 2</a><img src="sample2.jpg"/></html>'
)
response = HtmlResponse("http://example.com/index.html", body=html)
lx = self.extractor_cls(tags=None)
assert lx.extract_links(response) == []
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(url="http://example.com/sample1.html", text=""),
Link(url="http://example.com/sample2.html", text="sample 2"),
]
lx = self.extractor_cls(tags="area")
assert lx.extract_links(response) == [
Link(url="http://example.com/sample1.html", text=""),
]
lx = self.extractor_cls(tags="a")
assert lx.extract_links(response) == [
Link(url="http://example.com/sample2.html", text="sample 2"),
]
lx = self.extractor_cls(
tags=("a", "img"), attrs=("href", "src"), deny_extensions=()
)
assert lx.extract_links(response) == [
Link(url="http://example.com/sample2.html", text="sample 2"),
Link(url="http://example.com/sample2.jpg", text=""),
]
def test_tags_attrs(self):
html = b"""
<html><body>
<div id="item1" data-url="get?id=1"><a href="#">Item 1</a></div>
<div id="item2" data-url="get?id=2"><a href="#">Item 2</a></div>
</body></html>
"""
response = HtmlResponse("http://example.com/index.html", body=html)
lx = self.extractor_cls(tags="div", attrs="data-url")
assert lx.extract_links(response) == [
Link(
url="http://example.com/get?id=1",
text="Item 1",
fragment="",
nofollow=False,
),
Link(
url="http://example.com/get?id=2",
text="Item 2",
fragment="",
nofollow=False,
),
]
lx = self.extractor_cls(tags=("div",), attrs=("data-url",))
assert lx.extract_links(response) == [
Link(
url="http://example.com/get?id=1",
text="Item 1",
fragment="",
nofollow=False,
),
Link(
url="http://example.com/get?id=2",
text="Item 2",
fragment="",
nofollow=False,
),
]
def test_xhtml(self):
xhtml = b"""
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>XHTML document title</title>
</head>
<body>
<div class='links'>
<p><a href="/about.html">About us</a></p>
</div>
<div>
<p><a href="/follow.html">Follow this link</a></p>
</div>
<div>
<p><a href="/nofollow.html" rel="nofollow">Dont follow this one</a></p>
</div>
<div>
<p><a href="/nofollow2.html" rel="blah">Choose to follow or not</a></p>
</div>
<div>
<p><a href="http://google.com/something" rel="external nofollow">External link not to follow</a></p>
</div>
</body>
</html>
"""
response = HtmlResponse("http://example.com/index.xhtml", body=xhtml)
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(
url="http://example.com/about.html",
text="About us",
fragment="",
nofollow=False,
),
Link(
url="http://example.com/follow.html",
text="Follow this link",
fragment="",
nofollow=False,
),
Link(
url="http://example.com/nofollow.html",
text="Dont follow this one",
fragment="",
nofollow=True,
),
Link(
url="http://example.com/nofollow2.html",
text="Choose to follow or not",
fragment="",
nofollow=False,
),
Link(
url="http://google.com/something",
text="External link not to follow",
nofollow=True,
),
]
response = XmlResponse("http://example.com/index.xhtml", body=xhtml)
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(
url="http://example.com/about.html",
text="About us",
fragment="",
nofollow=False,
),
Link(
url="http://example.com/follow.html",
text="Follow this link",
fragment="",
nofollow=False,
),
Link(
url="http://example.com/nofollow.html",
text="Dont follow this one",
fragment="",
nofollow=True,
),
Link(
url="http://example.com/nofollow2.html",
text="Choose to follow or not",
fragment="",
nofollow=False,
),
Link(
url="http://google.com/something",
text="External link not to follow",
nofollow=True,
),
]
def test_link_wrong_href(self):
html = b"""
<a href="http://example.org/item1.html">Item 1</a>
<a href="http://[example.org/item2.html">Item 2</a>
<a href="http://example.org/item3.html">Item 3</a>
"""
response = HtmlResponse("http://example.org/index.html", body=html)
lx = self.extractor_cls()
assert list(lx.extract_links(response)) == [
Link(
url="http://example.org/item1.html",
text="Item 1",
nofollow=False,
),
Link(
url="http://example.org/item3.html",
text="Item 3",
nofollow=False,
),
]
def test_ftp_links(self):
body = b"""
<html><body>
<div><a href="ftp://www.external.com/">An Item</a></div>
</body></html>"""
response = HtmlResponse(
"http://www.example.com/index.html", body=body, encoding="utf8"
)
lx = self.extractor_cls()
assert lx.extract_links(response) == [
Link(
url="ftp://www.external.com/",
text="An Item",
fragment="",
nofollow=False,
),
]
def test_pickle_extractor(self):
lx = self.extractor_cls()
assert isinstance(pickle.loads(pickle.dumps(lx)), self.extractor_cls)
def test_link_extractor_aggregation(self):
"""When a parameter like restrict_css is used, the underlying
implementation calls its internal link extractor once per selector
matching the specified restrictions, and then aggregates the
extracted links.
Test that aggregation respects the unique and canonicalize
parameters.
"""
# unique=True (default), canonicalize=False (default)
lx = self.extractor_cls(restrict_css=("div",))
response = HtmlResponse(
"https://example.com",
body=b"""
<div>
<a href="/a">a1</a>
<a href="/b?a=1&b=2">b1</a>
</div>
<div>
<a href="/a">a2</a>
<a href="/b?b=2&a=1">b2</a>
</div>
""",
)
actual = lx.extract_links(response)
assert actual == [
Link(url="https://example.com/a", text="a1"),
Link(url="https://example.com/b?a=1&b=2", text="b1"),
Link(url="https://example.com/b?b=2&a=1", text="b2"),
]
# unique=True (default), canonicalize=True
lx = self.extractor_cls(restrict_css=("div",), canonicalize=True)
response = HtmlResponse(
"https://example.com",
body=b"""
<div>
<a href="/a">a1</a>
<a href="/b?a=1&b=2">b1</a>
</div>
<div>
<a href="/a">a2</a>
<a href="/b?b=2&a=1">b2</a>
</div>
""",
)
actual = lx.extract_links(response)
assert actual == [
Link(url="https://example.com/a", text="a1"),
Link(url="https://example.com/b?a=1&b=2", text="b1"),
]
# unique=False, canonicalize=False (default)
lx = self.extractor_cls(restrict_css=("div",), unique=False)
response = HtmlResponse(
"https://example.com",
body=b"""
<div>
<a href="/a">a1</a>
<a href="/b?a=1&b=2">b1</a>
</div>
<div>
<a href="/a">a2</a>
<a href="/b?b=2&a=1">b2</a>
</div>
""",
)
actual = lx.extract_links(response)
assert actual == [
Link(url="https://example.com/a", text="a1"),
Link(url="https://example.com/b?a=1&b=2", text="b1"),
Link(url="https://example.com/a", text="a2"),
Link(url="https://example.com/b?b=2&a=1", text="b2"),
]
# unique=False, canonicalize=True
lx = self.extractor_cls(
restrict_css=("div",), unique=False, canonicalize=True
)
response = HtmlResponse(
"https://example.com",
body=b"""
<div>
<a href="/a">a1</a>
<a href="/b?a=1&b=2">b1</a>
</div>
<div>
<a href="/a">a2</a>
<a href="/b?b=2&a=1">b2</a>
</div>
""",
)
actual = lx.extract_links(response)
assert actual == [
Link(url="https://example.com/a", text="a1"),
Link(url="https://example.com/b?a=1&b=2", text="b1"),
Link(url="https://example.com/a", text="a2"),
Link(url="https://example.com/b?a=1&b=2", text="b2"),
]
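            # A minimal illustration of the canonicalization assumed above:
            # w3lib's canonicalize_url() sorts query parameters, which is why
            # the two query strings collapse into one URL when canonicalize=True.
            from w3lib.url import canonicalize_url  # noqa: PLC0415

            assert (
                canonicalize_url("https://example.com/b?b=2&a=1")
                == "https://example.com/b?a=1&b=2"
            )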
class TestLxmlLinkExtractor(Base.TestLinkExtractorBase):
extractor_cls = LxmlLinkExtractor
def test_link_wrong_href(self):
html = b"""
<a href="http://example.org/item1.html">Item 1</a>
<a href="http://[example.org/item2.html">Item 2</a>
<a href="http://example.org/item3.html">Item 3</a>
"""
response = HtmlResponse("http://example.org/index.html", body=html)
lx = self.extractor_cls()
assert list(lx.extract_links(response)) == [
Link(url="http://example.org/item1.html", text="Item 1", nofollow=False),
Link(url="http://example.org/item3.html", text="Item 3", nofollow=False),
]
def test_link_restrict_text(self):
html = b"""
<a href="http://example.org/item1.html">Pic of a cat</a>
<a href="http://example.org/item2.html">Pic of a dog</a>
<a href="http://example.org/item3.html">Pic of a cow</a>
"""
response = HtmlResponse("http://example.org/index.html", body=html)
# Simple text inclusion test
lx = self.extractor_cls(restrict_text="dog")
assert list(lx.extract_links(response)) == [
Link(
url="http://example.org/item2.html",
text="Pic of a dog",
nofollow=False,
),
]
# Unique regex test
lx = self.extractor_cls(restrict_text=r"of.*dog")
assert list(lx.extract_links(response)) == [
Link(
url="http://example.org/item2.html",
text="Pic of a dog",
nofollow=False,
),
]
# Multiple regex test
lx = self.extractor_cls(restrict_text=[r"of.*dog", r"of.*cat"])
assert list(lx.extract_links(response)) == [
Link(
url="http://example.org/item1.html",
text="Pic of a cat",
nofollow=False,
),
Link(
url="http://example.org/item2.html",
text="Pic of a dog",
nofollow=False,
),
]
@pytest.mark.skipif(
Version(w3lib_version) < Version("2.0.0"),
reason=(
"Before w3lib 2.0.0, w3lib.url.safe_url_string would not complain "
"about an invalid port value."
),
)
def test_skip_bad_links(self):
html = b"""
<a href="http://example.org:non-port">Why would you do this?</a>
<a href="http://example.org/item2.html">Good Link</a>
<a href="http://example.org/item3.html">Good Link 2</a>
"""
response = HtmlResponse("http://example.org/index.html", body=html)
lx = self.extractor_cls()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_display.py | tests/test_utils_display.py | import builtins
from io import StringIO
from unittest import mock
from scrapy.utils.display import pformat, pprint
value = {"a": 1}
colorized_strings = {
(
(
"{\x1b[33m'\x1b[39;49;00m\x1b[33ma\x1b[39;49;00m\x1b[33m'"
"\x1b[39;49;00m: \x1b[34m1\x1b[39;49;00m}"
)
+ suffix
)
for suffix in (
# https://github.com/pygments/pygments/issues/2313
"\n", # pygments ≤ 2.13
"\x1b[37m\x1b[39;49;00m\n", # pygments ≥ 2.14
)
}
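# A rough decoding of the ANSI SGR escapes above, based on the standard SGR
# table rather than pygments internals: \x1b[33m selects a yellow foreground
# (string tokens), \x1b[34m a blue foreground (number tokens), and
# \x1b[39;49;00m resets foreground, background and attributes.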
plain_string = "{'a': 1}"
@mock.patch("sys.platform", "linux")
@mock.patch("sys.stdout.isatty")
def test_pformat(isatty):
isatty.return_value = True
assert pformat(value) in colorized_strings
@mock.patch("sys.stdout.isatty")
def test_pformat_dont_colorize(isatty):
isatty.return_value = True
assert pformat(value, colorize=False) == plain_string
def test_pformat_not_tty():
assert pformat(value) == plain_string
@mock.patch("sys.platform", "win32")
@mock.patch("platform.version")
@mock.patch("sys.stdout.isatty")
def test_pformat_old_windows(isatty, version):
isatty.return_value = True
version.return_value = "10.0.14392"
assert pformat(value) in colorized_strings
@mock.patch("sys.platform", "win32")
@mock.patch("scrapy.utils.display._enable_windows_terminal_processing")
@mock.patch("platform.version")
@mock.patch("sys.stdout.isatty")
def test_pformat_windows_no_terminal_processing(isatty, version, terminal_processing):
isatty.return_value = True
version.return_value = "10.0.14393"
terminal_processing.return_value = False
assert pformat(value) == plain_string
@mock.patch("sys.platform", "win32")
@mock.patch("scrapy.utils.display._enable_windows_terminal_processing")
@mock.patch("platform.version")
@mock.patch("sys.stdout.isatty")
def test_pformat_windows(isatty, version, terminal_processing):
isatty.return_value = True
version.return_value = "10.0.14393"
terminal_processing.return_value = True
assert pformat(value) in colorized_strings
@mock.patch("sys.platform", "linux")
@mock.patch("sys.stdout.isatty")
def test_pformat_no_pygments(isatty):
isatty.return_value = True
real_import = builtins.__import__
def mock_import(name, globals_, locals_, fromlist, level):
if "pygments" in name:
raise ImportError
return real_import(name, globals_, locals_, fromlist, level)
builtins.__import__ = mock_import
assert pformat(value) == plain_string
builtins.__import__ = real_import
def test_pprint():
with mock.patch("sys.stdout", new=StringIO()) as mock_out:
pprint(value)
assert mock_out.getvalue() == "{'a': 1}\n"
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_httpauth.py | tests/test_downloadermiddleware_httpauth.py | import pytest
from w3lib.http import basic_auth_header
from scrapy.downloadermiddlewares.httpauth import HttpAuthMiddleware
from scrapy.http import Request
from scrapy.spiders import Spider
class LegacySpider(Spider):
http_user = "foo"
http_pass = "bar"
class DomainSpider(Spider):
http_user = "foo"
http_pass = "bar"
http_auth_domain = "example.com"
class AnyDomainSpider(Spider):
http_user = "foo"
http_pass = "bar"
http_auth_domain = None
class TestHttpAuthMiddlewareLegacy:
def setup_method(self):
self.spider = LegacySpider("foo")
def test_auth(self):
mw = HttpAuthMiddleware()
with pytest.raises(AttributeError):
mw.spider_opened(self.spider)
class TestHttpAuthMiddleware:
def setup_method(self):
self.mw = HttpAuthMiddleware()
spider = DomainSpider("foo")
self.mw.spider_opened(spider)
def teardown_method(self):
del self.mw
def test_no_auth(self):
req = Request("http://example-noauth.com/")
assert self.mw.process_request(req) is None
assert "Authorization" not in req.headers
def test_auth_domain(self):
req = Request("http://example.com/")
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == basic_auth_header("foo", "bar")
def test_auth_subdomain(self):
req = Request("http://foo.example.com/")
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == basic_auth_header("foo", "bar")
def test_auth_already_set(self):
req = Request("http://example.com/", headers={"Authorization": "Digest 123"})
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == b"Digest 123"
class TestHttpAuthAnyMiddleware:
def setup_method(self):
self.mw = HttpAuthMiddleware()
spider = AnyDomainSpider("foo")
self.mw.spider_opened(spider)
def teardown_method(self):
del self.mw
def test_auth(self):
req = Request("http://example.com/")
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == basic_auth_header("foo", "bar")
def test_auth_already_set(self):
req = Request("http://example.com/", headers={"Authorization": "Digest 123"})
assert self.mw.process_request(req) is None
assert req.headers["Authorization"] == b"Digest 123"
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_link.py | tests/test_link.py | import pytest
from scrapy.link import Link
class TestLink:
def _assert_same_links(self, link1, link2):
assert link1 == link2
assert hash(link1) == hash(link2)
def _assert_different_links(self, link1, link2):
assert link1 != link2
assert hash(link1) != hash(link2)
def test_eq_and_hash(self):
l1 = Link("http://www.example.com")
l2 = Link("http://www.example.com/other")
l3 = Link("http://www.example.com")
self._assert_same_links(l1, l1)
self._assert_different_links(l1, l2)
self._assert_same_links(l1, l3)
l4 = Link("http://www.example.com", text="test")
l5 = Link("http://www.example.com", text="test2")
l6 = Link("http://www.example.com", text="test")
self._assert_same_links(l4, l4)
self._assert_different_links(l4, l5)
self._assert_same_links(l4, l6)
l7 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=False
)
l8 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=False
)
l9 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=True
)
l10 = Link(
"http://www.example.com", text="test", fragment="other", nofollow=False
)
self._assert_same_links(l7, l8)
self._assert_different_links(l7, l9)
self._assert_different_links(l7, l10)
def test_repr(self):
l1 = Link(
"http://www.example.com", text="test", fragment="something", nofollow=True
)
l2 = eval(repr(l1)) # pylint: disable=eval-used
self._assert_same_links(l1, l2)
def test_bytes_url(self):
with pytest.raises(TypeError):
Link(b"http://www.example.com/\xc2\xa3")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_proxy_connect.py | tests/test_proxy_connect.py | import json
import os
import re
import sys
from pathlib import Path
from subprocess import PIPE, Popen
from urllib.parse import urlsplit, urlunsplit
import pytest
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from scrapy.http import Request
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import SimpleSpider, SingleRequestSpider
class MitmProxy:
auth_user = "scrapy"
auth_pass = "scrapy"
def start(self):
script = """
import sys
from mitmproxy.tools.main import mitmdump
sys.argv[0] = "mitmdump"
sys.exit(mitmdump())
"""
cert_path = Path(__file__).parent.resolve() / "keys"
self.proc = Popen(
[
sys.executable,
"-u",
"-c",
script,
"--listen-host",
"127.0.0.1",
"--listen-port",
"0",
"--proxyauth",
f"{self.auth_user}:{self.auth_pass}",
"--set",
f"confdir={cert_path}",
"--ssl-insecure",
],
stdout=PIPE,
)
line = self.proc.stdout.readline().decode("utf-8")
host_port = re.search(r"listening at (?:http://)?([^:]+:\d+)", line).group(1)
return f"http://{self.auth_user}:{self.auth_pass}@{host_port}"
def stop(self):
self.proc.kill()
self.proc.communicate()
def _wrong_credentials(proxy_url):
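    # swap the valid "scrapy:scrapy" userinfo for bogus credentials, leaving the rest of the URL intact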
bad_auth_proxy = list(urlsplit(proxy_url))
bad_auth_proxy[1] = bad_auth_proxy[1].replace("scrapy:scrapy@", "wrong:wronger@")
return urlunsplit(bad_auth_proxy)
@pytest.mark.requires_mitmproxy
class TestProxyConnect:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self._oldenv = os.environ.copy()
self._proxy = MitmProxy()
proxy_url = self._proxy.start()
os.environ["https_proxy"] = proxy_url
os.environ["http_proxy"] = proxy_url
def teardown_method(self):
self._proxy.stop()
os.environ = self._oldenv
@inlineCallbacks
def test_https_connect_tunnel(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(self.mockserver.url("/status?n=200", is_secure=True))
self._assert_got_response_code(200, log)
@inlineCallbacks
def test_https_tunnel_auth_error(self):
os.environ["https_proxy"] = _wrong_credentials(os.environ["https_proxy"])
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(self.mockserver.url("/status?n=200", is_secure=True))
        # The proxy returns a 407 status code, but it never reaches the client;
        # the client only sees a TunnelError.
self._assert_got_tunnel_error(log)
@inlineCallbacks
def test_https_tunnel_without_leak_proxy_authorization_header(self):
request = Request(self.mockserver.url("/echo", is_secure=True))
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(seed=request)
self._assert_got_response_code(200, log)
echo = json.loads(crawler.spider.meta["responses"][0].text)
assert "Proxy-Authorization" not in echo["headers"]
def _assert_got_response_code(self, code, log):
assert str(log).count(f"Crawled ({code})") == 1
def _assert_got_tunnel_error(self, log):
assert "TunnelError" in str(log)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_request.py | tests/test_utils_request.py | from __future__ import annotations
import json
from hashlib import sha1
from weakref import WeakKeyDictionary
import pytest
from scrapy.http import Request
from scrapy.utils.python import to_bytes
from scrapy.utils.request import (
_fingerprint_cache,
fingerprint,
request_httprepr,
request_to_curl,
)
from scrapy.utils.test import get_crawler
@pytest.mark.parametrize(
("r", "expected"),
[
(
Request("http://www.example.com"),
b"GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
),
(
Request("http://www.example.com/some/page.html?arg=1"),
b"GET /some/page.html?arg=1 HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
),
(
Request(
"http://www.example.com",
method="POST",
headers={"Content-type": b"text/html"},
body=b"Some body",
),
b"POST / HTTP/1.1\r\nHost: www.example.com\r\nContent-Type: text/html\r\n\r\nSome body",
),
],
)
def test_request_httprepr(r: Request, expected: bytes) -> None:
assert request_httprepr(r) == expected
@pytest.mark.parametrize(
"r",
[
Request("file:///tmp/foo.txt"),
Request("ftp://localhost/tmp/foo.txt"),
],
)
def test_request_httprepr_for_non_http_request(r: Request) -> None:
    # The exact representation is not important, but building it must not fail.
request_httprepr(r)
class TestFingerprint:
function: staticmethod = staticmethod(fingerprint)
cache: (
WeakKeyDictionary[Request, dict[tuple[tuple[bytes, ...] | None, bool], bytes]]
| WeakKeyDictionary[Request, dict[tuple[tuple[bytes, ...] | None, bool], str]]
) = _fingerprint_cache
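    # cache key: (include_headers tuple or None, keep_fragments flag)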
default_cache_key = (None, False)
known_hashes: tuple[tuple[Request, bytes | str, dict], ...] = (
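        # each entry: (request, expected fingerprint, kwargs for the fingerprint call)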
(
Request("http://example.org"),
b"xs\xd7\x0c3uj\x15\xfe\xd7d\x9b\xa9\t\xe0d\xbf\x9cXD",
{},
),
(
Request("https://example.org"),
b"\xc04\x85P,\xaa\x91\x06\xf8t\xb4\xbd*\xd9\xe9\x8a:m\xc3l",
{},
),
(
Request("https://example.org?a"),
b"G\xad\xb8Ck\x19\x1c\xed\x838,\x01\xc4\xde;\xee\xa5\x94a\x0c",
{},
),
(
Request("https://example.org?a=b"),
b"\x024MYb\x8a\xc2\x1e\xbc>\xd6\xac*\xda\x9cF\xc1r\x7f\x17",
{},
),
(
Request("https://example.org?a=b&a"),
b"t+\xe8*\xfb\x84\xe3v\x1a}\x88p\xc0\xccB\xd7\x9d\xfez\x96",
{},
),
(
Request("https://example.org?a=b&a=c"),
b"\xda\x1ec\xd0\x9c\x08s`\xb4\x9b\xe2\xb6R\xf8k\xef\xeaQG\xef",
{},
),
(
Request("https://example.org", method="POST"),
b"\x9d\xcdA\x0fT\x02:\xca\xa0}\x90\xda\x05B\xded\x8aN7\x1d",
{},
),
(
Request("https://example.org", body=b"a"),
b"\xc34z>\xd8\x99\x8b\xda7\x05r\x99I\xa8\xa0x;\xa41_",
{},
),
(
Request("https://example.org", method="POST", body=b"a"),
b"5`\xe2y4\xd0\x9d\xee\xe0\xbatw\x87Q\xe8O\xd78\xfc\xe7",
{},
),
(
Request("https://example.org#a", headers={"A": b"B"}),
b"\xc04\x85P,\xaa\x91\x06\xf8t\xb4\xbd*\xd9\xe9\x8a:m\xc3l",
{},
),
(
Request("https://example.org#a", headers={"A": b"B"}),
b"]\xc7\x1f\xf2\xafG2\xbc\xa4\xfa\x99\n33\xda\x18\x94\x81U.",
{"include_headers": ["A"]},
),
(
Request("https://example.org#a", headers={"A": b"B"}),
b"<\x1a\xeb\x85y\xdeW\xfb\xdcq\x88\xee\xaf\x17\xdd\x0c\xbfH\x18\x1f",
{"keep_fragments": True},
),
(
Request("https://example.org#a", headers={"A": b"B"}),
b"\xc1\xef~\x94\x9bS\xc1\x83\t\xdcz8\x9f\xdc{\x11\x16I.\x11",
{"include_headers": ["A"], "keep_fragments": True},
),
(
Request("https://example.org/ab"),
b"N\xe5l\xb8\x12@iw\xe2\xf3\x1bp\xea\xffp!u\xe2\x8a\xc6",
{},
),
(
Request("https://example.org/a", body=b"b"),
b"_NOv\xbco$6\xfcW\x9f\xb24g\x9f\xbb\xdd\xa82\xc5",
{},
),
)
def test_query_string_key_order(self):
r1 = Request("http://www.example.com/query?id=111&cat=222")
r2 = Request("http://www.example.com/query?cat=222&id=111")
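        # repeated calls must be deterministic, and query parameter order must not matter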
assert self.function(r1) == self.function(r1)
assert self.function(r1) == self.function(r2)
def test_query_string_key_without_value(self):
r1 = Request("http://www.example.com/hnnoticiaj1.aspx?78132,199")
r2 = Request("http://www.example.com/hnnoticiaj1.aspx?78160,199")
assert self.function(r1) != self.function(r2)
def test_caching(self):
r1 = Request("http://www.example.com/hnnoticiaj1.aspx?78160,199")
assert self.function(r1) == self.cache[r1][self.default_cache_key]
def test_header(self):
r1 = Request("http://www.example.com/members/offers.html")
r2 = Request("http://www.example.com/members/offers.html")
r2.headers["SESSIONID"] = b"somehash"
assert self.function(r1) == self.function(r2)
def test_headers(self):
r1 = Request("http://www.example.com/")
r2 = Request("http://www.example.com/")
r2.headers["Accept-Language"] = b"en"
r3 = Request("http://www.example.com/")
r3.headers["Accept-Language"] = b"en"
r3.headers["SESSIONID"] = b"somehash"
assert self.function(r1) == self.function(r2) == self.function(r3)
assert self.function(r1) == self.function(
r1, include_headers=["Accept-Language"]
)
assert self.function(r1) != self.function(
r2, include_headers=["Accept-Language"]
)
assert self.function(
r3, include_headers=["accept-language", "sessionid"]
) == self.function(r3, include_headers=["SESSIONID", "Accept-Language"])
def test_fragment(self):
r1 = Request("http://www.example.com/test.html")
r2 = Request("http://www.example.com/test.html#fragment")
assert self.function(r1) == self.function(r2)
assert self.function(r1) == self.function(r1, keep_fragments=True)
assert self.function(r2) != self.function(r2, keep_fragments=True)
assert self.function(r1) != self.function(r2, keep_fragments=True)
def test_method_and_body(self):
r1 = Request("http://www.example.com")
r2 = Request("http://www.example.com", method="POST")
r3 = Request("http://www.example.com", method="POST", body=b"request body")
assert self.function(r1) != self.function(r2)
assert self.function(r2) != self.function(r3)
def test_request_replace(self):
# cached fingerprint must be cleared on request copy
r1 = Request("http://www.example.com")
fp1 = self.function(r1)
r2 = r1.replace(url="http://www.example.com/other")
fp2 = self.function(r2)
assert fp1 != fp2
def test_part_separation(self):
# An old implementation used to serialize request data in a way that
# would put the body right after the URL.
r1 = Request("http://www.example.com/foo")
fp1 = self.function(r1)
r2 = Request("http://www.example.com/f", body=b"oo")
fp2 = self.function(r2)
assert fp1 != fp2
def test_hashes(self):
"""Test hardcoded hashes, to make sure future changes to not introduce
backward incompatibilities."""
actual = [
self.function(request, **kwargs) for request, _, kwargs in self.known_hashes
]
expected = [_fingerprint for _, _fingerprint, _ in self.known_hashes]
assert actual == expected
class TestRequestFingerprinter:
def test_fingerprint(self):
crawler = get_crawler()
request = Request("https://example.com")
assert crawler.request_fingerprinter.fingerprint(request) == fingerprint(
request
)
class TestCustomRequestFingerprinter:
def test_include_headers(self):
class RequestFingerprinter:
def fingerprint(self, request):
return fingerprint(request, include_headers=["X-ID"])
settings = {
"REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter,
}
crawler = get_crawler(settings_dict=settings)
r1 = Request("http://www.example.com", headers={"X-ID": "1"})
fp1 = crawler.request_fingerprinter.fingerprint(r1)
r2 = Request("http://www.example.com", headers={"X-ID": "2"})
fp2 = crawler.request_fingerprinter.fingerprint(r2)
assert fp1 != fp2
def test_dont_canonicalize(self):
class RequestFingerprinter:
cache = WeakKeyDictionary()
def fingerprint(self, request):
if request not in self.cache:
fp = sha1()
fp.update(to_bytes(request.url))
self.cache[request] = fp.digest()
return self.cache[request]
settings = {
"REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter,
}
crawler = get_crawler(settings_dict=settings)
r1 = Request("http://www.example.com?a=1&a=2")
fp1 = crawler.request_fingerprinter.fingerprint(r1)
r2 = Request("http://www.example.com?a=2&a=1")
fp2 = crawler.request_fingerprinter.fingerprint(r2)
assert fp1 != fp2
def test_meta(self):
class RequestFingerprinter:
def fingerprint(self, request):
if "fingerprint" in request.meta:
return request.meta["fingerprint"]
return fingerprint(request)
settings = {
"REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter,
}
crawler = get_crawler(settings_dict=settings)
r1 = Request("http://www.example.com")
fp1 = crawler.request_fingerprinter.fingerprint(r1)
r2 = Request("http://www.example.com", meta={"fingerprint": "a"})
fp2 = crawler.request_fingerprinter.fingerprint(r2)
r3 = Request("http://www.example.com", meta={"fingerprint": "a"})
fp3 = crawler.request_fingerprinter.fingerprint(r3)
r4 = Request("http://www.example.com", meta={"fingerprint": "b"})
fp4 = crawler.request_fingerprinter.fingerprint(r4)
assert fp1 != fp2
assert fp1 != fp4
assert fp2 != fp4
assert fp2 == fp3
def test_from_crawler(self):
class RequestFingerprinter:
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def __init__(self, crawler):
self._fingerprint = crawler.settings["FINGERPRINT"]
def fingerprint(self, request):
return self._fingerprint
settings = {
"REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter,
"FINGERPRINT": b"fingerprint",
}
crawler = get_crawler(settings_dict=settings)
request = Request("http://www.example.com")
        fp = crawler.request_fingerprinter.fingerprint(request)
        assert fp == settings["FINGERPRINT"]
class TestRequestToCurl:
def _test_request(self, request_object, expected_curl_command):
curl_command = request_to_curl(request_object)
assert curl_command == expected_curl_command
def test_get(self):
request_object = Request("https://www.example.com")
expected_curl_command = "curl -X GET https://www.example.com"
self._test_request(request_object, expected_curl_command)
def test_post(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
'curl -X POST https://www.httpbin.org/post --data-raw \'{"foo": "bar"}\''
)
self._test_request(request_object, expected_curl_command)
def test_headers(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
headers={"Content-Type": "application/json", "Accept": "application/json"},
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
"curl -X POST https://www.httpbin.org/post"
' --data-raw \'{"foo": "bar"}\''
" -H 'Content-Type: application/json' -H 'Accept: application/json'"
)
self._test_request(request_object, expected_curl_command)
def test_cookies_dict(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
cookies={"foo": "bar"},
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
"curl -X POST https://www.httpbin.org/post"
" --data-raw '{\"foo\": \"bar\"}' --cookie 'foo=bar'"
)
self._test_request(request_object, expected_curl_command)
def test_cookies_list(self):
request_object = Request(
"https://www.httpbin.org/post",
method="POST",
cookies=[{"foo": "bar"}],
body=json.dumps({"foo": "bar"}),
)
expected_curl_command = (
"curl -X POST https://www.httpbin.org/post"
" --data-raw '{\"foo\": \"bar\"}' --cookie 'foo=bar'"
)
self._test_request(request_object, expected_curl_command)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_project.py | tests/test_utils_project.py | import os
import warnings
from pathlib import Path
import pytest
from scrapy.utils.misc import set_environ
from scrapy.utils.project import data_path, get_project_settings
@pytest.fixture
def proj_path(tmp_path):
prev_dir = Path.cwd()
project_dir = tmp_path
try:
os.chdir(project_dir)
Path("scrapy.cfg").touch()
yield project_dir
finally:
os.chdir(prev_dir)
def test_data_path_outside_project():
assert str(Path(".scrapy", "somepath")) == data_path("somepath")
abspath = str(Path(os.path.sep, "absolute", "path"))
assert abspath == data_path(abspath)
def test_data_path_inside_project(proj_path: Path) -> None:
expected = proj_path / ".scrapy" / "somepath"
assert expected.resolve() == Path(data_path("somepath")).resolve()
abspath = str(Path(os.path.sep, "absolute", "path").resolve())
assert abspath == data_path(abspath)
class TestGetProjectSettings:
def test_valid_envvar(self):
value = "tests.test_cmdline.settings"
envvars = {
"SCRAPY_SETTINGS_MODULE": value,
}
with warnings.catch_warnings():
warnings.simplefilter("error")
with set_environ(**envvars):
settings = get_project_settings()
assert settings.get("SETTINGS_MODULE") == value
def test_invalid_envvar(self):
envvars = {
"SCRAPY_FOO": "bar",
}
with set_environ(**envvars):
settings = get_project_settings()
assert settings.get("SCRAPY_FOO") is None
def test_valid_and_invalid_envvars(self):
value = "tests.test_cmdline.settings"
envvars = {
"SCRAPY_FOO": "bar",
"SCRAPY_SETTINGS_MODULE": value,
}
with set_environ(**envvars):
settings = get_project_settings()
assert settings.get("SETTINGS_MODULE") == value
assert settings.get("SCRAPY_FOO") is None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_datatypes.py | tests/test_utils_datatypes.py | import copy
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterator, Mapping, MutableMapping
import pytest
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Request
from scrapy.utils.datatypes import (
CaseInsensitiveDict,
CaselessDict,
LocalCache,
LocalWeakReferencedCache,
SequenceExclude,
)
from scrapy.utils.python import garbage_collect
class TestCaseInsensitiveDictBase(ABC):
@property
@abstractmethod
def dict_class(self) -> type[MutableMapping]:
raise NotImplementedError
def test_init_dict(self):
seq = {"red": 1, "black": 3}
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_init_pair_sequence(self):
seq = (("red", 1), ("black", 3))
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_init_mapping(self):
class MyMapping(Mapping):
def __init__(self, **kwargs):
self._d = kwargs
def __getitem__(self, key):
return self._d[key]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
seq = MyMapping(red=1, black=3)
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_init_mutable_mapping(self):
class MyMutableMapping(MutableMapping):
def __init__(self, **kwargs):
self._d = kwargs
def __getitem__(self, key):
return self._d[key]
def __setitem__(self, key, value):
self._d[key] = value
def __delitem__(self, key):
del self._d[key]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
seq = MyMutableMapping(red=1, black=3)
d = self.dict_class(seq)
assert d["red"] == 1
assert d["black"] == 3
def test_caseless(self):
d = self.dict_class()
d["key_Lower"] = 1
assert d["KEy_loWer"] == 1
assert d.get("KEy_loWer") == 1
d["KEY_LOWER"] = 3
assert d["key_Lower"] == 3
assert d.get("key_Lower") == 3
def test_delete(self):
d = self.dict_class({"key_lower": 1})
del d["key_LOWER"]
with pytest.raises(KeyError):
d["key_LOWER"]
with pytest.raises(KeyError):
d["key_lower"]
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_getdefault(self):
d = CaselessDict()
assert d.get("c", 5) == 5
d["c"] = 10
assert d.get("c", 5) == 10
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_setdefault(self):
d = CaselessDict({"a": 1, "b": 2})
r = d.setdefault("A", 5)
assert r == 1
assert d["A"] == 1
r = d.setdefault("c", 5)
assert r == 5
assert d["C"] == 5
def test_fromkeys(self):
keys = ("a", "b")
d = self.dict_class.fromkeys(keys)
assert d["A"] is None
assert d["B"] is None
d = self.dict_class.fromkeys(keys, 1)
assert d["A"] == 1
assert d["B"] == 1
instance = self.dict_class()
d = instance.fromkeys(keys)
assert d["A"] is None
assert d["B"] is None
d = instance.fromkeys(keys, 1)
assert d["A"] == 1
assert d["B"] == 1
def test_contains(self):
d = self.dict_class()
d["a"] = 1
assert "A" in d
def test_pop(self):
d = self.dict_class()
d["a"] = 1
assert d.pop("A") == 1
with pytest.raises(KeyError):
d.pop("A")
def test_normkey(self):
class MyDict(self.dict_class):
def _normkey(self, key):
return key.title()
            normkey = _normkey  # alias used by the deprecated CaselessDict class
d = MyDict()
d["key-one"] = 2
assert list(d.keys()) == ["Key-One"]
def test_normvalue(self):
class MyDict(self.dict_class):
def _normvalue(self, value):
if value is not None:
return value + 1
return None
            normvalue = _normvalue  # alias used by the deprecated CaselessDict class
d = MyDict({"key": 1})
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict()
d["key"] = 1
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict()
d.setdefault("key", 1)
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict()
d.update({"key": 1})
assert d["key"] == 2
assert d.get("key") == 2
d = MyDict.fromkeys(("key",), 1)
assert d["key"] == 2
assert d.get("key") == 2
def test_copy(self):
h1 = self.dict_class({"header1": "value"})
h2 = copy.copy(h1)
assert isinstance(h2, self.dict_class)
assert h1 == h2
assert h1.get("header1") == h2.get("header1")
assert h1.get("header1") == h2.get("HEADER1")
h3 = h1.copy()
assert isinstance(h3, self.dict_class)
assert h1 == h3
assert h1.get("header1") == h3.get("header1")
assert h1.get("header1") == h3.get("HEADER1")
class TestCaseInsensitiveDict(TestCaseInsensitiveDictBase):
dict_class = CaseInsensitiveDict
def test_repr(self):
d1 = self.dict_class({"foo": "bar"})
assert repr(d1) == "<CaseInsensitiveDict: {'foo': 'bar'}>"
d2 = self.dict_class({"AsDf": "QwErTy", "FoO": "bAr"})
assert repr(d2) == "<CaseInsensitiveDict: {'AsDf': 'QwErTy', 'FoO': 'bAr'}>"
def test_iter(self):
d = self.dict_class({"AsDf": "QwErTy", "FoO": "bAr"})
iterkeys = iter(d)
assert isinstance(iterkeys, Iterator)
assert list(iterkeys) == ["AsDf", "FoO"]
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestCaselessDict(TestCaseInsensitiveDictBase):
dict_class = CaselessDict
def test_deprecation_message(self):
with warnings.catch_warnings(record=True) as caught:
warnings.filterwarnings("always", category=ScrapyDeprecationWarning)
self.dict_class({"foo": "bar"})
assert len(caught) == 1
assert issubclass(caught[0].category, ScrapyDeprecationWarning)
assert (
str(caught[0].message)
== "scrapy.utils.datatypes.CaselessDict is deprecated,"
" please use scrapy.utils.datatypes.CaseInsensitiveDict instead"
)
class TestSequenceExclude:
def test_list(self):
seq = [1, 2, 3]
d = SequenceExclude(seq)
assert 0 in d
assert 4 in d
assert 2 not in d
def test_range(self):
seq = range(10, 20)
d = SequenceExclude(seq)
assert 5 in d
assert 20 in d
assert 15 not in d
def test_range_step(self):
seq = range(10, 20, 3)
d = SequenceExclude(seq)
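        # every value produced by the excluded range must be reported as not contained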
are_not_in = [v for v in range(10, 20, 3) if v in d]
assert are_not_in == []
are_not_in = [v for v in range(10, 20) if v in d]
assert are_not_in == [11, 12, 14, 15, 17, 18]
def test_string_seq(self):
seq = "cde"
d = SequenceExclude(seq)
chars = "".join(v for v in "abcdefg" if v in d)
assert chars == "abfg"
def test_stringset_seq(self):
seq = set("cde")
d = SequenceExclude(seq)
chars = "".join(v for v in "abcdefg" if v in d)
assert chars == "abfg"
def test_set(self):
"""Anything that is not in the supplied sequence will evaluate as 'in' the container."""
seq = {-3, "test", 1.1}
d = SequenceExclude(seq)
assert 0 in d
assert "foo" in d
assert 3.14 in d
assert set("bar") in d
# supplied sequence is a set, so checking for list (non)inclusion fails
with pytest.raises(TypeError):
["a", "b", "c"] in d # noqa: B015
for v in [-3, "test", 1.1]:
assert v not in d
class TestLocalCache:
def test_cache_with_limit(self):
cache = LocalCache(limit=2)
cache["a"] = 1
cache["b"] = 2
cache["c"] = 3
assert len(cache) == 2
assert "a" not in cache
assert "b" in cache
assert "c" in cache
assert cache["b"] == 2
assert cache["c"] == 3
def test_cache_without_limit(self):
maximum = 10**4
cache = LocalCache()
for x in range(maximum):
cache[str(x)] = x
assert len(cache) == maximum
for x in range(maximum):
assert str(x) in cache
assert cache[str(x)] == x
class TestLocalWeakReferencedCache:
def test_cache_with_limit(self):
cache = LocalWeakReferencedCache(limit=2)
r1 = Request("https://example.org")
r2 = Request("https://example.com")
r3 = Request("https://example.net")
cache[r1] = 1
cache[r2] = 2
cache[r3] = 3
assert len(cache) == 2
assert r1 not in cache
assert r2 in cache
assert r3 in cache
assert cache[r1] is None
assert cache[r2] == 2
assert cache[r3] == 3
del r2
# PyPy takes longer to collect dead references
garbage_collect()
assert len(cache) == 1
def test_cache_non_weak_referenceable_objects(self):
cache = LocalWeakReferencedCache()
k1 = None
k2 = 1
k3 = [1, 2, 3]
cache[k1] = 1
cache[k2] = 2
cache[k3] = 3
assert k1 not in cache
assert k2 not in cache
assert k3 not in cache
assert len(cache) == 0
def test_cache_without_limit(self):
maximum = 10**4
cache = LocalWeakReferencedCache()
refs = []
for x in range(maximum):
refs.append(Request(f"https://example.org/{x}"))
cache[refs[-1]] = x
assert len(cache) == maximum
for i, r in enumerate(refs):
assert r in cache
assert cache[r] == i
del r # delete reference to the last object in the list # pylint: disable=undefined-loop-variable
# delete half of the objects, make sure that is reflected in the cache
for _ in range(maximum // 2):
refs.pop()
# PyPy takes longer to collect dead references
garbage_collect()
assert len(cache) == maximum // 2
for i, r in enumerate(refs):
assert r in cache
assert cache[r] == i
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_extension_telnet.py | tests/test_extension_telnet.py | import pytest
from twisted.conch.telnet import ITelnetProtocol
from twisted.cred import credentials
from twisted.internet.defer import inlineCallbacks
from scrapy.extensions.telnet import TelnetConsole
from scrapy.utils.test import get_crawler
class TestTelnetExtension:
def _get_console_and_portal(self, settings=None):
crawler = get_crawler(settings_dict=settings)
console = TelnetConsole(crawler)
# This function has some side effects we don't need for this test
console._get_telnet_vars = dict
console.start_listening()
protocol = console.protocol()
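        # the telnet transport keeps its protocol constructor args; the first one is the credentials Portal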
portal = protocol.protocolArgs[0]
return console, portal
@inlineCallbacks
def test_bad_credentials(self):
console, portal = self._get_console_and_portal()
creds = credentials.UsernamePassword(b"username", b"password")
d = portal.login(creds, None, ITelnetProtocol)
with pytest.raises(ValueError, match="Invalid credentials"):
yield d
console.stop_listening()
@inlineCallbacks
def test_good_credentials(self):
console, portal = self._get_console_and_portal()
creds = credentials.UsernamePassword(
console.username.encode("utf8"), console.password.encode("utf8")
)
d = portal.login(creds, None, ITelnetProtocol)
yield d
console.stop_listening()
@inlineCallbacks
def test_custom_credentials(self):
settings = {
"TELNETCONSOLE_USERNAME": "user",
"TELNETCONSOLE_PASSWORD": "pass",
}
console, portal = self._get_console_and_portal(settings=settings)
creds = credentials.UsernamePassword(b"user", b"pass")
d = portal.login(creds, None, ITelnetProtocol)
yield d
console.stop_listening()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_engine_stop_download_bytes.py | tests/test_engine_stop_download_bytes.py | from __future__ import annotations
from typing import TYPE_CHECKING
from testfixtures import LogCapture
from scrapy.exceptions import StopDownload
from scrapy.utils.defer import deferred_f_from_coro_f
from tests.test_engine import (
AttrsItemsSpider,
CrawlerRun,
DataClassItemsSpider,
DictItemsSpider,
MySpider,
TestEngineBase,
)
if TYPE_CHECKING:
from tests.mockserver.http import MockServer
class BytesReceivedCrawlerRun(CrawlerRun):
def bytes_received(self, data, request, spider):
super().bytes_received(data, request, spider)
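        # fail=False stops the download after the first chunk while keeping the partial response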
raise StopDownload(fail=False)
class TestBytesReceivedEngine(TestEngineBase):
@deferred_f_from_coro_f
async def test_crawler(self, mockserver: MockServer) -> None:
for spider in (
MySpider,
DictItemsSpider,
AttrsItemsSpider,
DataClassItemsSpider,
):
run = BytesReceivedCrawlerRun(spider)
with LogCapture() as log:
await run.run(mockserver)
log.check_present(
(
"scrapy.core.downloader.handlers.http11",
"DEBUG",
f"Download stopped for <GET {mockserver.url('/redirected')}> "
"from signal handler BytesReceivedCrawlerRun.bytes_received",
)
)
log.check_present(
(
"scrapy.core.downloader.handlers.http11",
"DEBUG",
f"Download stopped for <GET {mockserver.url('/static/')}> "
"from signal handler BytesReceivedCrawlerRun.bytes_received",
)
)
log.check_present(
(
"scrapy.core.downloader.handlers.http11",
"DEBUG",
f"Download stopped for <GET {mockserver.url('/numbers')}> "
"from signal handler BytesReceivedCrawlerRun.bytes_received",
)
)
self._assert_visited_urls(run)
self._assert_scheduled_requests(run, count=9)
self._assert_downloaded_responses(run, count=9)
self._assert_signals_caught(run)
self._assert_headers_received(run)
self._assert_bytes_received(run)
@staticmethod
def _assert_bytes_received(run: CrawlerRun) -> None:
assert len(run.bytes) == 9
for request, data in run.bytes.items():
joined_data = b"".join(data)
assert len(data) == 1 # signal was fired only once
if run.getpath(request.url) == "/numbers":
# Received bytes are not the complete response. The exact amount depends
# on the buffer size, which can vary, so we only check that the amount
# of received bytes is strictly less than the full response.
numbers = [str(x).encode("utf8") for x in range(2**18)]
assert len(joined_data) < len(b"".join(numbers))
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware.py | tests/test_spidermiddleware.py | from __future__ import annotations
from collections.abc import AsyncIterator, Iterable
from inspect import isasyncgen
from typing import TYPE_CHECKING, Any
from unittest import mock
import pytest
from testfixtures import LogCapture
from twisted.internet import defer
from scrapy.core.spidermw import SpiderMiddlewareManager
from scrapy.exceptions import ScrapyDeprecationWarning, _InvalidOutput
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.asyncgen import collect_asyncgen
from scrapy.utils.asyncio import call_later
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from twisted.python.failure import Failure
from scrapy.crawler import Crawler
class TestSpiderMiddleware:
def setup_method(self):
self.request = Request("http://example.com/index.html")
self.response = Response(self.request.url, request=self.request)
self.crawler = get_crawler(Spider, {"SPIDER_MIDDLEWARES_BASE": {}})
self.crawler.spider = self.crawler._create_spider("foo")
self.mwman = SpiderMiddlewareManager.from_crawler(self.crawler)
async def _scrape_response(self) -> Any:
"""Execute spider mw manager's scrape_response_async method and return the result.
        Raise an exception in case of failure.
"""
def scrape_func(
response: Response | Failure, request: Request
) -> defer.Deferred[Iterable[Any]]:
it = mock.MagicMock()
return defer.succeed(it)
return await self.mwman.scrape_response_async(
scrape_func, self.response, self.request
)
class TestProcessSpiderInputInvalidOutput(TestSpiderMiddleware):
"""Invalid return value for process_spider_input method"""
@deferred_f_from_coro_f
async def test_invalid_process_spider_input(self):
class InvalidProcessSpiderInputMiddleware:
def process_spider_input(self, response):
return 1
self.mwman._add_middleware(InvalidProcessSpiderInputMiddleware())
with pytest.raises(_InvalidOutput):
await self._scrape_response()
class TestProcessSpiderOutputInvalidOutput(TestSpiderMiddleware):
"""Invalid return value for process_spider_output method"""
@deferred_f_from_coro_f
async def test_invalid_process_spider_output(self):
class InvalidProcessSpiderOutputMiddleware:
def process_spider_output(self, response, result):
return 1
self.mwman._add_middleware(InvalidProcessSpiderOutputMiddleware())
with pytest.raises(_InvalidOutput):
await self._scrape_response()
class TestProcessSpiderExceptionInvalidOutput(TestSpiderMiddleware):
"""Invalid return value for process_spider_exception method"""
@deferred_f_from_coro_f
async def test_invalid_process_spider_exception(self):
class InvalidProcessSpiderOutputExceptionMiddleware:
def process_spider_exception(self, response, exception):
return 1
class RaiseExceptionProcessSpiderOutputMiddleware:
def process_spider_output(self, response, result):
raise RuntimeError
self.mwman._add_middleware(InvalidProcessSpiderOutputExceptionMiddleware())
self.mwman._add_middleware(RaiseExceptionProcessSpiderOutputMiddleware())
with pytest.raises(_InvalidOutput):
await self._scrape_response()
class TestProcessSpiderExceptionReRaise(TestSpiderMiddleware):
"""Re raise the exception by returning None"""
@deferred_f_from_coro_f
async def test_process_spider_exception_return_none(self):
class ProcessSpiderExceptionReturnNoneMiddleware:
def process_spider_exception(self, response, exception):
return None
class RaiseExceptionProcessSpiderOutputMiddleware:
def process_spider_output(self, response, result):
1 / 0
self.mwman._add_middleware(ProcessSpiderExceptionReturnNoneMiddleware())
self.mwman._add_middleware(RaiseExceptionProcessSpiderOutputMiddleware())
with pytest.raises(ZeroDivisionError):
await self._scrape_response()
class TestBaseAsyncSpiderMiddleware(TestSpiderMiddleware):
"""Helpers for testing sync, async and mixed middlewares.
Should work for process_spider_output and, when it's supported, process_start.
"""
ITEM_TYPE: type | tuple
RESULT_COUNT = 3 # to simplify checks, let everything return 3 objects
@staticmethod
def _construct_mw_setting(
*mw_classes: type[Any], start_index: int | None = None
) -> dict[type[Any], int]:
if start_index is None:
start_index = 10
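        # maps middleware class -> order, e.g. (A, B) with start_index=10 -> {A: 10, B: 11}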
return {i: c for c, i in enumerate(mw_classes, start=start_index)}
def _callback(self) -> Any:
yield {"foo": 1}
yield {"foo": 2}
yield {"foo": 3}
async def _scrape_func(
self, response: Response | Failure, request: Request
) -> Iterable[Any] | AsyncIterator[Any]:
return self._callback()
async def _get_middleware_result(
self, *mw_classes: type[Any], start_index: int | None = None
) -> Any:
setting = self._construct_mw_setting(*mw_classes, start_index=start_index)
self.crawler = get_crawler(
Spider, {"SPIDER_MIDDLEWARES_BASE": {}, "SPIDER_MIDDLEWARES": setting}
)
self.crawler.spider = self.crawler._create_spider("foo")
self.mwman = SpiderMiddlewareManager.from_crawler(self.crawler)
return await self.mwman.scrape_response_async(
self._scrape_func, self.response, self.request
)
async def _test_simple_base(
self,
*mw_classes: type[Any],
downgrade: bool = False,
start_index: int | None = None,
) -> None:
with LogCapture() as log:
result = await self._get_middleware_result(
*mw_classes, start_index=start_index
)
assert isinstance(result, Iterable)
result_list = list(result)
assert len(result_list) == self.RESULT_COUNT
assert isinstance(result_list[0], self.ITEM_TYPE)
assert ("downgraded to a non-async" in str(log)) == downgrade
assert ("doesn't support asynchronous spider output" in str(log)) == (
ProcessSpiderOutputSimpleMiddleware in mw_classes
)
async def _test_asyncgen_base(
self,
*mw_classes: type[Any],
downgrade: bool = False,
start_index: int | None = None,
) -> None:
with LogCapture() as log:
result = await self._get_middleware_result(
*mw_classes, start_index=start_index
)
assert isinstance(result, AsyncIterator)
result_list = await collect_asyncgen(result)
assert len(result_list) == self.RESULT_COUNT
assert isinstance(result_list[0], self.ITEM_TYPE)
assert ("downgraded to a non-async" in str(log)) == downgrade
class ProcessSpiderOutputSimpleMiddleware:
def process_spider_output(self, response, result):
yield from result
class ProcessSpiderOutputAsyncGenMiddleware:
async def process_spider_output(self, response, result):
async for r in result:
yield r
class ProcessSpiderOutputUniversalMiddleware:
def process_spider_output(self, response, result):
yield from result
async def process_spider_output_async(self, response, result):
async for r in result:
yield r
class ProcessSpiderExceptionSimpleIterableMiddleware:
def process_spider_exception(self, response, exception):
yield {"foo": 1}
yield {"foo": 2}
yield {"foo": 3}
class ProcessSpiderExceptionAsyncIteratorMiddleware:
async def process_spider_exception(self, response, exception):
yield {"foo": 1}
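        # hop through the event loop between items so the iterator is genuinely asynchronous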
d = defer.Deferred()
call_later(0, d.callback, None)
await maybe_deferred_to_future(d)
yield {"foo": 2}
yield {"foo": 3}
class TestProcessSpiderOutputSimple(TestBaseAsyncSpiderMiddleware):
"""process_spider_output tests for simple callbacks"""
ITEM_TYPE = dict
MW_SIMPLE = ProcessSpiderOutputSimpleMiddleware
MW_ASYNCGEN = ProcessSpiderOutputAsyncGenMiddleware
MW_UNIVERSAL = ProcessSpiderOutputUniversalMiddleware
@deferred_f_from_coro_f
async def test_simple(self):
"""Simple mw"""
await self._test_simple_base(self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_asyncgen(self):
"""Asyncgen mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN)
@deferred_f_from_coro_f
async def test_simple_asyncgen(self):
"""Simple mw -> asyncgen mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_asyncgen_simple(self):
"""Asyncgen mw -> simple mw; upgrade then downgrade"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_ASYNCGEN, downgrade=True)
@deferred_f_from_coro_f
async def test_universal(self):
"""Universal mw"""
await self._test_simple_base(self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_universal_simple(self):
"""Universal mw -> simple mw"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_simple_universal(self):
"""Simple mw -> universal mw"""
await self._test_simple_base(self.MW_UNIVERSAL, self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_universal_asyncgen(self):
"""Universal mw -> asyncgen mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_asyncgen_universal(self):
"""Asyncgen mw -> universal mw; upgrade"""
await self._test_asyncgen_base(self.MW_UNIVERSAL, self.MW_ASYNCGEN)
class TestProcessSpiderOutputAsyncGen(TestProcessSpiderOutputSimple):
"""process_spider_output tests for async generator callbacks"""
async def _callback(self) -> Any:
for item in super()._callback():
yield item
@deferred_f_from_coro_f
async def test_simple(self):
"""Simple mw; downgrade"""
await self._test_simple_base(self.MW_SIMPLE, downgrade=True)
@deferred_f_from_coro_f
async def test_simple_asyncgen(self):
"""Simple mw -> asyncgen mw; downgrade then upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_SIMPLE, downgrade=True)
@deferred_f_from_coro_f
async def test_universal(self):
"""Universal mw"""
await self._test_asyncgen_base(self.MW_UNIVERSAL)
@deferred_f_from_coro_f
async def test_universal_simple(self):
"""Universal mw -> simple mw; downgrade"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_UNIVERSAL, downgrade=True)
@deferred_f_from_coro_f
async def test_simple_universal(self):
"""Simple mw -> universal mw; downgrade"""
await self._test_simple_base(self.MW_UNIVERSAL, self.MW_SIMPLE, downgrade=True)
class ProcessSpiderOutputNonIterableMiddleware:
def process_spider_output(self, response, result):
return
class ProcessSpiderOutputCoroutineMiddleware:
async def process_spider_output(self, response, result):
return result
class TestProcessSpiderOutputInvalidResult(TestBaseAsyncSpiderMiddleware):
@deferred_f_from_coro_f
async def test_non_iterable(self):
with pytest.raises(
_InvalidOutput,
match=r"\.process_spider_output must return an iterable, got <class 'NoneType'>",
):
await self._get_middleware_result(ProcessSpiderOutputNonIterableMiddleware)
@deferred_f_from_coro_f
async def test_coroutine(self):
with pytest.raises(
_InvalidOutput,
match=r"\.process_spider_output must be an asynchronous generator",
):
await self._get_middleware_result(ProcessSpiderOutputCoroutineMiddleware)
class ProcessStartSimpleMiddleware:
async def process_start(self, start):
async for item_or_request in start:
yield item_or_request
class TestProcessStartSimple(TestBaseAsyncSpiderMiddleware):
"""process_start tests for simple start"""
ITEM_TYPE = (Request, dict)
MW_SIMPLE = ProcessStartSimpleMiddleware
async def _get_processed_start(
self, *mw_classes: type[Any]
) -> AsyncIterator[Any] | None:
class TestSpider(Spider):
name = "test"
async def start(self):
for i in range(2):
yield Request(f"https://example.com/{i}", dont_filter=True)
yield {"name": "test item"}
setting = self._construct_mw_setting(*mw_classes)
self.crawler = get_crawler(
TestSpider, {"SPIDER_MIDDLEWARES_BASE": {}, "SPIDER_MIDDLEWARES": setting}
)
self.crawler.spider = self.crawler._create_spider()
self.mwman = SpiderMiddlewareManager.from_crawler(self.crawler)
return await self.mwman.process_start()
@deferred_f_from_coro_f
async def test_simple(self):
"""Simple mw"""
start = await self._get_processed_start(self.MW_SIMPLE)
assert isasyncgen(start)
start_list = await collect_asyncgen(start)
assert len(start_list) == self.RESULT_COUNT
assert isinstance(start_list[0], self.ITEM_TYPE)
class UniversalMiddlewareNoSync:
async def process_spider_output_async(self, response, result):
yield
class UniversalMiddlewareBothSync:
def process_spider_output(self, response, result):
yield
def process_spider_output_async(self, response, result):
yield
class UniversalMiddlewareBothAsync:
async def process_spider_output(self, response, result):
yield
async def process_spider_output_async(self, response, result):
yield
class TestUniversalMiddlewareManager:
@pytest.fixture
def crawler(self) -> Crawler:
return get_crawler(Spider)
@pytest.fixture
def mwman(self, crawler: Crawler) -> SpiderMiddlewareManager:
return SpiderMiddlewareManager.from_crawler(crawler)
def test_simple_mw(self, mwman: SpiderMiddlewareManager) -> None:
mw = ProcessSpiderOutputSimpleMiddleware()
mwman._add_middleware(mw)
assert (
mwman.methods["process_spider_output"][0] == mw.process_spider_output # pylint: disable=comparison-with-callable
)
def test_async_mw(self, mwman: SpiderMiddlewareManager) -> None:
mw = ProcessSpiderOutputAsyncGenMiddleware()
mwman._add_middleware(mw)
assert (
mwman.methods["process_spider_output"][0] == mw.process_spider_output # pylint: disable=comparison-with-callable
)
def test_universal_mw(self, mwman: SpiderMiddlewareManager) -> None:
mw = ProcessSpiderOutputUniversalMiddleware()
mwman._add_middleware(mw)
assert mwman.methods["process_spider_output"][0] == (
mw.process_spider_output,
mw.process_spider_output_async,
)
def test_universal_mw_no_sync(
self, mwman: SpiderMiddlewareManager, caplog: pytest.LogCaptureFixture
) -> None:
mwman._add_middleware(UniversalMiddlewareNoSync())
assert (
"UniversalMiddlewareNoSync has process_spider_output_async"
" without process_spider_output" in caplog.text
)
assert mwman.methods["process_spider_output"][0] is None
def test_universal_mw_both_sync(
self, mwman: SpiderMiddlewareManager, caplog: pytest.LogCaptureFixture
) -> None:
mw = UniversalMiddlewareBothSync()
mwman._add_middleware(mw)
assert (
"UniversalMiddlewareBothSync.process_spider_output_async "
"is not an async generator function" in caplog.text
)
assert (
mwman.methods["process_spider_output"][0] == mw.process_spider_output # pylint: disable=comparison-with-callable
)
def test_universal_mw_both_async(
self, mwman: SpiderMiddlewareManager, caplog: pytest.LogCaptureFixture
) -> None:
mwman._add_middleware(UniversalMiddlewareBothAsync())
assert (
"UniversalMiddlewareBothAsync.process_spider_output "
"is an async generator function while process_spider_output_async exists"
in caplog.text
)
assert mwman.methods["process_spider_output"][0] is None
class TestBuiltinMiddlewareSimple(TestBaseAsyncSpiderMiddleware):
ITEM_TYPE = dict
MW_SIMPLE = ProcessSpiderOutputSimpleMiddleware
MW_ASYNCGEN = ProcessSpiderOutputAsyncGenMiddleware
MW_UNIVERSAL = ProcessSpiderOutputUniversalMiddleware
async def _get_middleware_result(
self, *mw_classes: type[Any], start_index: int | None = None
) -> Any:
setting = self._construct_mw_setting(*mw_classes, start_index=start_index)
self.crawler = get_crawler(Spider, {"SPIDER_MIDDLEWARES": setting})
self.crawler.spider = self.crawler._create_spider("foo")
self.mwman = SpiderMiddlewareManager.from_crawler(self.crawler)
return await self.mwman.scrape_response_async(
self._scrape_func, self.response, self.request
)
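    # start_index=1000 places the test middleware after the built-in ones, whose default orders are below 1000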
@deferred_f_from_coro_f
async def test_just_builtin(self):
await self._test_simple_base()
@deferred_f_from_coro_f
async def test_builtin_simple(self):
await self._test_simple_base(self.MW_SIMPLE, start_index=1000)
@deferred_f_from_coro_f
async def test_builtin_async(self):
"""Upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, start_index=1000)
@deferred_f_from_coro_f
async def test_builtin_universal(self):
await self._test_simple_base(self.MW_UNIVERSAL, start_index=1000)
@deferred_f_from_coro_f
async def test_simple_builtin(self):
await self._test_simple_base(self.MW_SIMPLE)
@deferred_f_from_coro_f
async def test_async_builtin(self):
"""Upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN)
@deferred_f_from_coro_f
async def test_universal_builtin(self):
await self._test_simple_base(self.MW_UNIVERSAL)
class TestBuiltinMiddlewareAsyncGen(TestBuiltinMiddlewareSimple):
async def _callback(self) -> Any:
for item in super()._callback():
yield item
@deferred_f_from_coro_f
async def test_just_builtin(self):
await self._test_asyncgen_base()
@deferred_f_from_coro_f
async def test_builtin_simple(self):
"""Downgrade"""
await self._test_simple_base(self.MW_SIMPLE, downgrade=True, start_index=1000)
@deferred_f_from_coro_f
async def test_builtin_async(self):
await self._test_asyncgen_base(self.MW_ASYNCGEN, start_index=1000)
@deferred_f_from_coro_f
async def test_builtin_universal(self):
await self._test_asyncgen_base(self.MW_UNIVERSAL, start_index=1000)
@deferred_f_from_coro_f
async def test_simple_builtin(self):
"""Downgrade"""
await self._test_simple_base(self.MW_SIMPLE, downgrade=True)
@deferred_f_from_coro_f
async def test_async_builtin(self):
await self._test_asyncgen_base(self.MW_ASYNCGEN)
@deferred_f_from_coro_f
async def test_universal_builtin(self):
await self._test_asyncgen_base(self.MW_UNIVERSAL)
class TestProcessSpiderException(TestBaseAsyncSpiderMiddleware):
ITEM_TYPE = dict
MW_SIMPLE = ProcessSpiderOutputSimpleMiddleware
MW_ASYNCGEN = ProcessSpiderOutputAsyncGenMiddleware
MW_UNIVERSAL = ProcessSpiderOutputUniversalMiddleware
MW_EXC_SIMPLE = ProcessSpiderExceptionSimpleIterableMiddleware
MW_EXC_ASYNCGEN = ProcessSpiderExceptionAsyncIteratorMiddleware
def _callback(self) -> Any:
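        # the callback always raises, so the process_spider_exception chain is exercised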
1 / 0
async def _test_asyncgen_nodowngrade(self, *mw_classes: type[Any]) -> None:
with pytest.raises(
_InvalidOutput,
match=r"Async iterable returned from .+ cannot be downgraded",
):
await self._get_middleware_result(*mw_classes)
@deferred_f_from_coro_f
async def test_exc_simple(self):
"""Simple exc mw"""
await self._test_simple_base(self.MW_EXC_SIMPLE)
@deferred_f_from_coro_f
async def test_exc_async(self):
"""Async exc mw"""
await self._test_asyncgen_base(self.MW_EXC_ASYNCGEN)
@deferred_f_from_coro_f
async def test_exc_simple_simple(self):
"""Simple exc mw -> simple output mw"""
await self._test_simple_base(self.MW_SIMPLE, self.MW_EXC_SIMPLE)
@deferred_f_from_coro_f
async def test_exc_async_async(self):
"""Async exc mw -> async output mw"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_EXC_ASYNCGEN)
@deferred_f_from_coro_f
async def test_exc_simple_async(self):
"""Simple exc mw -> async output mw; upgrade"""
await self._test_asyncgen_base(self.MW_ASYNCGEN, self.MW_EXC_SIMPLE)
@deferred_f_from_coro_f
async def test_exc_async_simple(self):
"""Async exc mw -> simple output mw; cannot work as downgrading is not supported"""
await self._test_asyncgen_nodowngrade(self.MW_SIMPLE, self.MW_EXC_ASYNCGEN)
class TestDeprecatedSpiderArg(TestSpiderMiddleware):
@deferred_f_from_coro_f
async def test_deprecated_mw_spider_arg(self):
class DeprecatedSpiderArgMiddleware:
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
1 / 0
def process_spider_exception(self, response, exception, spider):
return []
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"process_spider_input\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"process_spider_output\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"process_spider_exception\(\) requires a spider argument",
),
):
self.mwman._add_middleware(DeprecatedSpiderArgMiddleware())
await self._scrape_response()
@deferred_f_from_coro_f
async def test_deprecated_mwman_spider_arg(self):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a spider argument to SpiderMiddlewareManager.process_start\(\)"
r" is deprecated and the passed value is ignored",
):
await self.mwman.process_start(DefaultSpider())
@deferred_f_from_coro_f
async def test_deprecated_mwman_spider_arg_no_crawler(self):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"MiddlewareManager.__init__\(\) was called without the crawler argument",
):
mwman = SpiderMiddlewareManager()
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a spider argument to SpiderMiddlewareManager.process_start\(\)"
r" is deprecated, SpiderMiddlewareManager should be instantiated with a Crawler",
):
await mwman.process_start(DefaultSpider())
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_template.py | tests/test_utils_template.py | from scrapy.utils.template import render_templatefile
def test_simple_render(tmp_path):
context = {"project_name": "proj", "name": "spi", "classname": "TheSpider"}
template = "from ${project_name}.spiders.${name} import ${classname}"
rendered = "from proj.spiders.spi import TheSpider"
template_path = tmp_path / "templ.py.tmpl"
render_path = tmp_path / "templ.py"
template_path.write_text(template, encoding="utf8")
assert template_path.is_file() # Failure of test itself
render_templatefile(template_path, **context)
assert not template_path.exists()
assert render_path.read_text(encoding="utf8") == rendered
render_path.unlink()
assert not render_path.exists() # Failure of test itself
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_serialize.py | tests/test_utils_serialize.py | import dataclasses
import datetime
import json
from decimal import Decimal
import attr
import pytest
from twisted.internet import defer
from scrapy.http import Request, Response
from scrapy.utils.serialize import ScrapyJSONEncoder
class TestJsonEncoder:
@pytest.fixture
def encoder(self) -> ScrapyJSONEncoder:
return ScrapyJSONEncoder(sort_keys=True)
def test_encode_decode(self, encoder: ScrapyJSONEncoder) -> None:
dt = datetime.datetime(2010, 1, 2, 10, 11, 12)
dts = "2010-01-02 10:11:12"
d = datetime.date(2010, 1, 2)
ds = "2010-01-02"
t = datetime.time(10, 11, 12)
ts = "10:11:12"
dec = Decimal("1000.12")
decs = "1000.12"
s = {"foo"}
ss = ["foo"]
dt_set = {dt}
dt_sets = [dts]
for input_, output in [
("foo", "foo"),
(d, ds),
(t, ts),
(dt, dts),
(dec, decs),
(["foo", d], ["foo", ds]),
(s, ss),
(dt_set, dt_sets),
]:
assert encoder.encode(input_) == json.dumps(output, sort_keys=True)
def test_encode_deferred(self, encoder: ScrapyJSONEncoder) -> None:
assert "Deferred" in encoder.encode(defer.Deferred())
def test_encode_request(self, encoder: ScrapyJSONEncoder) -> None:
r = Request("http://www.example.com/lala")
rs = encoder.encode(r)
assert r.method in rs
assert r.url in rs
def test_encode_response(self, encoder: ScrapyJSONEncoder) -> None:
r = Response("http://www.example.com/lala")
rs = encoder.encode(r)
assert r.url in rs
assert str(r.status) in rs
def test_encode_dataclass_item(self, encoder: ScrapyJSONEncoder) -> None:
@dataclasses.dataclass
class TestDataClass:
name: str
url: str
price: int
item = TestDataClass(name="Product", url="http://product.org", price=1)
encoded = encoder.encode(item)
assert encoded == '{"name": "Product", "price": 1, "url": "http://product.org"}'
def test_encode_attrs_item(self, encoder: ScrapyJSONEncoder) -> None:
@attr.s
class AttrsItem:
name = attr.ib(type=str)
url = attr.ib(type=str)
price = attr.ib(type=int)
item = AttrsItem(name="Product", url="http://product.org", price=1)
encoded = encoder.encode(item)
assert encoded == '{"name": "Product", "price": 1, "url": "http://product.org"}'
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_url.py | tests/test_utils_url.py | import warnings
from importlib import import_module
import pytest
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.spiders import Spider
from scrapy.utils.url import ( # type: ignore[attr-defined]
_is_filesystem_path,
_public_w3lib_objects,
add_http_if_no_scheme,
guess_scheme,
strip_url,
url_has_any_extension,
url_is_from_any_domain,
url_is_from_spider,
)
def test_url_is_from_any_domain():
url = "http://www.wheele-bin-art.co.uk/get/product/123"
assert url_is_from_any_domain(url, ["wheele-bin-art.co.uk"])
assert not url_is_from_any_domain(url, ["art.co.uk"])
url = "http://wheele-bin-art.co.uk/get/product/123"
assert url_is_from_any_domain(url, ["wheele-bin-art.co.uk"])
assert not url_is_from_any_domain(url, ["art.co.uk"])
url = "http://www.Wheele-Bin-Art.co.uk/get/product/123"
assert url_is_from_any_domain(url, ["wheele-bin-art.CO.UK"])
assert url_is_from_any_domain(url, ["WHEELE-BIN-ART.CO.UK"])
url = "http://192.169.0.15:8080/mypage.html"
assert url_is_from_any_domain(url, ["192.169.0.15:8080"])
assert not url_is_from_any_domain(url, ["192.169.0.15"])
url = (
"javascript:%20document.orderform_2581_1190810811.mode.value=%27add%27;%20"
"javascript:%20document.orderform_2581_1190810811.submit%28%29"
)
assert not url_is_from_any_domain(url, ["testdomain.com"])
assert not url_is_from_any_domain(url + ".testdomain.com", ["testdomain.com"])
def test_url_is_from_spider():
class MySpider(Spider):
name = "example.com"
assert url_is_from_spider("http://www.example.com/some/page.html", MySpider)
assert url_is_from_spider("http://sub.example.com/some/page.html", MySpider)
assert not url_is_from_spider("http://www.example.org/some/page.html", MySpider)
assert not url_is_from_spider("http://www.example.net/some/page.html", MySpider)
def test_url_is_from_spider_class_attributes():
class MySpider(Spider):
name = "example.com"
assert url_is_from_spider("http://www.example.com/some/page.html", MySpider)
assert url_is_from_spider("http://sub.example.com/some/page.html", MySpider)
assert not url_is_from_spider("http://www.example.org/some/page.html", MySpider)
assert not url_is_from_spider("http://www.example.net/some/page.html", MySpider)
def test_url_is_from_spider_with_allowed_domains():
class MySpider(Spider):
name = "example.com"
allowed_domains = ["example.org", "example.net"]
assert url_is_from_spider("http://www.example.com/some/page.html", MySpider)
assert url_is_from_spider("http://sub.example.com/some/page.html", MySpider)
assert url_is_from_spider("http://example.com/some/page.html", MySpider)
assert url_is_from_spider("http://www.example.org/some/page.html", MySpider)
assert url_is_from_spider("http://www.example.net/some/page.html", MySpider)
assert not url_is_from_spider("http://www.example.us/some/page.html", MySpider)
class MySpider2(Spider):
name = "example.com"
allowed_domains = {"example.com", "example.net"}
assert url_is_from_spider("http://www.example.com/some/page.html", MySpider2)
class MySpider3(Spider):
name = "example.com"
allowed_domains = ("example.com", "example.net")
assert url_is_from_spider("http://www.example.com/some/page.html", MySpider3)
@pytest.mark.parametrize(
("url", "expected"),
[
("http://www.example.com/archive.tar.gz", True),
("http://www.example.com/page.doc", True),
("http://www.example.com/page.pdf", True),
("http://www.example.com/page.htm", False),
("http://www.example.com/", False),
("http://www.example.com/page.doc.html", False),
],
)
def test_url_has_any_extension(url: str, expected: bool) -> None:
deny_extensions = {"." + e for e in IGNORED_EXTENSIONS}
assert url_has_any_extension(url, deny_extensions) is expected
@pytest.mark.parametrize(
("url", "expected"),
[
("www.example.com", "http://www.example.com"),
("example.com", "http://example.com"),
("www.example.com/some/page.html", "http://www.example.com/some/page.html"),
("www.example.com:80", "http://www.example.com:80"),
("www.example.com/some/page#frag", "http://www.example.com/some/page#frag"),
("www.example.com/do?a=1&b=2&c=3", "http://www.example.com/do?a=1&b=2&c=3"),
(
"username:password@www.example.com",
"http://username:password@www.example.com",
),
(
"username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag",
"http://username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag",
),
("http://www.example.com", "http://www.example.com"),
("http://example.com", "http://example.com"),
(
"http://www.example.com/some/page.html",
"http://www.example.com/some/page.html",
),
("http://www.example.com:80", "http://www.example.com:80"),
(
"http://www.example.com/some/page#frag",
"http://www.example.com/some/page#frag",
),
(
"http://www.example.com/do?a=1&b=2&c=3",
"http://www.example.com/do?a=1&b=2&c=3",
),
(
"http://username:password@www.example.com",
"http://username:password@www.example.com",
),
(
"http://username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag",
"http://username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag",
),
("//www.example.com", "http://www.example.com"),
("//example.com", "http://example.com"),
("//www.example.com/some/page.html", "http://www.example.com/some/page.html"),
("//www.example.com:80", "http://www.example.com:80"),
("//www.example.com/some/page#frag", "http://www.example.com/some/page#frag"),
("//www.example.com/do?a=1&b=2&c=3", "http://www.example.com/do?a=1&b=2&c=3"),
(
"//username:password@www.example.com",
"http://username:password@www.example.com",
),
(
"//username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag",
"http://username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag",
),
("https://www.example.com", "https://www.example.com"),
("ftp://www.example.com", "ftp://www.example.com"),
],
)
def test_add_http_if_no_scheme(url: str, expected: str) -> None:
assert add_http_if_no_scheme(url) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
("/index", "file://"),
("/index.html", "file://"),
("./index.html", "file://"),
("../index.html", "file://"),
("../../index.html", "file://"),
("./data/index.html", "file://"),
(".hidden/data/index.html", "file://"),
("/home/user/www/index.html", "file://"),
("//home/user/www/index.html", "file://"),
("file:///home/user/www/index.html", "file://"),
("index.html", "http://"),
("example.com", "http://"),
("www.example.com", "http://"),
("www.example.com/index.html", "http://"),
("http://example.com", "http://"),
("http://example.com/index.html", "http://"),
("localhost", "http://"),
("localhost/index.html", "http://"),
# some corner cases (default to http://)
("/", "http://"),
(".../test", "http://"),
],
)
def test_guess_scheme(url: str, expected: str):
assert guess_scheme(url).startswith(expected)
@pytest.mark.parametrize(
("url", "expected", "reason"),
[
(
r"C:\absolute\path\to\a\file.html",
"file://",
"Windows filepath are not supported for scrapy shell",
),
],
)
def test_guess_scheme_skipped(url: str, expected: str, reason: str):
pytest.skip(reason)
class TestStripUrl:
@pytest.mark.parametrize(
"url",
[
"http://www.example.com/index.html",
"http://www.example.com/index.html?somekey=somevalue",
],
)
def test_noop(self, url: str) -> None:
assert strip_url(url) == url
def test_fragments(self):
assert (
strip_url(
"http://www.example.com/index.html?somekey=somevalue#section",
strip_fragment=False,
)
== "http://www.example.com/index.html?somekey=somevalue#section"
)
@pytest.mark.parametrize(
("url", "origin", "expected"),
[
("http://www.example.com/", False, "http://www.example.com/"),
("http://www.example.com", False, "http://www.example.com"),
("http://www.example.com", True, "http://www.example.com/"),
],
)
def test_path(self, url: str, origin: bool, expected: str) -> None:
assert strip_url(url, origin_only=origin) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username@www.example.com/index.html?somekey=somevalue#section",
"http://www.example.com/index.html?somekey=somevalue",
),
(
"https://username:@www.example.com/index.html?somekey=somevalue#section",
"https://www.example.com/index.html?somekey=somevalue",
),
(
"ftp://username:password@www.example.com/index.html?somekey=somevalue#section",
"ftp://www.example.com/index.html?somekey=somevalue",
),
# user: "username@", password: none
(
"http://username%40@www.example.com/index.html?somekey=somevalue#section",
"http://www.example.com/index.html?somekey=somevalue",
),
# user: "username:pass", password: ""
(
"https://username%3Apass:@www.example.com/index.html?somekey=somevalue#section",
"https://www.example.com/index.html?somekey=somevalue",
),
# user: "me", password: "user@domain.com"
(
"ftp://me:user%40domain.com@www.example.com/index.html?somekey=somevalue#section",
"ftp://www.example.com/index.html?somekey=somevalue",
),
],
)
def test_credentials(self, url: str, expected: str) -> None:
assert strip_url(url, strip_credentials=True) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com:80/index.html?somekey=somevalue#section",
"http://www.example.com/index.html?somekey=somevalue",
),
(
"http://username:password@www.example.com:8080/index.html#section",
"http://www.example.com:8080/index.html",
),
(
"http://username:password@www.example.com:443/index.html?somekey=somevalue&someotherkey=sov#section",
"http://www.example.com:443/index.html?somekey=somevalue&someotherkey=sov",
),
(
"https://username:password@www.example.com:443/index.html",
"https://www.example.com/index.html",
),
(
"https://username:password@www.example.com:442/index.html",
"https://www.example.com:442/index.html",
),
(
"https://username:password@www.example.com:80/index.html",
"https://www.example.com:80/index.html",
),
(
"ftp://username:password@www.example.com:21/file.txt",
"ftp://www.example.com/file.txt",
),
(
"ftp://username:password@www.example.com:221/file.txt",
"ftp://www.example.com:221/file.txt",
),
],
)
def test_default_ports_creds_off(self, url: str, expected: str) -> None:
assert strip_url(url) == expected
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com:80/index.html",
"http://username:password@www.example.com/index.html",
),
(
"http://username:password@www.example.com:8080/index.html",
"http://username:password@www.example.com:8080/index.html",
),
(
"http://username:password@www.example.com:443/index.html",
"http://username:password@www.example.com:443/index.html",
),
(
"https://username:password@www.example.com:443/index.html",
"https://username:password@www.example.com/index.html",
),
(
"https://username:password@www.example.com:442/index.html",
"https://username:password@www.example.com:442/index.html",
),
(
"https://username:password@www.example.com:80/index.html",
"https://username:password@www.example.com:80/index.html",
),
(
"ftp://username:password@www.example.com:21/file.txt",
"ftp://username:password@www.example.com/file.txt",
),
(
"ftp://username:password@www.example.com:221/file.txt",
"ftp://username:password@www.example.com:221/file.txt",
),
],
)
def test_default_ports(self, url: str, expected: str) -> None:
assert (
strip_url(url, strip_default_port=True, strip_credentials=False) == expected
)
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com:80/index.html?somekey=somevalue&someotherkey=sov#section",
"http://username:password@www.example.com:80/index.html?somekey=somevalue&someotherkey=sov",
),
(
"http://username:password@www.example.com:8080/index.html?somekey=somevalue&someotherkey=sov#section",
"http://username:password@www.example.com:8080/index.html?somekey=somevalue&someotherkey=sov",
),
(
"http://username:password@www.example.com:443/index.html",
"http://username:password@www.example.com:443/index.html",
),
(
"https://username:password@www.example.com:443/index.html",
"https://username:password@www.example.com:443/index.html",
),
(
"https://username:password@www.example.com:442/index.html",
"https://username:password@www.example.com:442/index.html",
),
(
"https://username:password@www.example.com:80/index.html",
"https://username:password@www.example.com:80/index.html",
),
(
"ftp://username:password@www.example.com:21/file.txt",
"ftp://username:password@www.example.com:21/file.txt",
),
(
"ftp://username:password@www.example.com:221/file.txt",
"ftp://username:password@www.example.com:221/file.txt",
),
],
)
def test_default_ports_keep(self, url: str, expected: str) -> None:
assert (
strip_url(url, strip_default_port=False, strip_credentials=False)
== expected
)
@pytest.mark.parametrize(
("url", "expected"),
[
(
"http://username:password@www.example.com/index.html",
"http://www.example.com/",
),
(
"http://username:password@www.example.com:80/foo/bar?query=value#somefrag",
"http://www.example.com/",
),
(
"http://username:password@www.example.com:8008/foo/bar?query=value#somefrag",
"http://www.example.com:8008/",
),
(
"https://username:password@www.example.com:443/index.html",
"https://www.example.com/",
),
],
)
def test_origin_only(self, url: str, expected: str) -> None:
assert strip_url(url, origin_only=True) == expected
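# A combined illustration (sketch only, no behaviour beyond what the cases
# above already assert individually): the strip_url keyword flags can be
# combined in a single call.
def _strip_url_combined_example():
    url = "http://user:pass@www.example.com:80/index.html?k=v#frag"
    return strip_url(
        url,
        strip_credentials=True,
        strip_default_port=True,
        strip_fragment=True,
    )  # -> "http://www.example.com/index.html?k=v"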
@pytest.mark.parametrize(
("path", "expected"),
[
# https://en.wikipedia.org/wiki/Path_(computing)#Representations_of_paths_by_operating_system_and_shell
# Unix-like OS, Microsoft Windows / cmd.exe
("/home/user/docs/Letter.txt", True),
("./inthisdir", True),
("../../greatgrandparent", True),
("~/.rcinfo", True),
(r"C:\user\docs\Letter.txt", True),
("/user/docs/Letter.txt", True),
(r"C:\Letter.txt", True),
(r"\\Server01\user\docs\Letter.txt", True),
(r"\\?\UNC\Server01\user\docs\Letter.txt", True),
(r"\\?\C:\user\docs\Letter.txt", True),
(r"C:\user\docs\somefile.ext:alternate_stream_name", True),
(r"https://example.com", False),
],
)
def test__is_filesystem_path(path: str, expected: bool) -> None:
assert _is_filesystem_path(path) == expected
@pytest.mark.parametrize(
"obj_name",
[
"_unquotepath",
"_safe_chars",
"parse_url",
*_public_w3lib_objects,
],
)
def test_deprecated_imports_from_w3lib(obj_name: str) -> None:
with warnings.catch_warnings(record=True) as warns:
obj_type = "attribute" if obj_name == "_safe_chars" else "function"
message = f"The scrapy.utils.url.{obj_name} {obj_type} is deprecated, use w3lib.url.{obj_name} instead."
getattr(import_module("scrapy.utils.url"), obj_name)
assert isinstance(warns[0].message, Warning)
assert message in warns[0].message.args
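# Migration example implied by the deprecation message above (illustrative):
# import the helper from w3lib directly instead of through scrapy.utils.url.
def _preferred_import_example():
    from w3lib.url import parse_url  # no ScrapyDeprecationWarning this way

    return parse_url("http://www.example.com/index.html").netloc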
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_depth.py | tests/test_spidermiddleware_depth.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from scrapy.http import Request, Response
from scrapy.spidermiddlewares.depth import DepthMiddleware
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from collections.abc import Generator
from scrapy.crawler import Crawler
from scrapy.statscollectors import StatsCollector
@pytest.fixture
def crawler() -> Crawler:
return get_crawler(Spider, {"DEPTH_LIMIT": 1, "DEPTH_STATS_VERBOSE": True})
@pytest.fixture
def stats(crawler: Crawler) -> Generator[StatsCollector]:
assert crawler.stats is not None
crawler.stats.open_spider()
yield crawler.stats
crawler.stats.close_spider()
@pytest.fixture
def mw(crawler: Crawler) -> DepthMiddleware:
return DepthMiddleware.from_crawler(crawler)
def test_process_spider_output(mw: DepthMiddleware, stats: StatsCollector) -> None:
req = Request("http://scrapytest.org")
resp = Response("http://scrapytest.org")
resp.request = req
result = [Request("http://scrapytest.org")]
out = list(mw.process_spider_output(resp, result))
assert out == result
rdc = stats.get_value("request_depth_count/1")
assert rdc == 1
req.meta["depth"] = 1
out2 = list(mw.process_spider_output(resp, result))
assert not out2
rdm = stats.get_value("request_depth_max")
assert rdm == 1
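# In outline, the behaviour the assertions above exercise (an illustrative
# simplification, not DepthMiddleware's actual source): each yielded request
# gets depth = parent depth + 1 stored in request.meta["depth"], per-depth
# stats are recorded when DEPTH_STATS_VERBOSE is on, and with DEPTH_LIMIT=1
# the second pass (parent depth 1 -> child depth 2) yields nothing.
def _depth_filter_sketch(parent_depth: int, depth_limit: int) -> bool:
    child_depth = parent_depth + 1  # what would go into request.meta["depth"]
    return depth_limit == 0 or child_depth <= depth_limit  # keep the request?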
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_http_response.py | tests/test_http_response.py | import codecs
from unittest import mock
import pytest
from packaging.version import Version as parse_version
from w3lib import __version__ as w3lib_version
from w3lib.encoding import resolve_encoding
from scrapy.exceptions import NotSupported
from scrapy.http import (
Headers,
HtmlResponse,
Request,
Response,
TextResponse,
XmlResponse,
)
from scrapy.link import Link
from scrapy.selector import Selector
from scrapy.utils.python import to_unicode
from tests import get_testdata
class TestResponseBase:
response_class = Response
def test_init(self):
# Response requires url in the constructor
with pytest.raises(TypeError):
self.response_class()
assert isinstance(
self.response_class("http://example.com/"), self.response_class
)
with pytest.raises(TypeError):
self.response_class(b"http://example.com")
with pytest.raises(TypeError):
self.response_class(url="http://example.com", body={})
# body can be bytes or None
assert isinstance(
self.response_class("http://example.com/", body=b""),
self.response_class,
)
assert isinstance(
self.response_class("http://example.com/", body=b"body"),
self.response_class,
)
# test presence of all optional parameters
assert isinstance(
self.response_class(
"http://example.com/", body=b"", headers={}, status=200
),
self.response_class,
)
r = self.response_class("http://www.example.com")
assert isinstance(r.url, str)
assert r.url == "http://www.example.com"
assert r.status == 200
assert isinstance(r.headers, Headers)
assert not r.headers
headers = {"foo": "bar"}
body = b"a body"
r = self.response_class("http://www.example.com", headers=headers, body=body)
assert r.headers is not headers
assert r.headers[b"foo"] == b"bar"
r = self.response_class("http://www.example.com", status=301)
assert r.status == 301
r = self.response_class("http://www.example.com", status="301")
assert r.status == 301
with pytest.raises(ValueError, match=r"invalid literal for int\(\)"):
self.response_class("http://example.com", status="lala200")
def test_copy(self):
"""Test Response copy"""
r1 = self.response_class("http://www.example.com", body=b"Some body")
r1.flags.append("cached")
r2 = r1.copy()
assert r1.status == r2.status
assert r1.body == r2.body
# make sure flags list is shallow copied
assert r1.flags is not r2.flags, "flags must be a shallow copy, not identical"
assert r1.flags == r2.flags
# make sure headers attribute is shallow copied
assert r1.headers is not r2.headers, (
"headers must be a shallow copy, not identical"
)
assert r1.headers == r2.headers
def test_copy_meta(self):
req = Request("http://www.example.com")
req.meta["foo"] = "bar"
r1 = self.response_class(
"http://www.example.com", body=b"Some body", request=req
)
assert r1.meta is req.meta
def test_copy_cb_kwargs(self):
req = Request("http://www.example.com")
req.cb_kwargs["foo"] = "bar"
r1 = self.response_class(
"http://www.example.com", body=b"Some body", request=req
)
assert r1.cb_kwargs is req.cb_kwargs
def test_unavailable_meta(self):
r1 = self.response_class("http://www.example.com", body=b"Some body")
with pytest.raises(AttributeError, match=r"Response\.meta not available"):
r1.meta
def test_unavailable_cb_kwargs(self):
r1 = self.response_class("http://www.example.com", body=b"Some body")
with pytest.raises(AttributeError, match=r"Response\.cb_kwargs not available"):
r1.cb_kwargs
def test_copy_inherited_classes(self):
"""Test Response children copies preserve their class"""
class CustomResponse(self.response_class):
pass
r1 = CustomResponse("http://www.example.com")
r2 = r1.copy()
assert isinstance(r2, CustomResponse)
def test_replace(self):
"""Test Response.replace() method"""
hdrs = Headers({"key": "value"})
r1 = self.response_class("http://www.example.com")
r2 = r1.replace(status=301, body=b"New body", headers=hdrs)
assert r1.body == b""
assert r1.url == r2.url
assert (r1.status, r2.status) == (200, 301)
assert (r1.body, r2.body) == (b"", b"New body")
assert (r1.headers, r2.headers) == ({}, hdrs)
# Empty attributes (which may fail if not compared properly)
r3 = self.response_class("http://www.example.com", flags=["cached"])
r4 = r3.replace(body=b"", flags=[])
assert r4.body == b""
assert not r4.flags
def _assert_response_values(self, response, encoding, body):
if isinstance(body, str):
body_unicode = body
body_bytes = body.encode(encoding)
else:
body_unicode = body.decode(encoding)
body_bytes = body
assert isinstance(response.body, bytes)
assert isinstance(response.text, str)
self._assert_response_encoding(response, encoding)
assert response.body == body_bytes
assert response.text == body_unicode
def _assert_response_encoding(self, response, encoding):
assert response.encoding == resolve_encoding(encoding)
def test_immutable_attributes(self):
r = self.response_class("http://example.com")
with pytest.raises(AttributeError):
r.url = "http://example2.com"
with pytest.raises(AttributeError):
r.body = "xxx"
def test_urljoin(self):
"""Test urljoin shortcut (only for existence, since behavior equals urljoin)"""
joined = self.response_class("http://www.example.com").urljoin("/test")
absolute = "http://www.example.com/test"
assert joined == absolute
def test_shortcut_attributes(self):
r = self.response_class("http://example.com", body=b"hello")
if self.response_class == Response:
msg = "Response content isn't text"
with pytest.raises(AttributeError, match=msg):
r.text
with pytest.raises(NotSupported, match=msg):
r.css("body")
with pytest.raises(NotSupported, match=msg):
r.xpath("//body")
with pytest.raises(NotSupported, match=msg):
r.jmespath("body")
else:
r.text
r.css("body")
r.xpath("//body")
# Response.follow
def test_follow_url_absolute(self):
self._assert_followed_url("http://foo.example.com", "http://foo.example.com")
def test_follow_url_relative(self):
self._assert_followed_url("foo", "http://example.com/foo")
def test_follow_link(self):
self._assert_followed_url(
Link("http://example.com/foo"), "http://example.com/foo"
)
def test_follow_None_url(self):
r = self.response_class("http://example.com")
with pytest.raises(ValueError, match="url can't be None"):
r.follow(None)
@pytest.mark.xfail(
parse_version(w3lib_version) < parse_version("2.1.1"),
reason="https://github.com/scrapy/w3lib/pull/207",
strict=True,
)
def test_follow_whitespace_url(self):
self._assert_followed_url("foo ", "http://example.com/foo")
@pytest.mark.xfail(
parse_version(w3lib_version) < parse_version("2.1.1"),
reason="https://github.com/scrapy/w3lib/pull/207",
strict=True,
)
def test_follow_whitespace_link(self):
self._assert_followed_url(
Link("http://example.com/foo "), "http://example.com/foo"
)
def test_follow_flags(self):
res = self.response_class("http://example.com/")
fol = res.follow("http://example.com/", flags=["cached", "allowed"])
assert fol.flags == ["cached", "allowed"]
# Response.follow_all
def test_follow_all_absolute(self):
url_list = [
"http://example.org",
"http://www.example.org",
"http://example.com",
"http://www.example.com",
]
self._assert_followed_all_urls(url_list, url_list)
def test_follow_all_relative(self):
relative = ["foo", "bar", "foo/bar", "bar/foo"]
absolute = [
"http://example.com/foo",
"http://example.com/bar",
"http://example.com/foo/bar",
"http://example.com/bar/foo",
]
self._assert_followed_all_urls(relative, absolute)
def test_follow_all_links(self):
absolute = [
"http://example.com/foo",
"http://example.com/bar",
"http://example.com/foo/bar",
"http://example.com/bar/foo",
]
links = map(Link, absolute)
self._assert_followed_all_urls(links, absolute)
def test_follow_all_empty(self):
r = self.response_class("http://example.com")
assert not list(r.follow_all([]))
def test_follow_all_invalid(self):
r = self.response_class("http://example.com")
if self.response_class == Response:
with pytest.raises(TypeError):
list(r.follow_all(urls=None))
with pytest.raises(TypeError):
list(r.follow_all(urls=12345))
with pytest.raises(ValueError, match="url can't be None"):
list(r.follow_all(urls=[None]))
else:
with pytest.raises(
ValueError, match="Please supply exactly one of the following arguments"
):
list(r.follow_all(urls=None))
with pytest.raises(TypeError):
list(r.follow_all(urls=12345))
with pytest.raises(ValueError, match="url can't be None"):
list(r.follow_all(urls=[None]))
def test_follow_all_whitespace(self):
relative = ["foo ", "bar ", "foo/bar ", "bar/foo "]
absolute = [
"http://example.com/foo%20",
"http://example.com/bar%20",
"http://example.com/foo/bar%20",
"http://example.com/bar/foo%20",
]
self._assert_followed_all_urls(relative, absolute)
def test_follow_all_whitespace_links(self):
absolute = [
"http://example.com/foo ",
"http://example.com/bar ",
"http://example.com/foo/bar ",
"http://example.com/bar/foo ",
]
links = map(Link, absolute)
expected = [u.replace(" ", "%20") for u in absolute]
self._assert_followed_all_urls(links, expected)
def test_follow_all_flags(self):
re = self.response_class("http://www.example.com/")
urls = [
"http://www.example.com/",
"http://www.example.com/2",
"http://www.example.com/foo",
]
fol = re.follow_all(urls, flags=["cached", "allowed"])
for req in fol:
assert req.flags == ["cached", "allowed"]
def _assert_followed_url(self, follow_obj, target_url, response=None):
if response is None:
response = self._links_response()
req = response.follow(follow_obj)
assert req.url == target_url
return req
def _assert_followed_all_urls(self, follow_obj, target_urls, response=None):
if response is None:
response = self._links_response()
followed = response.follow_all(follow_obj)
for req, target in zip(followed, target_urls, strict=False):
assert req.url == target
yield req
def _links_response(self):
body = get_testdata("link_extractor", "linkextractor.html")
return self.response_class("http://example.com/index", body=body)
def _links_response_no_href(self):
body = get_testdata("link_extractor", "linkextractor_no_href.html")
return self.response_class("http://example.com/index", body=body)
class TestTextResponse(TestResponseBase):
response_class = TextResponse
def test_replace(self):
super().test_replace()
r1 = self.response_class(
"http://www.example.com", body="hello", encoding="cp852"
)
r2 = r1.replace(url="http://www.example.com/other")
r3 = r1.replace(url="http://www.example.com/other", encoding="latin1")
assert isinstance(r2, self.response_class)
assert r2.url == "http://www.example.com/other"
self._assert_response_encoding(r2, "cp852")
assert r3.url == "http://www.example.com/other"
assert r3._declared_encoding() == "latin1"
def test_unicode_url(self):
# instantiate with unicode url without encoding (should set default encoding)
resp = self.response_class("http://www.example.com/")
self._assert_response_encoding(resp, self.response_class._DEFAULT_ENCODING)
# make sure urls are converted to str
resp = self.response_class(url="http://www.example.com/", encoding="utf-8")
assert isinstance(resp.url, str)
resp = self.response_class(
url="http://www.example.com/price/\xa3", encoding="utf-8"
)
assert resp.url == to_unicode(b"http://www.example.com/price/\xc2\xa3")
resp = self.response_class(
url="http://www.example.com/price/\xa3", encoding="latin-1"
)
assert resp.url == "http://www.example.com/price/\xa3"
resp = self.response_class(
"http://www.example.com/price/\xa3",
headers={"Content-type": ["text/html; charset=utf-8"]},
)
assert resp.url == to_unicode(b"http://www.example.com/price/\xc2\xa3")
resp = self.response_class(
"http://www.example.com/price/\xa3",
headers={"Content-type": ["text/html; charset=iso-8859-1"]},
)
assert resp.url == "http://www.example.com/price/\xa3"
def test_unicode_body(self):
unicode_string = (
"\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0447\u0435\u0441\u043a\u0438\u0439 "
"\u0442\u0435\u043a\u0441\u0442"
)
with pytest.raises(TypeError):
self.response_class("http://www.example.com", body="unicode body")
original_string = unicode_string.encode("cp1251")
r1 = self.response_class(
"http://www.example.com", body=original_string, encoding="cp1251"
)
# check response.text
assert isinstance(r1.text, str)
assert r1.text == unicode_string
def test_encoding(self):
r1 = self.response_class(
"http://www.example.com",
body=b"\xc2\xa3",
headers={"Content-type": ["text/html; charset=utf-8"]},
)
r2 = self.response_class(
"http://www.example.com", encoding="utf-8", body="\xa3"
)
r3 = self.response_class(
"http://www.example.com",
body=b"\xa3",
headers={"Content-type": ["text/html; charset=iso-8859-1"]},
)
r4 = self.response_class("http://www.example.com", body=b"\xa2\xa3")
r5 = self.response_class(
"http://www.example.com",
body=b"\xc2\xa3",
headers={"Content-type": ["text/html; charset=None"]},
)
r6 = self.response_class(
"http://www.example.com",
body=b"\xa8D",
headers={"Content-type": ["text/html; charset=gb2312"]},
)
r7 = self.response_class(
"http://www.example.com",
body=b"\xa8D",
headers={"Content-type": ["text/html; charset=gbk"]},
)
r8 = self.response_class(
"http://www.example.com",
body=codecs.BOM_UTF8 + b"\xc2\xa3",
headers={"Content-type": ["text/html; charset=cp1251"]},
)
r9 = self.response_class(
"http://www.example.com",
body=b"\x80",
headers={
"Content-type": [b"application/x-download; filename=\x80dummy.txt"]
},
)
assert r1._headers_encoding() == "utf-8"
assert r2._headers_encoding() is None
assert r2._declared_encoding() == "utf-8"
self._assert_response_encoding(r2, "utf-8")
assert r3._headers_encoding() == "cp1252"
assert r3._declared_encoding() == "cp1252"
assert r4._headers_encoding() is None
assert r5._headers_encoding() is None
assert r8._headers_encoding() == "cp1251"
assert r9._headers_encoding() is None
assert r8._declared_encoding() == "utf-8"
assert r9._declared_encoding() is None
self._assert_response_encoding(r5, "utf-8")
self._assert_response_encoding(r8, "utf-8")
self._assert_response_encoding(r9, "cp1252")
assert r4._body_inferred_encoding() is not None
assert r4._body_inferred_encoding() != "ascii"
self._assert_response_values(r1, "utf-8", "\xa3")
self._assert_response_values(r2, "utf-8", "\xa3")
self._assert_response_values(r3, "iso-8859-1", "\xa3")
self._assert_response_values(r6, "gb18030", "\u2015")
self._assert_response_values(r7, "gb18030", "\u2015")
self._assert_response_values(r9, "cp1252", "€")
# TextResponse (and subclasses) must be passed an encoding when instantiated with unicode bodies
with pytest.raises(TypeError):
self.response_class("http://www.example.com", body="\xa3")
def test_declared_encoding_invalid(self):
"""Check that unknown declared encodings are ignored"""
r = self.response_class(
"http://www.example.com",
headers={"Content-type": ["text/html; charset=UNKNOWN"]},
body=b"\xc2\xa3",
)
assert r._declared_encoding() is None
self._assert_response_values(r, "utf-8", "\xa3")
def test_utf16(self):
"""Test utf-16 because UnicodeDammit is known to have problems with"""
r = self.response_class(
"http://www.example.com",
body=b"\xff\xfeh\x00i\x00",
encoding="utf-16",
)
self._assert_response_values(r, "utf-16", "hi")
def test_invalid_utf8_encoded_body_with_valid_utf8_BOM(self):
r6 = self.response_class(
"http://www.example.com",
headers={"Content-type": ["text/html; charset=utf-8"]},
body=b"\xef\xbb\xbfWORD\xe3\xab",
)
assert r6.encoding == "utf-8"
assert r6.text in {
"WORD\ufffd\ufffd", # w3lib < 1.19.0
"WORD\ufffd", # w3lib >= 1.19.0
}
def test_bom_is_removed_from_body(self):
# Inferring the encoding from the body also caches the decoded body as a
# side effect; this test tries to ensure that calling response.encoding and
# response.text in either order doesn't affect the final
# values for encoding and decoded body.
url = "http://example.com"
body = b"\xef\xbb\xbfWORD"
headers = {"Content-type": ["text/html; charset=utf-8"]}
# Test response without content-type and BOM encoding
response = self.response_class(url, body=body)
assert response.encoding == "utf-8"
assert response.text == "WORD"
response = self.response_class(url, body=body)
assert response.text == "WORD"
assert response.encoding == "utf-8"
# The body-caching side effect isn't triggered when the encoding is declared
# in the Content-Type header, but the BOM still needs to be removed from the
# decoded body
response = self.response_class(url, headers=headers, body=body)
assert response.encoding == "utf-8"
assert response.text == "WORD"
response = self.response_class(url, headers=headers, body=body)
assert response.text == "WORD"
assert response.encoding == "utf-8"
def test_replace_wrong_encoding(self):
"""Test invalid chars are replaced properly"""
r = self.response_class(
"http://www.example.com",
encoding="utf-8",
body=b"PREFIX\xe3\xabSUFFIX",
)
# XXX: Policy for replacing invalid chars may suffer minor variations
# but it should always contain the unicode replacement char ('\ufffd')
assert "\ufffd" in r.text, repr(r.text)
assert "PREFIX" in r.text, repr(r.text)
assert "SUFFIX" in r.text, repr(r.text)
# Do not destroy html tags due to encoding bugs
r = self.response_class(
"http://example.com",
encoding="utf-8",
body=b"\xf0<span>value</span>",
)
assert "<span>value</span>" in r.text, repr(r.text)
# FIXME: This test should pass once we stop using BeautifulSoup's UnicodeDammit in TextResponse
# r = self.response_class("http://www.example.com", body=b'PREFIX\xe3\xabSUFFIX')
# assert '\ufffd' in r.text, repr(r.text)
def test_selector(self):
body = b"<html><head><title>Some page</title><body></body></html>"
response = self.response_class("http://www.example.com", body=body)
assert isinstance(response.selector, Selector)
assert response.selector.type == "html"
assert response.selector is response.selector # property is cached
assert response.selector.response is response
assert response.selector.xpath("//title/text()").getall() == ["Some page"]
assert response.selector.css("title::text").getall() == ["Some page"]
assert response.selector.re("Some (.*)</title>") == ["page"]
def test_selector_shortcuts(self):
body = b"<html><head><title>Some page</title><body></body></html>"
response = self.response_class("http://www.example.com", body=body)
assert (
response.xpath("//title/text()").getall()
== response.selector.xpath("//title/text()").getall()
)
assert (
response.css("title::text").getall()
== response.selector.css("title::text").getall()
)
def test_selector_shortcuts_kwargs(self):
body = b'<html><head><title>Some page</title><body><p class="content">A nice paragraph.</p></body></html>'
response = self.response_class("http://www.example.com", body=body)
assert (
response.xpath(
"normalize-space(//p[@class=$pclass])", pclass="content"
).getall()
== response.xpath('normalize-space(//p[@class="content"])').getall()
)
assert (
response.xpath(
"//title[count(following::p[@class=$pclass])=$pcount]/text()",
pclass="content",
pcount=1,
).getall()
== response.xpath(
'//title[count(following::p[@class="content"])=1]/text()'
).getall()
)
def test_urljoin_with_base_url(self):
"""Test urljoin shortcut which also evaluates base-url through get_base_url()."""
body = b'<html><body><base href="https://example.net"></body></html>'
joined = self.response_class("http://www.example.com", body=body).urljoin(
"/test"
)
absolute = "https://example.net/test"
assert joined == absolute
body = b'<html><body><base href="/elsewhere"></body></html>'
joined = self.response_class("http://www.example.com", body=body).urljoin(
"test"
)
absolute = "http://www.example.com/test"
assert joined == absolute
body = b'<html><body><base href="/elsewhere/"></body></html>'
joined = self.response_class("http://www.example.com", body=body).urljoin(
"test"
)
absolute = "http://www.example.com/elsewhere/test"
assert joined == absolute
def test_follow_selector(self):
resp = self._links_response()
urls = [
"http://example.com/sample2.html",
"http://example.com/sample3.html",
"http://example.com/sample3.html",
"http://example.com/sample3.html",
"http://example.com/sample3.html#foo",
"http://www.google.com/something",
"http://example.com/innertag.html",
]
# select <a> elements
for sellist in [resp.css("a"), resp.xpath("//a")]:
for sel, url in zip(sellist, urls, strict=False):
self._assert_followed_url(sel, url, response=resp)
# select <link> elements
self._assert_followed_url(
Selector(text='<link href="foo"></link>').css("link")[0],
"http://example.com/foo",
response=resp,
)
# href attributes should work
for sellist in [resp.css("a::attr(href)"), resp.xpath("//a/@href")]:
for sel, url in zip(sellist, urls, strict=False):
self._assert_followed_url(sel, url, response=resp)
# non-a elements are not supported
with pytest.raises(
ValueError, match="Only <a> and <link> elements are supported"
):
resp.follow(resp.css("div")[0])
def test_follow_selector_list(self):
resp = self._links_response()
with pytest.raises(ValueError, match="SelectorList"):
resp.follow(resp.css("a"))
def test_follow_selector_invalid(self):
resp = self._links_response()
with pytest.raises(ValueError, match="Unsupported"):
resp.follow(resp.xpath("count(//div)")[0])
def test_follow_selector_attribute(self):
resp = self._links_response()
for src in resp.css("img::attr(src)"):
self._assert_followed_url(src, "http://example.com/sample2.jpg")
def test_follow_selector_no_href(self):
resp = self.response_class(
url="http://example.com",
body=b"<html><body><a name=123>click me</a></body></html>",
)
with pytest.raises(ValueError, match="no href"):
resp.follow(resp.css("a")[0])
def test_follow_whitespace_selector(self):
resp = self.response_class(
"http://example.com",
body=b"""<html><body><a href=" foo\n">click me</a></body></html>""",
)
self._assert_followed_url(
resp.css("a")[0], "http://example.com/foo", response=resp
)
self._assert_followed_url(
resp.css("a::attr(href)")[0],
"http://example.com/foo",
response=resp,
)
def test_follow_encoding(self):
resp1 = self.response_class(
"http://example.com",
encoding="utf8",
body='<html><body><a href="foo?привет">click me</a></body></html>'.encode(),
)
req = self._assert_followed_url(
resp1.css("a")[0],
"http://example.com/foo?%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82",
response=resp1,
)
assert req.encoding == "utf8"
resp2 = self.response_class(
"http://example.com",
encoding="cp1251",
body='<html><body><a href="foo?привет">click me</a></body></html>'.encode(
"cp1251"
),
)
req = self._assert_followed_url(
resp2.css("a")[0],
"http://example.com/foo?%EF%F0%E8%E2%E5%F2",
response=resp2,
)
assert req.encoding == "cp1251"
def test_follow_flags(self):
res = self.response_class("http://example.com/")
fol = res.follow("http://example.com/", flags=["cached", "allowed"])
assert fol.flags == ["cached", "allowed"]
def test_follow_all_flags(self):
re = self.response_class("http://www.example.com/")
urls = [
"http://www.example.com/",
"http://www.example.com/2",
"http://www.example.com/foo",
]
fol = re.follow_all(urls, flags=["cached", "allowed"])
for req in fol:
assert req.flags == ["cached", "allowed"]
def test_follow_all_css(self):
expected = [
"http://example.com/sample3.html",
"http://example.com/innertag.html",
]
response = self._links_response()
extracted = [r.url for r in response.follow_all(css='a[href*="example.com"]')]
assert expected == extracted
def test_follow_all_css_skip_invalid(self):
expected = [
"http://example.com/page/1/",
"http://example.com/page/3/",
"http://example.com/page/4/",
]
response = self._links_response_no_href()
extracted1 = [r.url for r in response.follow_all(css=".pagination a")]
assert expected == extracted1
extracted2 = [r.url for r in response.follow_all(response.css(".pagination a"))]
assert expected == extracted2
def test_follow_all_xpath(self):
expected = [
"http://example.com/sample3.html",
"http://example.com/innertag.html",
]
response = self._links_response()
extracted = response.follow_all(xpath='//a[contains(@href, "example.com")]')
assert expected == [r.url for r in extracted]
def test_follow_all_xpath_skip_invalid(self):
expected = [
"http://example.com/page/1/",
"http://example.com/page/3/",
"http://example.com/page/4/",
]
response = self._links_response_no_href()
extracted1 = [
r.url for r in response.follow_all(xpath='//div[@id="pagination"]/a')
]
assert expected == extracted1
extracted2 = [
r.url
for r in response.follow_all(response.xpath('//div[@id="pagination"]/a'))
]
assert expected == extracted2
def test_follow_all_too_many_arguments(self):
response = self._links_response()
with pytest.raises(
ValueError, match="Please supply exactly one of the following arguments"
):
response.follow_all(
css='a[href*="example.com"]',
xpath='//a[contains(@href, "example.com")]',
)
def test_json_response(self):
json_body = b"""{"ip": "109.187.217.200"}"""
json_response = self.response_class("http://www.example.com", body=json_body)
assert json_response.json() == {"ip": "109.187.217.200"}
text_body = b"""<html><body>text</body></html>"""
text_response = self.response_class("http://www.example.com", body=text_body)
with pytest.raises(
ValueError, match=r"(Expecting value|Unexpected '<'): line 1"
):
text_response.json()
def test_cache_json_response(self):
json_valid_bodies = [b"""{"ip": "109.187.217.200"}""", b"""null"""]
for json_body in json_valid_bodies:
json_response = self.response_class(
"http://www.example.com", body=json_body
)
with mock.patch("json.loads") as mock_json:
for _ in range(2):
json_response.json()
mock_json.assert_called_once_with(json_body)
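# Usage note (illustrative): as the mock above verifies, .json() parses the
# body with json.loads only once and then serves the cached result.
def _json_usage_example():
    response = TextResponse("http://www.example.com", body=b'{"ip": "127.0.0.1"}')
    data = response.json()   # parsed on first access
    again = response.json()  # second call comes from the cache
    return data, again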
class TestHtmlResponse(TestTextResponse):
response_class = HtmlResponse
def test_html_encoding(self):
body = b"""<html><head><title>Some page</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head><body>Price: \xa3100</body></html>'
"""
r1 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r1, "iso-8859-1", body)
body = b"""<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
Price: \xa3100
"""
r2 = self.response_class("http://www.example.com", body=body)
self._assert_response_values(r2, "iso-8859-1", body)
# for conflicting declarations headers must take precedence
body = b"""<html><head><title>Some page</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body>Price: \xa3100</body></html>'
"""
r3 = self.response_class(
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_pipeline_images.py | tests/test_pipeline_images.py | from __future__ import annotations
import dataclasses
import io
import random
from abc import ABC, abstractmethod
from shutil import rmtree
from tempfile import mkdtemp
from typing import Any
import attr
import pytest
from itemadapter import ItemAdapter
from scrapy.http import Request, Response
from scrapy.item import Field, Item
from scrapy.pipelines.images import ImageException, ImagesPipeline
from scrapy.utils.test import get_crawler
try:
from PIL import Image
except ImportError:
pytest.skip(
"Missing Python Imaging Library, install https://pypi.org/pypi/Pillow",
allow_module_level=True,
)
else:
encoders = {"jpeg_encoder", "jpeg_decoder"}
if not encoders.issubset(set(Image.core.__dict__)): # type: ignore[attr-defined]
pytest.skip("Missing JPEG encoders", allow_module_level=True)
class TestImagesPipeline:
def setup_method(self):
self.tempdir = mkdtemp()
crawler = get_crawler()
self.pipeline = ImagesPipeline(self.tempdir, crawler=crawler)
def teardown_method(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
assert (
file_path(Request("https://dev.mydeco.com/mydeco.gif"))
== "full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg"
)
assert (
file_path(
Request(
"http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.jpg"
)
)
== "full/0ffcd85d563bca45e2f90becd0ca737bc58a00b2.jpg"
)
assert (
file_path(
Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.gif")
)
== "full/b250e3a74fff2e4703e310048a5b13eba79379d2.jpg"
)
assert (
file_path(
Request(
"http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg"
)
)
== "full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg"
)
assert (
file_path(Request("http://www.dorma.co.uk/images/product_details/2532/"))
== "full/97ee6f8a46cbbb418ea91502fd24176865cf39b2.jpg"
)
assert (
file_path(Request("http://www.dorma.co.uk/images/product_details/2532"))
== "full/244e0dd7d96a3b7b01f54eded250c9e272577aa1.jpg"
)
assert (
file_path(
Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object(),
)
== "full/244e0dd7d96a3b7b01f54eded250c9e272577aa1.jpg"
)
def test_thumbnail_name(self):
thumb_path = self.pipeline.thumb_path
name = "50"
assert (
thumb_path(Request("file:///tmp/foo.jpg"), name)
== "thumbs/50/38a86208c36e59d4404db9e37ce04be863ef0335.jpg"
)
assert (
thumb_path(Request("file://foo.png"), name)
== "thumbs/50/e55b765eba0ec7348e50a1df496040449071b96a.jpg"
)
assert (
thumb_path(Request("file:///tmp/foo"), name)
== "thumbs/50/0329ad83ebb8e93ea7c7906d46e9ed55f7349a50.jpg"
)
assert (
thumb_path(Request("file:///tmp/some.name/foo"), name)
== "thumbs/50/850233df65a5b83361798f532f1fc549cd13cbe9.jpg"
)
assert (
thumb_path(
Request("file:///tmp/some.name/foo"),
name,
response=Response("file:///tmp/some.name/foo"),
info=object(),
)
== "thumbs/50/850233df65a5b83361798f532f1fc549cd13cbe9.jpg"
)
def test_thumbnail_name_from_item(self):
"""
Custom thumbnail name based on item data, overriding default implementation
"""
class CustomImagesPipeline(ImagesPipeline):
def thumb_path(
self, request, thumb_id, response=None, info=None, item=None
):
return f"thumb/{thumb_id}/{item.get('path')}"
thumb_path = CustomImagesPipeline.from_crawler(
get_crawler(None, {"IMAGES_STORE": self.tempdir})
).thumb_path
item = {"path": "path-to-store-file"}
request = Request("http://example.com")
assert (
thumb_path(request, "small", item=item) == "thumb/small/path-to-store-file"
)
def test_get_images_exception(self):
self.pipeline.min_width = 100
self.pipeline.min_height = 100
_, buf1 = _create_image("JPEG", "RGB", (50, 50), (0, 0, 0))
_, buf2 = _create_image("JPEG", "RGB", (150, 50), (0, 0, 0))
_, buf3 = _create_image("JPEG", "RGB", (50, 150), (0, 0, 0))
resp1 = Response(url="https://dev.mydeco.com/mydeco.gif", body=buf1.getvalue())
resp2 = Response(url="https://dev.mydeco.com/mydeco.gif", body=buf2.getvalue())
resp3 = Response(url="https://dev.mydeco.com/mydeco.gif", body=buf3.getvalue())
req = Request(url="https://dev.mydeco.com/mydeco.gif")
with pytest.raises(ImageException):
next(self.pipeline.get_images(response=resp1, request=req, info=object()))
with pytest.raises(ImageException):
next(self.pipeline.get_images(response=resp2, request=req, info=object()))
with pytest.raises(ImageException):
next(self.pipeline.get_images(response=resp3, request=req, info=object()))
def test_get_images(self):
self.pipeline.min_width = 0
self.pipeline.min_height = 0
self.pipeline.thumbs = {"small": (20, 20)}
orig_im, buf = _create_image("JPEG", "RGB", (50, 50), (0, 0, 0))
_, orig_thumb_buf = _create_image("JPEG", "RGB", (20, 20), (0, 0, 0))
resp = Response(url="https://dev.mydeco.com/mydeco.gif", body=buf.getvalue())
req = Request(url="https://dev.mydeco.com/mydeco.gif")
get_images_gen = self.pipeline.get_images(
response=resp, request=req, info=object()
)
path, new_im, new_buf = next(get_images_gen)
assert path == "full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg"
assert orig_im.copy() == new_im
assert buf.getvalue() == new_buf.getvalue()
thumb_path, _, thumb_buf = next(get_images_gen)
assert thumb_path == "thumbs/small/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg"
assert orig_thumb_buf.getvalue() == thumb_buf.getvalue()
def test_get_transposed_images(self):
orig_im = Image.new("RGB", (2, 2), (0, 0, 0))
orig_im.putpixel((1, 1), (255, 0, 0))
exif = orig_im.getexif()
exif[274] = 3
buf = io.BytesIO()
orig_im.save(buf, "PNG", exif=exif)
buf.seek(0)
resp = Response(url="https://dev.mydeco.com/mydeco.gif", body=buf.getvalue())
req = Request(url="https://dev.mydeco.com/mydeco.gif")
get_images_gen = self.pipeline.get_images(
response=resp, request=req, info=object()
)
path, new_im, _ = next(get_images_gen)
assert path == "full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg"
assert new_im.getpixel((0, 0)) == (255, 0, 0)
def test_convert_image(self):
SIZE = (100, 100)
# straightforward case: RGB and JPEG
COLOUR = (0, 127, 255)
im, buf = _create_image("JPEG", "RGB", SIZE, COLOUR)
converted, converted_buf = self.pipeline.convert_image(im, response_body=buf)
assert converted.mode == "RGB"
assert converted.getcolors() == [(10000, COLOUR)]
# check that we don't convert JPEGs again
assert converted_buf == buf
# check that the thumbnail keeps the image ratio
thumbnail, _ = self.pipeline.convert_image(
converted, size=(10, 25), response_body=converted_buf
)
assert thumbnail.mode == "RGB"
assert thumbnail.size == (10, 10)
# transparency case: RGBA and PNG
COLOUR = (0, 127, 255, 50)
im, buf = _create_image("PNG", "RGBA", SIZE, COLOUR)
converted, _ = self.pipeline.convert_image(im, response_body=buf)
assert converted.mode == "RGB"
assert converted.getcolors() == [(10000, (205, 230, 255))]
# transparency case with palette: P and PNG
COLOUR = (0, 127, 255, 50)
im, buf = _create_image("PNG", "RGBA", SIZE, COLOUR)
im = im.convert("P")
converted, _ = self.pipeline.convert_image(im, response_body=buf)
assert converted.mode == "RGB"
assert converted.getcolors() == [(10000, (205, 230, 255))]
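# In outline, the contract the assertions above exercise (an approximate
# sketch, not the pipeline's code): non-RGB inputs are converted to RGB,
# transparency is flattened onto white, and thumbnails are fitted inside the
# requested box while preserving aspect ratio, which is why a (10, 25) box
# on a square source yields a 10x10 thumbnail.
def _thumbnail_size_sketch(self, src_size, box):
    # PIL Image.thumbnail semantics, approximately: scale to fit the box.
    ratio = min(box[0] / src_size[0], box[1] / src_size[1])
    return (round(src_size[0] * ratio), round(src_size[1] * ratio))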
@pytest.mark.parametrize(
"bad_type",
[
"http://example.com/file.jpg",
("http://example.com/file.jpg",),
{"url": "http://example.com/file.jpg"},
123,
None,
],
)
def test_rejects_non_list_image_urls(self, tmp_path, bad_type):
pipeline = ImagesPipeline.from_crawler(
get_crawler(None, {"IMAGES_STORE": str(tmp_path)})
)
item = ImagesPipelineTestItem()
item["image_urls"] = bad_type
with pytest.raises(TypeError, match="image_urls must be a list of URLs"):
list(pipeline.get_media_requests(item, None))
class TestImagesPipelineFieldsMixin(ABC):
@property
@abstractmethod
def item_class(self) -> Any:
raise NotImplementedError
def test_item_fields_default(self):
url = "http://www.example.com/images/1.jpg"
item = self.item_class(name="item1", image_urls=[url])
pipeline = ImagesPipeline.from_crawler(
get_crawler(None, {"IMAGES_STORE": "s3://example/images/"})
)
requests = list(pipeline.get_media_requests(item, None))
assert requests[0].url == url
results = [(True, {"url": url})]
item = pipeline.item_completed(results, item, None)
images = ItemAdapter(item).get("images")
assert images == [results[0][1]]
assert isinstance(item, self.item_class)
def test_item_fields_override_settings(self):
url = "http://www.example.com/images/1.jpg"
item = self.item_class(name="item1", custom_image_urls=[url])
pipeline = ImagesPipeline.from_crawler(
get_crawler(
None,
{
"IMAGES_STORE": "s3://example/images/",
"IMAGES_URLS_FIELD": "custom_image_urls",
"IMAGES_RESULT_FIELD": "custom_images",
},
)
)
requests = list(pipeline.get_media_requests(item, None))
assert requests[0].url == url
results = [(True, {"url": url})]
item = pipeline.item_completed(results, item, None)
custom_images = ItemAdapter(item).get("custom_images")
assert custom_images == [results[0][1]]
assert isinstance(item, self.item_class)
class TestImagesPipelineFieldsDict(TestImagesPipelineFieldsMixin):
item_class = dict
class ImagesPipelineTestItem(Item):
name = Field()
# default fields
image_urls = Field()
images = Field()
# overridden fields
custom_image_urls = Field()
custom_images = Field()
class TestImagesPipelineFieldsItem(TestImagesPipelineFieldsMixin):
item_class = ImagesPipelineTestItem
@dataclasses.dataclass
class ImagesPipelineTestDataClass:
name: str
# default fields
image_urls: list = dataclasses.field(default_factory=list)
images: list = dataclasses.field(default_factory=list)
# overridden fields
custom_image_urls: list = dataclasses.field(default_factory=list)
custom_images: list = dataclasses.field(default_factory=list)
class TestImagesPipelineFieldsDataClass(TestImagesPipelineFieldsMixin):
item_class = ImagesPipelineTestDataClass
@attr.s
class ImagesPipelineTestAttrsItem:
name = attr.ib(default="")
# default fields
# factory=list gives each instance its own empty list; default=list would
# store the list type itself as the default value
image_urls: list[str] = attr.ib(factory=list)
images: list[dict[str, str]] = attr.ib(factory=list)
# overridden fields
custom_image_urls: list[str] = attr.ib(factory=list)
custom_images: list[dict[str, str]] = attr.ib(factory=list)
class TestImagesPipelineFieldsAttrsItem(TestImagesPipelineFieldsMixin):
item_class = ImagesPipelineTestAttrsItem
class TestImagesPipelineCustomSettings:
img_cls_attribute_names = [
# Pipeline attribute names with corresponding setting names.
("EXPIRES", "IMAGES_EXPIRES"),
("MIN_WIDTH", "IMAGES_MIN_WIDTH"),
("MIN_HEIGHT", "IMAGES_MIN_HEIGHT"),
("IMAGES_URLS_FIELD", "IMAGES_URLS_FIELD"),
("IMAGES_RESULT_FIELD", "IMAGES_RESULT_FIELD"),
("THUMBS", "IMAGES_THUMBS"),
]
# This should match what is defined in ImagesPipeline.
default_pipeline_settings = {
"MIN_WIDTH": 0,
"MIN_HEIGHT": 0,
"EXPIRES": 90,
"THUMBS": {},
"IMAGES_URLS_FIELD": "image_urls",
"IMAGES_RESULT_FIELD": "images",
}
def _generate_fake_settings(self, tmp_path, prefix=None):
"""
:param tmp_path: directory used as the IMAGES_STORE setting
:param prefix: optional string prefix for setting keys
:return: dictionary of image pipeline settings
"""
def random_string():
return "".join([chr(random.randint(97, 123)) for _ in range(10)])
settings = {
"IMAGES_EXPIRES": random.randint(100, 1000),
"IMAGES_STORE": tmp_path,
"IMAGES_RESULT_FIELD": random_string(),
"IMAGES_URLS_FIELD": random_string(),
"IMAGES_MIN_WIDTH": random.randint(1, 1000),
"IMAGES_MIN_HEIGHT": random.randint(1, 1000),
"IMAGES_THUMBS": {
"small": (random.randint(1, 1000), random.randint(1, 1000)),
"big": (random.randint(1, 1000), random.randint(1, 1000)),
},
}
if not prefix:
return settings
return {
prefix.upper() + "_" + k if k != "IMAGES_STORE" else k: v
for k, v in settings.items()
}
def _generate_fake_pipeline_subclass(self):
"""
:return: ImagesPipeline subclass with all uppercase attributes set.
"""
class UserDefinedImagePipeline(ImagesPipeline):
# Values should be in different range than fake_settings.
MIN_WIDTH = random.randint(1000, 2000)
MIN_HEIGHT = random.randint(1000, 2000)
THUMBS = {
"small": (random.randint(1000, 2000), random.randint(1000, 2000)),
"big": (random.randint(1000, 2000), random.randint(1000, 2000)),
}
EXPIRES = random.randint(1000, 2000)
IMAGES_URLS_FIELD = "field_one"
IMAGES_RESULT_FIELD = "field_two"
return UserDefinedImagePipeline
def test_different_settings_for_different_instances(self, tmp_path):
"""
Two ImagesPipeline instances created with different settings should each
keep their own attribute values.
"""
custom_settings = self._generate_fake_settings(tmp_path)
default_sts_pipe = ImagesPipeline(tmp_path, crawler=get_crawler(None))
user_sts_pipe = ImagesPipeline.from_crawler(get_crawler(None, custom_settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
expected_default_value = self.default_pipeline_settings.get(pipe_attr)
custom_value = custom_settings.get(settings_attr)
assert expected_default_value != custom_value
assert (
getattr(default_sts_pipe, pipe_attr.lower()) == expected_default_value
)
assert getattr(user_sts_pipe, pipe_attr.lower()) == custom_value
def test_subclass_attrs_preserved_default_settings(self, tmp_path):
"""
If image settings are not defined at all, a subclass of ImagesPipeline takes
values from its class attributes.
"""
pipeline_cls = self._generate_fake_pipeline_subclass()
pipeline = pipeline_cls.from_crawler(
get_crawler(None, {"IMAGES_STORE": tmp_path})
)
for pipe_attr, settings_attr in self.img_cls_attribute_names:
# Instance attribute (lowercase) must be equal to class attribute (uppercase).
attr_value = getattr(pipeline, pipe_attr.lower())
assert attr_value != self.default_pipeline_settings[pipe_attr]
assert attr_value == getattr(pipeline, pipe_attr)
def test_subclass_attrs_preserved_custom_settings(self, tmp_path):
"""
If image settings are defined globally but not with the subclass prefix, the
values taken from settings should win over the subclass class attributes.
"""
pipeline_cls = self._generate_fake_pipeline_subclass()
settings = self._generate_fake_settings(tmp_path)
pipeline = pipeline_cls.from_crawler(get_crawler(None, settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
# Instance attribute (lowercase) must be equal to
# value defined in settings.
value = getattr(pipeline, pipe_attr.lower())
assert value != self.default_pipeline_settings[pipe_attr]
settings_value = settings.get(settings_attr)
assert value == settings_value
def test_no_custom_settings_for_subclasses(self, tmp_path):
"""
If there are no settings for subclass and no subclass attributes, pipeline should use
attributes of base class.
"""
class UserDefinedImagePipeline(ImagesPipeline):
pass
user_pipeline = UserDefinedImagePipeline.from_crawler(
get_crawler(None, {"IMAGES_STORE": tmp_path})
)
for pipe_attr, settings_attr in self.img_cls_attribute_names:
# With no custom settings and no class attributes, the defaults from the
# base class should be set on the pipeline instance.
default_value = self.default_pipeline_settings.get(pipe_attr)
assert getattr(user_pipeline, pipe_attr.lower()) == default_value
def test_custom_settings_for_subclasses(self, tmp_path):
"""
If there are custom settings for subclass and NO class attributes, pipeline should use custom
settings.
"""
class UserDefinedImagePipeline(ImagesPipeline):
pass
prefix = UserDefinedImagePipeline.__name__.upper()
settings = self._generate_fake_settings(tmp_path, prefix=prefix)
user_pipeline = UserDefinedImagePipeline.from_crawler(
get_crawler(None, settings)
)
for pipe_attr, settings_attr in self.img_cls_attribute_names:
# Values from settings for custom pipeline should be set on pipeline instance.
custom_value = settings.get(prefix + "_" + settings_attr)
assert custom_value != self.default_pipeline_settings[pipe_attr]
assert getattr(user_pipeline, pipe_attr.lower()) == custom_value
def test_custom_settings_and_class_attrs_for_subclasses(self, tmp_path):
"""
If there are custom settings for subclass AND class attributes
setting keys are preferred and override attributes.
"""
pipeline_cls = self._generate_fake_pipeline_subclass()
prefix = pipeline_cls.__name__.upper()
settings = self._generate_fake_settings(tmp_path, prefix=prefix)
user_pipeline = pipeline_cls.from_crawler(get_crawler(None, settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
custom_value = settings.get(prefix + "_" + settings_attr)
assert custom_value != self.default_pipeline_settings[pipe_attr]
assert getattr(user_pipeline, pipe_attr.lower()) == custom_value
def test_cls_attrs_with_DEFAULT_prefix(self, tmp_path):
class UserDefinedImagePipeline(ImagesPipeline):
DEFAULT_IMAGES_URLS_FIELD = "something"
DEFAULT_IMAGES_RESULT_FIELD = "something_else"
pipeline = UserDefinedImagePipeline.from_crawler(
get_crawler(None, {"IMAGES_STORE": tmp_path})
)
assert (
pipeline.images_result_field
== UserDefinedImagePipeline.DEFAULT_IMAGES_RESULT_FIELD
)
assert (
pipeline.images_urls_field
== UserDefinedImagePipeline.DEFAULT_IMAGES_URLS_FIELD
)
def test_user_defined_subclass_default_key_names(self, tmp_path):
"""Test situation when user defines subclass of ImagePipeline,
but uses attribute names for default pipeline (without prefixing
them with pipeline class name).
"""
settings = self._generate_fake_settings(tmp_path)
class UserPipe(ImagesPipeline):
pass
pipeline = UserPipe.from_crawler(get_crawler(None, settings))
for pipe_attr, settings_attr in self.img_cls_attribute_names:
expected_value = settings.get(settings_attr)
assert getattr(pipeline, pipe_attr.lower()) == expected_value
def _create_image(format_, *a, **kw):
buf = io.BytesIO()
Image.new(*a, **kw).save(buf, format_)
buf.seek(0)
return Image.open(buf), buf
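# Illustrative usage (hypothetical values): build a 50x50 red JPEG in memory
# and get back both the parsed Image and its raw buffer:
#   img, buf = _create_image("JPEG", "RGB", (50, 50), (255, 0, 0))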
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_http_cookies.py | tests/test_http_cookies.py | from scrapy.http import Request, Response
from scrapy.http.cookies import WrappedRequest, WrappedResponse
from scrapy.utils.httpobj import urlparse_cached
class TestWrappedRequest:
def setup_method(self):
self.request = Request(
"http://www.example.com/page.html", headers={"Content-Type": "text/html"}
)
self.wrapped = WrappedRequest(self.request)
def test_get_full_url(self):
assert self.wrapped.get_full_url() == self.request.url
assert self.wrapped.full_url == self.request.url
def test_get_host(self):
assert self.wrapped.get_host() == urlparse_cached(self.request).netloc
assert self.wrapped.host == urlparse_cached(self.request).netloc
def test_get_type(self):
assert self.wrapped.get_type() == urlparse_cached(self.request).scheme
assert self.wrapped.type == urlparse_cached(self.request).scheme
def test_is_unverifiable(self):
assert not self.wrapped.is_unverifiable()
assert not self.wrapped.unverifiable
def test_is_unverifiable2(self):
self.request.meta["is_unverifiable"] = True
assert self.wrapped.is_unverifiable()
assert self.wrapped.unverifiable
def test_get_origin_req_host(self):
assert self.wrapped.origin_req_host == "www.example.com"
def test_has_header(self):
assert self.wrapped.has_header("content-type")
assert not self.wrapped.has_header("xxxxx")
def test_get_header(self):
assert self.wrapped.get_header("content-type") == "text/html"
assert self.wrapped.get_header("xxxxx", "def") == "def"
assert self.wrapped.get_header("xxxxx") is None
wrapped = WrappedRequest(
Request(
"http://www.example.com/page.html", headers={"empty-binary-header": b""}
)
)
assert wrapped.get_header("empty-binary-header") == ""
def test_header_items(self):
assert self.wrapped.header_items() == [("Content-Type", ["text/html"])]
def test_add_unredirected_header(self):
self.wrapped.add_unredirected_header("hello", "world")
assert self.request.headers["hello"] == b"world"
class TestWrappedResponse:
def setup_method(self):
self.response = Response(
"http://www.example.com/page.html", headers={"Content-TYpe": "text/html"}
)
self.wrapped = WrappedResponse(self.response)
def test_info(self):
assert self.wrapped.info() is self.wrapped
def test_get_all(self):
# get_all result must be native string
assert self.wrapped.get_all("content-type") == ["text/html"]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_zz_resources.py | tests/test_zz_resources.py | """Test that certain resources are not leaked during earlier tests."""
from __future__ import annotations
import asyncio
import logging
import pytest
from scrapy.utils.log import LogCounterHandler
def test_counter_handler() -> None:
"""Test that ``LogCounterHandler`` is always properly removed.
It's added in ``Crawler.crawl{,_async}()`` and removed on engine_stopped.
"""
c = sum(1 for h in logging.root.handlers if isinstance(h, LogCounterHandler))
assert c == 0
def test_stderr_log_handler() -> None:
"""Test that the Scrapy root handler is always properly removed.
It's added in ``configure_logging()``, called by ``{Async,}CrawlerProcess``
(without ``install_root_handler=False``). It can be removed with
``_uninstall_scrapy_root_handler()`` if installing it was really needed.
"""
c = sum(1 for h in logging.root.handlers if type(h) is logging.StreamHandler) # pylint: disable=unidiomatic-typecheck
assert c == 0
@pytest.mark.only_asyncio
def test_pending_asyncio_tasks() -> None:
"""Test that there are no pending asyncio tasks."""
assert not asyncio.all_tasks()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_pipelines.py | tests/test_pipelines.py | import asyncio
from typing import Any
import pytest
from twisted.internet.defer import Deferred, fail, succeed
from scrapy import Request, Spider, signals
from scrapy.crawler import Crawler
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.pipelines import ItemPipelineManager
from scrapy.utils.asyncio import call_later
from scrapy.utils.conf import build_component_list
from scrapy.utils.defer import (
deferred_f_from_coro_f,
deferred_to_future,
maybe_deferred_to_future,
)
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler, get_from_asyncio_queue
from tests.mockserver.http import MockServer
class SimplePipeline:
def process_item(self, item):
item["pipeline_passed"] = True
return item
class DeprecatedSpiderArgPipeline:
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
def process_item(self, item, spider):
item["pipeline_passed"] = True
return item
class DeferredPipeline:
def cb(self, item):
item["pipeline_passed"] = True
return item
def open_spider(self):
return succeed(None)
def close_spider(self):
return succeed(None)
def process_item(self, item):
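# Exercises the (deprecated) case where process_item returns a Deferred
# that has already fired by the time the caller receives it; the matching
# deprecation warnings are asserted in test_pipeline_deferred below.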
d = Deferred()
d.addCallback(self.cb)
d.callback(item)
return d
class AsyncDefPipeline:
async def process_item(self, item):
d = Deferred()
call_later(0, d.callback, None)
await maybe_deferred_to_future(d)
item["pipeline_passed"] = True
return item
class AsyncDefAsyncioPipeline:
async def process_item(self, item):
d = Deferred()
loop = asyncio.get_event_loop()
loop.call_later(0, d.callback, None)
await deferred_to_future(d)
await asyncio.sleep(0.2)
item["pipeline_passed"] = await get_from_asyncio_queue(True)
return item
class AsyncDefNotAsyncioPipeline:
async def process_item(self, item):
d1 = Deferred()
from twisted.internet import reactor
reactor.callLater(0, d1.callback, None)
await d1
d2 = Deferred()
reactor.callLater(0, d2.callback, None)
await maybe_deferred_to_future(d2)
item["pipeline_passed"] = True
return item
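# Awaiting a raw Deferred (d1) works here only when the coroutine is not
# driven by the asyncio reactor, which is presumably why the tests below
# run this pipeline with the only_not_asyncio mark.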
class ProcessItemExceptionPipeline:
def process_item(self, item):
raise ValueError("process_item error")
class ProcessItemExceptionDeferredPipeline:
def process_item(self, item):
return fail(ValueError("process_item error"))
class ProcessItemExceptionAsyncPipeline:
async def process_item(self, item):
raise ValueError("process_item error")
class OpenSpiderExceptionPipeline:
def open_spider(self):
raise ValueError("open_spider error")
class OpenSpiderExceptionDeferredPipeline:
def open_spider(self):
return fail(ValueError("open_spider error"))
class OpenSpiderExceptionAsyncPipeline:
async def open_spider(self):
raise ValueError("open_spider error")
class ItemSpider(Spider):
name = "itemspider"
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
return {"field": 42}
class TestPipeline:
def _on_item_scraped(self, item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
self.items.append(item)
def _create_crawler(self, pipeline_class: type) -> Crawler:
settings = {
"ITEM_PIPELINES": {pipeline_class: 1},
}
crawler = get_crawler(ItemSpider, settings)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
self.items: list[Any] = []
return crawler
@pytest.mark.parametrize(
"pipeline_class",
[
SimplePipeline,
AsyncDefPipeline,
pytest.param(AsyncDefAsyncioPipeline, marks=pytest.mark.only_asyncio),
pytest.param(
AsyncDefNotAsyncioPipeline, marks=pytest.mark.only_not_asyncio
),
],
)
@deferred_f_from_coro_f
async def test_pipeline(self, mockserver: MockServer, pipeline_class: type) -> None:
crawler = self._create_crawler(pipeline_class)
await crawler.crawl_async(mockserver=mockserver)
assert len(self.items) == 1
@deferred_f_from_coro_f
async def test_pipeline_deferred(self, mockserver: MockServer) -> None:
crawler = self._create_crawler(DeferredPipeline)
with (
pytest.warns(
ScrapyDeprecationWarning,
match="DeferredPipeline.open_spider returned a Deferred",
),
pytest.warns(
ScrapyDeprecationWarning,
match="DeferredPipeline.close_spider returned a Deferred",
),
pytest.warns(
ScrapyDeprecationWarning,
match="DeferredPipeline.process_item returned a Deferred",
),
):
await crawler.crawl_async(mockserver=mockserver)
assert len(self.items) == 1
@deferred_f_from_coro_f
async def test_deprecated_spider_arg(self, mockserver: MockServer) -> None:
crawler = self._create_crawler(DeprecatedSpiderArgPipeline)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.open_spider\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.close_spider\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.process_item\(\) requires a spider argument",
),
):
await crawler.crawl_async(mockserver=mockserver)
assert len(self.items) == 1
@pytest.mark.parametrize(
"pipeline_class",
[
ProcessItemExceptionPipeline,
pytest.param(
ProcessItemExceptionDeferredPipeline,
marks=pytest.mark.filterwarnings(
"ignore::scrapy.exceptions.ScrapyDeprecationWarning"
),
),
ProcessItemExceptionAsyncPipeline,
],
)
@deferred_f_from_coro_f
async def test_process_item_exception(
self,
caplog: pytest.LogCaptureFixture,
mockserver: MockServer,
pipeline_class: type,
) -> None:
crawler = self._create_crawler(pipeline_class)
await crawler.crawl_async(mockserver=mockserver)
assert "Error processing {'field': 42}" in caplog.text
assert "process_item error" in caplog.text
@pytest.mark.parametrize(
"pipeline_class",
[
OpenSpiderExceptionPipeline,
pytest.param(
OpenSpiderExceptionDeferredPipeline,
marks=pytest.mark.filterwarnings(
"ignore::scrapy.exceptions.ScrapyDeprecationWarning"
),
),
OpenSpiderExceptionAsyncPipeline,
],
)
@deferred_f_from_coro_f
async def test_open_spider_exception(
self, mockserver: MockServer, pipeline_class: type
) -> None:
crawler = self._create_crawler(pipeline_class)
with pytest.raises(ValueError, match="open_spider error"):
await crawler.crawl_async(mockserver=mockserver)
class TestCustomPipelineManager:
def test_deprecated_process_item_spider_arg(self) -> None:
class CustomPipelineManager(ItemPipelineManager):
def process_item(self, item, spider): # pylint: disable=useless-parent-delegation
return super().process_item(item, spider)
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
itemproc = CustomPipelineManager.from_crawler(crawler)
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.process_item\(\) is deprecated, use process_item_async\(\)",
):
itemproc.process_item({}, crawler.spider)
@deferred_f_from_coro_f
async def test_integration_recommended(self, mockserver: MockServer) -> None:
class CustomPipelineManager(ItemPipelineManager):
async def process_item_async(self, item):
return await super().process_item_async(item)
items = []
def _on_item_scraped(item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
items.append(item)
crawler = get_crawler(
ItemSpider,
{
"ITEM_PROCESSOR": CustomPipelineManager,
"ITEM_PIPELINES": {SimplePipeline: 1},
},
)
crawler.spider = crawler._create_spider()
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
await crawler.crawl_async(mockserver=mockserver)
assert len(items) == 1
@deferred_f_from_coro_f
async def test_integration_no_async_subclass(self, mockserver: MockServer) -> None:
class CustomPipelineManager(ItemPipelineManager):
def open_spider(self, spider):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.open_spider\(\) is deprecated, use open_spider_async\(\)",
):
return super().open_spider(spider)
def close_spider(self, spider):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.close_spider\(\) is deprecated, use close_spider_async\(\)",
):
return super().close_spider(spider)
def process_item(self, item, spider):
with pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager.process_item\(\) is deprecated, use process_item_async\(\)",
):
return super().process_item(item, spider)
items = []
def _on_item_scraped(item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
items.append(item)
crawler = get_crawler(
ItemSpider,
{
"ITEM_PROCESSOR": CustomPipelineManager,
"ITEM_PIPELINES": {SimplePipeline: 1},
},
)
crawler.spider = crawler._create_spider()
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager overrides open_spider\(\) but doesn't override open_spider_async\(\)",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager overrides close_spider\(\) but doesn't override close_spider_async\(\)",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager overrides process_item\(\) but doesn't override process_item_async\(\)",
),
):
await crawler.crawl_async(mockserver=mockserver)
assert len(items) == 1
@deferred_f_from_coro_f
async def test_integration_no_async_not_subclass(
self, mockserver: MockServer
) -> None:
class CustomPipelineManager:
def __init__(self, crawler):
self.pipelines = [
p()
for p in build_component_list(
crawler.settings.getwithbase("ITEM_PIPELINES")
)
]
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def open_spider(self, spider):
return succeed(None)
def close_spider(self, spider):
return succeed(None)
def process_item(self, item, spider):
for pipeline in self.pipelines:
item = pipeline.process_item(item)
return succeed(item)
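# Note: this manager intentionally does not subclass ItemPipelineManager,
# so Scrapy should warn once per missing *_async() method, as asserted below.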
items = []
def _on_item_scraped(item):
assert isinstance(item, dict)
assert item.get("pipeline_passed")
items.append(item)
crawler = get_crawler(
ItemSpider,
{
"ITEM_PROCESSOR": CustomPipelineManager,
"ITEM_PIPELINES": {SimplePipeline: 1},
},
)
crawler.spider = crawler._create_spider()
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager doesn't define a open_spider_async\(\) method",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager doesn't define a close_spider_async\(\) method",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"CustomPipelineManager doesn't define a process_item_async\(\) method",
),
):
await crawler.crawl_async(mockserver=mockserver)
assert len(items) == 1
class TestMiddlewareManagerSpider:
"""Tests for the deprecated spider arg handling in MiddlewareManager.
Here because MiddlewareManager doesn't have methods that could take a spider arg."""
@pytest.fixture
def crawler(self) -> Crawler:
return get_crawler(Spider)
@deferred_f_from_coro_f
async def test_deprecated_spider_arg_no_crawler_spider(
self, crawler: Crawler
) -> None:
"""Crawler is provided, but doesn't have a spider, the methods raise an exception.
The instance passed to a deprecated method is ignored."""
mwman = ItemPipelineManager(crawler=crawler)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.open_spider\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.close_spider\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.process_item\(\) requires a spider argument",
),
):
mwman._add_middleware(DeprecatedSpiderArgPipeline())
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.open_spider\(\) is deprecated, use open_spider_async\(\) instead",
),
pytest.raises(
ValueError,
match=r"ItemPipelineManager needs to access self\.crawler\.spider but it is None",
),
):
await maybe_deferred_to_future(mwman.open_spider(DefaultSpider()))
with pytest.raises(
ValueError,
match=r"ItemPipelineManager needs to access self\.crawler\.spider but it is None",
):
await mwman.open_spider_async()
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.close_spider\(\) is deprecated, use close_spider_async\(\) instead",
),
pytest.raises(
ValueError,
match=r"ItemPipelineManager needs to access self\.crawler\.spider but it is None",
),
):
await maybe_deferred_to_future(mwman.close_spider(DefaultSpider()))
with pytest.raises(
ValueError,
match=r"ItemPipelineManager needs to access self\.crawler\.spider but it is None",
):
await mwman.close_spider_async()
def test_deprecated_spider_arg_with_crawler(self, crawler: Crawler) -> None:
"""Crawler is provided and has a spider, works. The instance passed to a deprecated method
is ignored, even if mismatched."""
mwman = ItemPipelineManager(crawler=crawler)
crawler.spider = crawler._create_spider("foo")
with pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.open_spider\(\) is deprecated, use open_spider_async\(\) instead",
):
mwman.open_spider(DefaultSpider())
with pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.close_spider\(\) is deprecated, use close_spider_async\(\) instead",
):
mwman.close_spider(DefaultSpider())
def test_deprecated_spider_arg_without_crawler(self) -> None:
"""The first instance passed to a deprecated method is used. Mismatched ones raise an error."""
with pytest.warns(
ScrapyDeprecationWarning,
match="was called without the crawler argument",
):
mwman = ItemPipelineManager()
spider = DefaultSpider()
with pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.open_spider\(\) is deprecated, use open_spider_async\(\) instead",
):
mwman.open_spider(spider)
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.close_spider\(\) is deprecated, use close_spider_async\(\) instead",
),
pytest.raises(
RuntimeError, match="Different instances of Spider were passed"
),
):
mwman.close_spider(DefaultSpider())
with pytest.warns(
ScrapyDeprecationWarning,
match=r"ItemPipelineManager.close_spider\(\) is deprecated, use close_spider_async\(\) instead",
):
mwman.close_spider(spider)
@deferred_f_from_coro_f
async def test_no_spider_arg_without_crawler(self) -> None:
"""If no crawler and no spider arg, raise an error."""
with pytest.warns(
ScrapyDeprecationWarning,
match="was called without the crawler argument",
):
mwman = ItemPipelineManager()
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.open_spider\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.close_spider\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"DeprecatedSpiderArgPipeline.process_item\(\) requires a spider argument",
),
):
mwman._add_middleware(DeprecatedSpiderArgPipeline())
with (
pytest.raises(
ValueError,
match="has no known Spider instance",
),
):
await mwman.open_spider_async()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_curl.py | tests/test_utils_curl.py | import warnings
from typing import Any
import pytest
from w3lib.http import basic_auth_header
from scrapy import Request
from scrapy.utils.curl import curl_to_request_kwargs
class TestCurlToRequestKwargs:
@staticmethod
def _test_command(curl_command: str, expected_result: dict[str, Any]) -> None:
result = curl_to_request_kwargs(curl_command)
assert result == expected_result
try:
Request(**result)
except TypeError as e:
pytest.fail(f"Request kwargs are not correct {e}")
def test_get(self):
curl_command = "curl http://example.org/"
expected_result = {"method": "GET", "url": "http://example.org/"}
self._test_command(curl_command, expected_result)
def test_get_without_scheme(self):
curl_command = "curl www.example.org"
expected_result = {"method": "GET", "url": "http://www.example.org"}
self._test_command(curl_command, expected_result)
def test_get_basic_auth(self):
curl_command = 'curl "https://api.test.com/" -u "some_username:some_password"'
expected_result = {
"method": "GET",
"url": "https://api.test.com/",
"headers": [
("Authorization", basic_auth_header("some_username", "some_password"))
],
}
self._test_command(curl_command, expected_result)
def test_get_complex(self):
curl_command = (
"curl 'http://httpbin.org/get' -H 'Accept-Encoding: gzip, deflate'"
" -H 'Accept-Language: en-US,en;q=0.9,ru;q=0.8,es;q=0.7' -H 'Upgra"
"de-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (X11; Linux "
"x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/62"
".0.3202.75 Chrome/62.0.3202.75 Safari/537.36' -H 'Accept: text/ht"
"ml,application/xhtml+xml,application/xml;q=0.9,image/webp,image/a"
"png,*/*;q=0.8' -H 'Referer: http://httpbin.org/' -H 'Cookie: _gau"
"ges_unique_year=1; _gauges_unique=1; _gauges_unique_month=1; _gau"
"ges_unique_hour=1' -H 'Connection: keep-alive' --compressed -b '_"
"gauges_unique_day=1'"
)
expected_result = {
"method": "GET",
"url": "http://httpbin.org/get",
"headers": [
("Accept-Encoding", "gzip, deflate"),
("Accept-Language", "en-US,en;q=0.9,ru;q=0.8,es;q=0.7"),
("Upgrade-Insecure-Requests", "1"),
(
"User-Agent",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML"
", like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.32"
"02.75 Safari/537.36",
),
(
"Accept",
"text/html,application/xhtml+xml,application/xml;q=0.9,ima"
"ge/webp,image/apng,*/*;q=0.8",
),
("Referer", "http://httpbin.org/"),
("Connection", "keep-alive"),
],
"cookies": {
"_gauges_unique_year": "1",
"_gauges_unique_hour": "1",
"_gauges_unique_day": "1",
"_gauges_unique": "1",
"_gauges_unique_month": "1",
},
}
self._test_command(curl_command, expected_result)
def test_post(self):
curl_command = (
"curl 'http://httpbin.org/post' -X POST -H 'Cookie: _gauges_unique"
"_year=1; _gauges_unique=1; _gauges_unique_month=1; _gauges_unique"
"_hour=1; _gauges_unique_day=1' -H 'Origin: http://httpbin.org' -H"
" 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: en-US,en;q"
"=0.9,ru;q=0.8,es;q=0.7' -H 'Upgrade-Insecure-Requests: 1' -H 'Use"
"r-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTM"
"L, like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.3202.75 S"
"afari/537.36' -H 'Content-Type: application/x-www-form-urlencoded"
"' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0"
".9,image/webp,image/apng,*/*;q=0.8' -H 'Cache-Control: max-age=0'"
" -H 'Referer: http://httpbin.org/forms/post' -H 'Connection: keep"
"-alive' --data 'custname=John+Smith&custtel=500&custemail=jsmith%"
"40example.org&size=small&topping=cheese&topping=onion&delivery=12"
"%3A15&comments=' --compressed"
)
expected_result = {
"method": "POST",
"url": "http://httpbin.org/post",
"body": "custname=John+Smith&custtel=500&custemail=jsmith%40exampl"
"e.org&size=small&topping=cheese&topping=onion&delivery=12"
"%3A15&comments=",
"cookies": {
"_gauges_unique_year": "1",
"_gauges_unique_hour": "1",
"_gauges_unique_day": "1",
"_gauges_unique": "1",
"_gauges_unique_month": "1",
},
"headers": [
("Origin", "http://httpbin.org"),
("Accept-Encoding", "gzip, deflate"),
("Accept-Language", "en-US,en;q=0.9,ru;q=0.8,es;q=0.7"),
("Upgrade-Insecure-Requests", "1"),
(
"User-Agent",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML"
", like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.32"
"02.75 Safari/537.36",
),
("Content-Type", "application/x-www-form-urlencoded"),
(
"Accept",
"text/html,application/xhtml+xml,application/xml;q=0.9,ima"
"ge/webp,image/apng,*/*;q=0.8",
),
("Cache-Control", "max-age=0"),
("Referer", "http://httpbin.org/forms/post"),
("Connection", "keep-alive"),
],
}
self._test_command(curl_command, expected_result)
def test_post_data_raw(self):
curl_command = (
"curl 'https://www.example.org/' --data-raw 'excerptLength=200&ena"
"bleDidYouMean=true&sortCriteria=ffirstz32xnamez32x201740686%20asc"
"ending&queryFunctions=%5B%5D&rankingFunctions=%5B%5D'"
)
expected_result = {
"method": "POST",
"url": "https://www.example.org/",
"body": (
"excerptLength=200&enableDidYouMean=true&sortCriteria=ffirstz3"
"2xnamez32x201740686%20ascending&queryFunctions=%5B%5D&ranking"
"Functions=%5B%5D"
),
}
self._test_command(curl_command, expected_result)
def test_post_data_raw_with_string_prefix(self):
curl_command = "curl 'https://www.example.org/' --data-raw $'{\"$filters\":\"Filter\u0021\"}'"
expected_result = {
"method": "POST",
"url": "https://www.example.org/",
"body": '{"$filters":"Filter!"}',
}
self._test_command(curl_command, expected_result)
def test_explicit_get_with_data(self):
curl_command = "curl httpbin.org/anything -X GET --data asdf"
expected_result = {
"method": "GET",
"url": "http://httpbin.org/anything",
"body": "asdf",
}
self._test_command(curl_command, expected_result)
def test_patch(self):
curl_command = (
'curl "https://example.com/api/fake" -u "username:password" -H "Ac'
'cept: application/vnd.go.cd.v4+json" -H "Content-Type: applicatio'
'n/json" -X PATCH -d \'{"hostname": "agent02.example.com", "agent'
'_config_state": "Enabled", "resources": ["Java","Linux"], "enviro'
'nments": ["Dev"]}\''
)
expected_result = {
"method": "PATCH",
"url": "https://example.com/api/fake",
"headers": [
("Accept", "application/vnd.go.cd.v4+json"),
("Content-Type", "application/json"),
("Authorization", basic_auth_header("username", "password")),
],
"body": '{"hostname": "agent02.example.com", "agent_config_state"'
': "Enabled", "resources": ["Java","Linux"], "environments'
'": ["Dev"]}',
}
self._test_command(curl_command, expected_result)
def test_delete(self):
curl_command = 'curl -X "DELETE" https://www.url.com/page'
expected_result = {"method": "DELETE", "url": "https://www.url.com/page"}
self._test_command(curl_command, expected_result)
def test_get_silent(self):
curl_command = 'curl --silent "www.example.com"'
expected_result = {"method": "GET", "url": "http://www.example.com"}
assert curl_to_request_kwargs(curl_command) == expected_result
def test_too_few_arguments_error(self):
with pytest.raises(
ValueError,
match=r"too few arguments|the following arguments are required:\s*url",
):
curl_to_request_kwargs("curl")
def test_ignore_unknown_options(self):
# case 1: ignore_unknown_options=True:
with warnings.catch_warnings(): # avoid warning when executing tests
warnings.simplefilter("ignore")
curl_command = "curl --bar --baz http://www.example.com"
expected_result = {"method": "GET", "url": "http://www.example.com"}
assert curl_to_request_kwargs(curl_command) == expected_result
# case 2: ignore_unknown_options=False (raise exception):
with pytest.raises(ValueError, match=r"Unrecognized options:.*--bar.*--baz"):
curl_to_request_kwargs(
"curl --bar --baz http://www.example.com", ignore_unknown_options=False
)
def test_must_start_with_curl_error(self):
with pytest.raises(ValueError, match="A curl command must start"):
curl_to_request_kwargs("carl -X POST http://example.org")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_toplevel.py | tests/test_toplevel.py | import scrapy
def test_version():
assert isinstance(scrapy.__version__, str)
def test_version_info():
assert isinstance(scrapy.version_info, tuple)
def test_request_shortcut():
from scrapy.http import FormRequest, Request # noqa: PLC0415
assert scrapy.Request is Request
assert scrapy.FormRequest is FormRequest
def test_spider_shortcut():
from scrapy.spiders import Spider # noqa: PLC0415
assert scrapy.Spider is Spider
def test_selector_shortcut():
from scrapy.selector import Selector # noqa: PLC0415
assert scrapy.Selector is Selector
def test_item_shortcut():
from scrapy.item import Field, Item # noqa: PLC0415
assert scrapy.Item is Item
assert scrapy.Field is Field
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_defer.py | tests/test_utils_defer.py | from __future__ import annotations
import asyncio
import random
from asyncio import Future
from typing import TYPE_CHECKING, Any
import pytest
from twisted.internet.defer import Deferred, inlineCallbacks, succeed
from scrapy.utils.asyncgen import as_async_generator, collect_asyncgen
from scrapy.utils.defer import (
aiter_errback,
deferred_f_from_coro_f,
deferred_from_coro,
deferred_to_future,
iter_errback,
maybe_deferred_to_future,
mustbe_deferred,
parallel_async,
)
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Awaitable, Callable, Generator
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestMustbeDeferred:
@inlineCallbacks
def test_success_function(self) -> Generator[Deferred[Any], Any, None]:
steps: list[int] = []
def _append(v: int) -> list[int]:
steps.append(v)
return steps
def _assert(v: list[int]) -> None:
assert v == [1, 2] # it is [1] with maybeDeferred
dfd = mustbe_deferred(_append, 1)
dfd.addCallback(_assert)
steps.append(2)  # runs before the deferred fires, so _assert sees [1, 2]
yield dfd
@inlineCallbacks
def test_unfired_deferred(self) -> Generator[Deferred[Any], Any, None]:
steps: list[int] = []
def _append(v: int) -> Deferred[list[int]]:
from twisted.internet import reactor
steps.append(v)
dfd: Deferred[list[int]] = Deferred()
reactor.callLater(0, dfd.callback, steps)
return dfd
def _assert(v: list[int]) -> None:
assert v == [1, 2]
dfd = mustbe_deferred(_append, 1)
dfd.addCallback(_assert)
steps.append(2)  # runs before the deferred fires, so _assert sees [1, 2]
yield dfd
def cb1(value, arg1, arg2):
return f"(cb1 {value} {arg1} {arg2})"
def cb2(value, arg1, arg2):
return succeed(f"(cb2 {value} {arg1} {arg2})")
def cb3(value, arg1, arg2):
return f"(cb3 {value} {arg1} {arg2})"
def cb_fail(value, arg1, arg2):
raise TypeError
def eb1(failure, arg1, arg2):
return f"(eb1 {failure.value.__class__.__name__} {arg1} {arg2})"
class TestIterErrback:
def test_iter_errback_good(self):
def itergood() -> Generator[int, None, None]:
yield from range(10)
errors = []
out = list(iter_errback(itergood(), errors.append))
assert out == list(range(10))
assert not errors
def test_iter_errback_bad(self):
def iterbad() -> Generator[int, None, None]:
for x in range(10):
if x == 5:
1 / 0
yield x
errors = []
out = list(iter_errback(iterbad(), errors.append))
assert out == [0, 1, 2, 3, 4]
assert len(errors) == 1
assert isinstance(errors[0].value, ZeroDivisionError)
class TestAiterErrback:
@deferred_f_from_coro_f
async def test_aiter_errback_good(self):
async def itergood() -> AsyncGenerator[int, None]:
for x in range(10):
yield x
errors = []
out = await collect_asyncgen(aiter_errback(itergood(), errors.append))
assert out == list(range(10))
assert not errors
@deferred_f_from_coro_f
async def test_aiter_errback_bad(self):
async def iterbad() -> AsyncGenerator[int, None]:
for x in range(10):
if x == 5:
1 / 0
yield x
errors = []
out = await collect_asyncgen(aiter_errback(iterbad(), errors.append))
assert out == [0, 1, 2, 3, 4]
assert len(errors) == 1
assert isinstance(errors[0].value, ZeroDivisionError)
class TestAsyncDefTestsuite:
@deferred_f_from_coro_f
async def test_deferred_f_from_coro_f(self):
pass
@deferred_f_from_coro_f
async def test_deferred_f_from_coro_f_generator(self):
yield
@pytest.mark.xfail(reason="Checks that the test is actually executed", strict=True)
@deferred_f_from_coro_f
async def test_deferred_f_from_coro_f_xfail(self):
raise RuntimeError("This is expected to be raised")
class TestParallelAsync:
"""This tests _AsyncCooperatorAdapter by testing parallel_async which is its only usage.
parallel_async is called with the results of a callback (so an iterable of items, requests and None,
with arbitrary delays between values), and it uses Scraper._process_spidermw_output as the callable
(so a callable that returns a Deferred for an item, which will fire after pipelines process it, and
None for everything else). The concurrent task count is the CONCURRENT_ITEMS setting.
We want to test different concurrency values compared to the iterable length.
We also want to simulate the real usage, with arbitrary delays between getting the values
from the iterable. We also want to simulate sync and async results from the callable.
"""
CONCURRENT_ITEMS = 50
@staticmethod
def callable(o: int, results: list[int]) -> Deferred[None] | None:
from twisted.internet import reactor
if random.random() < 0.4:
# simulate async processing
dfd: Deferred[None] = Deferred()
dfd.addCallback(lambda _: results.append(o))
delay = random.random() / 8
reactor.callLater(delay, dfd.callback, None)
return dfd
# simulate trivial sync processing
results.append(o)
return None
def callable_wrapped(
self,
o: int,
results: list[int],
parallel_count: list[int],
max_parallel_count: list[int],
) -> Deferred[None] | None:
parallel_count[0] += 1
max_parallel_count[0] = max(max_parallel_count[0], parallel_count[0])
dfd = self.callable(o, results)
def decrement(_: Any = None) -> None:
assert parallel_count[0] > 0, parallel_count[0]
parallel_count[0] -= 1
if dfd is not None:
dfd.addBoth(decrement)
else:
decrement()
return dfd
@staticmethod
def get_async_iterable(length: int) -> AsyncGenerator[int, None]:
# simulate a simple callback without delays between results
return as_async_generator(range(length))
@staticmethod
async def get_async_iterable_with_delays(length: int) -> AsyncGenerator[int, None]:
# simulate a callback with delays between some of the results
from twisted.internet import reactor
for i in range(length):
if random.random() < 0.1:
dfd: Deferred[None] = Deferred()
delay = random.random() / 20
reactor.callLater(delay, dfd.callback, None)
await maybe_deferred_to_future(dfd)
yield i
@inlineCallbacks
def test_simple(self):
for length in [20, 50, 100]:
parallel_count = [0]
max_parallel_count = [0]
results = []
ait = self.get_async_iterable(length)
dl = parallel_async(
ait,
self.CONCURRENT_ITEMS,
self.callable_wrapped,
results,
parallel_count,
max_parallel_count,
)
yield dl
assert list(range(length)) == sorted(results)
assert parallel_count[0] == 0
assert max_parallel_count[0] <= self.CONCURRENT_ITEMS, max_parallel_count[0]
@inlineCallbacks
def test_delays(self):
for length in [20, 50, 100]:
parallel_count = [0]
max_parallel_count = [0]
results = []
ait = self.get_async_iterable_with_delays(length)
dl = parallel_async(
ait,
self.CONCURRENT_ITEMS,
self.callable_wrapped,
results,
parallel_count,
max_parallel_count,
)
yield dl
assert list(range(length)) == sorted(results)
assert parallel_count[0] == 0
assert max_parallel_count[0] <= self.CONCURRENT_ITEMS, max_parallel_count[0]
class TestDeferredFromCoro:
def test_deferred(self):
d = Deferred()
result = deferred_from_coro(d)
assert isinstance(result, Deferred)
assert result is d
def test_object(self):
result = deferred_from_coro(42)
assert result == 42
@inlineCallbacks
def test_coroutine(self):
async def coroutine() -> int:
return 42
result = deferred_from_coro(coroutine())
assert isinstance(result, Deferred)
coro_result = yield result
assert coro_result == 42
@pytest.mark.only_asyncio
@inlineCallbacks
def test_coroutine_asyncio(self):
async def coroutine() -> int:
await asyncio.sleep(0.01)
return 42
result = deferred_from_coro(coroutine())
assert isinstance(result, Deferred)
coro_result = yield result
assert coro_result == 42
@pytest.mark.only_asyncio
@inlineCallbacks
def test_future(self):
future = Future()
result = deferred_from_coro(future)
assert isinstance(result, Deferred)
future.set_result(42)
future_result = yield result
assert future_result == 42
class TestDeferredFFromCoroF:
@inlineCallbacks
def _assert_result(
self, c_f: Callable[[], Awaitable[int]]
) -> Generator[Deferred[Any], Any, None]:
d_f = deferred_f_from_coro_f(c_f)
d = d_f()
assert isinstance(d, Deferred)
result = yield d
assert result == 42
@inlineCallbacks
def test_coroutine(self):
async def c_f() -> int:
return 42
yield self._assert_result(c_f)
@inlineCallbacks
def test_coroutine_asyncio(self):
async def c_f() -> int:
return 42
yield self._assert_result(c_f)
@pytest.mark.only_asyncio
@inlineCallbacks
def test_future(self):
def c_f() -> Future[int]:
f: Future[int] = Future()
f.set_result(42)
return f
yield self._assert_result(c_f)
@pytest.mark.only_asyncio
class TestDeferredToFuture:
@deferred_f_from_coro_f
async def test_deferred(self):
d = Deferred()
result = deferred_to_future(d)
assert isinstance(result, Future)
d.callback(42)
future_result = await result
assert future_result == 42
@deferred_f_from_coro_f
async def test_wrapped_coroutine(self):
async def c_f() -> int:
return 42
d = deferred_from_coro(c_f())
result = deferred_to_future(d)
assert isinstance(result, Future)
future_result = await result
assert future_result == 42
@deferred_f_from_coro_f
async def test_wrapped_coroutine_asyncio(self):
async def c_f() -> int:
await asyncio.sleep(0.01)
return 42
d = deferred_from_coro(c_f())
result = deferred_to_future(d)
assert isinstance(result, Future)
future_result = await result
assert future_result == 42
@pytest.mark.only_asyncio
class TestMaybeDeferredToFutureAsyncio:
@deferred_f_from_coro_f
async def test_deferred(self):
d = Deferred()
result = maybe_deferred_to_future(d)
assert isinstance(result, Future)
d.callback(42)
future_result = await result
assert future_result == 42
@deferred_f_from_coro_f
async def test_wrapped_coroutine(self):
async def c_f() -> int:
return 42
d = deferred_from_coro(c_f())
result = maybe_deferred_to_future(d)
assert isinstance(result, Future)
future_result = await result
assert future_result == 42
@deferred_f_from_coro_f
async def test_wrapped_coroutine_asyncio(self):
async def c_f() -> int:
await asyncio.sleep(0.01)
return 42
d = deferred_from_coro(c_f())
result = maybe_deferred_to_future(d)
assert isinstance(result, Future)
future_result = await result
assert future_result == 42
@pytest.mark.only_not_asyncio
class TestMaybeDeferredToFutureNotAsyncio:
def test_deferred(self):
d = Deferred()
result = maybe_deferred_to_future(d)
assert isinstance(result, Deferred)
assert result is d
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloaderslotssettings.py | tests/test_downloaderslotssettings.py | import time
from typing import Any
import pytest
from twisted.internet.defer import inlineCallbacks
from scrapy import Request
from scrapy.core.downloader import Downloader, Slot
from scrapy.crawler import CrawlerRunner
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import MetaSpider
class DownloaderSlotsSettingsTestSpider(MetaSpider):
name = "downloader_slots"
custom_settings = {
"DOWNLOAD_DELAY": 1,
"RANDOMIZE_DOWNLOAD_DELAY": False,
"DOWNLOAD_SLOTS": {
"quotes.toscrape.com": {
"concurrency": 1,
"delay": 2,
"randomize_delay": False,
"throttle": False,
},
"books.toscrape.com": {"delay": 3, "randomize_delay": False},
},
}
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.default_slot = self.mockserver.host
self.times: dict[str, list[float]] = {}
async def start(self):
slots = [*self.custom_settings.get("DOWNLOAD_SLOTS", {}), None]
for slot in slots:
url = self.mockserver.url(f"/?downloader_slot={slot}")
self.times[slot or self.default_slot] = []
yield Request(url, callback=self.parse, meta={"download_slot": slot})
def parse(self, response):
slot = response.meta.get("download_slot", self.default_slot)
self.times[slot].append(time.time())
url = self.mockserver.url(f"/?downloader_slot={slot}&req=2")
yield Request(url, callback=self.not_parse, meta={"download_slot": slot})
def not_parse(self, response):
slot = response.meta.get("download_slot", self.default_slot)
self.times[slot].append(time.time())
class TestCrawl:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.runner = CrawlerRunner()
@inlineCallbacks
def test_delay(self):
crawler = get_crawler(DownloaderSlotsSettingsTestSpider)
yield crawler.crawl(mockserver=self.mockserver)
slots = crawler.engine.downloader.slots
times = crawler.spider.times
tolerance = 0.3
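# delays_real is the observed gap between the two responses of each slot;
# error_delta is the relative error 1 - min/max between the observed and
# configured delay: 0 for a perfect match, approaching 1 as they diverge.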
delays_real = {k: v[1] - v[0] for k, v in times.items()}
error_delta = {
k: 1 - min(delays_real[k], v.delay) / max(delays_real[k], v.delay)
for k, v in slots.items()
}
assert max(list(error_delta.values())) < tolerance
def test_params():
params = {
"concurrency": 1,
"delay": 2,
"randomize_delay": False,
}
settings = {
"DOWNLOAD_SLOTS": {
"example.com": params,
},
}
crawler = get_crawler(DefaultSpider, settings_dict=settings)
crawler.spider = crawler._create_spider()
downloader = Downloader(crawler)
downloader._slot_gc_loop.stop() # Prevent an unclean reactor.
request = Request("https://example.com")
_, actual = downloader._get_slot(request)
expected = Slot(**params)
for param in params:
assert getattr(expected, param) == getattr(actual, param), (
f"Slot.{param}: {getattr(expected, param)!r} != {getattr(actual, param)!r}"
)
def test_get_slot_deprecated_spider_arg():
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
downloader = Downloader(crawler)
downloader._slot_gc_loop.stop() # Prevent an unclean reactor.
request = Request("https://example.com")
with pytest.warns(
ScrapyDeprecationWarning,
match=r"Passing a 'spider' argument to Downloader\._get_slot\(\) is deprecated",
):
key1, slot1 = downloader._get_slot(request, spider=crawler.spider)
key2, slot2 = downloader._get_slot(request)
assert key1 == key2
assert slot1 == slot2
@pytest.mark.parametrize(
"priority_queue_class",
[
"scrapy.pqueues.ScrapyPriorityQueue",
"scrapy.pqueues.DownloaderAwarePriorityQueue",
],
)
@deferred_f_from_coro_f
async def test_none_slot_with_priority_queue(
mockserver: MockServer, priority_queue_class: str
) -> None:
"""Test specific cases for None slot handling with different priority queues."""
crawler = get_crawler(
DownloaderSlotsSettingsTestSpider,
settings_dict={"SCHEDULER_PRIORITY_QUEUE": priority_queue_class},
)
await crawler.crawl_async(mockserver=mockserver)
assert isinstance(crawler.spider, DownloaderSlotsSettingsTestSpider)
assert hasattr(crawler.spider, "times")
assert None not in crawler.spider.times
assert crawler.spider.default_slot in crawler.spider.times
assert len(crawler.spider.times[crawler.spider.default_slot]) == 2
assert crawler.stats
stats = crawler.stats
assert stats.get_value("spider_exceptions", 0) == 0
assert stats.get_value("downloader/exception_count", 0) == 0
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_spider.py | tests/test_utils_spider.py | from scrapy import Spider
from scrapy.http import Request
from scrapy.item import Item
from scrapy.utils.spider import iter_spider_classes, iterate_spider_output
class MySpider1(Spider):
name = "myspider1"
class MySpider2(Spider):
name = "myspider2"
def test_iterate_spider_output():
i = Item()
r = Request("http://scrapytest.org")
o = object()
assert list(iterate_spider_output(i)) == [i]
assert list(iterate_spider_output(r)) == [r]
assert list(iterate_spider_output(o)) == [o]
assert list(iterate_spider_output([r, i, o])) == [r, i, o]
def test_iter_spider_classes():
import tests.test_utils_spider # noqa: PLW0406,PLC0415 # pylint: disable=import-self
it = iter_spider_classes(tests.test_utils_spider)
assert set(it) == {MySpider1, MySpider2}
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_scheduler_base.py | tests/test_scheduler_base.py | from __future__ import annotations
from urllib.parse import urljoin
import pytest
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from scrapy.core.scheduler import BaseScheduler
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.request import fingerprint
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
PATHS = ["/a", "/b", "/c"]
URLS = [urljoin("https://example.org", p) for p in PATHS]
class MinimalScheduler:
def __init__(self) -> None:
self.requests: dict[bytes, Request] = {}
def has_pending_requests(self) -> bool:
return bool(self.requests)
def enqueue_request(self, request: Request) -> bool:
fp = fingerprint(request)
if fp not in self.requests:
self.requests[fp] = request
return True
return False
def next_request(self) -> Request | None:
if self.has_pending_requests():
_, request = self.requests.popitem()
return request
return None
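# MinimalScheduler implements only the methods the engine strictly needs
# (has_pending_requests, enqueue_request, next_request); open(), close()
# and __len__() are optional, as TestMinimalScheduler verifies below.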
class SimpleScheduler(MinimalScheduler):
def open(self, spider: Spider) -> defer.Deferred:
return defer.succeed("open")
def close(self, reason: str) -> defer.Deferred:
return defer.succeed("close")
def __len__(self) -> int:
return len(self.requests)
class PathsSpider(Spider):
name = "paths"
def __init__(self, mockserver, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_urls = map(mockserver.url, PATHS)
def parse(self, response):
return {"path": urlparse_cached(response).path}
class InterfaceCheckMixin:
def test_scheduler_class(self):
assert isinstance(self.scheduler, BaseScheduler)
assert issubclass(self.scheduler.__class__, BaseScheduler)
class TestBaseScheduler(InterfaceCheckMixin):
def setup_method(self):
self.scheduler = BaseScheduler()
def test_methods(self):
assert self.scheduler.open(Spider("foo")) is None
assert self.scheduler.close("finished") is None
with pytest.raises(NotImplementedError):
self.scheduler.has_pending_requests()
with pytest.raises(NotImplementedError):
self.scheduler.enqueue_request(Request("https://example.org"))
with pytest.raises(NotImplementedError):
self.scheduler.next_request()
class TestMinimalScheduler(InterfaceCheckMixin):
def setup_method(self):
self.scheduler = MinimalScheduler()
def test_open_close(self):
with pytest.raises(AttributeError):
self.scheduler.open(Spider("foo"))
with pytest.raises(AttributeError):
self.scheduler.close("finished")
def test_len(self):
with pytest.raises(AttributeError):
self.scheduler.__len__()
with pytest.raises(TypeError):
len(self.scheduler)
def test_enqueue_dequeue(self):
assert not self.scheduler.has_pending_requests()
for url in URLS:
assert self.scheduler.enqueue_request(Request(url))
assert not self.scheduler.enqueue_request(Request(url))
assert self.scheduler.has_pending_requests()
dequeued = []
while self.scheduler.has_pending_requests():
request = self.scheduler.next_request()
dequeued.append(request.url)
assert set(dequeued) == set(URLS)
assert not self.scheduler.has_pending_requests()
class TestSimpleScheduler(InterfaceCheckMixin):
def setup_method(self):
self.scheduler = SimpleScheduler()
@inlineCallbacks
def test_enqueue_dequeue(self):
open_result = yield self.scheduler.open(Spider("foo"))
assert open_result == "open"
assert not self.scheduler.has_pending_requests()
for url in URLS:
assert self.scheduler.enqueue_request(Request(url))
assert not self.scheduler.enqueue_request(Request(url))
assert self.scheduler.has_pending_requests()
assert len(self.scheduler) == len(URLS)
dequeued = []
while self.scheduler.has_pending_requests():
request = self.scheduler.next_request()
dequeued.append(request.url)
assert set(dequeued) == set(URLS)
assert not self.scheduler.has_pending_requests()
assert len(self.scheduler) == 0
close_result = yield self.scheduler.close("")
assert close_result == "close"
class TestMinimalSchedulerCrawl:
scheduler_cls = MinimalScheduler
@inlineCallbacks
def test_crawl(self):
with MockServer() as mockserver:
settings = {
"SCHEDULER": self.scheduler_cls,
}
with LogCapture() as log:
crawler = get_crawler(PathsSpider, settings)
yield crawler.crawl(mockserver)
for path in PATHS:
assert f"{{'path': '{path}'}}" in str(log)
assert f"'item_scraped_count': {len(PATHS)}" in str(log)
class TestSimpleSchedulerCrawl(TestMinimalSchedulerCrawl):
scheduler_cls = SimpleScheduler
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_squeues_request.py | tests/test_squeues_request.py | """
Queues that handle requests
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
import pytest
import queuelib
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.squeues import (
FifoMemoryQueue,
LifoMemoryQueue,
MarshalFifoDiskQueue,
MarshalLifoDiskQueue,
PickleFifoDiskQueue,
PickleLifoDiskQueue,
)
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from scrapy.crawler import Crawler
HAVE_PEEK = hasattr(queuelib.queue.FifoMemoryQueue, "peek")
@pytest.fixture
def crawler() -> Crawler:
return get_crawler(Spider)
class TestRequestQueueBase(ABC):
@property
@abstractmethod
def is_fifo(self) -> bool:
raise NotImplementedError
@pytest.mark.parametrize("test_peek", [True, False])
def test_one_element(self, q: queuelib.queue.BaseQueue, test_peek: bool):
if test_peek and not HAVE_PEEK:
pytest.skip("The queuelib queues do not define peek")
if not test_peek and HAVE_PEEK:
pytest.skip("The queuelib queues define peek")
assert len(q) == 0
if test_peek:
assert q.peek() is None
assert q.pop() is None
req = Request("http://www.example.com")
q.push(req)
assert len(q) == 1
if test_peek:
result = q.peek()
assert result is not None
assert result.url == req.url
else:
with pytest.raises(
NotImplementedError,
match="The underlying queue class does not implement 'peek'",
):
q.peek()
result = q.pop()
assert result is not None
assert result.url == req.url
assert len(q) == 0
if test_peek:
assert q.peek() is None
assert q.pop() is None
q.close()
@pytest.mark.parametrize("test_peek", [True, False])
def test_order(self, q: queuelib.queue.BaseQueue, test_peek: bool):
if test_peek and not HAVE_PEEK:
pytest.skip("The queuelib queues do not define peek")
if not test_peek and HAVE_PEEK:
pytest.skip("The queuelib queues define peek")
assert len(q) == 0
if test_peek:
assert q.peek() is None
assert q.pop() is None
req1 = Request("http://www.example.com/1")
req2 = Request("http://www.example.com/2")
req3 = Request("http://www.example.com/3")
q.push(req1)
q.push(req2)
q.push(req3)
if not test_peek:
with pytest.raises(
NotImplementedError,
match="The underlying queue class does not implement 'peek'",
):
q.peek()
reqs = [req1, req2, req3] if self.is_fifo else [req3, req2, req1]
for i, req in enumerate(reqs):
assert len(q) == 3 - i
if test_peek:
result = q.peek()
assert result is not None
assert result.url == req.url
result = q.pop()
assert result is not None
assert result.url == req.url
assert len(q) == 0
if test_peek:
assert q.peek() is None
assert q.pop() is None
q.close()
class TestPickleFifoDiskQueueRequest(TestRequestQueueBase):
is_fifo = True
@pytest.fixture
def q(self, crawler, tmp_path):
return PickleFifoDiskQueue.from_crawler(
crawler=crawler, key=str(tmp_path / "pickle" / "fifo")
)
class TestPickleLifoDiskQueueRequest(TestRequestQueueBase):
is_fifo = False
@pytest.fixture
def q(self, crawler, tmp_path):
return PickleLifoDiskQueue.from_crawler(
crawler=crawler, key=str(tmp_path / "pickle" / "lifo")
)
class TestMarshalFifoDiskQueueRequest(TestRequestQueueBase):
is_fifo = True
@pytest.fixture
def q(self, crawler, tmp_path):
return MarshalFifoDiskQueue.from_crawler(
crawler=crawler, key=str(tmp_path / "marshal" / "fifo")
)
class TestMarshalLifoDiskQueueRequest(TestRequestQueueBase):
is_fifo = False
@pytest.fixture
def q(self, crawler, tmp_path):
return MarshalLifoDiskQueue.from_crawler(
crawler=crawler, key=str(tmp_path / "marshal" / "lifo")
)
class TestFifoMemoryQueueRequest(TestRequestQueueBase):
is_fifo = True
@pytest.fixture
def q(self, crawler):
return FifoMemoryQueue.from_crawler(crawler=crawler)
class TestLifoMemoryQueueRequest(TestRequestQueueBase):
is_fifo = False
@pytest.fixture
def q(self, crawler):
return LifoMemoryQueue.from_crawler(crawler=crawler)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_closespider.py | tests/test_closespider.py | from twisted.internet.defer import inlineCallbacks
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import (
ErrorSpider,
FollowAllSpider,
ItemSpider,
MaxItemsAndRequestsSpider,
SlowSpider,
)
class TestCloseSpider:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
@inlineCallbacks
def test_closespider_itemcount(self):
close_on = 5
crawler = get_crawler(ItemSpider, {"CLOSESPIDER_ITEMCOUNT": close_on})
yield crawler.crawl(mockserver=self.mockserver)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_itemcount"
itemcount = crawler.stats.get_value("item_scraped_count")
assert itemcount >= close_on
@inlineCallbacks
def test_closespider_pagecount(self):
close_on = 5
crawler = get_crawler(FollowAllSpider, {"CLOSESPIDER_PAGECOUNT": close_on})
yield crawler.crawl(mockserver=self.mockserver)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_pagecount"
pagecount = crawler.stats.get_value("response_received_count")
assert pagecount >= close_on
@inlineCallbacks
def test_closespider_pagecount_no_item(self):
close_on = 5
max_items = 5
max_requests = close_on + max_items
crawler = get_crawler(
MaxItemsAndRequestsSpider,
{
"CLOSESPIDER_PAGECOUNT_NO_ITEM": close_on,
},
)
yield crawler.crawl(
max_items=max_items, max_requests=max_requests, mockserver=self.mockserver
)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_pagecount_no_item"
pagecount = crawler.stats.get_value("response_received_count")
itemcount = crawler.stats.get_value("item_scraped_count")
assert pagecount <= close_on + itemcount
@inlineCallbacks
def test_closespider_pagecount_no_item_with_pagecount(self):
close_on_pagecount_no_item = 5
close_on_pagecount = 20
crawler = get_crawler(
FollowAllSpider,
{
"CLOSESPIDER_PAGECOUNT_NO_ITEM": close_on_pagecount_no_item,
"CLOSESPIDER_PAGECOUNT": close_on_pagecount,
},
)
yield crawler.crawl(mockserver=self.mockserver)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_pagecount_no_item"
pagecount = crawler.stats.get_value("response_received_count")
assert pagecount < close_on_pagecount
@inlineCallbacks
def test_closespider_errorcount(self):
close_on = 5
crawler = get_crawler(ErrorSpider, {"CLOSESPIDER_ERRORCOUNT": close_on})
yield crawler.crawl(total=1000000, mockserver=self.mockserver)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_errorcount"
key = f"spider_exceptions/{crawler.spider.exception_cls.__name__}"
errorcount = crawler.stats.get_value(key)
assert crawler.stats.get_value("spider_exceptions/count") >= close_on
assert errorcount >= close_on
@inlineCallbacks
def test_closespider_timeout(self):
close_on = 0.1
crawler = get_crawler(FollowAllSpider, {"CLOSESPIDER_TIMEOUT": close_on})
yield crawler.crawl(total=1000000, mockserver=self.mockserver)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_timeout"
total_seconds = crawler.stats.get_value("elapsed_time_seconds")
assert total_seconds >= close_on
@inlineCallbacks
def test_closespider_timeout_no_item(self):
timeout = 1
crawler = get_crawler(SlowSpider, {"CLOSESPIDER_TIMEOUT_NO_ITEM": timeout})
yield crawler.crawl(n=3, mockserver=self.mockserver)
reason = crawler.spider.meta["close_reason"]
assert reason == "closespider_timeout_no_item"
total_seconds = crawler.stats.get_value("elapsed_time_seconds")
assert total_seconds >= timeout
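# Hedged usage sketch (not part of the suite above): the CLOSESPIDER_*
# settings exercised here are ordinary crawler settings, e.g.:
#
#     custom_settings = {
#         "CLOSESPIDER_ITEMCOUNT": 100,  # close after ~100 scraped items
#         "CLOSESPIDER_TIMEOUT": 3600,   # close after one hour
#     }
#
# The close reason is then passed to spider.closed(reason), which is how
# MetaSpider records meta["close_reason"] for these assertions.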
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_offsite.py | tests/test_downloadermiddleware_offsite.py | import warnings
import pytest
from scrapy import Request, Spider
from scrapy.downloadermiddlewares.offsite import OffsiteMiddleware
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.test import get_crawler
UNSET = object()
@pytest.mark.parametrize(
("allowed_domain", "url", "allowed"),
[
("example.com", "http://example.com/1", True),
("example.com", "http://example.org/1", False),
("example.com", "http://sub.example.com/1", True),
("sub.example.com", "http://sub.example.com/1", True),
("sub.example.com", "http://example.com/1", False),
("example.com", "http://example.com:8000/1", True),
("example.com", "http://example.org/example.com", False),
("example.com", "http://example.org/foo.example.com", False),
("example.com", "http://example.com.example", False),
("a.example", "http://nota.example", False),
("b.a.example", "http://notb.a.example", False),
],
)
def test_process_request_domain_filtering(allowed_domain, url, allowed):
crawler = get_crawler(Spider)
crawler.spider = crawler._create_spider(name="a", allowed_domains=[allowed_domain])
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
request = Request(url)
if allowed:
assert mw.process_request(request) is None
else:
with pytest.raises(IgnoreRequest):
mw.process_request(request)
@pytest.mark.parametrize(
("value", "filtered"),
[
(UNSET, True),
(None, True),
(False, True),
(True, False),
],
)
def test_process_request_dont_filter(value, filtered):
crawler = get_crawler(Spider)
crawler.spider = crawler._create_spider(name="a", allowed_domains=["a.example"])
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
kwargs = {}
if value is not UNSET:
kwargs["dont_filter"] = value
request = Request("https://b.example", **kwargs)
if filtered:
with pytest.raises(IgnoreRequest):
mw.process_request(request)
else:
assert mw.process_request(request) is None
@pytest.mark.parametrize(
("allow_offsite", "dont_filter", "filtered"),
[
(True, UNSET, False),
(True, None, False),
(True, False, False),
(True, True, False),
(False, UNSET, True),
(False, None, True),
(False, False, True),
(False, True, False),
],
)
def test_process_request_allow_offsite(allow_offsite, dont_filter, filtered):
crawler = get_crawler(Spider)
crawler.spider = crawler._create_spider(name="a", allowed_domains=["a.example"])
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
kwargs = {"meta": {}}
if allow_offsite is not UNSET:
kwargs["meta"]["allow_offsite"] = allow_offsite
if dont_filter is not UNSET:
kwargs["dont_filter"] = dont_filter
request = Request("https://b.example", **kwargs)
if filtered:
with pytest.raises(IgnoreRequest):
mw.process_request(request)
else:
assert mw.process_request(request) is None
@pytest.mark.parametrize(
"value",
[
UNSET,
None,
[],
],
)
def test_process_request_no_allowed_domains(value):
crawler = get_crawler(Spider)
kwargs = {}
if value is not UNSET:
kwargs["allowed_domains"] = value
crawler.spider = crawler._create_spider(name="a", **kwargs)
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
request = Request("https://example.com")
assert mw.process_request(request) is None
def test_process_request_invalid_domains():
crawler = get_crawler(Spider)
allowed_domains = ["a.example", None, "http:////b.example", "//c.example"]
crawler.spider = crawler._create_spider(name="a", allowed_domains=allowed_domains)
mw = OffsiteMiddleware.from_crawler(crawler)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
mw.spider_opened(crawler.spider)
request = Request("https://a.example")
assert mw.process_request(request) is None
for letter in ("b", "c"):
request = Request(f"https://{letter}.example")
with pytest.raises(IgnoreRequest):
mw.process_request(request)
@pytest.mark.parametrize(
("allowed_domain", "url", "allowed"),
[
("example.com", "http://example.com/1", True),
("example.com", "http://example.org/1", False),
("example.com", "http://sub.example.com/1", True),
("sub.example.com", "http://sub.example.com/1", True),
("sub.example.com", "http://example.com/1", False),
("example.com", "http://example.com:8000/1", True),
("example.com", "http://example.org/example.com", False),
("example.com", "http://example.org/foo.example.com", False),
("example.com", "http://example.com.example", False),
("a.example", "http://nota.example", False),
("b.a.example", "http://notb.a.example", False),
],
)
def test_request_scheduled_domain_filtering(allowed_domain, url, allowed):
crawler = get_crawler(Spider)
crawler.spider = crawler._create_spider(name="a", allowed_domains=[allowed_domain])
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
request = Request(url)
if allowed:
assert mw.request_scheduled(request, crawler.spider) is None
else:
with pytest.raises(IgnoreRequest):
mw.request_scheduled(request, crawler.spider)
@pytest.mark.parametrize(
("value", "filtered"),
[
(UNSET, True),
(None, True),
(False, True),
(True, False),
],
)
def test_request_scheduled_dont_filter(value, filtered):
crawler = get_crawler(Spider)
crawler.spider = crawler._create_spider(name="a", allowed_domains=["a.example"])
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
kwargs = {}
if value is not UNSET:
kwargs["dont_filter"] = value
request = Request("https://b.example", **kwargs)
if filtered:
with pytest.raises(IgnoreRequest):
mw.request_scheduled(request, crawler.spider)
else:
assert mw.request_scheduled(request, crawler.spider) is None
@pytest.mark.parametrize(
"value",
[
UNSET,
None,
[],
],
)
def test_request_scheduled_no_allowed_domains(value):
crawler = get_crawler(Spider)
kwargs = {}
if value is not UNSET:
kwargs["allowed_domains"] = value
crawler.spider = crawler._create_spider(name="a", **kwargs)
mw = OffsiteMiddleware.from_crawler(crawler)
mw.spider_opened(crawler.spider)
request = Request("https://example.com")
assert mw.request_scheduled(request, crawler.spider) is None
def test_request_scheduled_invalid_domains():
crawler = get_crawler(Spider)
allowed_domains = ["a.example", None, "http:////b.example", "//c.example"]
crawler.spider = crawler._create_spider(name="a", allowed_domains=allowed_domains)
mw = OffsiteMiddleware.from_crawler(crawler)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
mw.spider_opened(crawler.spider)
request = Request("https://a.example")
assert mw.request_scheduled(request, crawler.spider) is None
for letter in ("b", "c"):
request = Request(f"https://{letter}.example")
with pytest.raises(IgnoreRequest):
mw.request_scheduled(request, crawler.spider)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_squeues.py | tests/test_squeues.py | import pickle
import sys
import pytest
from queuelib.tests import test_queue as t
from scrapy.http import Request
from scrapy.item import Field, Item
from scrapy.loader import ItemLoader
from scrapy.selector import Selector
from scrapy.squeues import (
_MarshalFifoSerializationDiskQueue,
_MarshalLifoSerializationDiskQueue,
_PickleFifoSerializationDiskQueue,
_PickleLifoSerializationDiskQueue,
)
class MyItem(Item):
name = Field()
def _test_processor(x):
    return x + x
class MyLoader(ItemLoader):
    default_item_class = MyItem
    name_out = staticmethod(_test_processor)
def nonserializable_object_test(self):
q = self.queue()
with pytest.raises(
ValueError,
match=r"unmarshallable object|Can't (get|pickle) local object|Can't pickle .*: it's not found as",
):
q.push(lambda x: x)
# Selectors should fail (lxml.html.HtmlElement objects can't be pickled)
sel = Selector(text="<html><body><p>some text</p></body></html>")
with pytest.raises(
ValueError, match=r"unmarshallable object|can't pickle Selector objects"
):
q.push(sel)
class FifoDiskQueueTestMixin:
def test_serialize(self):
q = self.queue()
q.push("a")
q.push(123)
q.push({"a": "dict"})
assert q.pop() == "a"
assert q.pop() == 123
assert q.pop() == {"a": "dict"}
test_nonserializable_object = nonserializable_object_test
class MarshalFifoDiskQueueTest(t.FifoDiskQueueTest, FifoDiskQueueTestMixin):
chunksize = 100000
def queue(self):
return _MarshalFifoSerializationDiskQueue(self.qpath, chunksize=self.chunksize)
class ChunkSize1MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
chunksize = 1
class ChunkSize2MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
chunksize = 2
class ChunkSize3MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
chunksize = 3
class ChunkSize4MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
chunksize = 4
class PickleFifoDiskQueueTest(t.FifoDiskQueueTest, FifoDiskQueueTestMixin):
chunksize = 100000
def queue(self):
return _PickleFifoSerializationDiskQueue(self.qpath, chunksize=self.chunksize)
def test_serialize_item(self):
q = self.queue()
i = MyItem(name="foo")
q.push(i)
i2 = q.pop()
assert isinstance(i2, MyItem)
assert i == i2
def test_serialize_loader(self):
q = self.queue()
loader = MyLoader()
q.push(loader)
loader2 = q.pop()
assert isinstance(loader2, MyLoader)
assert loader2.default_item_class is MyItem
assert loader2.name_out("x") == "xx"
def test_serialize_request_recursive(self):
q = self.queue()
r = Request("http://www.example.com")
r.meta["request"] = r
q.push(r)
r2 = q.pop()
assert isinstance(r2, Request)
assert r.url == r2.url
assert r2.meta["request"] is r2
def test_non_pickable_object(self):
q = self.queue()
with pytest.raises(
ValueError,
match=r"Can't (get|pickle) local object|Can't pickle .*: it's not found as",
) as exc_info:
q.push(lambda x: x)
if hasattr(sys, "pypy_version_info"):
assert isinstance(exc_info.value.__context__, pickle.PicklingError)
else:
assert isinstance(exc_info.value.__context__, AttributeError)
sel = Selector(text="<html><body><p>some text</p></body></html>")
with pytest.raises(
ValueError, match="can't pickle Selector objects"
) as exc_info:
q.push(sel)
assert isinstance(exc_info.value.__context__, TypeError)
q.close()
class ChunkSize1PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
chunksize = 1
class ChunkSize2PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
chunksize = 2
class ChunkSize3PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
chunksize = 3
class ChunkSize4PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
chunksize = 4
class LifoDiskQueueTestMixin:
def test_serialize(self):
q = self.queue()
q.push("a")
q.push(123)
q.push({"a": "dict"})
assert q.pop() == {"a": "dict"}
assert q.pop() == 123
assert q.pop() == "a"
test_nonserializable_object = nonserializable_object_test
class MarshalLifoDiskQueueTest(t.LifoDiskQueueTest, LifoDiskQueueTestMixin):
def queue(self):
return _MarshalLifoSerializationDiskQueue(self.qpath)
class PickleLifoDiskQueueTest(t.LifoDiskQueueTest, LifoDiskQueueTestMixin):
def queue(self):
return _PickleLifoSerializationDiskQueue(self.qpath)
def test_serialize_item(self):
q = self.queue()
i = MyItem(name="foo")
q.push(i)
i2 = q.pop()
assert isinstance(i2, MyItem)
assert i == i2
def test_serialize_loader(self):
q = self.queue()
loader = MyLoader()
q.push(loader)
loader2 = q.pop()
assert isinstance(loader2, MyLoader)
assert loader2.default_item_class is MyItem
assert loader2.name_out("x") == "xx"
def test_serialize_request_recursive(self):
q = self.queue()
r = Request("http://www.example.com")
r.meta["request"] = r
q.push(r)
r2 = q.pop()
assert isinstance(r2, Request)
assert r.url == r2.url
assert r2.meta["request"] is r2
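# Hedged sketch of the practical difference exercised above: marshal handles
# only builtin types, while pickle also round-trips Items, loaders and
# Requests. Assuming a writable `qpath`:
#
#     q = _MarshalFifoSerializationDiskQueue(qpath)
#     q.push({"ok": 1})          # fine: builtins are marshallable
#     q.push(Request("data:,"))  # expected to raise ValueError (unmarshallable)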
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/spiders.py | tests/spiders.py | """
Some spiders used for testing and benchmarking
"""
from __future__ import annotations
import asyncio
import time
from urllib.parse import urlencode
from twisted.internet import defer
from scrapy import signals
from scrapy.exceptions import StopDownload
from scrapy.http import Request
from scrapy.item import Item
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Spider
from scrapy.spiders.crawl import CrawlSpider, Rule
from scrapy.utils.defer import deferred_to_future, maybe_deferred_to_future
from scrapy.utils.test import get_from_asyncio_queue, get_web_client_agent_req
class MockServerSpider(Spider):
def __init__(self, mockserver=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.mockserver = mockserver
class MetaSpider(MockServerSpider):
name = "meta"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.meta = {}
def closed(self, reason):
self.meta["close_reason"] = reason
class FollowAllSpider(MetaSpider):
name = "follow"
link_extractor = LinkExtractor()
def __init__(
self, total=10, show=20, order="rand", maxlatency=0.0, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.urls_visited = []
self.times = []
qargs = {"total": total, "show": show, "order": order, "maxlatency": maxlatency}
url = self.mockserver.url(f"/follow?{urlencode(qargs, doseq=True)}")
self.start_urls = [url]
def parse(self, response):
self.urls_visited.append(response.url)
self.times.append(time.time())
for link in self.link_extractor.extract_links(response):
yield Request(link.url, callback=self.parse)
class DelaySpider(MetaSpider):
name = "delay"
def __init__(self, n=1, b=0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.n = n
self.b = b
self.t1 = self.t2 = self.t2_err = 0
async def start(self):
self.t1 = time.time()
url = self.mockserver.url(f"/delay?n={self.n}&b={self.b}")
yield Request(url, callback=self.parse, errback=self.errback)
def parse(self, response):
self.t2 = time.time()
def errback(self, failure):
self.t2_err = time.time()
class LogSpider(MetaSpider):
name = "log_spider"
def log_debug(self, message: str, extra: dict | None = None):
self.logger.debug(message, extra=extra)
def log_info(self, message: str, extra: dict | None = None):
self.logger.info(message, extra=extra)
def log_warning(self, message: str, extra: dict | None = None):
self.logger.warning(message, extra=extra)
def log_error(self, message: str, extra: dict | None = None):
self.logger.error(message, extra=extra)
def log_critical(self, message: str, extra: dict | None = None):
self.logger.critical(message, extra=extra)
def parse(self, response):
pass
class SlowSpider(DelaySpider):
name = "slow"
async def start(self):
# 1st response is fast
url = self.mockserver.url("/delay?n=0&b=0")
yield Request(url, callback=self.parse, errback=self.errback)
# 2nd response is slow
url = self.mockserver.url(f"/delay?n={self.n}&b={self.b}")
yield Request(url, callback=self.parse, errback=self.errback)
def parse(self, response):
yield Item()
class SimpleSpider(MetaSpider):
name = "simple"
def __init__(self, url="http://localhost:8998", *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_urls = [url]
def parse(self, response):
self.logger.info(f"Got response {response.status}")
class AsyncDefSpider(SimpleSpider):
name = "asyncdef"
async def parse(self, response):
await defer.succeed(42)
self.logger.info(f"Got response {response.status}")
class AsyncDefAsyncioSpider(SimpleSpider):
name = "asyncdef_asyncio"
async def parse(self, response):
await asyncio.sleep(0.2)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {status}")
class AsyncDefAsyncioReturnSpider(SimpleSpider):
name = "asyncdef_asyncio_return"
async def parse(self, response):
await asyncio.sleep(0.2)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {status}")
return [{"id": 1}, {"id": 2}]
class AsyncDefAsyncioReturnSingleElementSpider(SimpleSpider):
name = "asyncdef_asyncio_return_single_element"
async def parse(self, response):
await asyncio.sleep(0.1)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {status}")
return {"foo": 42}
class AsyncDefAsyncioReqsReturnSpider(SimpleSpider):
name = "asyncdef_asyncio_reqs_return"
async def parse(self, response):
await asyncio.sleep(0.2)
req_id = response.meta.get("req_id", 0)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {status}, req_id {req_id}")
if req_id > 0:
return None
reqs = []
for i in range(1, 3):
req = Request(self.start_urls[0], dont_filter=True, meta={"req_id": i})
reqs.append(req)
return reqs
class AsyncDefAsyncioGenExcSpider(SimpleSpider):
name = "asyncdef_asyncio_gen_exc"
async def parse(self, response):
for i in range(10):
await asyncio.sleep(0.1)
yield {"foo": i}
if i > 5:
raise ValueError("Stopping the processing")
class AsyncDefDeferredDirectSpider(SimpleSpider):
name = "asyncdef_deferred_direct"
async def parse(self, response):
resp = await get_web_client_agent_req(self.mockserver.url("/status?n=200"))
yield {"code": resp.code}
class AsyncDefDeferredWrappedSpider(SimpleSpider):
name = "asyncdef_deferred_wrapped"
async def parse(self, response):
resp = await deferred_to_future(
get_web_client_agent_req(self.mockserver.url("/status?n=200"))
)
yield {"code": resp.code}
class AsyncDefDeferredMaybeWrappedSpider(SimpleSpider):
    name = "asyncdef_deferred_maybe_wrapped"
async def parse(self, response):
resp = await maybe_deferred_to_future(
get_web_client_agent_req(self.mockserver.url("/status?n=200"))
)
yield {"code": resp.code}
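# Hedged note on the three deferred spiders above: a Twisted Deferred may be
# awaited directly in an async callback, or converted first via
# deferred_to_future / maybe_deferred_to_future (both imported above), e.g.:
#
#     d = get_web_client_agent_req(url)   # returns a Deferred
#     resp = await deferred_to_future(d)  # explicit asyncio Future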
class AsyncDefAsyncioGenSpider(SimpleSpider):
name = "asyncdef_asyncio_gen"
async def parse(self, response):
await asyncio.sleep(0.2)
yield {"foo": 42}
self.logger.info(f"Got response {response.status}")
class AsyncDefAsyncioGenLoopSpider(SimpleSpider):
name = "asyncdef_asyncio_gen_loop"
async def parse(self, response):
for i in range(10):
await asyncio.sleep(0.1)
yield {"foo": i}
self.logger.info(f"Got response {response.status}")
class AsyncDefAsyncioGenComplexSpider(SimpleSpider):
name = "asyncdef_asyncio_gen_complex"
initial_reqs = 4
following_reqs = 3
depth = 2
def _get_req(self, index, cb=None):
return Request(
self.mockserver.url(f"/status?n=200&request={index}"),
meta={"index": index},
dont_filter=True,
callback=cb,
)
async def start(self):
for i in range(1, self.initial_reqs + 1):
yield self._get_req(i)
async def parse(self, response):
index = response.meta["index"]
yield {"index": index}
if index < 10**self.depth:
for new_index in range(10 * index, 10 * index + self.following_reqs):
yield self._get_req(new_index)
yield self._get_req(index, cb=self.parse2)
await asyncio.sleep(0.1)
yield {"index": index + 5}
async def parse2(self, response):
await asyncio.sleep(0.1)
yield {"index2": response.meta["index"]}
class ItemSpider(FollowAllSpider):
name = "item"
def parse(self, response):
for request in super().parse(response):
yield request
yield Item()
yield {}
class MaxItemsAndRequestsSpider(FollowAllSpider):
    def __init__(self, max_items=10, max_requests=10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_items = max_items
        self.max_requests = max_requests
        self.items_scraped = 0
        self.pages_crawled = 1  # account for the start url
    def parse(self, response):
        for request in super().parse(response):
            if self.pages_crawled < self.max_requests:
                yield request
                self.pages_crawled += 1
            if self.items_scraped < self.max_items:
                yield Item()
                self.items_scraped += 1
class DefaultError(Exception):
pass
class ErrorSpider(FollowAllSpider):
name = "error"
exception_cls = DefaultError
def raise_exception(self):
raise self.exception_cls("Expected exception")
def parse(self, response):
for request in super().parse(response):
yield request
self.raise_exception()
class BrokenStartSpider(FollowAllSpider):
fail_before_yield = False
fail_yielding = False
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
self.seedsseen = []
async def start(self):
if self.fail_before_yield:
1 / 0
for s in range(100):
qargs = {"total": 10, "seed": s}
url = self.mockserver.url(f"/follow?{urlencode(qargs, doseq=True)}")
yield Request(url, meta={"seed": s})
if self.fail_yielding:
2 / 0
assert self.seedsseen, "All seeds consumed before any download happened"
def parse(self, response):
self.seedsseen.append(response.meta.get("seed"))
yield from super().parse(response)
class StartItemSpider(FollowAllSpider):
async def start(self):
yield {"name": "test item"}
class StartGoodAndBadOutput(FollowAllSpider):
async def start(self):
yield {"a": "a"}
yield Request("data:,a")
yield "data:,b"
yield object()
class SingleRequestSpider(MetaSpider):
seed = None
callback_func = None
errback_func = None
async def start(self):
if isinstance(self.seed, Request):
yield self.seed.replace(callback=self.parse, errback=self.on_error)
else:
yield Request(self.seed, callback=self.parse, errback=self.on_error)
def parse(self, response):
self.meta.setdefault("responses", []).append(response)
if callable(self.callback_func):
return self.callback_func(response)
if "next" in response.meta:
return response.meta["next"]
return None
def on_error(self, failure):
self.meta["failure"] = failure
if callable(self.errback_func):
return self.errback_func(failure)
return None
class DuplicateStartSpider(MockServerSpider):
dont_filter = True
name = "duplicatestartrequests"
distinct_urls = 2
dupe_factor = 3
async def start(self):
for i in range(self.distinct_urls):
for j in range(self.dupe_factor):
url = self.mockserver.url(f"/echo?headers=1&body=test{i}")
yield Request(url, dont_filter=self.dont_filter)
def __init__(self, url="http://localhost:8998", *args, **kwargs):
super().__init__(*args, **kwargs)
self.visited = 0
def parse(self, response):
self.visited += 1
class CrawlSpiderWithParseMethod(MockServerSpider, CrawlSpider):
"""
A CrawlSpider which overrides the 'parse' method
"""
name = "crawl_spider_with_parse_method"
custom_settings: dict = {
"RETRY_HTTP_CODES": [], # no need to retry
}
rules = (Rule(LinkExtractor(), callback="parse", follow=True),)
async def start(self):
test_body = b"""
<html>
<head><title>Page title</title></head>
<body>
<p><a href="/status?n=200">Item 200</a></p> <!-- callback -->
<p><a href="/status?n=201">Item 201</a></p> <!-- callback -->
</body>
</html>
"""
url = self.mockserver.url("/alpayload")
yield Request(url, method="POST", body=test_body)
def parse(self, response, foo=None):
self.logger.info("[parse] status %i (foo: %s)", response.status, foo)
yield Request(
self.mockserver.url("/status?n=202"), self.parse, cb_kwargs={"foo": "bar"}
)
class CrawlSpiderWithAsyncCallback(CrawlSpiderWithParseMethod):
"""A CrawlSpider with an async def callback"""
name = "crawl_spider_with_async_callback"
rules = (Rule(LinkExtractor(), callback="parse_async", follow=True),)
async def parse_async(self, response, foo=None):
self.logger.info("[parse_async] status %i (foo: %s)", response.status, foo)
return Request(
self.mockserver.url("/status?n=202"),
self.parse_async,
cb_kwargs={"foo": "bar"},
)
class CrawlSpiderWithAsyncGeneratorCallback(CrawlSpiderWithParseMethod):
"""A CrawlSpider with an async generator callback"""
name = "crawl_spider_with_async_generator_callback"
rules = (Rule(LinkExtractor(), callback="parse_async_gen", follow=True),)
async def parse_async_gen(self, response, foo=None):
self.logger.info("[parse_async_gen] status %i (foo: %s)", response.status, foo)
yield Request(
self.mockserver.url("/status?n=202"),
self.parse_async_gen,
cb_kwargs={"foo": "bar"},
)
class CrawlSpiderWithErrback(CrawlSpiderWithParseMethod):
name = "crawl_spider_with_errback"
rules = (Rule(LinkExtractor(), callback="parse", errback="errback", follow=True),)
async def start(self):
test_body = b"""
<html>
<head><title>Page title</title></head>
<body>
<p><a href="/status?n=200">Item 200</a></p> <!-- callback -->
<p><a href="/status?n=201">Item 201</a></p> <!-- callback -->
<p><a href="/status?n=404">Item 404</a></p> <!-- errback -->
<p><a href="/status?n=500">Item 500</a></p> <!-- errback -->
<p><a href="/status?n=501">Item 501</a></p> <!-- errback -->
</body>
</html>
"""
url = self.mockserver.url("/alpayload")
yield Request(url, method="POST", body=test_body)
def errback(self, failure):
self.logger.info("[errback] status %i", failure.value.response.status)
class CrawlSpiderWithProcessRequestCallbackKeywordArguments(CrawlSpiderWithParseMethod):
name = "crawl_spider_with_process_request_cb_kwargs"
rules = (
Rule(
LinkExtractor(),
callback="parse",
follow=True,
process_request="process_request",
),
)
def process_request(self, request, response):
request.cb_kwargs["foo"] = "process_request"
return request
class BytesReceivedCallbackSpider(MetaSpider):
full_response_length = 2**18
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super().from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.bytes_received, signals.bytes_received)
return spider
async def start(self):
body = b"a" * self.full_response_length
url = self.mockserver.url("/alpayload")
yield Request(url, method="POST", body=body, errback=self.errback)
def parse(self, response):
self.meta["response"] = response
def errback(self, failure):
self.meta["failure"] = failure
def bytes_received(self, data, request, spider):
self.meta["bytes_received"] = data
raise StopDownload(fail=False)
class BytesReceivedErrbackSpider(BytesReceivedCallbackSpider):
def bytes_received(self, data, request, spider):
self.meta["bytes_received"] = data
raise StopDownload(fail=True)
class HeadersReceivedCallbackSpider(MetaSpider):
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super().from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.headers_received, signals.headers_received)
return spider
async def start(self):
yield Request(self.mockserver.url("/status"), errback=self.errback)
def parse(self, response):
self.meta["response"] = response
def errback(self, failure):
self.meta["failure"] = failure
def headers_received(self, headers, body_length, request, spider):
self.meta["headers_received"] = headers
raise StopDownload(fail=False)
class HeadersReceivedErrbackSpider(HeadersReceivedCallbackSpider):
def headers_received(self, headers, body_length, request, spider):
self.meta["headers_received"] = headers
raise StopDownload(fail=True)
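# Hedged sketch of the StopDownload pattern used by the four spiders above:
# a handler connected to bytes_received or headers_received may abort the
# download early. fail=False sends the partial response to the callback;
# fail=True sends a Failure to the errback.
#
#     def bytes_received(self, data, request, spider):
#         raise StopDownload(fail=False)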
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_defaultheaders.py | tests/test_downloadermiddleware_defaultheaders.py | from scrapy.downloadermiddlewares.defaultheaders import DefaultHeadersMiddleware
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.python import to_bytes
from scrapy.utils.test import get_crawler
class TestDefaultHeadersMiddleware:
def get_defaults_mw(self):
crawler = get_crawler(Spider)
defaults = {
to_bytes(k): [to_bytes(v)]
for k, v in crawler.settings.get("DEFAULT_REQUEST_HEADERS").items()
}
return defaults, DefaultHeadersMiddleware.from_crawler(crawler)
def test_process_request(self):
defaults, mw = self.get_defaults_mw()
req = Request("http://www.scrapytest.org")
mw.process_request(req)
assert req.headers == defaults
def test_update_headers(self):
defaults, mw = self.get_defaults_mw()
headers = {"Accept-Language": ["es"], "Test-Header": ["test"]}
bytes_headers = {b"Accept-Language": [b"es"], b"Test-Header": [b"test"]}
req = Request("http://www.scrapytest.org", headers=headers)
assert req.headers == bytes_headers
mw.process_request(req)
defaults.update(bytes_headers)
assert req.headers == defaults
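# Hedged usage sketch: DEFAULT_REQUEST_HEADERS is an ordinary setting, so the
# middleware behaviour tested above can be tuned per spider, e.g.:
#
#     custom_settings = {
#         "DEFAULT_REQUEST_HEADERS": {"Accept-Language": "en"},
#     }
#
# Headers set explicitly on a Request take precedence, as test_update_headers
# demonstrates.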
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_crawl.py | tests/test_command_crawl.py | from __future__ import annotations
from typing import TYPE_CHECKING
from tests.test_commands import TestProjectBase
from tests.utils.cmdline import proc
if TYPE_CHECKING:
from collections.abc import Iterable
from pathlib import Path
class TestCrawlCommand(TestProjectBase):
def crawl(
self, code: str, proj_path: Path, args: Iterable[str] = ()
) -> tuple[int, str, str]:
(proj_path / self.project_name / "spiders" / "myspider.py").write_text(
code, encoding="utf-8"
)
return proc("crawl", "myspider", *args, cwd=proj_path)
def get_log(self, code: str, proj_path: Path, args: Iterable[str] = ()) -> str:
_, _, stderr = self.crawl(code, proj_path, args=args)
return stderr
def test_no_output(self, proj_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('It works!')
return
yield
"""
log = self.get_log(spider_code, proj_path)
assert "[myspider] DEBUG: It works!" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
assert "Spider closed (finished)" in log
def test_output(self, proj_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))
return
yield
"""
args = ["-o", "example.json"]
log = self.get_log(spider_code, proj_path, args=args)
assert "[myspider] DEBUG: FEEDS: {'example.json': {'format': 'json'}}" in log
def test_overwrite_output(self, proj_path: Path) -> None:
spider_code = """
import json
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug(
'FEEDS: {}'.format(
json.dumps(self.settings.getdict('FEEDS'), sort_keys=True)
)
)
return
yield
"""
j = proj_path / "example.json"
j.write_text("not empty", encoding="utf-8")
args = ["-O", "example.json"]
log = self.get_log(spider_code, proj_path, args=args)
assert (
'[myspider] DEBUG: FEEDS: {"example.json": {"format": "json", "overwrite": true}}'
in log
)
with j.open(encoding="utf-8") as f2:
first_line = f2.readline()
assert first_line != "not empty"
def test_output_and_overwrite_output(self, proj_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
return
yield
"""
args = ["-o", "example1.json", "-O", "example2.json"]
log = self.get_log(spider_code, proj_path, args=args)
assert (
"error: Please use only one of -o/--output and -O/--overwrite-output" in log
)
def test_default_reactor(self, proj_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('It works!')
return
yield
"""
log = self.get_log(spider_code, proj_path, args=("-s", "TWISTED_REACTOR="))
assert "[myspider] DEBUG: It works!" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
not in log
)
assert "Spider closed (finished)" in log
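# Hedged CLI equivalents of what these tests drive through proc():
#
#     scrapy crawl myspider -o example.json      # append to the feed
#     scrapy crawl myspider -O example.json      # overwrite the feed
#     scrapy crawl myspider -s TWISTED_REACTOR=  # force the default reactor
#
# -o/--output and -O/--overwrite-output are mutually exclusive, as
# test_output_and_overwrite_output asserts.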
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_runspider.py | tests/test_command_runspider.py | from __future__ import annotations
import asyncio
import inspect
import platform
import sys
from typing import TYPE_CHECKING
import pytest
from tests.test_crawler import ExceptionSpider, NoRequestsSpider
from tests.utils.cmdline import proc
if TYPE_CHECKING:
from collections.abc import Iterable
from pathlib import Path
class TestRunSpiderCommand:
spider_filename = "myspider.py"
debug_log_spider = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug("It Works!")
return
yield
"""
badspider = """
import scrapy
class BadSpider(scrapy.Spider):
name = "bad"
async def start(self):
raise Exception("oops!")
yield
"""
def runspider(
self, cwd: Path, code: str, name: str | None = None, args: Iterable[str] = ()
) -> tuple[int, str, str]:
fname = cwd / (name or self.spider_filename)
fname.write_text(code, encoding="utf-8")
return proc("runspider", str(fname), *args, cwd=cwd)
def get_log(
self, cwd: Path, code: str, name: str | None = None, args: Iterable[str] = ()
) -> str:
_, _, stderr = self.runspider(cwd, code, name, args=args)
return stderr
def test_runspider(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, self.debug_log_spider)
assert "DEBUG: It Works!" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
assert "INFO: Spider closed (finished)" in log
def test_run_fail_spider(self, tmp_path: Path) -> None:
ret, _, _ = self.runspider(
tmp_path, "import scrapy\n" + inspect.getsource(ExceptionSpider)
)
assert ret != 0
def test_run_good_spider(self, tmp_path: Path) -> None:
ret, _, _ = self.runspider(
tmp_path, "import scrapy\n" + inspect.getsource(NoRequestsSpider)
)
assert ret == 0
def test_runspider_log_level(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path, self.debug_log_spider, args=("-s", "LOG_LEVEL=INFO")
)
assert "DEBUG: It Works!" not in log
assert "INFO: Spider opened" in log
def test_runspider_default_reactor(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path, self.debug_log_spider, args=("-s", "TWISTED_REACTOR=")
)
assert "DEBUG: It Works!" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
not in log
)
assert "INFO: Spider opened" in log
assert "INFO: Closing spider (finished)" in log
assert "INFO: Spider closed (finished)" in log
def test_runspider_dnscache_disabled(self, tmp_path: Path) -> None:
        # see https://github.com/scrapy/scrapy/issues/2811
        # The spider below should fail to connect to localhost:12345,
        # which is intended, but the failure must not be a DNS lookup error.
        # Assumption: localhost will resolve in all cases (true?)
dnscache_spider = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['http://localhost:12345']
custom_settings = {
"ROBOTSTXT_OBEY": False,
"RETRY_ENABLED": False,
}
def parse(self, response):
return {'test': 'value'}
"""
log = self.get_log(
tmp_path, dnscache_spider, args=("-s", "DNSCACHE_ENABLED=False")
)
assert "DNSLookupError" not in log
assert "INFO: Spider opened" in log
@pytest.mark.parametrize("value", [False, True])
def test_runspider_log_short_names(self, tmp_path: Path, value: bool) -> None:
log1 = self.get_log(
tmp_path, self.debug_log_spider, args=("-s", f"LOG_SHORT_NAMES={value}")
)
assert "[myspider] DEBUG: It Works!" in log1
assert ("[scrapy]" in log1) is value
assert ("[scrapy.core.engine]" in log1) is not value
def test_runspider_no_spider_found(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, "from scrapy.spiders import Spider\n")
assert "No spider found in file" in log
def test_runspider_file_not_found(self) -> None:
_, _, log = proc("runspider", "some_non_existent_file")
assert "File not found: some_non_existent_file" in log
def test_runspider_unable_to_load(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, "", name="myspider.txt")
assert "Unable to load" in log
def test_start_errors(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, self.badspider, name="badspider.py")
assert "start" in log
assert "badspider.py" in log, log
def test_asyncio_enabled_true(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=[
"-s",
"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor",
],
)
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
def test_asyncio_enabled_default(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, self.debug_log_spider)
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
in log
)
def test_asyncio_enabled_false(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=["-s", "TWISTED_REACTOR=twisted.internet.selectreactor.SelectReactor"],
)
assert "Using reactor: twisted.internet.selectreactor.SelectReactor" in log
assert (
"Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor"
not in log
)
@pytest.mark.requires_uvloop
def test_custom_asyncio_loop_enabled_true(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=[
"-s",
"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"-s",
"ASYNCIO_EVENT_LOOP=uvloop.Loop",
],
)
assert "Using asyncio event loop: uvloop.Loop" in log
def test_custom_asyncio_loop_enabled_false(self, tmp_path: Path) -> None:
log = self.get_log(
tmp_path,
self.debug_log_spider,
args=[
"-s",
"TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor",
],
)
if sys.platform != "win32":
loop = asyncio.new_event_loop()
else:
loop = asyncio.SelectorEventLoop()
assert (
f"Using asyncio event loop: {loop.__module__}.{loop.__class__.__name__}"
in log
)
def test_output(self, tmp_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))
return
yield
"""
args = ["-o", "example.json"]
log = self.get_log(tmp_path, spider_code, args=args)
assert "[myspider] DEBUG: FEEDS: {'example.json': {'format': 'json'}}" in log
def test_overwrite_output(self, tmp_path: Path) -> None:
spider_code = """
import json
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug(
'FEEDS: {}'.format(
json.dumps(self.settings.getdict('FEEDS'), sort_keys=True)
)
)
return
yield
"""
(tmp_path / "example.json").write_text("not empty", encoding="utf-8")
args = ["-O", "example.json"]
log = self.get_log(tmp_path, spider_code, args=args)
assert (
'[myspider] DEBUG: FEEDS: {"example.json": {"format": "json", "overwrite": true}}'
in log
)
with (tmp_path / "example.json").open(encoding="utf-8") as f2:
first_line = f2.readline()
assert first_line != "not empty"
def test_output_and_overwrite_output(self, tmp_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
return
yield
"""
args = ["-o", "example1.json", "-O", "example2.json"]
log = self.get_log(tmp_path, spider_code, args=args)
assert (
"error: Please use only one of -o/--output and -O/--overwrite-output" in log
)
def test_output_stdout(self, tmp_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
async def start(self):
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))
return
yield
"""
args = ["-o", "-:json"]
log = self.get_log(tmp_path, spider_code, args=args)
assert "[myspider] DEBUG: FEEDS: {'stdout:': {'format': 'json'}}" in log
@pytest.mark.parametrize("arg", ["output.json:json", "output.json"])
def test_absolute_path(self, tmp_path: Path, arg: str) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ["data:,"]
def parse(self, response):
yield {"hello": "world"}
"""
args = ["-o", str(tmp_path / arg)]
log = self.get_log(tmp_path, spider_code, args=args)
assert (
f"[scrapy.extensions.feedexport] INFO: Stored json feed (1 items) in: {tmp_path / 'output.json'}"
in log
)
def test_args_change_settings(self, tmp_path: Path) -> None:
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super().from_crawler(crawler, *args, **kwargs)
spider.settings.set("FOO", kwargs.get("foo"))
return spider
async def start(self):
self.logger.info(f"The value of FOO is {self.settings.getint('FOO')}")
return
yield
"""
args = ["-a", "foo=42"]
log = self.get_log(tmp_path, spider_code, args=args)
assert "Spider closed (finished)" in log
assert "The value of FOO is 42" in log
@pytest.mark.skipif(
platform.system() != "Windows", reason="Windows required for .pyw files"
)
class TestWindowsRunSpiderCommand(TestRunSpiderCommand):
spider_filename = "myspider.pyw"
def test_start_errors(self, tmp_path: Path) -> None:
log = self.get_log(tmp_path, self.badspider, name="badspider.pyw")
assert "start" in log
assert "badspider.pyw" in log
def test_runspider_unable_to_load(self, tmp_path: Path) -> None:
        pytest.skip("Already tested in 'TestRunSpiderCommand'")
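# Hedged CLI equivalents for the scenarios above:
#
#     scrapy runspider myspider.py -s LOG_LEVEL=INFO
#     scrapy runspider myspider.py -a foo=42
#     scrapy runspider myspider.py -o -:json   # write the feed to stdout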
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_referer.py | tests/test_spidermiddleware_referer.py | from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, cast
from urllib.parse import urlparse
import pytest
from scrapy.downloadermiddlewares.redirect import RedirectMiddleware
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.spidermiddlewares.referer import (
POLICY_NO_REFERRER,
POLICY_NO_REFERRER_WHEN_DOWNGRADE,
POLICY_ORIGIN,
POLICY_ORIGIN_WHEN_CROSS_ORIGIN,
POLICY_SAME_ORIGIN,
POLICY_SCRAPY_DEFAULT,
POLICY_STRICT_ORIGIN,
POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN,
POLICY_UNSAFE_URL,
DefaultReferrerPolicy,
NoReferrerPolicy,
NoReferrerWhenDowngradePolicy,
OriginPolicy,
OriginWhenCrossOriginPolicy,
RefererMiddleware,
ReferrerPolicy,
SameOriginPolicy,
StrictOriginPolicy,
StrictOriginWhenCrossOriginPolicy,
UnsafeUrlPolicy,
)
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from collections.abc import Sequence
from scrapy.crawler import Crawler
class TestRefererMiddleware:
req_meta: dict[str, Any] = {}
resp_headers: dict[str, str] = {}
settings: dict[str, Any] = {}
scenarii: list[tuple[str, str, bytes | None]] = [
("http://scrapytest.org", "http://scrapytest.org/", b"http://scrapytest.org"),
]
@pytest.fixture
def mw(self) -> RefererMiddleware:
settings = Settings(self.settings)
return RefererMiddleware(settings)
def get_request(self, target: str) -> Request:
return Request(target, meta=self.req_meta)
def get_response(self, origin: str) -> Response:
return Response(origin, headers=self.resp_headers)
def test(self, mw: RefererMiddleware) -> None:
for origin, target, referrer in self.scenarii:
response = self.get_response(origin)
request = self.get_request(target)
out = list(mw.process_spider_output(response, [request]))
assert out[0].headers.get("Referer") == referrer
class MixinDefault:
"""
Based on https://www.w3.org/TR/referrer-policy/#referrer-policy-no-referrer-when-downgrade
with some additional filtering of s3://
"""
scenarii: list[tuple[str, str, bytes | None]] = [
("https://example.com/", "https://scrapy.org/", b"https://example.com/"),
("http://example.com/", "http://scrapy.org/", b"http://example.com/"),
("http://example.com/", "https://scrapy.org/", b"http://example.com/"),
("https://example.com/", "http://scrapy.org/", None),
# no credentials leak
(
"http://user:password@example.com/",
"https://scrapy.org/",
b"http://example.com/",
),
# no referrer leak for local schemes
("file:///home/path/to/somefile.html", "https://scrapy.org/", None),
("file:///home/path/to/somefile.html", "http://scrapy.org/", None),
# no referrer leak for s3 origins
("s3://mybucket/path/to/data.csv", "https://scrapy.org/", None),
("s3://mybucket/path/to/data.csv", "http://scrapy.org/", None),
]
class MixinNoReferrer:
scenarii: list[tuple[str, str, bytes | None]] = [
("https://example.com/page.html", "https://example.com/", None),
("http://www.example.com/", "https://scrapy.org/", None),
("http://www.example.com/", "http://scrapy.org/", None),
("https://www.example.com/", "http://scrapy.org/", None),
("file:///home/path/to/somefile.html", "http://scrapy.org/", None),
]
class MixinNoReferrerWhenDowngrade:
scenarii: list[tuple[str, str, bytes | None]] = [
# TLS to TLS: send non-empty referrer
(
"https://example.com/page.html",
"https://not.example.com/",
b"https://example.com/page.html",
),
(
"https://example.com/page.html",
"https://scrapy.org/",
b"https://example.com/page.html",
),
(
"https://example.com:443/page.html",
"https://scrapy.org/",
b"https://example.com/page.html",
),
(
"https://example.com:444/page.html",
"https://scrapy.org/",
b"https://example.com:444/page.html",
),
(
"ftps://example.com/urls.zip",
"https://scrapy.org/",
b"ftps://example.com/urls.zip",
),
# TLS to non-TLS: do not send referrer
("https://example.com/page.html", "http://not.example.com/", None),
("https://example.com/page.html", "http://scrapy.org/", None),
("ftps://example.com/urls.zip", "http://scrapy.org/", None),
# non-TLS to TLS or non-TLS: send referrer
(
"http://example.com/page.html",
"https://not.example.com/",
b"http://example.com/page.html",
),
(
"http://example.com/page.html",
"https://scrapy.org/",
b"http://example.com/page.html",
),
(
"http://example.com:8080/page.html",
"https://scrapy.org/",
b"http://example.com:8080/page.html",
),
(
"http://example.com:80/page.html",
"http://not.example.com/",
b"http://example.com/page.html",
),
(
"http://example.com/page.html",
"http://scrapy.org/",
b"http://example.com/page.html",
),
(
"http://example.com:443/page.html",
"http://scrapy.org/",
b"http://example.com:443/page.html",
),
(
"ftp://example.com/urls.zip",
"http://scrapy.org/",
b"ftp://example.com/urls.zip",
),
(
"ftp://example.com/urls.zip",
"https://scrapy.org/",
b"ftp://example.com/urls.zip",
),
# test for user/password stripping
(
"http://user:password@example.com/page.html",
"https://not.example.com/",
b"http://example.com/page.html",
),
]
class MixinSameOrigin:
scenarii: list[tuple[str, str, bytes | None]] = [
# Same origin (protocol, host, port): send referrer
(
"https://example.com/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
(
"http://example.com/page.html",
"http://example.com/not-page.html",
b"http://example.com/page.html",
),
(
"https://example.com:443/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
(
"http://example.com:80/page.html",
"http://example.com/not-page.html",
b"http://example.com/page.html",
),
(
"http://example.com/page.html",
"http://example.com:80/not-page.html",
b"http://example.com/page.html",
),
(
"http://example.com:8888/page.html",
"http://example.com:8888/not-page.html",
b"http://example.com:8888/page.html",
),
# Different host: do NOT send referrer
(
"https://example.com/page.html",
"https://not.example.com/otherpage.html",
None,
),
("http://example.com/page.html", "http://not.example.com/otherpage.html", None),
("http://example.com/page.html", "http://www.example.com/otherpage.html", None),
# Different port: do NOT send referrer
(
"https://example.com:444/page.html",
"https://example.com/not-page.html",
None,
),
("http://example.com:81/page.html", "http://example.com/not-page.html", None),
("http://example.com/page.html", "http://example.com:81/not-page.html", None),
# Different protocols: do NOT send referrer
("https://example.com/page.html", "http://example.com/not-page.html", None),
("https://example.com/page.html", "http://not.example.com/", None),
("ftps://example.com/urls.zip", "https://example.com/not-page.html", None),
("ftp://example.com/urls.zip", "http://example.com/not-page.html", None),
("ftps://example.com/urls.zip", "https://example.com/not-page.html", None),
# test for user/password stripping
(
"https://user:password@example.com/page.html",
"http://example.com/not-page.html",
None,
),
(
"https://user:password@example.com/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
]
class MixinOrigin:
scenarii: list[tuple[str, str, bytes | None]] = [
# TLS or non-TLS to TLS or non-TLS: referrer origin is sent (yes, even for downgrades)
(
"https://example.com/page.html",
"https://example.com/not-page.html",
b"https://example.com/",
),
(
"https://example.com/page.html",
"https://scrapy.org",
b"https://example.com/",
),
("https://example.com/page.html", "http://scrapy.org", b"https://example.com/"),
("http://example.com/page.html", "http://scrapy.org", b"http://example.com/"),
# test for user/password stripping
(
"https://user:password@example.com/page.html",
"http://scrapy.org",
b"https://example.com/",
),
]
class MixinStrictOrigin:
scenarii: list[tuple[str, str, bytes | None]] = [
# TLS or non-TLS to TLS or non-TLS: referrer origin is sent but not for downgrades
(
"https://example.com/page.html",
"https://example.com/not-page.html",
b"https://example.com/",
),
(
"https://example.com/page.html",
"https://scrapy.org",
b"https://example.com/",
),
("http://example.com/page.html", "http://scrapy.org", b"http://example.com/"),
# downgrade: send nothing
("https://example.com/page.html", "http://scrapy.org", None),
# upgrade: send origin
("http://example.com/page.html", "https://scrapy.org", b"http://example.com/"),
# test for user/password stripping
(
"https://user:password@example.com/page.html",
"https://scrapy.org",
b"https://example.com/",
),
("https://user:password@example.com/page.html", "http://scrapy.org", None),
]
class MixinOriginWhenCrossOrigin:
scenarii: list[tuple[str, str, bytes | None]] = [
# Same origin (protocol, host, port): send referrer
(
"https://example.com/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
(
"http://example.com/page.html",
"http://example.com/not-page.html",
b"http://example.com/page.html",
),
(
"https://example.com:443/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
(
"http://example.com:80/page.html",
"http://example.com/not-page.html",
b"http://example.com/page.html",
),
(
"http://example.com/page.html",
"http://example.com:80/not-page.html",
b"http://example.com/page.html",
),
(
"http://example.com:8888/page.html",
"http://example.com:8888/not-page.html",
b"http://example.com:8888/page.html",
),
# Different host: send origin as referrer
(
"https://example2.com/page.html",
"https://scrapy.org/otherpage.html",
b"https://example2.com/",
),
(
"https://example2.com/page.html",
"https://not.example2.com/otherpage.html",
b"https://example2.com/",
),
(
"http://example2.com/page.html",
"http://not.example2.com/otherpage.html",
b"http://example2.com/",
),
# exact match required
(
"http://example2.com/page.html",
"http://www.example2.com/otherpage.html",
b"http://example2.com/",
),
# Different port: send origin as referrer
(
"https://example3.com:444/page.html",
"https://example3.com/not-page.html",
b"https://example3.com:444/",
),
(
"http://example3.com:81/page.html",
"http://example3.com/not-page.html",
b"http://example3.com:81/",
),
# Different protocols: send origin as referrer
(
"https://example4.com/page.html",
"http://example4.com/not-page.html",
b"https://example4.com/",
),
(
"https://example4.com/page.html",
"http://not.example4.com/",
b"https://example4.com/",
),
(
"ftps://example4.com/urls.zip",
"https://example4.com/not-page.html",
b"ftps://example4.com/",
),
(
"ftp://example4.com/urls.zip",
"http://example4.com/not-page.html",
b"ftp://example4.com/",
),
(
"ftps://example4.com/urls.zip",
"https://example4.com/not-page.html",
b"ftps://example4.com/",
),
# test for user/password stripping
(
"https://user:password@example5.com/page.html",
"https://example5.com/not-page.html",
b"https://example5.com/page.html",
),
# TLS to non-TLS downgrade: send origin
(
"https://user:password@example5.com/page.html",
"http://example5.com/not-page.html",
b"https://example5.com/",
),
]
class MixinStrictOriginWhenCrossOrigin:
scenarii: list[tuple[str, str, bytes | None]] = [
# Same origin (protocol, host, port): send referrer
(
"https://example.com/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
(
"http://example.com/page.html",
"http://example.com/not-page.html",
b"http://example.com/page.html",
),
(
"https://example.com:443/page.html",
"https://example.com/not-page.html",
b"https://example.com/page.html",
),
(
"http://example.com:80/page.html",
"http://example.com/not-page.html",
b"http://example.com/page.html",
),
(
"http://example.com/page.html",
"http://example.com:80/not-page.html",
b"http://example.com/page.html",
),
(
"http://example.com:8888/page.html",
"http://example.com:8888/not-page.html",
b"http://example.com:8888/page.html",
),
# Different host: send origin as referrer
(
"https://example2.com/page.html",
"https://scrapy.org/otherpage.html",
b"https://example2.com/",
),
(
"https://example2.com/page.html",
"https://not.example2.com/otherpage.html",
b"https://example2.com/",
),
(
"http://example2.com/page.html",
"http://not.example2.com/otherpage.html",
b"http://example2.com/",
),
# exact match required
(
"http://example2.com/page.html",
"http://www.example2.com/otherpage.html",
b"http://example2.com/",
),
# Different port: send origin as referrer
(
"https://example3.com:444/page.html",
"https://example3.com/not-page.html",
b"https://example3.com:444/",
),
(
"http://example3.com:81/page.html",
"http://example3.com/not-page.html",
b"http://example3.com:81/",
),
# downgrade
("https://example4.com/page.html", "http://example4.com/not-page.html", None),
("https://example4.com/page.html", "http://not.example4.com/", None),
# non-TLS to non-TLS
(
"ftp://example4.com/urls.zip",
"http://example4.com/not-page.html",
b"ftp://example4.com/",
),
# upgrade
(
"http://example4.com/page.html",
"https://example4.com/not-page.html",
b"http://example4.com/",
),
(
"http://example4.com/page.html",
"https://not.example4.com/",
b"http://example4.com/",
),
# Different protocols: send origin as referrer
(
"ftps://example4.com/urls.zip",
"https://example4.com/not-page.html",
b"ftps://example4.com/",
),
(
"ftps://example4.com/urls.zip",
"https://example4.com/not-page.html",
b"ftps://example4.com/",
),
# test for user/password stripping
(
"https://user:password@example5.com/page.html",
"https://example5.com/not-page.html",
b"https://example5.com/page.html",
),
# TLS to non-TLS downgrade: send nothing
(
"https://user:password@example5.com/page.html",
"http://example5.com/not-page.html",
None,
),
]
class MixinUnsafeUrl:
scenarii: list[tuple[str, str, bytes | None]] = [
        # TLS to TLS: send referrer
(
"https://example1.com/page.html",
"https://not.example1.com/",
b"https://example1.com/page.html",
),
(
"https://example1.com/page.html",
"https://scrapy.org/",
b"https://example1.com/page.html",
),
(
"https://example1.com:443/page.html",
"https://scrapy.org/",
b"https://example1.com/page.html",
),
(
"https://example1.com:444/page.html",
"https://scrapy.org/",
b"https://example1.com:444/page.html",
),
(
"ftps://example1.com/urls.zip",
"https://scrapy.org/",
b"ftps://example1.com/urls.zip",
),
        # TLS to non-TLS: send referrer (yes, it's unsafe)
        (
            "https://example.com/sekrit.html",
            "http://not.example.com/",
            b"https://example.com/sekrit.html",
        ),
(
"https://example2.com/page.html",
"http://not.example2.com/",
b"https://example2.com/page.html",
),
(
"https://example2.com/page.html",
"http://scrapy.org/",
b"https://example2.com/page.html",
),
(
"ftps://example2.com/urls.zip",
"http://scrapy.org/",
b"ftps://example2.com/urls.zip",
),
# non-TLS to TLS or non-TLS: send referrer (yes, it's unsafe)
(
"http://example3.com/page.html",
"https://not.example3.com/",
b"http://example3.com/page.html",
),
(
"http://example3.com/page.html",
"https://scrapy.org/",
b"http://example3.com/page.html",
),
(
"http://example3.com:8080/page.html",
"https://scrapy.org/",
b"http://example3.com:8080/page.html",
),
(
"http://example3.com:80/page.html",
"http://not.example3.com/",
b"http://example3.com/page.html",
),
(
"http://example3.com/page.html",
"http://scrapy.org/",
b"http://example3.com/page.html",
),
(
"http://example3.com:443/page.html",
"http://scrapy.org/",
b"http://example3.com:443/page.html",
),
(
"ftp://example3.com/urls.zip",
"http://scrapy.org/",
b"ftp://example3.com/urls.zip",
),
(
"ftp://example3.com/urls.zip",
"https://scrapy.org/",
b"ftp://example3.com/urls.zip",
),
# test for user/password stripping
(
"http://user:password@example4.com/page.html",
"https://not.example4.com/",
b"http://example4.com/page.html",
),
(
"https://user:password@example4.com/page.html",
"http://scrapy.org/",
b"https://example4.com/page.html",
),
]
class TestRefererMiddlewareDefault(MixinDefault, TestRefererMiddleware):
pass
# --- Tests using settings to set policy using class path
class TestSettingsNoReferrer(MixinNoReferrer, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.NoReferrerPolicy"}
class TestSettingsNoReferrerWhenDowngrade(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.NoReferrerWhenDowngradePolicy"
}
class TestSettingsSameOrigin(MixinSameOrigin, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.SameOriginPolicy"}
class TestSettingsOrigin(MixinOrigin, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginPolicy"}
class TestSettingsStrictOrigin(MixinStrictOrigin, TestRefererMiddleware):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.StrictOriginPolicy"
}
class TestSettingsOriginWhenCrossOrigin(
MixinOriginWhenCrossOrigin, TestRefererMiddleware
):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginWhenCrossOriginPolicy"
}
class TestSettingsStrictOriginWhenCrossOrigin(
MixinStrictOriginWhenCrossOrigin, TestRefererMiddleware
):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.StrictOriginWhenCrossOriginPolicy"
}
class TestSettingsUnsafeUrl(MixinUnsafeUrl, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.UnsafeUrlPolicy"}
class CustomPythonOrgPolicy(ReferrerPolicy):
"""
A dummy policy that returns referrer as http(s)://python.org
depending on the scheme of the target URL.
"""
def referrer(self, response, request):
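        # Note: RefererMiddleware calls policy.referrer(response.url, request.url),
        # so despite the parameter names these are URL strings, which is why
        # urlparse(request) works here.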
scheme = urlparse(request).scheme
if scheme == "https":
return b"https://python.org/"
if scheme == "http":
return b"http://python.org/"
return None
class TestSettingsCustomPolicy(TestRefererMiddleware):
settings = {"REFERRER_POLICY": CustomPythonOrgPolicy}
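    # Note that REFERRER_POLICY accepts the policy class itself here, not only
    # a dotted path or a standard policy name.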
scenarii = [
("https://example.com/", "https://scrapy.org/", b"https://python.org/"),
("http://example.com/", "http://scrapy.org/", b"http://python.org/"),
("http://example.com/", "https://scrapy.org/", b"https://python.org/"),
("https://example.com/", "http://scrapy.org/", b"http://python.org/"),
(
"file:///home/path/to/somefile.html",
"https://scrapy.org/",
b"https://python.org/",
),
(
"file:///home/path/to/somefile.html",
"http://scrapy.org/",
b"http://python.org/",
),
]
# --- Tests using Request meta dict to set policy
class TestRequestMetaDefault(MixinDefault, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_SCRAPY_DEFAULT}
class TestRequestMetaNoReferrer(MixinNoReferrer, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_NO_REFERRER}
class TestRequestMetaNoReferrerWhenDowngrade(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
req_meta = {"referrer_policy": POLICY_NO_REFERRER_WHEN_DOWNGRADE}
class TestRequestMetaSameOrigin(MixinSameOrigin, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_SAME_ORIGIN}
class TestRequestMetaOrigin(MixinOrigin, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_ORIGIN}
class TestRequestMetaStrictOrigin(MixinStrictOrigin, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_STRICT_ORIGIN}
class TestRequestMetaOriginWhenCrossOrigin(
MixinOriginWhenCrossOrigin, TestRefererMiddleware
):
req_meta = {"referrer_policy": POLICY_ORIGIN_WHEN_CROSS_ORIGIN}
class TestRequestMetaStrictOriginWhenCrossOrigin(
MixinStrictOriginWhenCrossOrigin, TestRefererMiddleware
):
req_meta = {"referrer_policy": POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN}
class TestRequestMetaUnsafeUrl(MixinUnsafeUrl, TestRefererMiddleware):
req_meta = {"referrer_policy": POLICY_UNSAFE_URL}
class TestRequestMetaPrecedence001(MixinUnsafeUrl, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.SameOriginPolicy"}
req_meta = {"referrer_policy": POLICY_UNSAFE_URL}
class TestRequestMetaPrecedence002(MixinNoReferrer, TestRefererMiddleware):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.NoReferrerWhenDowngradePolicy"
}
req_meta = {"referrer_policy": POLICY_NO_REFERRER}
class TestRequestMetaPrecedence003(MixinUnsafeUrl, TestRefererMiddleware):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginWhenCrossOriginPolicy"
}
req_meta = {"referrer_policy": POLICY_UNSAFE_URL}
class TestRequestMetaSettingFallback:
params = [
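        # Each entry: (settings dict, response headers, request meta,
        # expected policy class, whether a RuntimeWarning is expected)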
(
            # When an unknown policy is referenced in Request.meta
            # (here, a typo),
            # the policy defined in settings takes precedence
{
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginWhenCrossOriginPolicy"
},
{},
{"referrer_policy": "ssscrapy-default"},
OriginWhenCrossOriginPolicy,
True,
),
(
# same as above but with string value for settings policy
{"REFERRER_POLICY": "origin-when-cross-origin"},
{},
{"referrer_policy": "ssscrapy-default"},
OriginWhenCrossOriginPolicy,
True,
),
(
            # request meta references an unknown policy, but since it is set,
            # the Referrer-Policy header in the response is not used,
            # and the settings' policy is applied
{"REFERRER_POLICY": "origin-when-cross-origin"},
{"Referrer-Policy": "unsafe-url"},
{"referrer_policy": "ssscrapy-default"},
OriginWhenCrossOriginPolicy,
True,
),
(
# here, request meta does not set the policy
# so response headers take precedence
{"REFERRER_POLICY": "origin-when-cross-origin"},
{"Referrer-Policy": "unsafe-url"},
{},
UnsafeUrlPolicy,
False,
),
(
# here, request meta does not set the policy,
# but response headers also use an unknown policy,
# so the settings' policy is used
{"REFERRER_POLICY": "origin-when-cross-origin"},
{"Referrer-Policy": "unknown"},
{},
OriginWhenCrossOriginPolicy,
True,
),
]
def test(self):
origin = "http://www.scrapy.org"
target = "http://www.example.com"
for (
settings,
response_headers,
request_meta,
policy_class,
check_warning,
        ) in self.params:
mw = RefererMiddleware(Settings(settings))
response = Response(origin, headers=response_headers)
request = Request(target, meta=request_meta)
with warnings.catch_warnings(record=True) as w:
policy = mw.policy(response, request)
assert isinstance(policy, policy_class)
if check_warning:
assert len(w) == 1
assert w[0].category is RuntimeWarning, w[0].message
class TestSettingsPolicyByName:
def test_valid_name(self):
for s, p in [
(POLICY_SCRAPY_DEFAULT, DefaultReferrerPolicy),
(POLICY_NO_REFERRER, NoReferrerPolicy),
(POLICY_NO_REFERRER_WHEN_DOWNGRADE, NoReferrerWhenDowngradePolicy),
(POLICY_SAME_ORIGIN, SameOriginPolicy),
(POLICY_ORIGIN, OriginPolicy),
(POLICY_STRICT_ORIGIN, StrictOriginPolicy),
(POLICY_ORIGIN_WHEN_CROSS_ORIGIN, OriginWhenCrossOriginPolicy),
(POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN, StrictOriginWhenCrossOriginPolicy),
(POLICY_UNSAFE_URL, UnsafeUrlPolicy),
]:
settings = Settings({"REFERRER_POLICY": s})
mw = RefererMiddleware(settings)
assert mw.default_policy == p
def test_valid_name_casevariants(self):
for s, p in [
(POLICY_SCRAPY_DEFAULT, DefaultReferrerPolicy),
(POLICY_NO_REFERRER, NoReferrerPolicy),
(POLICY_NO_REFERRER_WHEN_DOWNGRADE, NoReferrerWhenDowngradePolicy),
(POLICY_SAME_ORIGIN, SameOriginPolicy),
(POLICY_ORIGIN, OriginPolicy),
(POLICY_STRICT_ORIGIN, StrictOriginPolicy),
(POLICY_ORIGIN_WHEN_CROSS_ORIGIN, OriginWhenCrossOriginPolicy),
(POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN, StrictOriginWhenCrossOriginPolicy),
(POLICY_UNSAFE_URL, UnsafeUrlPolicy),
]:
settings = Settings({"REFERRER_POLICY": s.upper()})
mw = RefererMiddleware(settings)
assert mw.default_policy == p
def test_invalid_name(self):
settings = Settings({"REFERRER_POLICY": "some-custom-unknown-policy"})
with pytest.raises(RuntimeError):
RefererMiddleware(settings)
def test_multiple_policy_tokens(self):
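        # Unknown tokens are skipped and, as with the Referrer-Policy header,
        # the last recognized policy token wins.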
# test parsing without space(s) after the comma
settings1 = Settings(
{
"REFERRER_POLICY": (
f"some-custom-unknown-policy,"
f"{POLICY_SAME_ORIGIN},"
f"{POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN},"
f"another-custom-unknown-policy"
)
}
)
mw1 = RefererMiddleware(settings1)
assert mw1.default_policy == StrictOriginWhenCrossOriginPolicy
# test parsing with space(s) after the comma
settings2 = Settings(
{
"REFERRER_POLICY": (
f"{POLICY_STRICT_ORIGIN_WHEN_CROSS_ORIGIN},"
f" another-custom-unknown-policy,"
f" {POLICY_UNSAFE_URL}"
)
}
)
mw2 = RefererMiddleware(settings2)
assert mw2.default_policy == UnsafeUrlPolicy
def test_multiple_policy_tokens_all_invalid(self):
settings = Settings(
{
"REFERRER_POLICY": (
"some-custom-unknown-policy,"
"another-custom-unknown-policy,"
"yet-another-custom-unknown-policy"
)
}
)
with pytest.raises(RuntimeError):
RefererMiddleware(settings)
class TestPolicyHeaderPrecedence001(MixinUnsafeUrl, TestRefererMiddleware):
settings = {"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.SameOriginPolicy"}
resp_headers = {"Referrer-Policy": POLICY_UNSAFE_URL.upper()}
class TestPolicyHeaderPrecedence002(MixinNoReferrer, TestRefererMiddleware):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.NoReferrerWhenDowngradePolicy"
}
resp_headers = {"Referrer-Policy": POLICY_NO_REFERRER.swapcase()}
class TestPolicyHeaderPrecedence003(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginWhenCrossOriginPolicy"
}
resp_headers = {"Referrer-Policy": POLICY_NO_REFERRER_WHEN_DOWNGRADE.title()}
class TestPolicyHeaderPrecedence004(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
"""
The empty string means "no-referrer-when-downgrade"
"""
settings = {
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_logstats.py | tests/test_logstats.py | from datetime import datetime
import pytest
from scrapy.extensions.logstats import LogStats
from scrapy.utils.test import get_crawler
from tests.spiders import SimpleSpider
class TestLogStats:
def setup_method(self):
self.crawler = get_crawler(SimpleSpider)
self.spider = self.crawler._create_spider("spidey")
self.stats = self.crawler.stats
self.stats.set_value("response_received_count", 4802)
self.stats.set_value("item_scraped_count", 3201)
def test_stats_calculations(self):
logstats = LogStats.from_crawler(self.crawler)
with pytest.raises(AttributeError):
logstats.pagesprev
with pytest.raises(AttributeError):
logstats.itemsprev
logstats.spider_opened(self.spider)
assert logstats.pagesprev == 4802
assert logstats.itemsprev == 3201
logstats.calculate_stats()
assert logstats.items == 3201
assert logstats.pages == 4802
assert logstats.irate == 0.0
assert logstats.prate == 0.0
assert logstats.pagesprev == 4802
assert logstats.itemsprev == 3201
# Simulate what happens after a minute
self.stats.set_value("response_received_count", 5187)
self.stats.set_value("item_scraped_count", 3492)
logstats.calculate_stats()
assert logstats.items == 3492
assert logstats.pages == 5187
assert logstats.irate == 291.0
assert logstats.prate == 385.0
assert logstats.pagesprev == 5187
assert logstats.itemsprev == 3492
# Simulate when spider closes after running for 30 mins
self.stats.set_value("start_time", datetime.fromtimestamp(1655100172))
self.stats.set_value("finish_time", datetime.fromtimestamp(1655101972))
logstats.spider_closed(self.spider, "test reason")
assert self.stats.get_value("responses_per_minute") == 172.9
assert self.stats.get_value("items_per_minute") == 116.4
def test_stats_calculations_no_time(self):
"""The stat values should be None since the start and finish time are
not available.
"""
logstats = LogStats.from_crawler(self.crawler)
logstats.spider_closed(self.spider, "test reason")
assert self.stats.get_value("responses_per_minute") is None
assert self.stats.get_value("items_per_minute") is None
def test_stats_calculation_no_elapsed_time(self):
"""The stat values should be None since the elapsed time is 0."""
logstats = LogStats.from_crawler(self.crawler)
self.stats.set_value("start_time", datetime.fromtimestamp(1655100172))
self.stats.set_value("finish_time", datetime.fromtimestamp(1655100172))
logstats.spider_closed(self.spider, "test reason")
assert self.stats.get_value("responses_per_minute") is None
assert self.stats.get_value("items_per_minute") is None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_trackref.py | tests/test_utils_trackref.py | from io import StringIO
from time import sleep, time
from unittest import mock
import pytest
from scrapy.utils import trackref
class Foo(trackref.object_ref):
pass
class Bar(trackref.object_ref):
pass
@pytest.fixture(autouse=True)
def clear_refs() -> None:
trackref.live_refs.clear()
def test_format_live_refs():
o1 = Foo() # noqa: F841
o2 = Bar() # noqa: F841
o3 = Foo() # noqa: F841
assert (
trackref.format_live_refs()
== """\
Live References
Bar 1 oldest: 0s ago
Foo 2 oldest: 0s ago
"""
)
assert (
trackref.format_live_refs(ignore=Foo)
== """\
Live References
Bar 1 oldest: 0s ago
"""
)
@mock.patch("sys.stdout", new_callable=StringIO)
def test_print_live_refs_empty(stdout):
trackref.print_live_refs()
assert stdout.getvalue() == "Live References\n\n\n"
@mock.patch("sys.stdout", new_callable=StringIO)
def test_print_live_refs_with_objects(stdout):
o1 = Foo() # noqa: F841
trackref.print_live_refs()
assert (
stdout.getvalue()
== """\
Live References
Foo 1 oldest: 0s ago\n\n"""
)
def test_get_oldest():
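    # object_ref subclasses record their creation time; make sure o3 is created
    # measurably later than o1, or skip if the clock is not precise enough.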
o1 = Foo()
o1_time = time()
o2 = Bar()
o3_time = time()
if o3_time <= o1_time:
sleep(0.01)
o3_time = time()
if o3_time <= o1_time:
pytest.skip("time.time is not precise enough")
o3 = Foo() # noqa: F841
assert trackref.get_oldest("Foo") is o1
assert trackref.get_oldest("Bar") is o2
assert trackref.get_oldest("XXX") is None
def test_iter_all():
o1 = Foo()
o2 = Bar() # noqa: F841
o3 = Foo()
assert set(trackref.iter_all("Foo")) == {o1, o3}
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_dupefilters.py | tests/test_dupefilters.py | import hashlib
import shutil
import sys
import tempfile
from pathlib import Path
from warnings import catch_warnings
from testfixtures import LogCapture
from scrapy.core.scheduler import Scheduler
from scrapy.dupefilters import BaseDupeFilter, RFPDupeFilter
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Request
from scrapy.utils.python import to_bytes
from scrapy.utils.test import get_crawler
from tests.spiders import SimpleSpider
def _get_dupefilter(*, crawler=None, settings=None, open_=True):
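    """Build a dupefilter through a Scheduler, optionally opening it before returning it."""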
if crawler is None:
crawler = get_crawler(settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
dupefilter = scheduler.df
if open_:
dupefilter.open()
return dupefilter
class FromCrawlerRFPDupeFilter(RFPDupeFilter):
@classmethod
def from_crawler(cls, crawler):
df = super().from_crawler(crawler)
df.method = "from_crawler"
return df
class DirectDupeFilter:
method = "n/a"
class TestRFPDupeFilter:
def test_df_from_crawler_scheduler(self):
settings = {
"DUPEFILTER_DEBUG": True,
"DUPEFILTER_CLASS": FromCrawlerRFPDupeFilter,
}
crawler = get_crawler(settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
assert scheduler.df.debug
assert scheduler.df.method == "from_crawler"
def test_df_direct_scheduler(self):
settings = {
"DUPEFILTER_CLASS": DirectDupeFilter,
}
crawler = get_crawler(settings_dict=settings)
scheduler = Scheduler.from_crawler(crawler)
assert scheduler.df.method == "n/a"
def test_filter(self):
dupefilter = _get_dupefilter()
r1 = Request("http://scrapytest.org/1")
r2 = Request("http://scrapytest.org/2")
r3 = Request("http://scrapytest.org/2")
assert not dupefilter.request_seen(r1)
assert dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
assert dupefilter.request_seen(r3)
dupefilter.close("finished")
def test_dupefilter_path(self):
r1 = Request("http://scrapytest.org/1")
r2 = Request("http://scrapytest.org/2")
path = tempfile.mkdtemp()
try:
df = _get_dupefilter(settings={"JOBDIR": path}, open_=False)
try:
df.open()
assert not df.request_seen(r1)
assert df.request_seen(r1)
finally:
df.close("finished")
df2 = _get_dupefilter(settings={"JOBDIR": path}, open_=False)
assert df != df2
try:
df2.open()
assert df2.request_seen(r1)
assert not df2.request_seen(r2)
assert df2.request_seen(r2)
finally:
df2.close("finished")
finally:
shutil.rmtree(path)
def test_request_fingerprint(self):
"""Test if customization of request_fingerprint method will change
output of request_seen.
"""
dupefilter = _get_dupefilter()
r1 = Request("http://scrapytest.org/index.html")
r2 = Request("http://scrapytest.org/INDEX.html")
assert not dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
dupefilter.close("finished")
class RequestFingerprinter:
def fingerprint(self, request):
fp = hashlib.sha1()
fp.update(to_bytes(request.url.lower()))
return fp.digest()
settings = {"REQUEST_FINGERPRINTER_CLASS": RequestFingerprinter}
case_insensitive_dupefilter = _get_dupefilter(settings=settings)
assert not case_insensitive_dupefilter.request_seen(r1)
assert case_insensitive_dupefilter.request_seen(r2)
case_insensitive_dupefilter.close("finished")
def test_seenreq_newlines(self):
r"""Checks against adding duplicate \r to
line endings on Windows platforms."""
r1 = Request("http://scrapytest.org/1")
path = tempfile.mkdtemp()
crawler = get_crawler(settings_dict={"JOBDIR": path})
try:
scheduler = Scheduler.from_crawler(crawler)
df = scheduler.df
df.open()
df.request_seen(r1)
df.close("finished")
with Path(path, "requests.seen").open("rb") as seen_file:
line = next(seen_file).decode()
assert not line.endswith("\r\r\n")
if sys.platform == "win32":
assert line.endswith("\r\n")
else:
assert line.endswith("\n")
finally:
shutil.rmtree(path)
def test_log(self):
with LogCapture() as log:
settings = {
"DUPEFILTER_DEBUG": False,
"DUPEFILTER_CLASS": FromCrawlerRFPDupeFilter,
}
crawler = get_crawler(SimpleSpider, settings_dict=settings)
spider = SimpleSpider.from_crawler(crawler)
dupefilter = _get_dupefilter(crawler=crawler)
r1 = Request("http://scrapytest.org/index.html")
r2 = Request("http://scrapytest.org/index.html")
dupefilter.log(r1, spider)
dupefilter.log(r2, spider)
assert crawler.stats.get_value("dupefilter/filtered") == 2
log.check_present(
(
"scrapy.dupefilters",
"DEBUG",
"Filtered duplicate request: <GET http://scrapytest.org/index.html> - no more"
" duplicates will be shown (see DUPEFILTER_DEBUG to show all duplicates)",
)
)
dupefilter.close("finished")
def test_log_debug(self):
with LogCapture() as log:
settings = {
"DUPEFILTER_DEBUG": True,
"DUPEFILTER_CLASS": FromCrawlerRFPDupeFilter,
}
crawler = get_crawler(SimpleSpider, settings_dict=settings)
spider = SimpleSpider.from_crawler(crawler)
dupefilter = _get_dupefilter(crawler=crawler)
r1 = Request("http://scrapytest.org/index.html")
r2 = Request(
"http://scrapytest.org/index.html",
headers={"Referer": "http://scrapytest.org/INDEX.html"},
)
dupefilter.log(r1, spider)
dupefilter.log(r2, spider)
assert crawler.stats.get_value("dupefilter/filtered") == 2
log.check_present(
(
"scrapy.dupefilters",
"DEBUG",
"Filtered duplicate request: <GET http://scrapytest.org/index.html> (referer: None)",
)
)
log.check_present(
(
"scrapy.dupefilters",
"DEBUG",
"Filtered duplicate request: <GET http://scrapytest.org/index.html>"
" (referer: http://scrapytest.org/INDEX.html)",
)
)
dupefilter.close("finished")
def test_log_debug_default_dupefilter(self):
with LogCapture() as log:
settings = {
"DUPEFILTER_DEBUG": True,
}
crawler = get_crawler(SimpleSpider, settings_dict=settings)
spider = SimpleSpider.from_crawler(crawler)
dupefilter = _get_dupefilter(crawler=crawler)
r1 = Request("http://scrapytest.org/index.html")
r2 = Request(
"http://scrapytest.org/index.html",
headers={"Referer": "http://scrapytest.org/INDEX.html"},
)
dupefilter.log(r1, spider)
dupefilter.log(r2, spider)
assert crawler.stats.get_value("dupefilter/filtered") == 2
log.check_present(
(
"scrapy.dupefilters",
"DEBUG",
"Filtered duplicate request: <GET http://scrapytest.org/index.html> (referer: None)",
)
)
log.check_present(
(
"scrapy.dupefilters",
"DEBUG",
"Filtered duplicate request: <GET http://scrapytest.org/index.html>"
" (referer: http://scrapytest.org/INDEX.html)",
)
)
dupefilter.close("finished")
class TestBaseDupeFilter:
def test_log_deprecation(self):
dupefilter = _get_dupefilter(
settings={"DUPEFILTER_CLASS": BaseDupeFilter},
)
with catch_warnings(record=True) as warning_list:
dupefilter.log(None, None)
assert len(warning_list) == 1
assert (
str(warning_list[0].message)
== "Calling BaseDupeFilter.log() is deprecated."
)
assert warning_list[0].category == ScrapyDeprecationWarning
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_check.py | tests/test_command_check.py | from __future__ import annotations
import sys
from io import StringIO
from typing import TYPE_CHECKING
from unittest import TestCase
from unittest.mock import Mock, PropertyMock, call, patch
from scrapy.commands.check import Command, TextTestResult
from tests.test_commands import TestProjectBase
from tests.utils.cmdline import proc
if TYPE_CHECKING:
from pathlib import Path
class DummyTestCase(TestCase):
pass
class TestCheckCommand(TestProjectBase):
spider_name = "check_spider"
def _write_contract(self, proj_path: Path, contracts: str, parse_def: str) -> None:
spider = proj_path / self.project_name / "spiders" / "checkspider.py"
spider.write_text(
f"""
import scrapy
class CheckSpider(scrapy.Spider):
name = '{self.spider_name}'
start_urls = ['data:,']
custom_settings = {{
"DOWNLOAD_DELAY": 0,
}}
def parse(self, response, **cb_kwargs):
\"\"\"
@url data:,
{contracts}
\"\"\"
{parse_def}
""",
encoding="utf-8",
)
def _test_contract(
self, proj_path: Path, contracts: str = "", parse_def: str = "pass"
) -> None:
self._write_contract(proj_path, contracts, parse_def)
ret, out, err = proc("check", cwd=proj_path)
assert "F" not in out
assert "OK" in err
assert ret == 0
def test_check_returns_requests_contract(self, proj_path: Path) -> None:
contracts = """
@returns requests 1
"""
parse_def = """
yield scrapy.Request(url='http://next-url.com')
"""
self._test_contract(proj_path, contracts, parse_def)
def test_check_returns_items_contract(self, proj_path: Path) -> None:
contracts = """
@returns items 1
"""
parse_def = """
yield {'key1': 'val1', 'key2': 'val2'}
"""
self._test_contract(proj_path, contracts, parse_def)
def test_check_cb_kwargs_contract(self, proj_path: Path) -> None:
contracts = """
@cb_kwargs {"arg1": "val1", "arg2": "val2"}
"""
parse_def = """
if len(cb_kwargs.items()) == 0:
raise Exception("Callback args not set")
"""
self._test_contract(proj_path, contracts, parse_def)
def test_check_scrapes_contract(self, proj_path: Path) -> None:
contracts = """
@scrapes key1 key2
"""
parse_def = """
yield {'key1': 'val1', 'key2': 'val2'}
"""
self._test_contract(proj_path, contracts, parse_def)
def test_check_all_default_contracts(self, proj_path: Path) -> None:
contracts = """
@returns items 1
@returns requests 1
@scrapes key1 key2
@cb_kwargs {"arg1": "val1", "arg2": "val2"}
"""
parse_def = """
yield {'key1': 'val1', 'key2': 'val2'}
yield scrapy.Request(url='http://next-url.com')
if len(cb_kwargs.items()) == 0:
raise Exception("Callback args not set")
"""
self._test_contract(proj_path, contracts, parse_def)
def test_SCRAPY_CHECK_set(self, proj_path: Path) -> None:
parse_def = """
import os
if not os.environ.get('SCRAPY_CHECK'):
raise Exception('SCRAPY_CHECK not set')
"""
self._test_contract(proj_path, parse_def=parse_def)
def test_printSummary_with_unsuccessful_test_result_without_errors_and_without_failures(
self,
) -> None:
result = TextTestResult(Mock(), descriptions=False, verbosity=1)
start_time = 1.0
stop_time = 2.0
result.testsRun = 5
result.failures = []
result.errors = []
result.unexpectedSuccesses = [DummyTestCase(), DummyTestCase()]
with patch.object(result.stream, "write") as mock_write:
result.printSummary(start_time, stop_time)
mock_write.assert_has_calls([call("FAILED"), call("\n")])
def test_printSummary_with_unsuccessful_test_result_with_only_failures(
self,
) -> None:
result = TextTestResult(Mock(), descriptions=False, verbosity=1)
start_time = 1.0
stop_time = 2.0
result.testsRun = 5
result.failures = [(DummyTestCase(), "failure")]
result.errors = []
with patch.object(result.stream, "writeln") as mock_write:
result.printSummary(start_time, stop_time)
mock_write.assert_called_with(" (failures=1)")
def test_printSummary_with_unsuccessful_test_result_with_only_errors(self) -> None:
result = TextTestResult(Mock(), descriptions=False, verbosity=1)
start_time = 1.0
stop_time = 2.0
result.testsRun = 5
result.failures = []
result.errors = [(DummyTestCase(), "error")]
with patch.object(result.stream, "writeln") as mock_write:
result.printSummary(start_time, stop_time)
mock_write.assert_called_with(" (errors=1)")
def test_printSummary_with_unsuccessful_test_result_with_both_failures_and_errors(
self,
) -> None:
result = TextTestResult(Mock(), descriptions=False, verbosity=1)
start_time = 1.0
stop_time = 2.0
result.testsRun = 5
result.failures = [(DummyTestCase(), "failure")]
result.errors = [(DummyTestCase(), "error")]
with patch.object(result.stream, "writeln") as mock_write:
result.printSummary(start_time, stop_time)
mock_write.assert_called_with(" (failures=1, errors=1)")
@patch("scrapy.commands.check.ContractsManager")
def test_run_with_opts_list_prints_spider(self, cm_cls_mock) -> None:
output = StringIO()
sys.stdout = output
cmd = Command()
cmd.settings = Mock(getwithbase=Mock(return_value={}))
cm_cls_mock.return_value = cm_mock = Mock()
spider_loader_mock = Mock()
cmd.crawler_process = Mock(spider_loader=spider_loader_mock)
spider_name = "FakeSpider"
spider_cls_mock = Mock()
type(spider_cls_mock).name = PropertyMock(return_value=spider_name)
spider_loader_mock.load.side_effect = lambda x: {spider_name: spider_cls_mock}[
x
]
tested_methods = ["fakeMethod1", "fakeMethod2"]
cm_mock.tested_methods_from_spidercls.side_effect = lambda x: {
spider_cls_mock: tested_methods
}[x]
cmd.run([spider_name], Mock(list=True))
assert output.getvalue() == "FakeSpider\n * fakeMethod1\n * fakeMethod2\n"
sys.stdout = sys.__stdout__
@patch("scrapy.commands.check.ContractsManager")
def test_run_without_opts_list_does_not_crawl_spider_with_no_tested_methods(
self, cm_cls_mock
) -> None:
cmd = Command()
cmd.settings = Mock(getwithbase=Mock(return_value={}))
cm_cls_mock.return_value = cm_mock = Mock()
spider_loader_mock = Mock()
cmd.crawler_process = Mock(spider_loader=spider_loader_mock)
spider_name = "FakeSpider"
spider_cls_mock = Mock()
spider_loader_mock.load.side_effect = lambda x: {spider_name: spider_cls_mock}[
x
]
tested_methods: list[str] = []
cm_mock.tested_methods_from_spidercls.side_effect = lambda x: {
spider_cls_mock: tested_methods
}[x]
cmd.run([spider_name], Mock(list=False))
cmd.crawler_process.crawl.assert_not_called()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_scrapy__getattr__.py | tests/test_scrapy__getattr__.py | import warnings
def test_deprecated_concurrent_requests_per_ip_attribute():
with warnings.catch_warnings(record=True) as warns:
from scrapy.settings.default_settings import ( # noqa: PLC0415
CONCURRENT_REQUESTS_PER_IP,
)
assert CONCURRENT_REQUESTS_PER_IP is not None
assert isinstance(CONCURRENT_REQUESTS_PER_IP, int)
assert (
"The scrapy.settings.default_settings.CONCURRENT_REQUESTS_PER_IP attribute is deprecated, use scrapy.settings.default_settings.CONCURRENT_REQUESTS_PER_DOMAIN instead."
in warns[0].message.args
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_request_attribute_binding.py | tests/test_request_attribute_binding.py | from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from scrapy import Request, signals
from scrapy.http.response import Response
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import SingleRequestSpider
OVERRIDDEN_URL = "https://example.org"
class ProcessResponseMiddleware:
def process_response(self, request, response):
return response.replace(request=Request(OVERRIDDEN_URL))
class RaiseExceptionRequestMiddleware:
def process_request(self, request):
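        # Always raises ZeroDivisionError; the return below is never reached.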
1 / 0
return request
class CatchExceptionOverrideRequestMiddleware:
def process_exception(self, request, exception):
return Response(
url="http://localhost/",
body=b"Caught " + exception.__class__.__name__.encode("utf-8"),
request=Request(OVERRIDDEN_URL),
)
class CatchExceptionDoNotOverrideRequestMiddleware:
def process_exception(self, request, exception):
return Response(
url="http://localhost/",
body=b"Caught " + exception.__class__.__name__.encode("utf-8"),
)
class AlternativeCallbacksSpider(SingleRequestSpider):
name = "alternative_callbacks_spider"
def alt_callback(self, response, foo=None):
self.logger.info("alt_callback was invoked with foo=%s", foo)
class AlternativeCallbacksMiddleware:
def __init__(self, crawler):
self.crawler = crawler
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_response(self, request, response):
new_request = request.replace(
url=OVERRIDDEN_URL,
callback=self.crawler.spider.alt_callback,
cb_kwargs={"foo": "bar"},
)
return response.replace(request=new_request)
class TestCrawl:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
@inlineCallbacks
def test_response_200(self):
url = self.mockserver.url("/status?n=200")
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
response = crawler.spider.meta["responses"][0]
assert response.request.url == url
@inlineCallbacks
def test_response_error(self):
for status in ("404", "500"):
url = self.mockserver.url(f"/status?n={status}")
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
failure = crawler.spider.meta["failure"]
response = failure.value.response
assert failure.request.url == url
assert response.request.url == url
@inlineCallbacks
def test_downloader_middleware_raise_exception(self):
url = self.mockserver.url("/status?n=200")
crawler = get_crawler(
SingleRequestSpider,
{
"DOWNLOADER_MIDDLEWARES": {
RaiseExceptionRequestMiddleware: 590,
},
},
)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
failure = crawler.spider.meta["failure"]
assert failure.request.url == url
assert isinstance(failure.value, ZeroDivisionError)
@inlineCallbacks
def test_downloader_middleware_override_request_in_process_response(self):
"""
        Downloader middleware which returns a response with a specific 'request' attribute.
* The spider callback should receive the overridden response.request
* Handlers listening to the response_received signal should receive the overridden response.request
* The "crawled" log message should show the overridden response.request
"""
signal_params = {}
def signal_handler(response, request, spider):
signal_params["response"] = response
signal_params["request"] = request
url = self.mockserver.url("/status?n=200")
crawler = get_crawler(
SingleRequestSpider,
{
"DOWNLOADER_MIDDLEWARES": {
ProcessResponseMiddleware: 595,
}
},
)
crawler.signals.connect(signal_handler, signal=signals.response_received)
with LogCapture() as log:
yield crawler.crawl(seed=url, mockserver=self.mockserver)
response = crawler.spider.meta["responses"][0]
assert response.request.url == OVERRIDDEN_URL
assert signal_params["response"].url == url
assert signal_params["request"].url == OVERRIDDEN_URL
log.check_present(
(
"scrapy.core.engine",
"DEBUG",
f"Crawled (200) <GET {OVERRIDDEN_URL}> (referer: None)",
),
)
@inlineCallbacks
def test_downloader_middleware_override_in_process_exception(self):
"""
An exception is raised but caught by the next middleware, which
returns a Response with a specific 'request' attribute.
The spider callback should receive the overridden response.request
"""
url = self.mockserver.url("/status?n=200")
crawler = get_crawler(
SingleRequestSpider,
{
"DOWNLOADER_MIDDLEWARES": {
RaiseExceptionRequestMiddleware: 590,
CatchExceptionOverrideRequestMiddleware: 595,
},
},
)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
response = crawler.spider.meta["responses"][0]
assert response.body == b"Caught ZeroDivisionError"
assert response.request.url == OVERRIDDEN_URL
@inlineCallbacks
def test_downloader_middleware_do_not_override_in_process_exception(self):
"""
An exception is raised but caught by the next middleware, which
returns a Response without a specific 'request' attribute.
The spider callback should receive the original response.request
"""
url = self.mockserver.url("/status?n=200")
crawler = get_crawler(
SingleRequestSpider,
{
"DOWNLOADER_MIDDLEWARES": {
RaiseExceptionRequestMiddleware: 590,
CatchExceptionDoNotOverrideRequestMiddleware: 595,
},
},
)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
response = crawler.spider.meta["responses"][0]
assert response.body == b"Caught ZeroDivisionError"
assert response.request.url == url
@inlineCallbacks
def test_downloader_middleware_alternative_callback(self):
"""
Downloader middleware which returns a response with a
specific 'request' attribute, with an alternative callback
"""
crawler = get_crawler(
AlternativeCallbacksSpider,
{
"DOWNLOADER_MIDDLEWARES": {
AlternativeCallbacksMiddleware: 595,
}
},
)
with LogCapture() as log:
url = self.mockserver.url("/status?n=200")
yield crawler.crawl(seed=url, mockserver=self.mockserver)
log.check_present(
(
"alternative_callbacks_spider",
"INFO",
"alt_callback was invoked with foo=bar",
),
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_webclient.py | tests/test_webclient.py | """
Tests borrowed from the twisted.web.client tests.
"""
from __future__ import annotations
from urllib.parse import urlparse
import OpenSSL.SSL
import pytest
from pytest_twisted import async_yield_fixture
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.internet.testing import StringTransport
from twisted.protocols.policies import WrappingFactory
from twisted.web import resource, server, static, util
from twisted.web.client import _makeGetterFactory
from scrapy.core.downloader import webclient as client
from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
from scrapy.http import Headers, Request
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.python import to_bytes, to_unicode
from scrapy.utils.test import get_crawler
from tests.mockserver.http_resources import (
ForeverTakingResource,
HostHeaderResource,
PayloadResource,
)
from tests.mockserver.utils import ssl_context_factory
from tests.test_core_downloader import TestContextFactoryBase
def getPage(url, contextFactory=None, response_transform=None, *args, **kwargs):
"""Adapted version of twisted.web.client.getPage"""
def _clientfactory(url, *args, **kwargs):
url = to_unicode(url)
timeout = kwargs.pop("timeout", 0)
f = client.ScrapyHTTPClientFactory(
Request(url, *args, **kwargs), timeout=timeout
)
f.deferred.addCallback(response_transform or (lambda r: r.body))
return f
return _makeGetterFactory(
to_bytes(url),
_clientfactory,
contextFactory=contextFactory,
*args,
**kwargs,
).deferred
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestScrapyHTTPPageGetter:
def test_earlyHeaders(self):
        # basic test stolen from twisted HTTPPageGetter
factory = client.ScrapyHTTPClientFactory(
Request(
url="http://foo/bar",
body="some data",
headers={
"Host": "example.net",
"User-Agent": "fooble",
"Cookie": "blah blah",
"Content-Length": "12981",
"Useful": "value",
},
)
)
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Content-Length: 9\r\n"
b"Useful: value\r\n"
b"Connection: close\r\n"
b"User-Agent: fooble\r\n"
b"Host: example.net\r\n"
b"Cookie: blah blah\r\n"
b"\r\n"
b"some data",
)
# test minimal sent headers
factory = client.ScrapyHTTPClientFactory(Request("http://foo/bar"))
self._test(factory, b"GET /bar HTTP/1.0\r\nHost: foo\r\n\r\n")
# test a simple POST with body and content-type
factory = client.ScrapyHTTPClientFactory(
Request(
method="POST",
url="http://foo/bar",
body="name=value",
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
)
self._test(
factory,
b"POST /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"Connection: close\r\n"
b"Content-Type: application/x-www-form-urlencoded\r\n"
b"Content-Length: 10\r\n"
b"\r\n"
b"name=value",
)
# test a POST method with no body provided
factory = client.ScrapyHTTPClientFactory(
Request(method="POST", url="http://foo/bar")
)
self._test(
factory,
b"POST /bar HTTP/1.0\r\nHost: foo\r\nContent-Length: 0\r\n\r\n",
)
# test with single and multivalued headers
factory = client.ScrapyHTTPClientFactory(
Request(
url="http://foo/bar",
headers={
"X-Meta-Single": "single",
"X-Meta-Multivalued": ["value1", "value2"],
},
)
)
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"X-Meta-Multivalued: value1\r\n"
b"X-Meta-Multivalued: value2\r\n"
b"X-Meta-Single: single\r\n"
b"\r\n",
)
# same test with single and multivalued headers but using Headers class
factory = client.ScrapyHTTPClientFactory(
Request(
url="http://foo/bar",
headers=Headers(
{
"X-Meta-Single": "single",
"X-Meta-Multivalued": ["value1", "value2"],
}
),
)
)
self._test(
factory,
b"GET /bar HTTP/1.0\r\n"
b"Host: foo\r\n"
b"X-Meta-Multivalued: value1\r\n"
b"X-Meta-Multivalued: value2\r\n"
b"X-Meta-Single: single\r\n"
b"\r\n",
)
def _test(self, factory, testvalue):
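        # Header order is not guaranteed, so compare the raw request lines as sets.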
transport = StringTransport()
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
assert set(transport.value().splitlines()) == set(testvalue.splitlines())
return testvalue
def test_non_standard_line_endings(self):
# regression test for: http://dev.scrapy.org/ticket/258
factory = client.ScrapyHTTPClientFactory(Request(url="http://foo/bar"))
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.headers = Headers()
protocol.dataReceived(b"HTTP/1.0 200 OK\n")
protocol.dataReceived(b"Hello: World\n")
protocol.dataReceived(b"Foo: Bar\n")
protocol.dataReceived(b"\n")
assert protocol.headers == Headers({"Hello": ["World"], "Foo": ["Bar"]})
class EncodingResource(resource.Resource):
out_encoding = "cp1251"
def render(self, request):
body = to_unicode(request.content.read())
request.setHeader(b"content-encoding", self.out_encoding)
return body.encode(self.out_encoding)
class BrokenDownloadResource(resource.Resource):
def render(self, request):
# only sends 3 bytes even though it claims to send 5
request.setHeader(b"content-length", b"5")
request.write(b"abc")
return b""
class ErrorResource(resource.Resource):
def render(self, request):
request.setResponseCode(401)
if request.args.get(b"showlength"):
request.setHeader(b"content-length", b"0")
return b""
class NoLengthResource(resource.Resource):
def render(self, request):
return b"nolength"
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestWebClient:
def _listen(self, site):
from twisted.internet import reactor
return reactor.listenTCP(0, site, interface="127.0.0.1")
@pytest.fixture
def wrapper(self, tmp_path):
(tmp_path / "file").write_bytes(b"0123456789")
r = static.File(str(tmp_path))
r.putChild(b"redirect", util.Redirect(b"/file"))
r.putChild(b"wait", ForeverTakingResource())
r.putChild(b"error", ErrorResource())
r.putChild(b"nolength", NoLengthResource())
r.putChild(b"host", HostHeaderResource())
r.putChild(b"payload", PayloadResource())
r.putChild(b"broken", BrokenDownloadResource())
r.putChild(b"encoding", EncodingResource())
site = server.Site(r, timeout=None)
return WrappingFactory(site)
@async_yield_fixture
async def server_port(self, wrapper):
port = self._listen(wrapper)
yield port.getHost().port
await port.stopListening()
@pytest.fixture
def server_url(self, server_port):
return f"http://127.0.0.1:{server_port}/"
@inlineCallbacks
def testPayload(self, server_url):
s = "0123456789" * 10
body = yield getPage(server_url + "payload", body=s)
assert body == to_bytes(s)
@inlineCallbacks
def testHostHeader(self, server_port, server_url):
        # if we pass the Host header explicitly, it should be used; otherwise
        # it should be extracted from the url
body = yield getPage(server_url + "host")
assert body == to_bytes(f"127.0.0.1:{server_port}")
body = yield getPage(server_url + "host", headers={"Host": "www.example.com"})
assert body == to_bytes("www.example.com")
@inlineCallbacks
def test_getPage(self, server_url):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
body = yield getPage(server_url + "file")
assert body == b"0123456789"
@inlineCallbacks
def test_getPageHead(self, server_url):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is C{HEAD} and there is a successful
response code.
"""
def _getPage(method):
return getPage(server_url + "file", method=method)
body = yield _getPage("head")
assert body == b""
body = yield _getPage("HEAD")
assert body == b""
@inlineCallbacks
def test_timeoutNotTriggering(self, server_port, server_url):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
body = yield getPage(server_url + "host", timeout=100)
assert body == to_bytes(f"127.0.0.1:{server_port}")
@inlineCallbacks
def test_timeoutTriggering(self, wrapper, server_url):
"""
When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request, the
L{Deferred} is errbacked with a L{error.TimeoutError}.
"""
with pytest.raises(defer.TimeoutError):
yield getPage(server_url + "wait", timeout=0.000001)
# Clean up the server which is hanging around not doing
# anything.
connected = list(wrapper.protocols.keys())
# There might be nothing here if the server managed to already see
# that the connection was lost.
if connected:
connected[0].transport.loseConnection()
@inlineCallbacks
def testNotFound(self, server_url):
body = yield getPage(server_url + "notsuchfile")
assert b"404 - No Such Resource" in body
@inlineCallbacks
def testFactoryInfo(self, server_url):
from twisted.internet import reactor
url = server_url + "file"
parsed = urlparse(url)
factory = client.ScrapyHTTPClientFactory(Request(url))
reactor.connectTCP(parsed.hostname, parsed.port, factory)
yield factory.deferred
assert factory.status == b"200"
assert factory.version.startswith(b"HTTP/")
assert factory.message == b"OK"
assert factory.response_headers[b"content-length"] == b"10"
@inlineCallbacks
def testRedirect(self, server_url):
body = yield getPage(server_url + "redirect")
assert (
body
== b'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
b' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
b'<a href="/file">click here</a>\n </body>\n</html>\n'
)
@inlineCallbacks
def test_encoding(self, server_url):
"""Test that non-standart body encoding matches
Content-Encoding header"""
original_body = b"\xd0\x81\xd1\x8e\xd0\xaf"
response = yield getPage(
server_url + "encoding", body=original_body, response_transform=lambda r: r
)
content_encoding = to_unicode(response.headers[b"Content-Encoding"])
assert content_encoding == EncodingResource.out_encoding
assert response.body.decode(content_encoding) == to_unicode(original_body)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestWebClientSSL(TestContextFactoryBase):
@inlineCallbacks
def testPayload(self, server_url):
s = "0123456789" * 10
body = yield getPage(server_url + "payload", body=s)
assert body == to_bytes(s)
class TestWebClientCustomCiphersSSL(TestWebClientSSL):
# we try to use a cipher that is not enabled by default in OpenSSL
custom_ciphers = "CAMELLIA256-SHA"
context_factory = ssl_context_factory(cipher_string=custom_ciphers)
@inlineCallbacks
def testPayload(self, server_url):
s = "0123456789" * 10
crawler = get_crawler(
settings_dict={"DOWNLOADER_CLIENT_TLS_CIPHERS": self.custom_ciphers}
)
client_context_factory = build_from_crawler(ScrapyClientContextFactory, crawler)
body = yield getPage(
server_url + "payload", body=s, contextFactory=client_context_factory
)
assert body == to_bytes(s)
@inlineCallbacks
def testPayloadDisabledCipher(self, server_url):
s = "0123456789" * 10
crawler = get_crawler(
settings_dict={
"DOWNLOADER_CLIENT_TLS_CIPHERS": "ECDHE-RSA-AES256-GCM-SHA384"
}
)
client_context_factory = build_from_crawler(ScrapyClientContextFactory, crawler)
with pytest.raises(OpenSSL.SSL.Error):
yield getPage(
server_url + "payload", body=s, contextFactory=client_context_factory
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_crawl.py | tests/test_crawl.py | from __future__ import annotations
import json
import logging
from ipaddress import IPv4Address
from socket import gethostbyname
from typing import TYPE_CHECKING, Any
from urllib.parse import urlencode, urlparse
import pytest
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.internet.ssl import Certificate
from twisted.python.failure import Failure
from scrapy import Spider, signals
from scrapy.crawler import CrawlerRunner
from scrapy.exceptions import CloseSpider, ScrapyDeprecationWarning, StopDownload
from scrapy.http import Request
from scrapy.http.response import Response
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.engine import format_engine_status, get_engine_status
from scrapy.utils.python import to_unicode
from scrapy.utils.test import get_crawler, get_reactor_settings
from tests import NON_EXISTING_RESOLVABLE
from tests.mockserver.http import MockServer
from tests.spiders import (
AsyncDefAsyncioGenComplexSpider,
AsyncDefAsyncioGenExcSpider,
AsyncDefAsyncioGenLoopSpider,
AsyncDefAsyncioGenSpider,
AsyncDefAsyncioReqsReturnSpider,
AsyncDefAsyncioReturnSingleElementSpider,
AsyncDefAsyncioReturnSpider,
AsyncDefAsyncioSpider,
AsyncDefDeferredDirectSpider,
AsyncDefDeferredMaybeWrappedSpider,
AsyncDefDeferredWrappedSpider,
AsyncDefSpider,
BrokenStartSpider,
BytesReceivedCallbackSpider,
BytesReceivedErrbackSpider,
CrawlSpiderWithAsyncCallback,
CrawlSpiderWithAsyncGeneratorCallback,
CrawlSpiderWithErrback,
CrawlSpiderWithParseMethod,
CrawlSpiderWithProcessRequestCallbackKeywordArguments,
DelaySpider,
DuplicateStartSpider,
FollowAllSpider,
HeadersReceivedCallbackSpider,
HeadersReceivedErrbackSpider,
SimpleSpider,
SingleRequestSpider,
StartGoodAndBadOutput,
StartItemSpider,
)
if TYPE_CHECKING:
from scrapy.statscollectors import StatsCollector
class TestCrawl:
mockserver: MockServer
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
@inlineCallbacks
def test_follow_all(self):
crawler = get_crawler(FollowAllSpider)
yield crawler.crawl(mockserver=self.mockserver)
assert len(crawler.spider.urls_visited) == 11 # 10 + start_url
@deferred_f_from_coro_f
async def test_fixed_delay(self):
await self._test_delay(total=3, delay=0.2)
@deferred_f_from_coro_f
async def test_randomized_delay(self):
await self._test_delay(total=3, delay=0.1, randomize=True)
async def _test_delay(
self, total: int, delay: float, randomize: bool = False
) -> None:
crawl_kwargs = {
"maxlatency": delay * 2,
"mockserver": self.mockserver,
"total": total,
}
tolerance = 1 - (0.6 if randomize else 0.2)
settings = {"DOWNLOAD_DELAY": delay, "RANDOMIZE_DOWNLOAD_DELAY": randomize}
crawler = get_crawler(FollowAllSpider, settings)
await crawler.crawl_async(**crawl_kwargs)
assert crawler.spider
assert isinstance(crawler.spider, FollowAllSpider)
times = crawler.spider.times
total_time = times[-1] - times[0]
average = total_time / (len(times) - 1)
assert average > delay * tolerance, f"download delay too small: {average}"
# Ensure that the same test parameters would cause a failure if no
# download delay is set. Otherwise, it means we are using a combination
# of ``total`` and ``delay`` values that are too small for the test
# code above to have any meaning.
settings["DOWNLOAD_DELAY"] = 0
crawler = get_crawler(FollowAllSpider, settings)
await crawler.crawl_async(**crawl_kwargs)
assert crawler.spider
assert isinstance(crawler.spider, FollowAllSpider)
times = crawler.spider.times
total_time = times[-1] - times[0]
average = total_time / (len(times) - 1)
assert average <= delay / tolerance, "test total or delay values are too small"
@inlineCallbacks
def test_timeout_success(self):
crawler = get_crawler(DelaySpider)
yield crawler.crawl(n=0.5, mockserver=self.mockserver)
assert crawler.spider.t1 > 0
assert crawler.spider.t2 > 0
assert crawler.spider.t2 > crawler.spider.t1
@inlineCallbacks
def test_timeout_failure(self):
crawler = get_crawler(DelaySpider, {"DOWNLOAD_TIMEOUT": 0.35})
yield crawler.crawl(n=0.5, mockserver=self.mockserver)
assert crawler.spider.t1 > 0
assert crawler.spider.t2 == 0
assert crawler.spider.t2_err > 0
assert crawler.spider.t2_err > crawler.spider.t1
# server hangs after receiving response headers
crawler = get_crawler(DelaySpider, {"DOWNLOAD_TIMEOUT": 0.35})
yield crawler.crawl(n=0.5, b=1, mockserver=self.mockserver)
assert crawler.spider.t1 > 0
assert crawler.spider.t2 == 0
assert crawler.spider.t2_err > 0
assert crawler.spider.t2_err > crawler.spider.t1
@inlineCallbacks
def test_retry_503(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/status?n=503"), mockserver=self.mockserver
)
self._assert_retried(log)
@inlineCallbacks
def test_retry_conn_failed(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(
"http://localhost:65432/status?n=503", mockserver=self.mockserver
)
self._assert_retried(log)
@inlineCallbacks
def test_retry_dns_error(self):
if NON_EXISTING_RESOLVABLE:
pytest.skip("Non-existing hosts are resolvable")
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
# try to fetch the homepage of a nonexistent domain
yield crawler.crawl(
"http://dns.resolution.invalid./", mockserver=self.mockserver
)
self._assert_retried(log)
@inlineCallbacks
def test_start_bug_before_yield(self):
with LogCapture("scrapy", level=logging.ERROR) as log:
crawler = get_crawler(BrokenStartSpider)
yield crawler.crawl(fail_before_yield=1, mockserver=self.mockserver)
assert len(log.records) == 1
record = log.records[0]
assert record.exc_info is not None
assert record.exc_info[0] is ZeroDivisionError
@inlineCallbacks
def test_start_bug_yielding(self):
with LogCapture("scrapy", level=logging.ERROR) as log:
crawler = get_crawler(BrokenStartSpider)
yield crawler.crawl(fail_yielding=1, mockserver=self.mockserver)
assert len(log.records) == 1
record = log.records[0]
assert record.exc_info is not None
assert record.exc_info[0] is ZeroDivisionError
@inlineCallbacks
def test_start_items(self):
items = []
def _on_item_scraped(item):
items.append(item)
with LogCapture("scrapy", level=logging.ERROR) as log:
crawler = get_crawler(StartItemSpider)
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
assert len(log.records) == 0
assert items == [{"name": "test item"}]
@inlineCallbacks
def test_start_unsupported_output(self):
"""Anything that is not a request is assumed to be an item, avoiding a
potentially expensive call to itemadapter.is_item(), and letting
instead things fail when ItemAdapter is actually used on the
corresponding non-item object."""
items = []
def _on_item_scraped(item):
items.append(item)
with LogCapture("scrapy", level=logging.ERROR) as log:
crawler = get_crawler(StartGoodAndBadOutput)
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
assert len(log.records) == 0
assert len(items) == 3
assert not any(isinstance(item, Request) for item in items)
@inlineCallbacks
def test_start_dupes(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = get_crawler(DuplicateStartSpider, settings)
yield crawler.crawl(
dont_filter=True, distinct_urls=2, dupe_factor=3, mockserver=self.mockserver
)
assert crawler.spider.visited == 6
crawler = get_crawler(DuplicateStartSpider, settings)
yield crawler.crawl(
dont_filter=False,
distinct_urls=3,
dupe_factor=4,
mockserver=self.mockserver,
)
assert crawler.spider.visited == 3
@inlineCallbacks
def test_unbounded_response(self):
        # Completeness of responses without Content-Length or Transfer-Encoding
        # cannot be determined; we treat them as valid but flag them as "partial"
query = urlencode(
{
"raw": """\
HTTP/1.1 200 OK
Server: Apache-Coyote/1.1
X-Powered-By: Servlet 2.4; JBoss-4.2.3.GA (build: SVNTag=JBoss_4_2_3_GA date=200807181417)/JBossWeb-2.0
Set-Cookie: JSESSIONID=08515F572832D0E659FD2B0D8031D75F; Path=/
Pragma: no-cache
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Cache-Control: no-cache
Cache-Control: no-store
Content-Type: text/html;charset=UTF-8
Content-Language: en
Date: Tue, 27 Aug 2013 13:05:05 GMT
Connection: close
foo body
with multiples lines
"""
}
)
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url(f"/raw?{query}"), mockserver=self.mockserver
)
assert str(log).count("Got response 200") == 1
@inlineCallbacks
def test_retry_conn_lost(self):
# connection lost after receiving data
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/drop?abort=0"), mockserver=self.mockserver
)
self._assert_retried(log)
@inlineCallbacks
def test_retry_conn_aborted(self):
# connection lost before receiving data
crawler = get_crawler(SimpleSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/drop?abort=1"), mockserver=self.mockserver
)
self._assert_retried(log)
def _assert_retried(self, log):
assert str(log).count("Retrying") == 2
assert str(log).count("Gave up retrying") == 1
@inlineCallbacks
def test_referer_header(self):
"""Referer header is set by RefererMiddleware unless it is already set"""
req0 = Request(self.mockserver.url("/echo?headers=1&body=0"), dont_filter=1)
req1 = req0.replace()
req2 = req0.replace(headers={"Referer": None})
req3 = req0.replace(headers={"Referer": "http://example.com"})
req0.meta["next"] = req1
req1.meta["next"] = req2
req2.meta["next"] = req3
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(seed=req0, mockserver=self.mockserver)
# basic asserts in case of weird communication errors
assert "responses" in crawler.spider.meta
assert "failures" not in crawler.spider.meta
# start() doesn't set Referer header
        echo0 = json.loads(to_unicode(crawler.spider.meta["responses"][0].body))
assert "Referer" not in echo0["headers"]
# following request sets Referer to the source request url
echo1 = json.loads(to_unicode(crawler.spider.meta["responses"][1].body))
assert echo1["headers"].get("Referer") == [req0.url]
# next request avoids Referer header
echo2 = json.loads(to_unicode(crawler.spider.meta["responses"][2].body))
assert "Referer" not in echo2["headers"]
# last request explicitly sets a Referer header
echo3 = json.loads(to_unicode(crawler.spider.meta["responses"][3].body))
assert echo3["headers"].get("Referer") == ["http://example.com"]
@inlineCallbacks
def test_engine_status(self):
est = []
def cb(response):
est.append(get_engine_status(crawler.engine))
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(
seed=self.mockserver.url("/"), callback_func=cb, mockserver=self.mockserver
)
assert len(est) == 1, est
s = dict(est[0])
assert s["engine.spider.name"] == crawler.spider.name
assert s["len(engine.scraper.slot.active)"] == 1
@inlineCallbacks
def test_format_engine_status(self):
est = []
def cb(response):
est.append(format_engine_status(crawler.engine))
crawler = get_crawler(SingleRequestSpider)
yield crawler.crawl(
seed=self.mockserver.url("/"), callback_func=cb, mockserver=self.mockserver
)
assert len(est) == 1, est
est = est[0].split("\n")[2:-2] # remove header & footer
# convert to dict
est = [x.split(":") for x in est]
est = [x for sublist in est for x in sublist] # flatten
        est = [x.strip() for x in est]
it = iter(est)
s = dict(zip(it, it, strict=False))
assert s["engine.spider.name"] == crawler.spider.name
assert s["len(engine.scraper.slot.active)"] == "1"
@inlineCallbacks
def test_open_spider_error_on_faulty_pipeline(self):
settings = {
"ITEM_PIPELINES": {
"tests.pipelines.ZeroDivisionErrorPipeline": 300,
}
}
crawler = get_crawler(SimpleSpider, settings)
with pytest.raises(ZeroDivisionError):
yield crawler.crawl(
self.mockserver.url("/status?n=200"), mockserver=self.mockserver
)
assert not crawler.crawling
@inlineCallbacks
def test_crawlerrunner_accepts_crawler(self):
crawler = get_crawler(SimpleSpider)
runner = CrawlerRunner()
with LogCapture() as log:
yield runner.crawl(
crawler,
self.mockserver.url("/status?n=200"),
mockserver=self.mockserver,
)
assert "Got response 200" in str(log)
@inlineCallbacks
def test_crawl_multiple(self, caplog: pytest.LogCaptureFixture):
runner = CrawlerRunner(get_reactor_settings())
runner.crawl(
SimpleSpider,
self.mockserver.url("/status?n=200"),
mockserver=self.mockserver,
)
runner.crawl(
SimpleSpider,
self.mockserver.url("/status?n=503"),
mockserver=self.mockserver,
)
with caplog.at_level(logging.DEBUG):
yield runner.join()
self._assert_retried(caplog.text)
assert "Got response 200" in caplog.text
@deferred_f_from_coro_f
async def test_unknown_url_scheme(self, caplog: pytest.LogCaptureFixture) -> None:
crawler = get_crawler(SimpleSpider)
await maybe_deferred_to_future(crawler.crawl("foo://bar"))
assert "NotSupported: Unsupported URL scheme 'foo'" in caplog.text
class TestCrawlSpider:
mockserver: MockServer
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
async def _run_spider(
self, spider_cls: type[Spider]
) -> tuple[LogCapture, list[Any], StatsCollector]:
items = []
def _on_item_scraped(item):
items.append(item)
crawler = get_crawler(spider_cls)
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
with LogCapture() as log:
await maybe_deferred_to_future(
crawler.crawl(
self.mockserver.url("/status?n=200"), mockserver=self.mockserver
)
)
assert crawler.stats
return log, items, crawler.stats
@inlineCallbacks
def test_crawlspider_with_parse(self):
crawler = get_crawler(CrawlSpiderWithParseMethod)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
assert "[parse] status 200 (foo: None)" in str(log)
assert "[parse] status 201 (foo: None)" in str(log)
assert "[parse] status 202 (foo: bar)" in str(log)
@inlineCallbacks
def test_crawlspider_with_async_callback(self):
crawler = get_crawler(CrawlSpiderWithAsyncCallback)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
assert "[parse_async] status 200 (foo: None)" in str(log)
assert "[parse_async] status 201 (foo: None)" in str(log)
assert "[parse_async] status 202 (foo: bar)" in str(log)
@inlineCallbacks
def test_crawlspider_with_async_generator_callback(self):
crawler = get_crawler(CrawlSpiderWithAsyncGeneratorCallback)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
assert "[parse_async_gen] status 200 (foo: None)" in str(log)
assert "[parse_async_gen] status 201 (foo: None)" in str(log)
assert "[parse_async_gen] status 202 (foo: bar)" in str(log)
@inlineCallbacks
def test_crawlspider_with_errback(self):
crawler = get_crawler(CrawlSpiderWithErrback)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
assert "[parse] status 200 (foo: None)" in str(log)
assert "[parse] status 201 (foo: None)" in str(log)
assert "[parse] status 202 (foo: bar)" in str(log)
assert "[errback] status 404" in str(log)
assert "[errback] status 500" in str(log)
assert "[errback] status 501" in str(log)
@inlineCallbacks
def test_crawlspider_process_request_cb_kwargs(self):
crawler = get_crawler(CrawlSpiderWithProcessRequestCallbackKeywordArguments)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver)
assert "[parse] status 200 (foo: process_request)" in str(log)
assert "[parse] status 201 (foo: process_request)" in str(log)
assert "[parse] status 202 (foo: bar)" in str(log)
@inlineCallbacks
def test_async_def_parse(self):
crawler = get_crawler(AsyncDefSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/status?n=200"), mockserver=self.mockserver
)
assert "Got response 200" in str(log)
@pytest.mark.only_asyncio
@inlineCallbacks
def test_async_def_asyncio_parse(self):
crawler = get_crawler(
AsyncDefAsyncioSpider,
{
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
},
)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/status?n=200"), mockserver=self.mockserver
)
assert "Got response 200" in str(log)
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_asyncio_parse_items_list(self):
log, items, _ = await self._run_spider(AsyncDefAsyncioReturnSpider)
assert "Got response 200" in str(log)
assert {"id": 1} in items
assert {"id": 2} in items
@pytest.mark.only_asyncio
@inlineCallbacks
def test_async_def_asyncio_parse_items_single_element(self):
items = []
def _on_item_scraped(item):
items.append(item)
crawler = get_crawler(AsyncDefAsyncioReturnSingleElementSpider)
crawler.signals.connect(_on_item_scraped, signals.item_scraped)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/status?n=200"), mockserver=self.mockserver
)
assert "Got response 200" in str(log)
assert {"foo": 42} in items
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_asyncgen_parse(self):
log, _, stats = await self._run_spider(AsyncDefAsyncioGenSpider)
assert "Got response 200" in str(log)
itemcount = stats.get_value("item_scraped_count")
assert itemcount == 1
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_asyncgen_parse_loop(self):
log, items, stats = await self._run_spider(AsyncDefAsyncioGenLoopSpider)
assert "Got response 200" in str(log)
itemcount = stats.get_value("item_scraped_count")
assert itemcount == 10
for i in range(10):
assert {"foo": i} in items
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_asyncgen_parse_exc(self):
log, items, stats = await self._run_spider(AsyncDefAsyncioGenExcSpider)
log = str(log)
assert "Spider error processing" in log
assert "ValueError" in log
itemcount = stats.get_value("item_scraped_count")
assert itemcount == 7
for i in range(7):
assert {"foo": i} in items
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_asyncgen_parse_complex(self):
_, items, stats = await self._run_spider(AsyncDefAsyncioGenComplexSpider)
itemcount = stats.get_value("item_scraped_count")
assert itemcount == 156
# some random items
for i in [1, 4, 21, 22, 207, 311]:
assert {"index": i} in items
for i in [10, 30, 122]:
assert {"index2": i} in items
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_asyncio_parse_reqs_list(self):
log, *_ = await self._run_spider(AsyncDefAsyncioReqsReturnSpider)
for req_id in range(3):
assert f"Got response 200, req_id {req_id}" in str(log)
@pytest.mark.only_not_asyncio
@deferred_f_from_coro_f
async def test_async_def_deferred_direct(self):
_, items, _ = await self._run_spider(AsyncDefDeferredDirectSpider)
assert items == [{"code": 200}]
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_async_def_deferred_wrapped(self):
_, items, _ = await self._run_spider(AsyncDefDeferredWrappedSpider)
assert items == [{"code": 200}]
@deferred_f_from_coro_f
async def test_async_def_deferred_maybe_wrapped(self):
_, items, _ = await self._run_spider(AsyncDefDeferredMaybeWrappedSpider)
assert items == [{"code": 200}]
@inlineCallbacks
def test_response_ssl_certificate_none(self):
crawler = get_crawler(SingleRequestSpider)
url = self.mockserver.url("/echo?body=test", is_secure=False)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
assert crawler.spider.meta["responses"][0].certificate is None
@inlineCallbacks
def test_response_ssl_certificate(self):
crawler = get_crawler(SingleRequestSpider)
url = self.mockserver.url("/echo?body=test", is_secure=True)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
cert = crawler.spider.meta["responses"][0].certificate
assert isinstance(cert, Certificate)
assert cert.getSubject().commonName == b"localhost"
assert cert.getIssuer().commonName == b"localhost"
@pytest.mark.xfail(
reason="Responses with no body return early and contain no certificate"
)
@inlineCallbacks
def test_response_ssl_certificate_empty_response(self):
crawler = get_crawler(SingleRequestSpider)
url = self.mockserver.url("/status?n=200", is_secure=True)
yield crawler.crawl(seed=url, mockserver=self.mockserver)
cert = crawler.spider.meta["responses"][0].certificate
assert isinstance(cert, Certificate)
assert cert.getSubject().commonName == b"localhost"
assert cert.getIssuer().commonName == b"localhost"
@inlineCallbacks
def test_dns_server_ip_address_none(self):
crawler = get_crawler(SingleRequestSpider)
url = self.mockserver.url("/status?n=200")
yield crawler.crawl(seed=url, mockserver=self.mockserver)
ip_address = crawler.spider.meta["responses"][0].ip_address
assert ip_address is None
@inlineCallbacks
def test_dns_server_ip_address(self):
crawler = get_crawler(SingleRequestSpider)
url = self.mockserver.url("/echo?body=test")
expected_netloc, _ = urlparse(url).netloc.split(":")
yield crawler.crawl(seed=url, mockserver=self.mockserver)
ip_address = crawler.spider.meta["responses"][0].ip_address
assert isinstance(ip_address, IPv4Address)
assert str(ip_address) == gethostbyname(expected_netloc)
@inlineCallbacks
def test_bytes_received_stop_download_callback(self):
crawler = get_crawler(BytesReceivedCallbackSpider)
yield crawler.crawl(mockserver=self.mockserver)
assert crawler.spider.meta.get("failure") is None
assert isinstance(crawler.spider.meta["response"], Response)
assert crawler.spider.meta["response"].body == crawler.spider.meta.get(
"bytes_received"
)
assert (
len(crawler.spider.meta["response"].body)
< crawler.spider.full_response_length
)
@inlineCallbacks
def test_bytes_received_stop_download_errback(self):
crawler = get_crawler(BytesReceivedErrbackSpider)
yield crawler.crawl(mockserver=self.mockserver)
assert crawler.spider.meta.get("response") is None
assert isinstance(crawler.spider.meta["failure"], Failure)
assert isinstance(crawler.spider.meta["failure"].value, StopDownload)
assert isinstance(crawler.spider.meta["failure"].value.response, Response)
assert crawler.spider.meta[
"failure"
].value.response.body == crawler.spider.meta.get("bytes_received")
assert (
len(crawler.spider.meta["failure"].value.response.body)
< crawler.spider.full_response_length
)
@inlineCallbacks
def test_headers_received_stop_download_callback(self):
crawler = get_crawler(HeadersReceivedCallbackSpider)
yield crawler.crawl(mockserver=self.mockserver)
assert crawler.spider.meta.get("failure") is None
assert isinstance(crawler.spider.meta["response"], Response)
assert crawler.spider.meta["response"].headers == crawler.spider.meta.get(
"headers_received"
)
@inlineCallbacks
def test_headers_received_stop_download_errback(self):
crawler = get_crawler(HeadersReceivedErrbackSpider)
yield crawler.crawl(mockserver=self.mockserver)
assert crawler.spider.meta.get("response") is None
assert isinstance(crawler.spider.meta["failure"], Failure)
assert isinstance(crawler.spider.meta["failure"].value, StopDownload)
assert isinstance(crawler.spider.meta["failure"].value.response, Response)
assert crawler.spider.meta[
"failure"
].value.response.headers == crawler.spider.meta.get("headers_received")
@inlineCallbacks
def test_spider_callback_deferred_deprecated(self):
def cb(response: Response) -> Any:
return succeed(None)
crawler = get_crawler(SingleRequestSpider)
with pytest.warns(
ScrapyDeprecationWarning,
match="Returning Deferreds from spider callbacks is deprecated",
):
yield crawler.crawl(seed=self.mockserver.url("/"), callback_func=cb)
@inlineCallbacks
def test_spider_errback(self):
failures = []
def eb(failure: Failure) -> Failure:
failures.append(failure)
return failure
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/status?n=400"), errback_func=eb
)
assert len(failures) == 1
assert "HTTP status code is not handled or not allowed" in str(log)
assert "Spider error processing" not in str(log)
@inlineCallbacks
def test_spider_errback_silence(self):
failures = []
def eb(failure: Failure) -> None:
failures.append(failure)
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/status?n=400"), errback_func=eb
)
assert len(failures) == 1
assert "HTTP status code is not handled or not allowed" not in str(log)
assert "Spider error processing" not in str(log)
@inlineCallbacks
def test_spider_errback_exception(self):
def eb(failure: Failure) -> None:
raise ValueError("foo")
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/status?n=400"), errback_func=eb
)
assert "Spider error processing" in str(log)
@inlineCallbacks
def test_spider_errback_item(self):
def eb(failure: Failure) -> Any:
return {"foo": "bar"}
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/status?n=400"), errback_func=eb
)
assert "HTTP status code is not handled or not allowed" not in str(log)
assert "Spider error processing" not in str(log)
assert "'item_scraped_count': 1" in str(log)
@inlineCallbacks
def test_spider_errback_request(self):
def eb(failure: Failure) -> Request:
return Request(self.mockserver.url("/"))
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/status?n=400"), errback_func=eb
)
assert "HTTP status code is not handled or not allowed" not in str(log)
assert "Spider error processing" not in str(log)
assert "Crawled (200)" in str(log)
@inlineCallbacks
def test_spider_errback_downloader_error(self):
failures = []
def eb(failure: Failure) -> Failure:
failures.append(failure)
return failure
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/drop?abort=1"), errback_func=eb
)
assert len(failures) == 1
assert "Error downloading" in str(log)
assert "Spider error processing" not in str(log)
@inlineCallbacks
def test_spider_errback_downloader_error_exception(self):
def eb(failure: Failure) -> None:
raise ValueError("foo")
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/drop?abort=1"), errback_func=eb
)
assert "Error downloading" in str(log)
assert "Spider error processing" in str(log)
@inlineCallbacks
def test_spider_errback_downloader_error_item(self):
def eb(failure: Failure) -> Any:
return {"foo": "bar"}
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/drop?abort=1"), errback_func=eb
)
assert "HTTP status code is not handled or not allowed" not in str(log)
assert "Spider error processing" not in str(log)
assert "'item_scraped_count': 1" in str(log)
@inlineCallbacks
def test_spider_errback_downloader_error_request(self):
def eb(failure: Failure) -> Request:
return Request(self.mockserver.url("/"))
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as log:
yield crawler.crawl(
seed=self.mockserver.url("/drop?abort=1"), errback_func=eb
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_parse.py | tests/test_command_parse.py | from __future__ import annotations
import argparse
import re
from typing import TYPE_CHECKING
import pytest
from scrapy.commands import parse
from scrapy.settings import Settings
from tests.test_commands import TestProjectBase
from tests.utils.cmdline import call, proc
if TYPE_CHECKING:
from pathlib import Path
from tests.mockserver.http import MockServer
class TestParseCommand(TestProjectBase):
spider_name = "parse_spider"
@pytest.fixture(autouse=True)
def create_files(self, proj_path: Path) -> None:
proj_mod_path = proj_path / self.project_name
(proj_mod_path / "spiders" / "myspider.py").write_text(
f"""
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.utils.test import get_from_asyncio_queue
import asyncio
class BaseSpider(scrapy.Spider):
custom_settings = {{
"DOWNLOAD_DELAY": 0,
}}
class AsyncDefAsyncioReturnSpider(BaseSpider):
name = "asyncdef_asyncio_return"
async def parse(self, response):
await asyncio.sleep(0.2)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {{status}}")
return [{{'id': 1}}, {{'id': 2}}]
class AsyncDefAsyncioReturnSingleElementSpider(BaseSpider):
name = "asyncdef_asyncio_return_single_element"
async def parse(self, response):
await asyncio.sleep(0.1)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {{status}}")
return {{'foo': 42}}
class AsyncDefAsyncioGenLoopSpider(BaseSpider):
name = "asyncdef_asyncio_gen_loop"
async def parse(self, response):
for i in range(10):
await asyncio.sleep(0.1)
yield {{'foo': i}}
self.logger.info(f"Got response {{response.status}}")
class AsyncDefAsyncioSpider(BaseSpider):
name = "asyncdef_asyncio"
async def parse(self, response):
await asyncio.sleep(0.2)
status = await get_from_asyncio_queue(response.status)
self.logger.debug(f"Got response {{status}}")
class AsyncDefAsyncioGenExcSpider(BaseSpider):
name = "asyncdef_asyncio_gen_exc"
async def parse(self, response):
for i in range(10):
await asyncio.sleep(0.1)
yield {{'foo': i}}
if i > 5:
raise ValueError("Stopping the processing")
class CallbackSignatureDownloaderMiddleware:
def process_request(self, request, spider):
from inspect import signature
spider.logger.debug(f"request.callback signature: {{signature(request.callback)}}")
class MySpider(scrapy.Spider):
name = '{self.spider_name}'
custom_settings = {{
"DOWNLOADER_MIDDLEWARES": {{
CallbackSignatureDownloaderMiddleware: 0,
}},
"DOWNLOAD_DELAY": 0,
}}
def parse(self, response):
if getattr(self, 'test_arg', None):
self.logger.debug('It Works!')
return [scrapy.Item(), dict(foo='bar')]
def parse_request_with_meta(self, response):
foo = response.meta.get('foo', 'bar')
if foo == 'bar':
self.logger.debug('It Does Not Work :(')
else:
self.logger.debug('It Works!')
def parse_request_with_cb_kwargs(self, response, foo=None, key=None):
if foo == 'bar' and key == 'value':
self.logger.debug('It Works!')
else:
self.logger.debug('It Does Not Work :(')
def parse_request_without_meta(self, response):
foo = response.meta.get('foo', 'bar')
if foo == 'bar':
self.logger.debug('It Works!')
else:
self.logger.debug('It Does Not Work :(')
class MyGoodCrawlSpider(CrawlSpider):
name = 'goodcrawl{self.spider_name}'
custom_settings = {{
"DOWNLOAD_DELAY": 0,
}}
rules = (
Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),
Rule(LinkExtractor(allow=r'/text'), follow=True),
)
def parse_item(self, response):
return [scrapy.Item(), dict(foo='bar')]
def parse(self, response):
return [scrapy.Item(), dict(nomatch='default')]
class MyBadCrawlSpider(CrawlSpider):
'''Spider which doesn't define a parse_item callback while using it in a rule.'''
name = 'badcrawl{self.spider_name}'
custom_settings = {{
"DOWNLOAD_DELAY": 0,
}}
rules = (
Rule(LinkExtractor(allow=r'/html'), callback='parse_item', follow=True),
)
def parse(self, response):
return [scrapy.Item(), dict(foo='bar')]
""",
encoding="utf-8",
)
(proj_mod_path / "pipelines.py").write_text(
"""
import logging
class MyPipeline:
component_name = 'my_pipeline'
def process_item(self, item):
logging.info('It Works!')
return item
""",
encoding="utf-8",
)
with (proj_mod_path / "settings.py").open("a", encoding="utf-8") as f:
f.write(
f"""
ITEM_PIPELINES = {{'{self.project_name}.pipelines.MyPipeline': 1}}
"""
)
def test_spider_arguments(self, proj_path: Path, mockserver: MockServer) -> None:
_, _, stderr = proc(
"parse",
"--spider",
self.spider_name,
"-a",
"test_arg=1",
"-c",
"parse",
"--verbose",
mockserver.url("/html"),
cwd=proj_path,
)
assert "DEBUG: It Works!" in stderr
def test_request_with_meta(self, proj_path: Path, mockserver: MockServer) -> None:
raw_json_string = '{"foo" : "baz"}'
_, _, stderr = proc(
"parse",
"--spider",
self.spider_name,
"--meta",
raw_json_string,
"-c",
"parse_request_with_meta",
"--verbose",
mockserver.url("/html"),
cwd=proj_path,
)
assert "DEBUG: It Works!" in stderr
_, _, stderr = proc(
"parse",
"--spider",
self.spider_name,
"-m",
raw_json_string,
"-c",
"parse_request_with_meta",
"--verbose",
mockserver.url("/html"),
cwd=proj_path,
)
assert "DEBUG: It Works!" in stderr
def test_request_with_cb_kwargs(
self, proj_path: Path, mockserver: MockServer
) -> None:
raw_json_string = '{"foo" : "bar", "key": "value"}'
_, _, stderr = proc(
"parse",
"--spider",
self.spider_name,
"--cbkwargs",
raw_json_string,
"-c",
"parse_request_with_cb_kwargs",
"--verbose",
mockserver.url("/html"),
cwd=proj_path,
)
assert "DEBUG: It Works!" in stderr
assert (
"DEBUG: request.callback signature: (response, foo=None, key=None)"
in stderr
)
def test_request_without_meta(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, _, stderr = proc(
"parse",
"--spider",
self.spider_name,
"-c",
"parse_request_without_meta",
"--nolinks",
mockserver.url("/html"),
cwd=proj_path,
)
assert "DEBUG: It Works!" in stderr
def test_pipelines(self, proj_path: Path, mockserver: MockServer) -> None:
_, _, stderr = proc(
"parse",
"--spider",
self.spider_name,
"--pipelines",
"-c",
"parse",
"--verbose",
mockserver.url("/html"),
cwd=proj_path,
)
assert "INFO: It Works!" in stderr
def test_async_def_asyncio_parse_items_list(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, stderr = proc(
"parse",
"--spider",
"asyncdef_asyncio_return",
"-c",
"parse",
mockserver.url("/html"),
cwd=proj_path,
)
assert "INFO: Got response 200" in stderr
assert "{'id': 1}" in out
assert "{'id': 2}" in out
def test_async_def_asyncio_parse_items_single_element(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, stderr = proc(
"parse",
"--spider",
"asyncdef_asyncio_return_single_element",
"-c",
"parse",
mockserver.url("/html"),
cwd=proj_path,
)
assert "INFO: Got response 200" in stderr
assert "{'foo': 42}" in out
def test_async_def_asyncgen_parse_loop(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, stderr = proc(
"parse",
"--spider",
"asyncdef_asyncio_gen_loop",
"-c",
"parse",
mockserver.url("/html"),
cwd=proj_path,
)
assert "INFO: Got response 200" in stderr
for i in range(10):
assert f"{{'foo': {i}}}" in out
def test_async_def_asyncgen_parse_exc(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, stderr = proc(
"parse",
"--spider",
"asyncdef_asyncio_gen_exc",
"-c",
"parse",
mockserver.url("/html"),
cwd=proj_path,
)
assert "ValueError" in stderr
for i in range(7):
assert f"{{'foo': {i}}}" in out
def test_async_def_asyncio_parse(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, _, stderr = proc(
"parse",
"--spider",
"asyncdef_asyncio",
"-c",
"parse",
mockserver.url("/html"),
cwd=proj_path,
)
assert "DEBUG: Got response 200" in stderr
def test_parse_items(self, proj_path: Path, mockserver: MockServer) -> None:
_, out, _ = proc(
"parse",
"--spider",
self.spider_name,
"-c",
"parse",
mockserver.url("/html"),
cwd=proj_path,
)
assert "[{}, {'foo': 'bar'}]" in out
def test_parse_items_no_callback_passed(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, _ = proc(
"parse",
"--spider",
self.spider_name,
mockserver.url("/html"),
cwd=proj_path,
)
assert "[{}, {'foo': 'bar'}]" in out
def test_wrong_callback_passed(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, stderr = proc(
"parse",
"--spider",
self.spider_name,
"-c",
"dummy",
mockserver.url("/html"),
cwd=proj_path,
)
assert re.search(r"# Scraped Items -+\r?\n\[\]", out)
assert "Cannot find callback" in stderr
def test_crawlspider_matching_rule_callback_set(
self, proj_path: Path, mockserver: MockServer
) -> None:
"""If a rule matches the URL, use it's defined callback."""
_, out, _ = proc(
"parse",
"--spider",
"goodcrawl" + self.spider_name,
"-r",
mockserver.url("/html"),
cwd=proj_path,
)
assert "[{}, {'foo': 'bar'}]" in out
def test_crawlspider_matching_rule_default_callback(
self, proj_path: Path, mockserver: MockServer
) -> None:
"""If a rule match but it has no callback set, use the 'parse' callback."""
_, out, _ = proc(
"parse",
"--spider",
"goodcrawl" + self.spider_name,
"-r",
mockserver.url("/text"),
cwd=proj_path,
)
assert "[{}, {'nomatch': 'default'}]" in out
def test_spider_with_no_rules_attribute(
self, proj_path: Path, mockserver: MockServer
) -> None:
"""Using -r with a spider with no rule should not produce items."""
_, out, stderr = proc(
"parse",
"--spider",
self.spider_name,
"-r",
mockserver.url("/html"),
cwd=proj_path,
)
assert re.search(r"# Scraped Items -+\r?\n\[\]", out)
assert "No CrawlSpider rules found" in stderr
def test_crawlspider_missing_callback(
self, proj_path: Path, mockserver: MockServer
) -> None:
_, out, _ = proc(
"parse",
"--spider",
"badcrawl" + self.spider_name,
"-r",
mockserver.url("/html"),
cwd=proj_path,
)
assert re.search(r"# Scraped Items -+\r?\n\[\]", out)
def test_crawlspider_no_matching_rule(
self, proj_path: Path, mockserver: MockServer
) -> None:
"""The requested URL has no matching rule, so no items should be scraped"""
_, out, stderr = proc(
"parse",
"--spider",
"badcrawl" + self.spider_name,
"-r",
mockserver.url("/enc-gb18030"),
cwd=proj_path,
)
assert re.search(r"# Scraped Items -+\r?\n\[\]", out)
assert "Cannot find a rule that matches" in stderr
def test_crawlspider_not_exists_with_not_matched_url(
self, proj_path: Path, mockserver: MockServer
) -> None:
assert call("parse", mockserver.url("/invalid_url"), cwd=proj_path) == 0
def test_output_flag(self, proj_path: Path, mockserver: MockServer) -> None:
"""Checks if a file was created successfully having
correct format containing correct data in it.
"""
file_name = "data.json"
file_path = proj_path / file_name
proc(
"parse",
"--spider",
self.spider_name,
"-c",
"parse",
"-o",
file_name,
mockserver.url("/html"),
cwd=proj_path,
)
assert file_path.exists()
assert file_path.is_file()
content = '[\n{},\n{"foo": "bar"}\n]'
assert file_path.read_text(encoding="utf-8") == content
def test_parse_add_options(self):
command = parse.Command()
command.settings = Settings()
parser = argparse.ArgumentParser(
prog="scrapy",
formatter_class=argparse.HelpFormatter,
conflict_handler="resolve",
prefix_chars="-",
)
command.add_options(parser)
namespace = parser.parse_args(
["--verbose", "--nolinks", "-d", "2", "--spider", self.spider_name]
)
assert namespace.nolinks
assert namespace.depth == 2
assert namespace.spider == self.spider_name
assert namespace.verbose
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_logformatter.py | tests/test_logformatter.py | import logging
import pytest
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from twisted.python.failure import Failure
from scrapy.exceptions import DropItem
from scrapy.http import Request, Response
from scrapy.item import Field, Item
from scrapy.logformatter import LogFormatter
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import ItemSpider
class CustomItem(Item):
name = Field()
def __str__(self):
return f"name: {self['name']}"
class TestLogFormatter:
def setup_method(self):
self.formatter = LogFormatter()
self.spider = Spider("default")
self.spider.crawler = get_crawler()
    def test_crawled_without_referer(self):
req = Request("http://www.example.com")
res = Response("http://www.example.com")
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws["msg"] % logkws["args"]
assert logline == "Crawled (200) <GET http://www.example.com> (referer: None)"
    def test_crawled_with_referer(self):
req = Request(
"http://www.example.com", headers={"referer": "http://example.com"}
)
res = Response("http://www.example.com", flags=["cached"])
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws["msg"] % logkws["args"]
assert (
logline
== "Crawled (200) <GET http://www.example.com> (referer: http://example.com) ['cached']"
)
def test_flags_in_request(self):
req = Request("http://www.example.com", flags=["test", "flag"])
res = Response("http://www.example.com")
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws["msg"] % logkws["args"]
assert (
logline
== "Crawled (200) <GET http://www.example.com> ['test', 'flag'] (referer: None)"
)
def test_dropped(self):
item = {}
exception = Exception("\u2018")
response = Response("http://www.example.com")
logkws = self.formatter.dropped(item, exception, response, self.spider)
logline = logkws["msg"] % logkws["args"]
lines = logline.splitlines()
assert all(isinstance(x, str) for x in lines)
assert lines == ["Dropped: \u2018", "{}"]
def test_dropitem_default_log_level(self):
item = {}
exception = DropItem("Test drop")
response = Response("http://www.example.com")
spider = Spider("foo")
spider.crawler = get_crawler(Spider)
logkws = self.formatter.dropped(item, exception, response, spider)
assert logkws["level"] == logging.WARNING
spider.crawler.settings.frozen = False
spider.crawler.settings["DEFAULT_DROPITEM_LOG_LEVEL"] = logging.INFO
spider.crawler.settings.frozen = True
logkws = self.formatter.dropped(item, exception, response, spider)
assert logkws["level"] == logging.INFO
spider.crawler.settings.frozen = False
spider.crawler.settings["DEFAULT_DROPITEM_LOG_LEVEL"] = "INFO"
spider.crawler.settings.frozen = True
logkws = self.formatter.dropped(item, exception, response, spider)
assert logkws["level"] == logging.INFO
spider.crawler.settings.frozen = False
spider.crawler.settings["DEFAULT_DROPITEM_LOG_LEVEL"] = 10
spider.crawler.settings.frozen = True
logkws = self.formatter.dropped(item, exception, response, spider)
assert logkws["level"] == logging.DEBUG
spider.crawler.settings.frozen = False
spider.crawler.settings["DEFAULT_DROPITEM_LOG_LEVEL"] = 0
spider.crawler.settings.frozen = True
logkws = self.formatter.dropped(item, exception, response, spider)
assert logkws["level"] == logging.NOTSET
unsupported_value = object()
spider.crawler.settings.frozen = False
spider.crawler.settings["DEFAULT_DROPITEM_LOG_LEVEL"] = unsupported_value
spider.crawler.settings.frozen = True
logkws = self.formatter.dropped(item, exception, response, spider)
assert logkws["level"] == unsupported_value
with pytest.raises(TypeError):
logging.log(logkws["level"], "message") # noqa: LOG015
def test_dropitem_custom_log_level(self):
item = {}
response = Response("http://www.example.com")
exception = DropItem("Test drop", log_level="INFO")
logkws = self.formatter.dropped(item, exception, response, self.spider)
assert logkws["level"] == logging.INFO
exception = DropItem("Test drop", log_level="ERROR")
logkws = self.formatter.dropped(item, exception, response, self.spider)
assert logkws["level"] == logging.ERROR
def test_item_error(self):
# In practice, the complete traceback is shown by passing the
# 'exc_info' argument to the logging function
item = {"key": "value"}
exception = Exception()
response = Response("http://www.example.com")
logkws = self.formatter.item_error(item, exception, response, self.spider)
logline = logkws["msg"] % logkws["args"]
assert logline == "Error processing {'key': 'value'}"
def test_spider_error(self):
# In practice, the complete traceback is shown by passing the
# 'exc_info' argument to the logging function
failure = Failure(Exception())
request = Request(
"http://www.example.com", headers={"Referer": "http://example.org"}
)
response = Response("http://www.example.com", request=request)
logkws = self.formatter.spider_error(failure, request, response, self.spider)
logline = logkws["msg"] % logkws["args"]
assert (
logline
== "Spider error processing <GET http://www.example.com> (referer: http://example.org)"
)
def test_download_error_short(self):
# In practice, the complete traceback is shown by passing the
# 'exc_info' argument to the logging function
failure = Failure(Exception())
request = Request("http://www.example.com")
logkws = self.formatter.download_error(failure, request, self.spider)
logline = logkws["msg"] % logkws["args"]
assert logline == "Error downloading <GET http://www.example.com>"
def test_download_error_long(self):
# In practice, the complete traceback is shown by passing the
# 'exc_info' argument to the logging function
failure = Failure(Exception())
request = Request("http://www.example.com")
logkws = self.formatter.download_error(
failure, request, self.spider, "Some message"
)
logline = logkws["msg"] % logkws["args"]
assert logline == "Error downloading <GET http://www.example.com>: Some message"
def test_scraped(self):
item = CustomItem()
item["name"] = "\xa3"
response = Response("http://www.example.com")
logkws = self.formatter.scraped(item, response, self.spider)
logline = logkws["msg"] % logkws["args"]
lines = logline.splitlines()
assert all(isinstance(x, str) for x in lines)
assert lines == ["Scraped from <200 http://www.example.com>", "name: \xa3"]
class LogFormatterSubclass(LogFormatter):
def crawled(self, request, response, spider):
kwargs = super().crawled(request, response, spider)
CRAWLEDMSG = "Crawled (%(status)s) %(request)s (referer: %(referer)s) %(flags)s"
log_args = kwargs["args"]
log_args["flags"] = str(request.flags)
return {
"level": kwargs["level"],
"msg": CRAWLEDMSG,
"args": log_args,
}
class TestLogformatterSubclass(TestLogFormatter):
def setup_method(self):
self.formatter = LogFormatterSubclass()
self.spider = Spider("default")
self.spider.crawler = get_crawler(Spider)
    def test_crawled_without_referer(self):
req = Request("http://www.example.com")
res = Response("http://www.example.com")
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws["msg"] % logkws["args"]
assert (
logline == "Crawled (200) <GET http://www.example.com> (referer: None) []"
)
    def test_crawled_with_referer(self):
req = Request(
"http://www.example.com",
headers={"referer": "http://example.com"},
flags=["cached"],
)
res = Response("http://www.example.com")
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws["msg"] % logkws["args"]
assert (
logline
== "Crawled (200) <GET http://www.example.com> (referer: http://example.com) ['cached']"
)
def test_flags_in_request(self):
req = Request("http://www.example.com", flags=["test", "flag"])
res = Response("http://www.example.com")
logkws = self.formatter.crawled(req, res, self.spider)
logline = logkws["msg"] % logkws["args"]
assert (
logline
== "Crawled (200) <GET http://www.example.com> (referer: None) ['test', 'flag']"
)
class SkipMessagesLogFormatter(LogFormatter):
def crawled(self, *args, **kwargs):
return None
def scraped(self, *args, **kwargs):
return None
def dropped(self, *args, **kwargs):
return None
class DropSomeItemsPipeline:
drop = True
def process_item(self, item):
if self.drop:
self.drop = False
raise DropItem("Ignoring item")
self.drop = True
class TestShowOrSkipMessages:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.base_settings = {
"LOG_LEVEL": "DEBUG",
"ITEM_PIPELINES": {
DropSomeItemsPipeline: 300,
},
}
@inlineCallbacks
def test_show_messages(self):
crawler = get_crawler(ItemSpider, self.base_settings)
with LogCapture() as lc:
yield crawler.crawl(mockserver=self.mockserver)
assert "Scraped from <200 http://127.0.0.1:" in str(lc)
assert "Crawled (200) <GET http://127.0.0.1:" in str(lc)
assert "Dropped: Ignoring item" in str(lc)
@inlineCallbacks
def test_skip_messages(self):
settings = self.base_settings.copy()
settings["LOG_FORMATTER"] = SkipMessagesLogFormatter
crawler = get_crawler(ItemSpider, settings)
with LogCapture() as lc:
yield crawler.crawl(mockserver=self.mockserver)
assert "Scraped from <200 http://127.0.0.1:" not in str(lc)
assert "Crawled (200) <GET http://127.0.0.1:" not in str(lc)
assert "Dropped: Ignoring item" not in str(lc)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_responsetypes.py | tests/test_responsetypes.py | from scrapy.http import (
Headers,
HtmlResponse,
JsonResponse,
Response,
TextResponse,
XmlResponse,
)
from scrapy.responsetypes import responsetypes
class TestResponseTypes:
def test_from_filename(self):
mappings = [
("data.bin", Response),
("file.txt", TextResponse),
("file.xml.gz", Response),
("file.xml", XmlResponse),
("file.html", HtmlResponse),
("file.unknownext", Response),
]
for source, cls in mappings:
retcls = responsetypes.from_filename(source)
assert retcls is cls, f"{source} ==> {retcls} != {cls}"
def test_from_content_disposition(self):
mappings = [
(b'attachment; filename="data.xml"', XmlResponse),
(b"attachment; filename=data.xml", XmlResponse),
("attachment;filename=data£.tar.gz".encode(), Response),
("attachment;filename=dataµ.tar.gz".encode("latin-1"), Response),
("attachment;filename=data高.doc".encode("gbk"), Response),
("attachment;filename=دورهdata.html".encode("cp720"), HtmlResponse),
(
"attachment;filename=日本語版Wikipedia.xml".encode("iso2022_jp"),
XmlResponse,
),
]
for source, cls in mappings:
retcls = responsetypes.from_content_disposition(source)
assert retcls is cls, f"{source} ==> {retcls} != {cls}"
def test_from_content_type(self):
mappings = [
("text/html; charset=UTF-8", HtmlResponse),
("text/xml; charset=UTF-8", XmlResponse),
("application/xhtml+xml; charset=UTF-8", HtmlResponse),
("application/vnd.wap.xhtml+xml; charset=utf-8", HtmlResponse),
("application/xml; charset=UTF-8", XmlResponse),
("application/octet-stream", Response),
("application/json; encoding=UTF8;charset=UTF-8", JsonResponse),
("application/x-json; encoding=UTF8;charset=UTF-8", JsonResponse),
("application/json-amazonui-streaming;charset=UTF-8", JsonResponse),
(b"application/x-download; filename=\x80dummy.txt", Response),
]
for source, cls in mappings:
retcls = responsetypes.from_content_type(source)
assert retcls is cls, f"{source} ==> {retcls} != {cls}"
def test_from_body(self):
mappings = [
(b"\x03\x02\xdf\xdd\x23", Response),
(b"Some plain text\ndata with tabs\t and null bytes\0", TextResponse),
(b"<html><head><title>Hello</title></head>", HtmlResponse),
# https://codersblock.com/blog/the-smallest-valid-html5-page/
(b"<!DOCTYPE html>\n<title>.</title>", HtmlResponse),
(b'<?xml version="1.0" encoding="utf-8"', XmlResponse),
]
for source, cls in mappings:
retcls = responsetypes.from_body(source)
assert retcls is cls, f"{source} ==> {retcls} != {cls}"
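    # Hedged approximation of the body sniffing the test above exercises; the
    # real heuristics live in responsetypes.from_body, this is only an
    # illustrative standalone sketch:
    @staticmethod
    def _sniff_body_sketch(body: bytes) -> str:
        head = body[:5000].lower()
        if head.startswith(b"<?xml"):
            return "xml"
        if b"<html" in head or b"<!doctype html" in head:
            return "html"
        return "binary-or-text"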
def test_from_headers(self):
mappings = [
({"Content-Type": ["text/html; charset=utf-8"]}, HtmlResponse),
(
{
"Content-Type": ["text/html; charset=utf-8"],
"Content-Encoding": ["gzip"],
},
Response,
),
(
{
"Content-Type": ["application/octet-stream"],
"Content-Disposition": ["attachment; filename=data.txt"],
},
TextResponse,
),
]
for source, cls in mappings:
source = Headers(source)
retcls = responsetypes.from_headers(source)
assert retcls is cls, f"{source} ==> {retcls} != {cls}"
def test_from_args(self):
# TODO: add more tests that check precedence between the different arguments
mappings = [
({"url": "http://www.example.com/data.csv"}, TextResponse),
# headers takes precedence over url
(
{
"headers": Headers({"Content-Type": ["text/html; charset=utf-8"]}),
"url": "http://www.example.com/item/",
},
HtmlResponse,
),
(
{
"headers": Headers(
{"Content-Disposition": ['attachment; filename="data.xml.gz"']}
),
"url": "http://www.example.com/page/",
},
Response,
),
]
for source, cls in mappings:
retcls = responsetypes.from_args(**source)
assert retcls is cls, f"{source} ==> {retcls} != {cls}"
def test_custom_mime_types_loaded(self):
# check that mime.types files shipped with scrapy are loaded
assert responsetypes.mimetypes.guess_type("x.scrapytest")[0] == "x-scrapy/test"
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_http_headers.py | tests/test_http_headers.py | import copy
import pytest
from scrapy.http import Headers
class TestHeaders:
def assertSortedEqual(self, first, second, msg=None):
assert sorted(first) == sorted(second), msg
def test_basics(self):
h = Headers({"Content-Type": "text/html", "Content-Length": 1234})
assert h["Content-Type"]
assert h["Content-Length"]
with pytest.raises(KeyError):
h["Accept"]
assert h.get("Accept") is None
assert h.getlist("Accept") == []
assert h.get("Accept", "*/*") == b"*/*"
assert h.getlist("Accept", "*/*") == [b"*/*"]
assert h.getlist("Accept", ["text/html", "images/jpeg"]) == [
b"text/html",
b"images/jpeg",
]
def test_single_value(self):
h = Headers()
h["Content-Type"] = "text/html"
assert h["Content-Type"] == b"text/html"
assert h.get("Content-Type") == b"text/html"
assert h.getlist("Content-Type") == [b"text/html"]
def test_multivalue(self):
h = Headers()
h["X-Forwarded-For"] = hlist = ["ip1", "ip2"]
assert h["X-Forwarded-For"] == b"ip2"
assert h.get("X-Forwarded-For") == b"ip2"
assert h.getlist("X-Forwarded-For") == [b"ip1", b"ip2"]
assert h.getlist("X-Forwarded-For") is not hlist
def test_multivalue_for_one_header(self):
h = Headers((("a", "b"), ("a", "c")))
assert h["a"] == b"c"
assert h.get("a") == b"c"
assert h.getlist("a") == [b"b", b"c"]
def test_encode_utf8(self):
h = Headers({"key": "\xa3"}, encoding="utf-8")
key, val = dict(h).popitem()
assert isinstance(key, bytes), key
assert isinstance(val[0], bytes), val[0]
assert val[0] == b"\xc2\xa3"
def test_encode_latin1(self):
h = Headers({"key": "\xa3"}, encoding="latin1")
_, val = dict(h).popitem()
assert val[0] == b"\xa3"
def test_encode_multiple(self):
h = Headers({"key": ["\xa3"]}, encoding="utf-8")
_, val = dict(h).popitem()
assert val[0] == b"\xc2\xa3"
def test_delete_and_contains(self):
h = Headers()
h["Content-Type"] = "text/html"
assert "Content-Type" in h
del h["Content-Type"]
assert "Content-Type" not in h
def test_setdefault(self):
h = Headers()
hlist = ["ip1", "ip2"]
olist = h.setdefault("X-Forwarded-For", hlist)
assert h.getlist("X-Forwarded-For") is not hlist
assert h.getlist("X-Forwarded-For") is olist
h = Headers()
olist = h.setdefault("X-Forwarded-For", "ip1")
assert h.getlist("X-Forwarded-For") == [b"ip1"]
assert h.getlist("X-Forwarded-For") is olist
def test_iterables(self):
idict = {"Content-Type": "text/html", "X-Forwarded-For": ["ip1", "ip2"]}
h = Headers(idict)
assert dict(h) == {
b"Content-Type": [b"text/html"],
b"X-Forwarded-For": [b"ip1", b"ip2"],
}
self.assertSortedEqual(h.keys(), [b"X-Forwarded-For", b"Content-Type"])
self.assertSortedEqual(
h.items(),
[(b"X-Forwarded-For", [b"ip1", b"ip2"]), (b"Content-Type", [b"text/html"])],
)
self.assertSortedEqual(h.values(), [b"ip2", b"text/html"])
def test_update(self):
h = Headers()
h.update({"Content-Type": "text/html", "X-Forwarded-For": ["ip1", "ip2"]})
assert h.getlist("Content-Type") == [b"text/html"]
assert h.getlist("X-Forwarded-For") == [b"ip1", b"ip2"]
def test_copy(self):
h1 = Headers({"header1": ["value1", "value2"]})
h2 = copy.copy(h1)
assert h1 == h2
assert h1.getlist("header1") == h2.getlist("header1")
assert h1.getlist("header1") is not h2.getlist("header1")
assert isinstance(h2, Headers)
def test_appendlist(self):
h1 = Headers({"header1": "value1"})
h1.appendlist("header1", "value3")
assert h1.getlist("header1") == [b"value1", b"value3"]
h1 = Headers()
h1.appendlist("header1", "value1")
h1.appendlist("header1", "value3")
assert h1.getlist("header1") == [b"value1", b"value3"]
def test_setlist(self):
h1 = Headers({"header1": "value1"})
assert h1.getlist("header1") == [b"value1"]
h1.setlist("header1", [b"value2", b"value3"])
assert h1.getlist("header1") == [b"value2", b"value3"]
def test_setlistdefault(self):
h1 = Headers({"header1": "value1"})
h1.setlistdefault("header1", ["value2", "value3"])
h1.setlistdefault("header2", ["value2", "value3"])
assert h1.getlist("header1") == [b"value1"]
assert h1.getlist("header2") == [b"value2", b"value3"]
def test_none_value(self):
h1 = Headers()
h1["foo"] = "bar"
h1["foo"] = None
h1.setdefault("foo", "bar")
assert h1.get("foo") is None
assert h1.getlist("foo") == []
def test_int_value(self):
h1 = Headers({"hey": 5})
h1["foo"] = 1
h1.setdefault("bar", 2)
h1.setlist("buz", [1, "dos", 3])
assert h1.getlist("foo") == [b"1"]
assert h1.getlist("bar") == [b"2"]
assert h1.getlist("buz") == [b"1", b"dos", b"3"]
assert h1.getlist("hey") == [b"5"]
def test_invalid_value(self):
with pytest.raises(TypeError, match="Unsupported value type"):
Headers({"foo": object()})
with pytest.raises(TypeError, match="Unsupported value type"):
Headers()["foo"] = object()
with pytest.raises(TypeError, match="Unsupported value type"):
Headers().setdefault("foo", object())
with pytest.raises(TypeError, match="Unsupported value type"):
Headers().setlist("foo", [object()])
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_version.py | tests/test_command_version.py | import scrapy
from tests.utils.cmdline import proc
class TestVersionCommand:
def test_output(self) -> None:
_, out, _ = proc("version")
assert out.strip() == f"Scrapy {scrapy.__version__}"
def test_verbose_output(self) -> None:
_, out, _ = proc("version", "-v")
headers = [line.partition(":")[0].strip() for line in out.strip().splitlines()]
assert headers == [
"Scrapy",
"lxml",
"libxml2",
"cssselect",
"parsel",
"w3lib",
"Twisted",
"Python",
"pyOpenSSL",
"cryptography",
"Platform",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_middleware.py | tests/test_middleware.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from scrapy import Spider
from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning
from scrapy.middleware import MiddlewareManager
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from scrapy.crawler import Crawler
class M1:
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
def process(self, response, request):
pass
class M2:
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
class M3:
def process(self, response, request):
pass
class MOff:
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
def __init__(self):
raise NotConfigured("foo")
class MyMiddlewareManager(MiddlewareManager):
component_name = "my"
@classmethod
def _get_mwlist_from_settings(cls, settings):
return [M1, MOff, M3]
def _add_middleware(self, mw):
if hasattr(mw, "open_spider"):
self.methods["open_spider"].append(mw.open_spider)
if hasattr(mw, "close_spider"):
self.methods["close_spider"].appendleft(mw.close_spider)
if hasattr(mw, "process"):
self.methods["process"].append(mw.process)
@pytest.fixture
def crawler() -> Crawler:
return get_crawler(Spider)
def test_init(crawler: Crawler) -> None:
m1, m2, m3 = M1(), M2(), M3()
mwman = MyMiddlewareManager(m1, m2, m3, crawler=crawler)
assert list(mwman.methods["open_spider"]) == [m1.open_spider, m2.open_spider]
assert list(mwman.methods["close_spider"]) == [m2.close_spider, m1.close_spider]
assert list(mwman.methods["process"]) == [m1.process, m3.process]
assert mwman.crawler == crawler
def test_methods(crawler: Crawler) -> None:
mwman = MyMiddlewareManager(M1(), M2(), M3(), crawler=crawler)
assert [x.__self__.__class__ for x in mwman.methods["open_spider"]] == [M1, M2] # type: ignore[union-attr]
assert [x.__self__.__class__ for x in mwman.methods["close_spider"]] == [M2, M1] # type: ignore[union-attr]
assert [x.__self__.__class__ for x in mwman.methods["process"]] == [M1, M3] # type: ignore[union-attr]
def test_enabled(crawler: Crawler) -> None:
m1, m2, m3 = M1(), M2(), M3()
mwman = MyMiddlewareManager(m1, m2, m3, crawler=crawler)
assert mwman.middlewares == (m1, m2, m3)
def test_enabled_from_settings(crawler: Crawler) -> None:
crawler = get_crawler()
mwman = MyMiddlewareManager.from_crawler(crawler)
classes = [x.__class__ for x in mwman.middlewares]
assert classes == [M1, M3]
assert mwman.crawler == crawler
def test_no_crawler() -> None:
m1, m2, m3 = M1(), M2(), M3()
with pytest.warns(
ScrapyDeprecationWarning, match="was called without the crawler argument"
):
mwman = MyMiddlewareManager(m1, m2, m3)
assert mwman.middlewares == (m1, m2, m3)
assert mwman.crawler is None
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/pipelines.py | tests/pipelines.py | """
Some pipelines used for testing
"""
class ZeroDivisionErrorPipeline:
def open_spider(self):
1 / 0
def process_item(self, item):
return item
class ProcessWithZeroDivisionErrorPipeline:
def process_item(self, item):
1 / 0
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_gz.py | tests/test_utils_gz.py | from gzip import BadGzipFile
from pathlib import Path
import pytest
from w3lib.encoding import html_to_unicode
from scrapy.http import Response
from scrapy.utils.gz import gunzip, gzip_magic_number
from tests import tests_datadir
SAMPLEDIR = Path(tests_datadir, "compressed")
def test_gunzip_basic():
r1 = Response(
"http://www.example.com",
body=(SAMPLEDIR / "feed-sample1.xml.gz").read_bytes(),
)
assert gzip_magic_number(r1)
r2 = Response("http://www.example.com", body=gunzip(r1.body))
assert not gzip_magic_number(r2)
assert len(r2.body) == 9950
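# gzip_magic_number() is assumed to check the standard two-byte gzip magic
# (0x1f 0x8b); a hedged standalone equivalent over raw bytes:
def _looks_gzipped_sketch(data: bytes) -> bool:
    return data[:2] == b"\x1f\x8b"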
def test_gunzip_truncated():
text = gunzip((SAMPLEDIR / "truncated-crc-error.gz").read_bytes())
assert text.endswith(b"</html")
def test_gunzip_no_gzip_file_raises():
with pytest.raises(BadGzipFile):
gunzip((SAMPLEDIR / "feed-sample1.xml").read_bytes())
def test_gunzip_truncated_short():
r1 = Response(
"http://www.example.com",
body=(SAMPLEDIR / "truncated-crc-error-short.gz").read_bytes(),
)
assert gzip_magic_number(r1)
r2 = Response("http://www.example.com", body=gunzip(r1.body))
assert r2.body.endswith(b"</html>")
assert not gzip_magic_number(r2)
def test_is_gzipped_empty():
r1 = Response("http://www.example.com")
assert not gzip_magic_number(r1)
def test_gunzip_illegal_eof():
text = html_to_unicode(
"charset=cp1252", gunzip((SAMPLEDIR / "unexpected-eof.gz").read_bytes())
)[1]
expected_text = (SAMPLEDIR / "unexpected-eof-output.txt").read_text(
encoding="utf-8"
)
assert len(text) == len(expected_text)
assert text == expected_text
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_feedexport.py | tests/test_feedexport.py | from __future__ import annotations
import bz2
import csv
import gzip
import json
import lzma
import marshal
import os
import pickle
import random
import shutil
import string
import sys
import tempfile
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict
from io import BytesIO
from logging import getLogger
from pathlib import Path
from string import ascii_letters, digits
from typing import IO, TYPE_CHECKING, Any
from unittest import mock
from urllib.parse import quote, urljoin
from urllib.request import pathname2url
import lxml.etree
import pytest
from packaging.version import Version
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from w3lib.url import file_uri_to_path, path_to_file_uri
from zope.interface import implementer
from zope.interface.verify import verifyObject
import scrapy
from scrapy import Spider, signals
from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning
from scrapy.exporters import CsvItemExporter, JsonItemExporter
from scrapy.extensions.feedexport import (
BlockingFeedStorage,
FeedExporter,
FeedSlot,
FileFeedStorage,
FTPFeedStorage,
GCSFeedStorage,
IFeedStorage,
S3FeedStorage,
StdoutFeedStorage,
)
from scrapy.settings import Settings
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.python import to_unicode
from scrapy.utils.test import get_crawler
from tests.mockserver.ftp import MockFTPServer
from tests.mockserver.http import MockServer
from tests.spiders import ItemSpider
if TYPE_CHECKING:
from collections.abc import Callable, Iterable
from os import PathLike
def path_to_url(path):
return urljoin("file:", pathname2url(str(path)))
def printf_escape(value):
    return value.replace("%", "%%")
def build_url(path: str | PathLike) -> str:
path_str = str(path)
if path_str[0] != "/":
path_str = "/" + path_str
return urljoin("file:", path_str)
def mock_google_cloud_storage() -> tuple[Any, Any, Any]:
"""Creates autospec mocks for google-cloud-storage Client, Bucket and Blob
classes and set their proper return values.
"""
from google.cloud.storage import Blob, Bucket, Client # noqa: PLC0415
client_mock = mock.create_autospec(Client)
bucket_mock = mock.create_autospec(Bucket)
client_mock.get_bucket.return_value = bucket_mock
blob_mock = mock.create_autospec(Blob)
bucket_mock.blob.return_value = blob_mock
return (client_mock, bucket_mock, blob_mock)
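# Hedged usage sketch for the helper above: patch the client class used by
# GCSFeedStorage with the autospec mocks so store() can be exercised without
# network access. The patch target and assertions are assumptions:
def _gcs_mocks_usage_sketch():
    client_mock, bucket_mock, blob_mock = mock_google_cloud_storage()
    with mock.patch("google.cloud.storage.Client", return_value=client_mock):
        ...  # build a GCSFeedStorage here and assert on blob_mock calls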
class TestFileFeedStorage:
def test_store_file_uri(self, tmp_path):
path = tmp_path / "file.txt"
uri = path_to_file_uri(str(path))
self._assert_stores(FileFeedStorage(uri), path)
def test_store_file_uri_makedirs(self, tmp_path):
path = tmp_path / "more" / "paths" / "file.txt"
uri = path_to_file_uri(str(path))
self._assert_stores(FileFeedStorage(uri), path)
def test_store_direct_path(self, tmp_path):
path = tmp_path / "file.txt"
self._assert_stores(FileFeedStorage(str(path)), path)
def test_store_direct_path_relative(self, tmp_path):
old_cwd = Path.cwd()
try:
os.chdir(tmp_path)
path = Path("foo", "bar")
self._assert_stores(FileFeedStorage(str(path)), path)
finally:
os.chdir(old_cwd)
def test_interface(self, tmp_path):
path = tmp_path / "file.txt"
st = FileFeedStorage(str(path))
verifyObject(IFeedStorage, st)
@staticmethod
def _store(path: Path, feed_options: dict[str, Any] | None = None) -> None:
storage = FileFeedStorage(str(path), feed_options=feed_options)
spider = scrapy.Spider("default")
file = storage.open(spider)
file.write(b"content")
storage.store(file)
def test_append(self, tmp_path):
path = tmp_path / "file.txt"
self._store(path)
self._assert_stores(FileFeedStorage(str(path)), path, b"contentcontent")
def test_overwrite(self, tmp_path):
path = tmp_path / "file.txt"
self._store(path, {"overwrite": True})
self._assert_stores(
FileFeedStorage(str(path), feed_options={"overwrite": True}), path
)
@staticmethod
def _assert_stores(
storage: FileFeedStorage, path: Path, expected_content: bytes = b"content"
) -> None:
spider = scrapy.Spider("default")
file = storage.open(spider)
file.write(b"content")
storage.store(file)
assert path.exists()
try:
assert path.read_bytes() == expected_content
finally:
path.unlink()
def test_preserves_windows_path_without_file_scheme(self):
path = r"C:\Users\user\Desktop\test.txt"
storage = FileFeedStorage(path)
assert storage.path == path
class TestFTPFeedStorage:
def get_test_spider(self, settings=None):
class TestSpider(scrapy.Spider):
name = "test_spider"
crawler = get_crawler(settings_dict=settings)
return TestSpider.from_crawler(crawler)
async def _store(self, uri, content, feed_options=None, settings=None):
crawler = get_crawler(settings_dict=settings or {})
storage = FTPFeedStorage.from_crawler(
crawler,
uri,
feed_options=feed_options,
)
verifyObject(IFeedStorage, storage)
spider = self.get_test_spider()
file = storage.open(spider)
file.write(content)
await maybe_deferred_to_future(storage.store(file))
def _assert_stored(self, path: Path, content):
assert path.exists()
try:
assert path.read_bytes() == content
finally:
path.unlink()
@deferred_f_from_coro_f
async def test_append(self):
with MockFTPServer() as ftp_server:
filename = "file"
url = ftp_server.url(filename)
feed_options = {"overwrite": False}
await self._store(url, b"foo", feed_options=feed_options)
await self._store(url, b"bar", feed_options=feed_options)
self._assert_stored(ftp_server.path / filename, b"foobar")
@deferred_f_from_coro_f
async def test_overwrite(self):
with MockFTPServer() as ftp_server:
filename = "file"
url = ftp_server.url(filename)
await self._store(url, b"foo")
await self._store(url, b"bar")
self._assert_stored(ftp_server.path / filename, b"bar")
@deferred_f_from_coro_f
async def test_append_active_mode(self):
with MockFTPServer() as ftp_server:
settings = {"FEED_STORAGE_FTP_ACTIVE": True}
filename = "file"
url = ftp_server.url(filename)
feed_options = {"overwrite": False}
await self._store(url, b"foo", feed_options=feed_options, settings=settings)
await self._store(url, b"bar", feed_options=feed_options, settings=settings)
self._assert_stored(ftp_server.path / filename, b"foobar")
@deferred_f_from_coro_f
async def test_overwrite_active_mode(self):
with MockFTPServer() as ftp_server:
settings = {"FEED_STORAGE_FTP_ACTIVE": True}
filename = "file"
url = ftp_server.url(filename)
await self._store(url, b"foo", settings=settings)
await self._store(url, b"bar", settings=settings)
self._assert_stored(ftp_server.path / filename, b"bar")
def test_uri_auth_quote(self):
# RFC3986: 3.2.1. User Information
pw_quoted = quote(string.punctuation, safe="")
st = FTPFeedStorage(f"ftp://foo:{pw_quoted}@example.com/some_path", {})
assert st.password == string.punctuation
class MyBlockingFeedStorage(BlockingFeedStorage):
def _store_in_thread(self, file: IO[bytes]) -> None:
return
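# BlockingFeedStorage contract, as exercised by the tests below: open()
# returns a temporary file under FEED_TEMPDIR (or the system temp dir), and
# store() defers _store_in_thread() to a thread so a slow upload does not
# block the event loop. A no-op subclass like MyBlockingFeedStorage above is
# enough to test the open() side in isolation.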
class TestBlockingFeedStorage:
def get_test_spider(self, settings=None):
class TestSpider(scrapy.Spider):
name = "test_spider"
crawler = get_crawler(settings_dict=settings)
return TestSpider.from_crawler(crawler)
def test_default_temp_dir(self):
b = MyBlockingFeedStorage()
storage_file = b.open(self.get_test_spider())
storage_dir = Path(storage_file.name).parent
assert str(storage_dir) == tempfile.gettempdir()
def test_temp_file(self, tmp_path):
b = MyBlockingFeedStorage()
spider = self.get_test_spider({"FEED_TEMPDIR": str(tmp_path)})
storage_file = b.open(spider)
storage_dir = Path(storage_file.name).parent
assert storage_dir == tmp_path
def test_invalid_folder(self, tmp_path):
b = MyBlockingFeedStorage()
invalid_path = tmp_path / "invalid_path"
spider = self.get_test_spider({"FEED_TEMPDIR": str(invalid_path)})
with pytest.raises(OSError, match="Not a Directory:"):
b.open(spider=spider)
@pytest.mark.requires_boto3
class TestS3FeedStorage:
def test_parse_credentials(self):
aws_credentials = {
"AWS_ACCESS_KEY_ID": "settings_key",
"AWS_SECRET_ACCESS_KEY": "settings_secret",
"AWS_SESSION_TOKEN": "settings_token",
}
crawler = get_crawler(settings_dict=aws_credentials)
# Instantiate with crawler
storage = S3FeedStorage.from_crawler(
crawler,
"s3://mybucket/export.csv",
)
assert storage.access_key == "settings_key"
assert storage.secret_key == "settings_secret"
assert storage.session_token == "settings_token"
# Instantiate directly
storage = S3FeedStorage(
"s3://mybucket/export.csv",
aws_credentials["AWS_ACCESS_KEY_ID"],
aws_credentials["AWS_SECRET_ACCESS_KEY"],
session_token=aws_credentials["AWS_SESSION_TOKEN"],
)
assert storage.access_key == "settings_key"
assert storage.secret_key == "settings_secret"
assert storage.session_token == "settings_token"
# URI priority > settings priority
storage = S3FeedStorage(
"s3://uri_key:uri_secret@mybucket/export.csv",
aws_credentials["AWS_ACCESS_KEY_ID"],
aws_credentials["AWS_SECRET_ACCESS_KEY"],
)
assert storage.access_key == "uri_key"
assert storage.secret_key == "uri_secret"
@deferred_f_from_coro_f
async def test_store(self):
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
}
crawler = get_crawler(settings_dict=settings)
bucket = "mybucket"
key = "export.csv"
storage = S3FeedStorage.from_crawler(crawler, f"s3://{bucket}/{key}")
verifyObject(IFeedStorage, storage)
file = mock.MagicMock()
storage.s3_client = mock.MagicMock()
await maybe_deferred_to_future(storage.store(file))
assert storage.s3_client.upload_fileobj.call_args == mock.call(
Bucket=bucket, Key=key, Fileobj=file
)
def test_init_without_acl(self):
storage = S3FeedStorage("s3://mybucket/export.csv", "access_key", "secret_key")
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.acl is None
def test_init_with_acl(self):
storage = S3FeedStorage(
"s3://mybucket/export.csv", "access_key", "secret_key", "custom-acl"
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.acl == "custom-acl"
def test_init_with_endpoint_url(self):
storage = S3FeedStorage(
"s3://mybucket/export.csv",
"access_key",
"secret_key",
endpoint_url="https://example.com",
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.endpoint_url == "https://example.com"
def test_init_with_region_name(self):
region_name = "ap-east-1"
storage = S3FeedStorage(
"s3://mybucket/export.csv",
"access_key",
"secret_key",
region_name=region_name,
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.region_name == region_name
assert storage.s3_client._client_config.region_name == region_name
def test_from_crawler_without_acl(self):
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(
crawler,
"s3://mybucket/export.csv",
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.acl is None
def test_without_endpoint_url(self):
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(
crawler,
"s3://mybucket/export.csv",
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.endpoint_url is None
def test_without_region_name(self):
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(
crawler,
"s3://mybucket/export.csv",
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.s3_client._client_config.region_name == "us-east-1"
def test_from_crawler_with_acl(self):
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
"FEED_STORAGE_S3_ACL": "custom-acl",
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(
crawler,
"s3://mybucket/export.csv",
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.acl == "custom-acl"
def test_from_crawler_with_endpoint_url(self):
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
"AWS_ENDPOINT_URL": "https://example.com",
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(crawler, "s3://mybucket/export.csv")
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.endpoint_url == "https://example.com"
def test_from_crawler_with_region_name(self):
region_name = "ap-east-1"
settings = {
"AWS_ACCESS_KEY_ID": "access_key",
"AWS_SECRET_ACCESS_KEY": "secret_key",
"AWS_REGION_NAME": region_name,
}
crawler = get_crawler(settings_dict=settings)
storage = S3FeedStorage.from_crawler(crawler, "s3://mybucket/export.csv")
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.region_name == region_name
assert storage.s3_client._client_config.region_name == region_name
@deferred_f_from_coro_f
async def test_store_without_acl(self):
storage = S3FeedStorage(
"s3://mybucket/export.csv",
"access_key",
"secret_key",
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.acl is None
storage.s3_client = mock.MagicMock()
await maybe_deferred_to_future(storage.store(BytesIO(b"test file")))
acl = (
storage.s3_client.upload_fileobj.call_args[1]
.get("ExtraArgs", {})
.get("ACL")
)
assert acl is None
@deferred_f_from_coro_f
async def test_store_with_acl(self):
storage = S3FeedStorage(
"s3://mybucket/export.csv", "access_key", "secret_key", "custom-acl"
)
assert storage.access_key == "access_key"
assert storage.secret_key == "secret_key"
assert storage.acl == "custom-acl"
storage.s3_client = mock.MagicMock()
await maybe_deferred_to_future(storage.store(BytesIO(b"test file")))
acl = storage.s3_client.upload_fileobj.call_args[1]["ExtraArgs"]["ACL"]
assert acl == "custom-acl"
def test_overwrite_default(self):
with LogCapture() as log:
S3FeedStorage(
"s3://mybucket/export.csv", "access_key", "secret_key", "custom-acl"
)
assert "S3 does not support appending to files" not in str(log)
def test_overwrite_false(self):
with LogCapture() as log:
S3FeedStorage(
"s3://mybucket/export.csv",
"access_key",
"secret_key",
"custom-acl",
feed_options={"overwrite": False},
)
assert "S3 does not support appending to files" in str(log)
class TestGCSFeedStorage:
def test_parse_settings(self):
try:
from google.cloud.storage import Client # noqa: F401,PLC0415
except ImportError:
pytest.skip("GCSFeedStorage requires google-cloud-storage")
settings = {"GCS_PROJECT_ID": "123", "FEED_STORAGE_GCS_ACL": "publicRead"}
crawler = get_crawler(settings_dict=settings)
storage = GCSFeedStorage.from_crawler(crawler, "gs://mybucket/export.csv")
assert storage.project_id == "123"
assert storage.acl == "publicRead"
assert storage.bucket_name == "mybucket"
assert storage.blob_name == "export.csv"
def test_parse_empty_acl(self):
try:
from google.cloud.storage import Client # noqa: F401,PLC0415
except ImportError:
pytest.skip("GCSFeedStorage requires google-cloud-storage")
settings = {"GCS_PROJECT_ID": "123", "FEED_STORAGE_GCS_ACL": ""}
crawler = get_crawler(settings_dict=settings)
storage = GCSFeedStorage.from_crawler(crawler, "gs://mybucket/export.csv")
assert storage.acl is None
settings = {"GCS_PROJECT_ID": "123", "FEED_STORAGE_GCS_ACL": None}
crawler = get_crawler(settings_dict=settings)
storage = GCSFeedStorage.from_crawler(crawler, "gs://mybucket/export.csv")
assert storage.acl is None
@deferred_f_from_coro_f
async def test_store(self):
try:
from google.cloud.storage import Client # noqa: F401,PLC0415
except ImportError:
pytest.skip("GCSFeedStorage requires google-cloud-storage")
uri = "gs://mybucket/export.csv"
project_id = "myproject-123"
acl = "publicRead"
(client_mock, bucket_mock, blob_mock) = mock_google_cloud_storage()
with mock.patch("google.cloud.storage.Client") as m:
m.return_value = client_mock
f = mock.Mock()
storage = GCSFeedStorage(uri, project_id, acl)
await maybe_deferred_to_future(storage.store(f))
f.seek.assert_called_once_with(0)
m.assert_called_once_with(project=project_id)
client_mock.get_bucket.assert_called_once_with("mybucket")
bucket_mock.blob.assert_called_once_with("export.csv")
blob_mock.upload_from_file.assert_called_once_with(f, predefined_acl=acl)
def test_overwrite_default(self):
with LogCapture() as log:
GCSFeedStorage("gs://mybucket/export.csv", "myproject-123", "custom-acl")
assert "GCS does not support appending to files" not in str(log)
def test_overwrite_false(self):
with LogCapture() as log:
GCSFeedStorage(
"gs://mybucket/export.csv",
"myproject-123",
"custom-acl",
feed_options={"overwrite": False},
)
assert "GCS does not support appending to files" in str(log)
class TestStdoutFeedStorage:
def test_store(self):
out = BytesIO()
storage = StdoutFeedStorage("stdout:", _stdout=out)
file = storage.open(scrapy.Spider("default"))
file.write(b"content")
storage.store(file)
assert out.getvalue() == b"content"
def test_overwrite_default(self):
with LogCapture() as log:
StdoutFeedStorage("stdout:")
assert (
"Standard output (stdout) storage does not support overwriting"
not in str(log)
)
def test_overwrite_true(self):
with LogCapture() as log:
StdoutFeedStorage("stdout:", feed_options={"overwrite": True})
assert "Standard output (stdout) storage does not support overwriting" in str(
log
)
class FromCrawlerMixin:
init_with_crawler = False
@classmethod
def from_crawler(cls, crawler, *args, feed_options=None, **kwargs):
cls.init_with_crawler = True
return cls(*args, **kwargs)
class FromCrawlerCsvItemExporter(CsvItemExporter, FromCrawlerMixin):
pass
class FromCrawlerFileFeedStorage(FileFeedStorage, FromCrawlerMixin):
@classmethod
def from_crawler(cls, crawler, *args, feed_options=None, **kwargs):
cls.init_with_crawler = True
return cls(*args, feed_options=feed_options, **kwargs)
class DummyBlockingFeedStorage(BlockingFeedStorage):
def __init__(self, uri, *args, feed_options=None):
self.path = Path(file_uri_to_path(uri))
def _store_in_thread(self, file):
dirname = self.path.parent
if dirname and not dirname.exists():
dirname.mkdir(parents=True)
with self.path.open("ab") as output_file:
output_file.write(file.read())
file.close()
class FailingBlockingFeedStorage(DummyBlockingFeedStorage):
def _store_in_thread(self, file):
raise OSError("Cannot store")
@implementer(IFeedStorage)
class LogOnStoreFileStorage:
"""
This storage logs inside `store` method.
It can be used to make sure `store` method is invoked.
"""
def __init__(self, uri, feed_options=None):
self.path = file_uri_to_path(uri)
self.logger = getLogger()
def open(self, spider):
return tempfile.NamedTemporaryFile(prefix="feed-")
def store(self, file):
self.logger.info("Storage.store is called")
file.close()
class TestFeedExportBase(ABC):
mockserver: MockServer
class MyItem(scrapy.Item):
foo = scrapy.Field()
egg = scrapy.Field()
baz = scrapy.Field()
class MyItem2(scrapy.Item):
foo = scrapy.Field()
hello = scrapy.Field()
def _random_temp_filename(self, inter_dir="") -> Path:
chars = [random.choice(ascii_letters + digits) for _ in range(15)]
filename = "".join(chars)
return Path(self.temp_dir, inter_dir, filename)
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.temp_dir = tempfile.mkdtemp()
def teardown_method(self):
shutil.rmtree(self.temp_dir, ignore_errors=True)
async def exported_data(
self, items: Iterable[Any], settings: dict[str, Any]
) -> dict[str, Any]:
"""
Return exported data which a spider yielding ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = "testspider"
def parse(self, response):
yield from items
return await self.run_and_export(TestSpider, settings)
async def exported_no_data(self, settings: dict[str, Any]) -> dict[str, Any]:
"""
Return exported data which a spider yielding no ``items`` would return.
"""
class TestSpider(scrapy.Spider):
name = "testspider"
def parse(self, response):
pass
return await self.run_and_export(TestSpider, settings)
async def assertExported(
self,
items: Iterable[Any],
header: Iterable[str],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
await self.assertExportedCsv(items, header, rows, settings)
await self.assertExportedJsonLines(items, rows, settings)
await self.assertExportedXml(items, rows, settings)
await self.assertExportedPickle(items, rows, settings)
await self.assertExportedMarshal(items, rows, settings)
await self.assertExportedMultiple(items, rows, settings)
async def assertExportedCsv( # noqa: B027
self,
items: Iterable[Any],
header: Iterable[str],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedJsonLines( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedXml( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedMultiple( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedPickle( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
async def assertExportedMarshal( # noqa: B027
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
pass
@abstractmethod
async def run_and_export(
self, spider_cls: type[Spider], settings: dict[str, Any]
) -> dict[str, Any]:
pass
def _load_until_eof(self, data, load_func):
result = []
with tempfile.TemporaryFile() as temp:
temp.write(data)
temp.seek(0)
while True:
try:
result.append(load_func(temp))
except EOFError:
break
return result
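# Usage sketch for _load_until_eof above (illustrative, assuming exported
# data keyed by format as in run_and_export):
#
#     items = self._load_until_eof(data["pickle"], load_func=pickle.load)
#
# Both pickle.load and marshal.load raise EOFError at end of stream, which is
# what terminates the loop.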
class InstrumentedFeedSlot(FeedSlot):
"""Instrumented FeedSlot subclass for keeping track of calls to
start_exporting and finish_exporting."""
def start_exporting(self):
self.update_listener("start")
super().start_exporting()
def finish_exporting(self):
self.update_listener("finish")
super().finish_exporting()
@classmethod
def subscribe__listener(cls, listener):
cls.update_listener = listener.update
class IsExportingListener:
"""When subscribed to InstrumentedFeedSlot, keeps track of when
a call to start_exporting has been made without a closing call to
finish_exporting and when a call to finish_exporting has been made
before a call to start_exporting."""
def __init__(self):
self.start_without_finish = False
self.finish_without_start = False
def update(self, method):
if method == "start":
self.start_without_finish = True
elif method == "finish":
if self.start_without_finish:
self.start_without_finish = False
else:
                self.finish_without_start = True
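# Wiring sketch (illustrative): a test subscribes a listener to the
# instrumented slot class and inspects the flags after the crawl, e.g.:
#
#     listener = IsExportingListener()
#     InstrumentedFeedSlot.subscribe__listener(listener)
#     ...  # run a crawl whose feed slots are InstrumentedFeedSlot
#     assert not listener.start_without_finish
#     assert not listener.finish_without_start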
class ExceptionJsonItemExporter(JsonItemExporter):
"""JsonItemExporter that throws an exception every time export_item is called."""
def export_item(self, _):
raise RuntimeError("foo")
class TestFeedExport(TestFeedExportBase):
async def run_and_export(
self, spider_cls: type[Spider], settings: dict[str, Any]
) -> dict[str, Any]:
"""Run spider with specified settings; return exported data."""
FEEDS = settings.get("FEEDS") or {}
settings["FEEDS"] = {
printf_escape(path_to_url(file_path)): feed_options
for file_path, feed_options in FEEDS.items()
}
content: dict[str, Any] = {}
try:
spider_cls.start_urls = [self.mockserver.url("/")]
crawler = get_crawler(spider_cls, settings)
await crawler.crawl_async()
for file_path, feed_options in FEEDS.items():
content[feed_options["format"]] = (
Path(file_path).read_bytes() if Path(file_path).exists() else None
)
finally:
for file_path in FEEDS:
if not Path(file_path).exists():
continue
Path(file_path).unlink()
return content
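    # Note on the FEEDS key transformation above: local paths are turned into
    # file:// URIs, and any literal "%" is doubled via printf_escape so it
    # survives Scrapy's %(name)s URI-parameter substitution.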
async def assertExportedCsv(
self,
items: Iterable[Any],
header: Iterable[str],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename(): {"format": "csv"},
},
}
)
data = await self.exported_data(items, settings)
reader = csv.DictReader(to_unicode(data["csv"]).splitlines())
assert reader.fieldnames == list(header)
assert rows == list(reader)
async def assertExportedJsonLines(
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename(): {"format": "jl"},
},
}
)
data = await self.exported_data(items, settings)
parsed = [json.loads(to_unicode(line)) for line in data["jl"].splitlines()]
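        # The expected rows drop falsy values: unlike CSV, which emits a
        # column for every header, the JSON lines export only contains the
        # fields actually populated on each item.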
rows = [{k: v for k, v in row.items() if v} for row in rows]
assert rows == parsed
async def assertExportedXml(
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename(): {"format": "xml"},
},
}
)
data = await self.exported_data(items, settings)
rows = [{k: v for k, v in row.items() if v} for row in rows]
root = lxml.etree.fromstring(data["xml"])
got_rows = [{e.tag: e.text for e in it} for it in root.findall("item")]
assert rows == got_rows
async def assertExportedMultiple(
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename(): {"format": "xml"},
self._random_temp_filename(): {"format": "json"},
},
}
)
data = await self.exported_data(items, settings)
rows = [{k: v for k, v in row.items() if v} for row in rows]
# XML
root = lxml.etree.fromstring(data["xml"])
xml_rows = [{e.tag: e.text for e in it} for it in root.findall("item")]
assert rows == xml_rows
# JSON
json_rows = json.loads(to_unicode(data["json"]))
assert rows == json_rows
async def assertExportedPickle(
self,
items: Iterable[Any],
rows: Iterable[dict[str, Any]],
settings: dict[str, Any] | None = None,
) -> None:
settings = settings or {}
settings.update(
{
"FEEDS": {
self._random_temp_filename(): {"format": "pickle"},
},
}
)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_extension_statsmailer.py | tests/test_extension_statsmailer.py | from unittest.mock import MagicMock
import pytest
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.extensions import statsmailer
from scrapy.mail import MailSender
from scrapy.signalmanager import SignalManager
from scrapy.statscollectors import StatsCollector
from scrapy.utils.spider import DefaultSpider
@pytest.fixture
def dummy_stats():
class DummyStats(StatsCollector):
def __init__(self):
# pylint: disable=super-init-not-called
self._stats = {"global_item_scraped_count": 42}
def get_stats(self):
return {"item_scraped_count": 10, **self._stats}
return DummyStats()
def test_from_crawler_without_recipients_raises_notconfigured():
crawler = MagicMock()
crawler.settings.getlist.return_value = []
crawler.stats = MagicMock()
with pytest.raises(NotConfigured):
statsmailer.StatsMailer.from_crawler(crawler)
def test_from_crawler_with_recipients_initializes_extension(dummy_stats, monkeypatch):
crawler = MagicMock()
crawler.settings.getlist.return_value = ["test@example.com"]
crawler.stats = dummy_stats
crawler.signals = SignalManager(crawler)
mailer = MagicMock(spec=MailSender)
monkeypatch.setattr(statsmailer.MailSender, "from_crawler", lambda _: mailer)
ext = statsmailer.StatsMailer.from_crawler(crawler)
assert isinstance(ext, statsmailer.StatsMailer)
assert ext.recipients == ["test@example.com"]
assert ext.mail is mailer
def test_from_crawler_connects_spider_closed_signal(dummy_stats, monkeypatch):
crawler = MagicMock()
crawler.settings.getlist.return_value = ["test@example.com"]
crawler.stats = dummy_stats
crawler.signals = SignalManager(crawler)
mailer = MagicMock(spec=MailSender)
monkeypatch.setattr(statsmailer.MailSender, "from_crawler", lambda _: mailer)
statsmailer.StatsMailer.from_crawler(crawler)
connected = crawler.signals.send_catch_log(
signals.spider_closed, spider=DefaultSpider(name="dummy")
)
assert connected is not None
def test_spider_closed_sends_email(dummy_stats):
recipients = ["test@example.com"]
mail = MagicMock(spec=MailSender)
ext = statsmailer.StatsMailer(dummy_stats, recipients, mail)
spider = DefaultSpider(name="dummy")
ext.spider_closed(spider)
args, _ = mail.send.call_args
to, subject, body = args
assert to == recipients
assert "Scrapy stats for: dummy" in subject
assert "global_item_scraped_count" in body
assert "item_scraped_count" in body
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_core_downloader.py | tests/test_core_downloader.py | from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, cast
import OpenSSL.SSL
import pytest
from pytest_twisted import async_yield_fixture
from twisted.web import server, static
from twisted.web.client import Agent, BrowserLikePolicyForHTTPS, readBody
from twisted.web.client import Response as TxResponse
from scrapy.core.downloader import Downloader, Slot
from scrapy.core.downloader.contextfactory import (
ScrapyClientContextFactory,
load_context_factory_from_settings,
)
from scrapy.core.downloader.handlers.http11 import _RequestBodyProducer
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.settings import Settings
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.python import to_bytes
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests.mockserver.http_resources import PayloadResource
from tests.mockserver.utils import ssl_context_factory
if TYPE_CHECKING:
from twisted.internet.defer import Deferred
from twisted.web.iweb import IBodyProducer
class TestSlot:
def test_repr(self):
slot = Slot(concurrency=8, delay=0.1, randomize_delay=True)
assert repr(slot) == "Slot(concurrency=8, delay=0.10, randomize_delay=True)"
class TestContextFactoryBase:
context_factory = None
@async_yield_fixture
async def server_url(self, tmp_path):
(tmp_path / "file").write_bytes(b"0123456789")
r = static.File(str(tmp_path))
r.putChild(b"payload", PayloadResource())
site = server.Site(r, timeout=None)
port = self._listen(site)
portno = port.getHost().port
yield f"https://127.0.0.1:{portno}/"
await port.stopListening()
def _listen(self, site):
from twisted.internet import reactor
return reactor.listenSSL(
0,
site,
contextFactory=self.context_factory or ssl_context_factory(),
interface="127.0.0.1",
)
@staticmethod
async def get_page(
url: str,
client_context_factory: BrowserLikePolicyForHTTPS,
body: str | None = None,
) -> bytes:
from twisted.internet import reactor
agent = Agent(reactor, contextFactory=client_context_factory)
body_producer = _RequestBodyProducer(body.encode()) if body else None
response: TxResponse = cast(
"TxResponse",
await maybe_deferred_to_future(
agent.request(
b"GET",
url.encode(),
bodyProducer=cast("IBodyProducer", body_producer),
)
),
)
with warnings.catch_warnings():
# https://github.com/twisted/twisted/issues/8227
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message=r".*does not have an abortConnection method",
)
d: Deferred[bytes] = readBody(response) # type: ignore[arg-type]
return await maybe_deferred_to_future(d)
class TestContextFactory(TestContextFactoryBase):
@deferred_f_from_coro_f
async def testPayload(self, server_url: str) -> None:
s = "0123456789" * 10
crawler = get_crawler()
settings = Settings()
client_context_factory = load_context_factory_from_settings(settings, crawler)
body = await self.get_page(
server_url + "payload", client_context_factory, body=s
)
assert body == to_bytes(s)
def test_override_getContext(self):
class MyFactory(ScrapyClientContextFactory):
def getContext(
self, hostname: Any = None, port: Any = None
) -> OpenSSL.SSL.Context:
ctx: OpenSSL.SSL.Context = super().getContext(hostname, port)
return ctx
with warnings.catch_warnings(record=True) as w:
MyFactory()
assert len(w) == 1
assert (
"Overriding ScrapyClientContextFactory.getContext() is deprecated"
in str(w[0].message)
)
class TestContextFactoryTLSMethod(TestContextFactoryBase):
async def _assert_factory_works(
self, server_url: str, client_context_factory: ScrapyClientContextFactory
) -> None:
s = "0123456789" * 10
body = await self.get_page(
server_url + "payload", client_context_factory, body=s
)
assert body == to_bytes(s)
@deferred_f_from_coro_f
async def test_setting_default(self, server_url: str) -> None:
crawler = get_crawler()
settings = Settings()
client_context_factory = load_context_factory_from_settings(settings, crawler)
assert client_context_factory._ssl_method == OpenSSL.SSL.SSLv23_METHOD
await self._assert_factory_works(server_url, client_context_factory)
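    # Note: despite its name, OpenSSL's SSLv23_METHOD is the version-flexible
    # method that negotiates the highest TLS version both peers support, which
    # is why it is the expected default here.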
def test_setting_none(self):
crawler = get_crawler()
settings = Settings({"DOWNLOADER_CLIENT_TLS_METHOD": None})
with pytest.raises(KeyError):
load_context_factory_from_settings(settings, crawler)
def test_setting_bad(self):
crawler = get_crawler()
settings = Settings({"DOWNLOADER_CLIENT_TLS_METHOD": "bad"})
with pytest.raises(KeyError):
load_context_factory_from_settings(settings, crawler)
@deferred_f_from_coro_f
async def test_setting_explicit(self, server_url: str) -> None:
crawler = get_crawler()
settings = Settings({"DOWNLOADER_CLIENT_TLS_METHOD": "TLSv1.2"})
client_context_factory = load_context_factory_from_settings(settings, crawler)
assert client_context_factory._ssl_method == OpenSSL.SSL.TLSv1_2_METHOD
await self._assert_factory_works(server_url, client_context_factory)
@deferred_f_from_coro_f
async def test_direct_from_crawler(self, server_url: str) -> None:
# the setting is ignored
crawler = get_crawler(settings_dict={"DOWNLOADER_CLIENT_TLS_METHOD": "bad"})
client_context_factory = build_from_crawler(ScrapyClientContextFactory, crawler)
assert client_context_factory._ssl_method == OpenSSL.SSL.SSLv23_METHOD
await self._assert_factory_works(server_url, client_context_factory)
@deferred_f_from_coro_f
async def test_direct_init(self, server_url: str) -> None:
client_context_factory = ScrapyClientContextFactory(OpenSSL.SSL.TLSv1_2_METHOD)
assert client_context_factory._ssl_method == OpenSSL.SSL.TLSv1_2_METHOD
await self._assert_factory_works(server_url, client_context_factory)
@deferred_f_from_coro_f
async def test_fetch_deprecated_spider_arg():
class CustomDownloader(Downloader):
def fetch(self, request, spider): # pylint: disable=signature-differs
return super().fetch(request, spider)
crawler = get_crawler(DefaultSpider, {"DOWNLOADER": CustomDownloader})
with pytest.warns(
ScrapyDeprecationWarning,
match=r"The fetch\(\) method of .+\.CustomDownloader requires a spider argument",
):
await crawler.crawl_async()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_iterators.py | tests/test_utils_iterators.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
import pytest
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Response, TextResponse, XmlResponse
from scrapy.utils.iterators import _body_or_str, csviter, xmliter, xmliter_lxml
from tests import get_testdata
if TYPE_CHECKING:
from collections.abc import Iterator
from scrapy import Selector
class TestXmliterBase(ABC):
@abstractmethod
def xmliter(
self, obj: Response | str | bytes, nodename: str, *args: Any
) -> Iterator[Selector]:
raise NotImplementedError
def test_xmliter(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<products xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="someschmea.xsd">
<product id="001">
<type>Type 1</type>
<name>Name 1</name>
</product>
<product id="002">
<type>Type 2</type>
<name>Name 2</name>
</product>
</products>
"""
response = XmlResponse(url="http://example.com", body=body)
attrs = [
(
x.attrib["id"],
x.xpath("name/text()").getall(),
x.xpath("./type/text()").getall(),
)
for x in self.xmliter(response, "product")
]
assert attrs == [
("001", ["Name 1"], ["Type 1"]),
("002", ["Name 2"], ["Type 2"]),
]
def test_xmliter_unusual_node(self):
body = b"""<?xml version="1.0" encoding="UTF-8"?>
<root>
<matchme...></matchme...>
<matchmenot></matchmenot>
</root>
"""
response = XmlResponse(url="http://example.com", body=body)
nodenames = [
e.xpath("name()").getall() for e in self.xmliter(response, "matchme...")
]
assert nodenames == [["matchme..."]]
def test_xmliter_unicode(self):
# example taken from https://github.com/scrapy/scrapy/issues/1665
body = """<?xml version="1.0" encoding="UTF-8"?>
<þingflokkar>
<þingflokkur id="26">
<heiti />
<skammstafanir>
<stuttskammstöfun>-</stuttskammstöfun>
<löngskammstöfun />
</skammstafanir>
<tímabil>
<fyrstaþing>80</fyrstaþing>
</tímabil>
</þingflokkur>
<þingflokkur id="21">
<heiti>Alþýðubandalag</heiti>
<skammstafanir>
<stuttskammstöfun>Ab</stuttskammstöfun>
<löngskammstöfun>Alþb.</löngskammstöfun>
</skammstafanir>
<tímabil>
<fyrstaþing>76</fyrstaþing>
<síðastaþing>123</síðastaþing>
</tímabil>
</þingflokkur>
<þingflokkur id="27">
<heiti>Alþýðuflokkur</heiti>
<skammstafanir>
<stuttskammstöfun>A</stuttskammstöfun>
<löngskammstöfun>Alþfl.</löngskammstöfun>
</skammstafanir>
<tímabil>
<fyrstaþing>27</fyrstaþing>
<síðastaþing>120</síðastaþing>
</tímabil>
</þingflokkur>
</þingflokkar>"""
for r in (
# with bytes
XmlResponse(url="http://example.com", body=body.encode("utf-8")),
# Unicode body needs encoding information
XmlResponse(url="http://example.com", body=body, encoding="utf-8"),
):
attrs = [
(
x.attrib["id"],
x.xpath("./skammstafanir/stuttskammstöfun/text()").getall(),
x.xpath("./tímabil/fyrstaþing/text()").getall(),
)
for x in self.xmliter(r, "þingflokkur")
]
assert attrs == [
("26", ["-"], ["80"]),
("21", ["Ab"], ["76"]),
("27", ["A"], ["27"]),
]
def test_xmliter_text(self):
body = (
'<?xml version="1.0" encoding="UTF-8"?>'
"<products><product>one</product><product>two</product></products>"
)
assert [x.xpath("text()").getall() for x in self.xmliter(body, "product")] == [
["one"],
["two"],
]
def test_xmliter_namespaces(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "item")
node = next(my_iter)
node.register_namespace("g", "http://base.google.com/ns/1.0")
assert node.xpath("title/text()").getall() == ["Item 1"]
assert node.xpath("description/text()").getall() == ["This is item 1"]
assert node.xpath("link/text()").getall() == [
"http://www.mydummycompany.com/items/1"
]
assert node.xpath("g:image_link/text()").getall() == [
"http://www.mydummycompany.com/images/item1.jpg"
]
assert node.xpath("g:id/text()").getall() == ["ITEM_1"]
assert node.xpath("g:price/text()").getall() == ["400"]
assert node.xpath("image_link/text()").getall() == []
assert node.xpath("id/text()").getall() == []
assert node.xpath("price/text()").getall() == []
def test_xmliter_namespaced_nodename(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "g:image_link")
node = next(my_iter)
node.register_namespace("g", "http://base.google.com/ns/1.0")
assert node.xpath("text()").extract() == [
"http://www.mydummycompany.com/images/item1.jpg"
]
def test_xmliter_namespaced_nodename_missing(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "g:link_image")
with pytest.raises(StopIteration):
next(my_iter)
def test_xmliter_exception(self):
body = (
'<?xml version="1.0" encoding="UTF-8"?>'
"<products><product>one</product><product>two</product></products>"
)
my_iter = self.xmliter(body, "product")
next(my_iter)
next(my_iter)
with pytest.raises(StopIteration):
next(my_iter)
def test_xmliter_objtype_exception(self):
i = self.xmliter(42, "product")
with pytest.raises(TypeError):
next(i)
def test_xmliter_encoding(self):
body = (
b'<?xml version="1.0" encoding="ISO-8859-9"?>\n'
b"<xml>\n"
b" <item>Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6</item>\n"
b"</xml>\n\n"
)
response = XmlResponse("http://www.example.com", body=body)
assert (
next(self.xmliter(response, "item")).get()
== "<item>Some Turkish Characters \xd6\xc7\u015e\u0130\u011e\xdc \xfc\u011f\u0131\u015f\xe7\xf6</item>"
)
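# Consumption pattern for these iterators (illustrative): each matching node
# is yielded as a parsel Selector, so a spider callback might do:
#
#     for node in xmliter_lxml(response, "product"):
#         yield {"id": node.attrib["id"],
#                "name": node.xpath("name/text()").get()}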
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestXmliter(TestXmliterBase):
def xmliter(
self, obj: Response | str | bytes, nodename: str, *args: Any
) -> Iterator[Selector]:
return xmliter(obj, nodename)
def test_deprecation(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<products>
<product></product>
</products>
"""
with pytest.warns(
ScrapyDeprecationWarning,
match="xmliter",
):
next(self.xmliter(body, "product"))
class TestLxmlXmliter(TestXmliterBase):
def xmliter(
self, obj: Response | str | bytes, nodename: str, *args: Any
) -> Iterator[Selector]:
return xmliter_lxml(obj, nodename, *args)
def test_xmliter_iterate_namespace(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<image_link>http://www.mydummycompany.com/images/item1.jpg</image_link>
<image_link>http://www.mydummycompany.com/images/item2.jpg</image_link>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
no_namespace_iter = self.xmliter(response, "image_link")
assert len(list(no_namespace_iter)) == 0
namespace_iter = self.xmliter(
response, "image_link", "http://base.google.com/ns/1.0"
)
node = next(namespace_iter)
assert node.xpath("text()").getall() == [
"http://www.mydummycompany.com/images/item1.jpg"
]
node = next(namespace_iter)
assert node.xpath("text()").getall() == [
"http://www.mydummycompany.com/images/item2.jpg"
]
def test_xmliter_namespaces_prefix(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<root>
<h:table xmlns:h="http://www.w3.org/TR/html4/">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="http://www.w3schools.com/furniture">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "table", "http://www.w3.org/TR/html4/", "h")
node = next(my_iter)
assert len(node.xpath("h:tr/h:td").getall()) == 2
assert node.xpath("h:tr/h:td[1]/text()").getall() == ["Apples"]
assert node.xpath("h:tr/h:td[2]/text()").getall() == ["Bananas"]
my_iter = self.xmliter(
response, "table", "http://www.w3schools.com/furniture", "f"
)
node = next(my_iter)
assert node.xpath("f:name/text()").getall() == ["African Coffee Table"]
def test_xmliter_objtype_exception(self):
i = self.xmliter(42, "product")
with pytest.raises(TypeError):
next(i)
class TestUtilsCsv:
def test_csviter_defaults(self):
body = get_testdata("feeds", "feed-sample3.csv")
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
result = list(csv)
assert result == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
# explicit type check cuz' we no like stinkin' autocasting! yarrr
for result_row in result:
assert all(isinstance(k, str) for k in result_row)
assert all(isinstance(v, str) for v in result_row.values())
def test_csviter_delimiter(self):
body = get_testdata("feeds", "feed-sample3.csv").replace(b",", b"\t")
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, delimiter="\t")
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_quotechar(self):
body1 = get_testdata("feeds", "feed-sample6.csv")
body2 = get_testdata("feeds", "feed-sample6.csv").replace(b",", b"|")
response1 = TextResponse(url="http://example.com/", body=body1)
csv1 = csviter(response1, quotechar="'")
assert list(csv1) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
response2 = TextResponse(url="http://example.com/", body=body2)
csv2 = csviter(response2, delimiter="|", quotechar="'")
assert list(csv2) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_wrong_quotechar(self):
body = get_testdata("feeds", "feed-sample6.csv")
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
assert list(csv) == [
{"'id'": "1", "'name'": "'alpha'", "'value'": "'foobar'"},
{
"'id'": "2",
"'name'": "'unicode'",
"'value'": "'\xfan\xedc\xf3d\xe9\u203d'",
},
{"'id'": "'3'", "'name'": "'multi'", "'value'": "'foo"},
{"'id'": "4", "'name'": "'empty'", "'value'": ""},
]
def test_csviter_delimiter_binary_response_assume_utf8_encoding(self):
body = get_testdata("feeds", "feed-sample3.csv").replace(b",", b"\t")
response = Response(url="http://example.com/", body=body)
csv = csviter(response, delimiter="\t")
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_headers(self):
sample = get_testdata("feeds", "feed-sample3.csv").splitlines()
headers, body = sample[0].split(b","), b"\n".join(sample[1:])
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, headers=[h.decode("utf-8") for h in headers])
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_falserow(self):
body = get_testdata("feeds", "feed-sample3.csv")
body = b"\n".join((body, b"a,b", b"a,b,c,d"))
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
assert list(csv) == [
{"id": "1", "name": "alpha", "value": "foobar"},
{"id": "2", "name": "unicode", "value": "\xfan\xedc\xf3d\xe9\u203d"},
{"id": "3", "name": "multi", "value": "foo\nbar"},
{"id": "4", "name": "empty", "value": ""},
]
def test_csviter_exception(self):
body = get_testdata("feeds", "feed-sample3.csv")
response = TextResponse(url="http://example.com/", body=body)
my_iter = csviter(response)
next(my_iter)
next(my_iter)
next(my_iter)
next(my_iter)
with pytest.raises(StopIteration):
next(my_iter)
def test_csviter_encoding(self):
body1 = get_testdata("feeds", "feed-sample4.csv")
body2 = get_testdata("feeds", "feed-sample5.csv")
response = TextResponse(
url="http://example.com/", body=body1, encoding="latin1"
)
csv = csviter(response)
assert list(csv) == [
{"id": "1", "name": "latin1", "value": "test"},
{"id": "2", "name": "something", "value": "\xf1\xe1\xe9\xf3"},
]
response = TextResponse(url="http://example.com/", body=body2, encoding="cp852")
csv = csviter(response)
assert list(csv) == [
{"id": "1", "name": "cp852", "value": "test"},
{
"id": "2",
"name": "something",
"value": "\u255a\u2569\u2569\u2569\u2550\u2550\u2557",
},
]
class TestBodyOrStr:
bbody = b"utf8-body"
ubody = bbody.decode("utf8")
@pytest.mark.parametrize(
"obj",
[
bbody,
ubody,
TextResponse(url="http://example.org/", body=bbody, encoding="utf-8"),
Response(url="http://example.org/", body=bbody),
],
)
def test_body_or_str(self, obj: Response | str | bytes) -> None:
r1 = _body_or_str(obj)
self._assert_type_and_value(r1, self.ubody, obj)
r2 = _body_or_str(obj, unicode=True)
self._assert_type_and_value(r2, self.ubody, obj)
r3 = _body_or_str(obj, unicode=False)
self._assert_type_and_value(r3, self.bbody, obj)
assert type(r1) is type(r2)
assert type(r1) is not type(r3)
@staticmethod
def _assert_type_and_value(
a: str | bytes, b: str | bytes, obj: Response | str | bytes
) -> None:
assert type(a) is type(b), f"Got {type(a)}, expected {type(b)} for {obj!r}"
assert a == b
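# Behavioural summary pinned by the test above (illustrative): _body_or_str
# defaults to text output (unicode=True), decoding Response bodies with the
# response encoding (a plain Response body is assumed utf-8 here), while
# unicode=False returns the body as bytes.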
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_httpproxy.py | tests/test_downloadermiddleware_httpproxy.py | import os
import pytest
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from scrapy.exceptions import NotConfigured
from scrapy.http import Request
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class TestHttpProxyMiddleware:
failureException = AssertionError # type: ignore[assignment]
def setup_method(self):
self._oldenv = os.environ.copy()
def teardown_method(self):
os.environ = self._oldenv
def test_not_enabled(self):
crawler = get_crawler(Spider, {"HTTPPROXY_ENABLED": False})
with pytest.raises(NotConfigured):
HttpProxyMiddleware.from_crawler(crawler)
def test_no_environment_proxies(self):
os.environ = {"dummy_proxy": "reset_env_and_do_not_raise"}
mw = HttpProxyMiddleware()
for url in ("http://e.com", "https://e.com", "file:///tmp/a"):
req = Request(url)
assert mw.process_request(req) is None
assert req.url == url
assert req.meta == {}
def test_environment_proxies(self):
os.environ["http_proxy"] = http_proxy = "https://proxy.for.http:3128"
os.environ["https_proxy"] = https_proxy = "http://proxy.for.https:8080"
os.environ.pop("file_proxy", None)
mw = HttpProxyMiddleware()
for url, proxy in [
("http://e.com", http_proxy),
("https://e.com", https_proxy),
("file://tmp/a", None),
]:
req = Request(url)
assert mw.process_request(req) is None
assert req.url == url
assert req.meta.get("proxy") == proxy
def test_proxy_precedence_meta(self):
os.environ["http_proxy"] = "https://proxy.com"
mw = HttpProxyMiddleware()
req = Request("http://scrapytest.org", meta={"proxy": "https://new.proxy:3128"})
assert mw.process_request(req) is None
assert req.meta == {"proxy": "https://new.proxy:3128"}
def test_proxy_auth(self):
os.environ["http_proxy"] = "https://user:pass@proxy:3128"
mw = HttpProxyMiddleware()
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic dXNlcjpwYXNz"
# proxy from request.meta
req = Request(
"http://scrapytest.org",
meta={"proxy": "https://username:password@proxy:3128"},
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert (
req.headers.get("Proxy-Authorization") == b"Basic dXNlcm5hbWU6cGFzc3dvcmQ="
)
def test_proxy_auth_empty_passwd(self):
os.environ["http_proxy"] = "https://user:@proxy:3128"
mw = HttpProxyMiddleware()
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic dXNlcjo="
# proxy from request.meta
req = Request(
"http://scrapytest.org", meta={"proxy": "https://username:@proxy:3128"}
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic dXNlcm5hbWU6"
def test_proxy_auth_encoding(self):
# utf-8 encoding
os.environ["http_proxy"] = "https://m\u00e1n:pass@proxy:3128"
mw = HttpProxyMiddleware(auth_encoding="utf-8")
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic bcOhbjpwYXNz"
# proxy from request.meta
req = Request(
"http://scrapytest.org", meta={"proxy": "https://\u00fcser:pass@proxy:3128"}
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic w7xzZXI6cGFzcw=="
# default latin-1 encoding
mw = HttpProxyMiddleware(auth_encoding="latin-1")
req = Request("http://scrapytest.org")
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic beFuOnBhc3M="
# proxy from request.meta, latin-1 encoding
req = Request(
"http://scrapytest.org", meta={"proxy": "https://\u00fcser:pass@proxy:3128"}
)
assert mw.process_request(req) is None
assert req.meta["proxy"] == "https://proxy:3128"
assert req.headers.get("Proxy-Authorization") == b"Basic /HNlcjpwYXNz"
    def test_proxy_already_set(self):
os.environ["http_proxy"] = "https://proxy.for.http:3128"
mw = HttpProxyMiddleware()
req = Request("http://noproxy.com", meta={"proxy": None})
assert mw.process_request(req) is None
assert "proxy" in req.meta
assert req.meta["proxy"] is None
def test_no_proxy(self):
os.environ["http_proxy"] = "https://proxy.for.http:3128"
mw = HttpProxyMiddleware()
os.environ["no_proxy"] = "*"
req = Request("http://noproxy.com")
assert mw.process_request(req) is None
assert "proxy" not in req.meta
os.environ["no_proxy"] = "other.com"
req = Request("http://noproxy.com")
assert mw.process_request(req) is None
assert "proxy" in req.meta
os.environ["no_proxy"] = "other.com,noproxy.com"
req = Request("http://noproxy.com")
assert mw.process_request(req) is None
assert "proxy" not in req.meta
# proxy from meta['proxy'] takes precedence
os.environ["no_proxy"] = "*"
req = Request("http://noproxy.com", meta={"proxy": "http://proxy.com"})
assert mw.process_request(req) is None
assert req.meta == {"proxy": "http://proxy.com"}
def test_no_proxy_invalid_values(self):
os.environ["no_proxy"] = "/var/run/docker.sock"
mw = HttpProxyMiddleware()
        # '/var/run/docker.sock' may be used by the user as a no_proxy
        # value, but it is not parseable and should be skipped
assert "no" not in mw.proxies
def test_add_proxy_without_credentials(self):
middleware = HttpProxyMiddleware()
request = Request("https://example.com")
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert b"Proxy-Authorization" not in request.headers
def test_add_proxy_with_credentials(self):
middleware = HttpProxyMiddleware()
request = Request("https://example.com")
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_remove_proxy_without_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = None
assert middleware.process_request(request) is None
assert request.meta["proxy"] is None
assert b"Proxy-Authorization" not in request.headers
def test_remove_proxy_with_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = None
assert middleware.process_request(request) is None
assert request.meta["proxy"] is None
assert b"Proxy-Authorization" not in request.headers
def test_add_credentials(self):
"""If the proxy request meta switches to a proxy URL with the same
proxy and adds credentials (there were no credentials before), the new
credentials must be used."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_change_credentials(self):
"""If the proxy request meta switches to a proxy URL with different
credentials, those new credentials must be used."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user2:password2@example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user2",
"password2",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_remove_credentials(self):
"""If the proxy request meta switches to a proxy URL with the same
proxy but no credentials, the original credentials must be still
used.
To remove credentials while keeping the same proxy URL, users must
delete the Proxy-Authorization header.
"""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
request.meta["proxy"] = "https://example.com"
del request.headers[b"Proxy-Authorization"]
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert b"Proxy-Authorization" not in request.headers
def test_change_proxy_add_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.org"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.org"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_change_proxy_keep_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user1:password1@example.org"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.org"
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
# Make sure, indirectly, that _auth_proxy is updated.
request.meta["proxy"] = "https://example.com"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert b"Proxy-Authorization" not in request.headers
def test_change_proxy_change_credentials(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://user2:password2@example.org"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.org"
encoded_credentials = middleware._basic_auth_header(
"user2",
"password2",
)
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_change_proxy_remove_credentials(self):
"""If the proxy request meta switches to a proxy URL with a different
proxy and no credentials, no credentials must be used."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.org"
assert middleware.process_request(request) is None
assert request.meta == {"proxy": "https://example.org"}
assert b"Proxy-Authorization" not in request.headers
def test_change_proxy_remove_credentials_preremoved_header(self):
"""Corner case of proxy switch with credentials removal where the
credentials have been removed beforehand.
It ensures that our implementation does not assume that the credentials
header exists when trying to remove it.
"""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
request.meta["proxy"] = "https://example.org"
del request.headers[b"Proxy-Authorization"]
assert middleware.process_request(request) is None
assert request.meta == {"proxy": "https://example.org"}
assert b"Proxy-Authorization" not in request.headers
def test_proxy_authentication_header_undefined_proxy(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
headers={"Proxy-Authorization": "Basic foo"},
)
assert middleware.process_request(request) is None
assert "proxy" not in request.meta
assert b"Proxy-Authorization" not in request.headers
def test_proxy_authentication_header_disabled_proxy(self):
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
headers={"Proxy-Authorization": "Basic foo"},
meta={"proxy": None},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] is None
assert b"Proxy-Authorization" not in request.headers
def test_proxy_authentication_header_proxy_without_credentials(self):
"""As long as the proxy URL in request metadata remains the same, the
Proxy-Authorization header is used and kept, and may even be
changed."""
middleware = HttpProxyMiddleware()
request = Request(
"https://example.com",
headers={"Proxy-Authorization": "Basic foo"},
meta={"proxy": "https://example.com"},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic foo"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic foo"
request.headers["Proxy-Authorization"] = b"Basic bar"
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic bar"
def test_proxy_authentication_header_proxy_with_same_credentials(self):
middleware = HttpProxyMiddleware()
encoded_credentials = middleware._basic_auth_header(
"user1",
"password1",
)
request = Request(
"https://example.com",
headers={"Proxy-Authorization": b"Basic " + encoded_credentials},
meta={"proxy": "https://user1:password1@example.com"},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
assert request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials
def test_proxy_authentication_header_proxy_with_different_credentials(self):
middleware = HttpProxyMiddleware()
encoded_credentials1 = middleware._basic_auth_header(
"user1",
"password1",
)
request = Request(
"https://example.com",
headers={"Proxy-Authorization": b"Basic " + encoded_credentials1},
meta={"proxy": "https://user2:password2@example.com"},
)
assert middleware.process_request(request) is None
assert request.meta["proxy"] == "https://example.com"
encoded_credentials2 = middleware._basic_auth_header(
"user2",
"password2",
)
assert (
request.headers["Proxy-Authorization"] == b"Basic " + encoded_credentials2
)
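    def test_basic_auth_header_encoding(self):
        """Sanity-check sketch: for ASCII credentials, the value produced by
        _basic_auth_header() is plain base64 of "user:password", so it can
        be verified against the standard library directly."""
        import base64

        middleware = HttpProxyMiddleware()
        encoded_credentials = middleware._basic_auth_header("user1", "password1")
        assert encoded_credentials == base64.b64encode(b"user1:password1")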
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware.py | tests/test_downloadermiddleware.py | from __future__ import annotations
import asyncio
from contextlib import asynccontextmanager
from gzip import BadGzipFile
from typing import TYPE_CHECKING
from unittest import mock
import pytest
from twisted.internet.defer import Deferred, succeed
from scrapy.core.downloader.middleware import DownloaderMiddlewareManager
from scrapy.exceptions import ScrapyDeprecationWarning, _InvalidOutput
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.defer import deferred_f_from_coro_f, maybe_deferred_to_future
from scrapy.utils.python import to_bytes
from scrapy.utils.test import get_crawler, get_from_asyncio_queue
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
class TestManagerBase:
settings_dict = None
# should be a fixture but async fixtures that use Futures are problematic with pytest-twisted
@asynccontextmanager
async def get_mwman(self) -> AsyncGenerator[DownloaderMiddlewareManager]:
crawler = get_crawler(Spider, self.settings_dict)
crawler.spider = crawler._create_spider("foo")
mwman = DownloaderMiddlewareManager.from_crawler(crawler)
crawler.engine = crawler._create_engine()
await crawler.engine.open_spider_async()
try:
yield mwman
finally:
await crawler.engine.close_spider_async()
@staticmethod
async def _download(
mwman: DownloaderMiddlewareManager,
request: Request,
response: Response | None = None,
) -> Response | Request:
"""Executes downloader mw manager's download method and returns
the result (Request or Response) or raises exception in case of
failure.
"""
if not response:
response = Response(request.url)
async def download_func(request: Request) -> Response:
return response
return await mwman.download_async(download_func, request)
class TestDefaults(TestManagerBase):
"""Tests default behavior with default settings"""
@deferred_f_from_coro_f
async def test_request_response(self):
req = Request("http://example.com/index.html")
resp = Response(req.url, status=200)
async with self.get_mwman() as mwman:
ret = await self._download(mwman, req, resp)
assert isinstance(ret, Response), "Non-response returned"
@deferred_f_from_coro_f
async def test_3xx_and_invalid_gzipped_body_must_redirect(self):
"""Regression test for a failure when redirecting a compressed
request.
This happens when httpcompression middleware is executed before redirect
middleware and attempts to decompress a non-compressed body.
In particular when some website returns a 30x response with header
'Content-Encoding: gzip' giving as result the error below:
BadGzipFile: Not a gzipped file (...)
"""
req = Request("http://example.com")
body = b"<p>You are being redirected</p>"
resp = Response(
req.url,
status=302,
body=body,
headers={
"Content-Length": str(len(body)),
"Content-Type": "text/html",
"Content-Encoding": "gzip",
"Location": "http://example.com/login",
},
)
async with self.get_mwman() as mwman:
ret = await self._download(mwman, req, resp)
assert isinstance(ret, Request), f"Not redirected: {ret!r}"
assert to_bytes(ret.url) == resp.headers["Location"], (
"Not redirected to location header"
)
@deferred_f_from_coro_f
async def test_200_and_invalid_gzipped_body_must_fail(self):
req = Request("http://example.com")
body = b"<p>You are being redirected</p>"
resp = Response(
req.url,
status=200,
body=body,
headers={
"Content-Length": str(len(body)),
"Content-Type": "text/html",
"Content-Encoding": "gzip",
"Location": "http://example.com/login",
},
)
with pytest.raises(BadGzipFile):
async with self.get_mwman() as mwman:
await self._download(mwman, req, resp)
class TestResponseFromProcessRequest(TestManagerBase):
"""Tests middleware returning a response from process_request."""
@deferred_f_from_coro_f
async def test_download_func_not_called(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
download_func = mock.MagicMock()
class ResponseMiddleware:
def process_request(self, request):
return resp
async with self.get_mwman() as mwman:
mwman._add_middleware(ResponseMiddleware())
result = await mwman.download_async(download_func, req)
assert result is resp
assert not download_func.called
class TestResponseFromProcessException(TestManagerBase):
"""Tests middleware returning a response from process_exception."""
@deferred_f_from_coro_f
async def test_process_response_called(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
calls = []
def download_func(request):
raise ValueError("test")
class ResponseMiddleware:
def process_response(self, request, response):
calls.append("process_response")
return resp
def process_exception(self, request, exception):
calls.append("process_exception")
return resp
async with self.get_mwman() as mwman:
mwman._add_middleware(ResponseMiddleware())
result = await mwman.download_async(download_func, req)
assert result is resp
assert calls == [
"process_exception",
"process_response",
]
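        # The order above follows the downloader middleware contract: when
        # the download function raises, process_exception() runs first, and
        # the Response it returns is then fed through the process_response()
        # chain.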
class TestInvalidOutput(TestManagerBase):
@deferred_f_from_coro_f
async def test_invalid_process_request(self):
"""Invalid return value for process_request method should raise an exception"""
req = Request("http://example.com/index.html")
class InvalidProcessRequestMiddleware:
def process_request(self, request):
return 1
async with self.get_mwman() as mwman:
mwman._add_middleware(InvalidProcessRequestMiddleware())
with pytest.raises(_InvalidOutput):
await self._download(mwman, req)
@deferred_f_from_coro_f
async def test_invalid_process_response(self):
"""Invalid return value for process_response method should raise an exception"""
req = Request("http://example.com/index.html")
class InvalidProcessResponseMiddleware:
def process_response(self, request, response):
return 1
async with self.get_mwman() as mwman:
mwman._add_middleware(InvalidProcessResponseMiddleware())
with pytest.raises(_InvalidOutput):
await self._download(mwman, req)
@deferred_f_from_coro_f
async def test_invalid_process_exception(self):
"""Invalid return value for process_exception method should raise an exception"""
req = Request("http://example.com/index.html")
class InvalidProcessExceptionMiddleware:
def process_request(self, request):
raise RuntimeError
def process_exception(self, request, exception):
return 1
async with self.get_mwman() as mwman:
mwman._add_middleware(InvalidProcessExceptionMiddleware())
with pytest.raises(_InvalidOutput):
await self._download(mwman, req)
class TestMiddlewareUsingDeferreds(TestManagerBase):
"""Middlewares using Deferreds (deprecated) should work"""
@deferred_f_from_coro_f
async def test_deferred(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
download_func = mock.MagicMock()
class DeferredMiddleware:
def cb(self, result):
return result
def process_request(self, request):
d = Deferred()
d.addCallback(self.cb)
d.callback(resp)
return d
async with self.get_mwman() as mwman:
mwman._add_middleware(DeferredMiddleware())
with pytest.warns(
ScrapyDeprecationWarning,
match="returned a Deferred, this is deprecated",
):
result = await mwman.download_async(download_func, req)
assert result is resp
assert not download_func.called
class TestMiddlewareUsingCoro(TestManagerBase):
"""Middlewares using asyncio coroutines should work"""
@deferred_f_from_coro_f
async def test_asyncdef(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
download_func = mock.MagicMock()
class CoroMiddleware:
async def process_request(self, request):
await succeed(42)
return resp
async with self.get_mwman() as mwman:
mwman._add_middleware(CoroMiddleware())
result = await mwman.download_async(download_func, req)
assert result is resp
assert not download_func.called
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_asyncdef_asyncio(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
download_func = mock.MagicMock()
class CoroMiddleware:
async def process_request(self, request):
await asyncio.sleep(0.1)
return await get_from_asyncio_queue(resp)
async with self.get_mwman() as mwman:
mwman._add_middleware(CoroMiddleware())
result = await mwman.download_async(download_func, req)
assert result is resp
assert not download_func.called
class TestDownloadDeprecated(TestManagerBase):
@deferred_f_from_coro_f
async def test_mwman_download(self):
req = Request("http://example.com/index.html")
resp = Response(req.url, status=200)
def download_func(request: Request, spider: Spider) -> Deferred[Response]:
return succeed(resp)
async with self.get_mwman() as mwman:
with pytest.warns(
ScrapyDeprecationWarning,
match=r"DownloaderMiddlewareManager.download\(\) is deprecated, use download_async\(\) instead",
):
ret = await maybe_deferred_to_future(
mwman.download(download_func, req, mwman.crawler.spider)
)
assert isinstance(ret, Response)
class TestDeprecatedSpiderArg(TestManagerBase):
@deferred_f_from_coro_f
async def test_deprecated_spider_arg(self):
req = Request("http://example.com/index.html")
resp = Response("http://example.com/index.html")
download_func = mock.MagicMock()
class DeprecatedSpiderArgMiddleware:
def process_request(self, request, spider):
1 / 0
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
return resp
async with self.get_mwman() as mwman:
with (
pytest.warns(
ScrapyDeprecationWarning,
match=r"process_request\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"process_response\(\) requires a spider argument",
),
pytest.warns(
ScrapyDeprecationWarning,
match=r"process_exception\(\) requires a spider argument",
),
):
mwman._add_middleware(DeprecatedSpiderArgMiddleware())
result = await mwman.download_async(download_func, req)
assert result is resp
assert not download_func.called
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloader_handler_twisted_http10.py | tests/test_downloader_handler_twisted_http10.py | """Tests for scrapy.core.downloader.handlers.http10.HTTP10DownloadHandler."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.http import Request
from scrapy.utils.defer import deferred_f_from_coro_f
from tests.test_downloader_handlers_http_base import TestHttpBase, TestHttpProxyBase
if TYPE_CHECKING:
from scrapy.core.downloader.handlers import DownloadHandlerProtocol
from tests.mockserver.http import MockServer
class HTTP10DownloadHandlerMixin:
@property
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
return HTTP10DownloadHandler
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestHttp10(HTTP10DownloadHandlerMixin, TestHttpBase):
"""HTTP 1.0 test case"""
@deferred_f_from_coro_f
async def test_protocol(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/host", is_secure=self.is_secure), method="GET"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.protocol == "HTTP/1.0"
class TestHttps10(TestHttp10):
is_secure = True
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestHttp10Proxy(HTTP10DownloadHandlerMixin, TestHttpProxyBase):
@deferred_f_from_coro_f
async def test_download_with_proxy_https_timeout(self):
pytest.skip("Not implemented")
@deferred_f_from_coro_f
async def test_download_with_proxy_without_http_scheme(self):
pytest.skip("Not implemented")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/__init__.py | tests/__init__.py | """
tests: this package contains all Scrapy unittests
see https://docs.scrapy.org/en/latest/contributing.html#running-tests
"""
import os
import socket
from pathlib import Path
from twisted import version as TWISTED_VERSION
from twisted.python.versions import Version
# ignore system-wide proxies for tests
# which would send requests to a totally unsuspecting server
# (e.g. because urllib does not fully understand the proxy spec)
os.environ["http_proxy"] = ""
os.environ["https_proxy"] = ""
os.environ["ftp_proxy"] = ""
tests_datadir = str(Path(__file__).parent.resolve() / "sample_data")
# In some environments accessing a non-existing host doesn't raise an
# error. In such cases we're going to skip tests which rely on it.
try:
socket.getaddrinfo("non-existing-host", 80)
NON_EXISTING_RESOLVABLE = True
except socket.gaierror:
NON_EXISTING_RESOLVABLE = False
def get_testdata(*paths: str) -> bytes:
"""Return test data"""
return Path(tests_datadir, *paths).read_bytes()
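# Usage sketch: read a sample file bundled with the test suite, e.g.
#
#     html = get_testdata("test_site", "index.html")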
TWISTED_KEEPS_TRACEBACKS = TWISTED_VERSION >= Version("twisted", 24, 10, 0)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_spidermiddleware_httperror.py | tests/test_spidermiddleware_httperror.py | from __future__ import annotations
import logging
import pytest
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from scrapy.http import Request, Response
from scrapy.spidermiddlewares.httperror import HttpError, HttpErrorMiddleware
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import MockServerSpider
class _HttpErrorSpider(MockServerSpider):
name = "httperror"
bypass_status_codes: set[int] = set()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_urls = [
self.mockserver.url("/status?n=200"),
self.mockserver.url("/status?n=404"),
self.mockserver.url("/status?n=402"),
self.mockserver.url("/status?n=500"),
]
self.failed = set()
self.skipped = set()
self.parsed = set()
async def start(self):
for url in self.start_urls:
yield Request(url, self.parse, errback=self.on_error)
def parse(self, response):
self.parsed.add(response.url[-3:])
def on_error(self, failure):
if isinstance(failure.value, HttpError):
response = failure.value.response
if response.status in self.bypass_status_codes:
self.skipped.add(response.url[-3:])
return self.parse(response)
        # this assumes the failure has a response attached
self.failed.add(failure.value.response.url[-3:])
return failure
req = Request("http://scrapytest.org")
def _response(request: Request, status_code: int) -> Response:
return Response(request.url, status=status_code, request=request)
@pytest.fixture
def res200() -> Response:
return _response(req, 200)
@pytest.fixture
def res402() -> Response:
return _response(req, 402)
@pytest.fixture
def res404() -> Response:
return _response(req, 404)
class TestHttpErrorMiddleware:
@pytest.fixture
def mw(self) -> HttpErrorMiddleware:
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
return HttpErrorMiddleware.from_crawler(crawler)
def test_process_spider_input(
self, mw: HttpErrorMiddleware, res200: Response, res404: Response
) -> None:
mw.process_spider_input(res200)
with pytest.raises(HttpError):
mw.process_spider_input(res404)
def test_process_spider_exception(
self, mw: HttpErrorMiddleware, res404: Response
) -> None:
assert mw.process_spider_exception(res404, HttpError(res404)) == []
assert mw.process_spider_exception(res404, Exception()) is None
def test_handle_httpstatus_list(
self, mw: HttpErrorMiddleware, res404: Response
) -> None:
request = Request(
"http://scrapytest.org", meta={"handle_httpstatus_list": [404]}
)
res = _response(request, 404)
mw.process_spider_input(res)
assert mw.crawler.spider
mw.crawler.spider.handle_httpstatus_list = [404] # type: ignore[attr-defined]
mw.process_spider_input(res404)
class TestHttpErrorMiddlewareSettings:
"""Similar test, but with settings"""
@pytest.fixture
def mw(self) -> HttpErrorMiddleware:
crawler = get_crawler(DefaultSpider, {"HTTPERROR_ALLOWED_CODES": (402,)})
crawler.spider = crawler._create_spider()
return HttpErrorMiddleware.from_crawler(crawler)
def test_process_spider_input(
self,
mw: HttpErrorMiddleware,
res200: Response,
res402: Response,
res404: Response,
) -> None:
mw.process_spider_input(res200)
with pytest.raises(HttpError):
mw.process_spider_input(res404)
mw.process_spider_input(res402)
def test_meta_overrides_settings(self, mw: HttpErrorMiddleware) -> None:
request = Request(
"http://scrapytest.org", meta={"handle_httpstatus_list": [404]}
)
res404 = _response(request, 404)
res402 = _response(request, 402)
mw.process_spider_input(res404)
with pytest.raises(HttpError):
mw.process_spider_input(res402)
def test_spider_override_settings(
self, mw: HttpErrorMiddleware, res402: Response, res404: Response
) -> None:
assert mw.crawler.spider
mw.crawler.spider.handle_httpstatus_list = [404] # type: ignore[attr-defined]
mw.process_spider_input(res404)
with pytest.raises(HttpError):
mw.process_spider_input(res402)
class TestHttpErrorMiddlewareHandleAll:
@pytest.fixture
def mw(self) -> HttpErrorMiddleware:
crawler = get_crawler(DefaultSpider, {"HTTPERROR_ALLOW_ALL": True})
crawler.spider = crawler._create_spider()
return HttpErrorMiddleware.from_crawler(crawler)
def test_process_spider_input(
self,
mw: HttpErrorMiddleware,
res200: Response,
res404: Response,
) -> None:
mw.process_spider_input(res200)
mw.process_spider_input(res404)
def test_meta_overrides_settings(self, mw: HttpErrorMiddleware) -> None:
request = Request(
"http://scrapytest.org", meta={"handle_httpstatus_list": [404]}
)
res404 = _response(request, 404)
res402 = _response(request, 402)
mw.process_spider_input(res404)
with pytest.raises(HttpError):
mw.process_spider_input(res402)
def test_httperror_allow_all_false(self) -> None:
crawler = get_crawler(_HttpErrorSpider)
mw = HttpErrorMiddleware.from_crawler(crawler)
request_httpstatus_false = Request(
"http://scrapytest.org", meta={"handle_httpstatus_all": False}
)
request_httpstatus_true = Request(
"http://scrapytest.org", meta={"handle_httpstatus_all": True}
)
res404 = _response(request_httpstatus_false, 404)
res402 = _response(request_httpstatus_true, 402)
with pytest.raises(HttpError):
mw.process_spider_input(res404)
mw.process_spider_input(res402)
class TestHttpErrorMiddlewareIntegrational:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
@inlineCallbacks
def test_middleware_works(self):
crawler = get_crawler(_HttpErrorSpider)
yield crawler.crawl(mockserver=self.mockserver)
assert not crawler.spider.skipped
assert crawler.spider.parsed == {"200"}
assert crawler.spider.failed == {"404", "402", "500"}
get_value = crawler.stats.get_value
assert get_value("httperror/response_ignored_count") == 3
assert get_value("httperror/response_ignored_status_count/404") == 1
assert get_value("httperror/response_ignored_status_count/402") == 1
assert get_value("httperror/response_ignored_status_count/500") == 1
@inlineCallbacks
def test_logging(self):
crawler = get_crawler(_HttpErrorSpider)
with LogCapture() as log:
yield crawler.crawl(mockserver=self.mockserver, bypass_status_codes={402})
assert crawler.spider.parsed == {"200", "402"}
assert crawler.spider.skipped == {"402"}
assert crawler.spider.failed == {"404", "500"}
assert "Ignoring response <404" in str(log)
assert "Ignoring response <500" in str(log)
assert "Ignoring response <200" not in str(log)
assert "Ignoring response <402" not in str(log)
@inlineCallbacks
def test_logging_level(self):
# HttpError logs ignored responses with level INFO
crawler = get_crawler(_HttpErrorSpider)
with LogCapture(level=logging.INFO) as log:
yield crawler.crawl(mockserver=self.mockserver)
assert crawler.spider.parsed == {"200"}
assert crawler.spider.failed == {"404", "402", "500"}
assert "Ignoring response <402" in str(log)
assert "Ignoring response <404" in str(log)
assert "Ignoring response <500" in str(log)
assert "Ignoring response <200" not in str(log)
# with level WARNING, we shouldn't capture anything from HttpError
crawler = get_crawler(_HttpErrorSpider)
with LogCapture(level=logging.WARNING) as log:
yield crawler.crawl(mockserver=self.mockserver)
assert crawler.spider.parsed == {"200"}
assert crawler.spider.failed == {"404", "402", "500"}
assert "Ignoring response <402" not in str(log)
assert "Ignoring response <404" not in str(log)
assert "Ignoring response <500" not in str(log)
assert "Ignoring response <200" not in str(log)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_console.py | tests/test_utils_console.py | import pytest
from scrapy.utils.console import get_shell_embed_func
try:
import bpython
bpy = True
del bpython
except ImportError:
bpy = False
try:
import IPython
ipy = True
del IPython
except ImportError:
ipy = False
def test_get_shell_embed_func():
shell = get_shell_embed_func(["invalid"])
assert shell is None
shell = get_shell_embed_func(["invalid", "python"])
assert callable(shell)
assert shell.__name__ == "_embed_standard_shell"
@pytest.mark.skipif(not bpy, reason="bpython not available in testenv")
def test_get_shell_embed_func_bpython():
shell = get_shell_embed_func(["bpython"])
assert callable(shell)
assert shell.__name__ == "_embed_bpython_shell"
@pytest.mark.skipif(not ipy, reason="IPython not available in testenv")
def test_get_shell_embed_func_ipython():
# default shell should be 'ipython'
shell = get_shell_embed_func()
assert shell.__name__ == "_embed_ipython_shell"
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_command_shell.py | tests/test_command_shell.py | from __future__ import annotations
import os
import sys
from io import BytesIO
from pathlib import Path
from typing import TYPE_CHECKING, cast
import pytest
from pexpect.popen_spawn import PopenSpawn
from scrapy.utils.reactor import _asyncio_reactor_path
from tests import NON_EXISTING_RESOLVABLE, tests_datadir
from tests.utils.cmdline import proc
if TYPE_CHECKING:
from tests.mockserver.http import MockServer
class TestShellCommand:
def test_empty(self) -> None:
_, out, _ = proc("shell", "-c", "item")
assert "{}" in out
def test_response_body(self, mockserver: MockServer) -> None:
_, out, _ = proc("shell", mockserver.url("/text"), "-c", "response.body")
assert "Works" in out
def test_response_type_text(self, mockserver: MockServer) -> None:
_, out, _ = proc("shell", mockserver.url("/text"), "-c", "type(response)")
assert "TextResponse" in out
def test_response_type_html(self, mockserver: MockServer) -> None:
_, out, _ = proc("shell", mockserver.url("/html"), "-c", "type(response)")
assert "HtmlResponse" in out
def test_response_selector_html(self, mockserver: MockServer) -> None:
xpath = "response.xpath(\"//p[@class='one']/text()\").get()"
_, out, _ = proc("shell", mockserver.url("/html"), "-c", xpath)
assert out.strip() == "Works"
def test_response_encoding_gb18030(self, mockserver: MockServer) -> None:
_, out, _ = proc(
"shell", mockserver.url("/enc-gb18030"), "-c", "response.encoding"
)
assert out.strip() == "gb18030"
def test_redirect(self, mockserver: MockServer) -> None:
_, out, _ = proc("shell", mockserver.url("/redirect"), "-c", "response.url")
assert out.strip().endswith("/redirected")
def test_redirect_follow_302(self, mockserver: MockServer) -> None:
_, out, _ = proc(
"shell",
mockserver.url("/redirect-no-meta-refresh"),
"-c",
"response.status",
)
assert out.strip().endswith("200")
def test_redirect_not_follow_302(self, mockserver: MockServer) -> None:
_, out, _ = proc(
"shell",
"--no-redirect",
mockserver.url("/redirect-no-meta-refresh"),
"-c",
"response.status",
)
assert out.strip().endswith("302")
def test_fetch_redirect_follow_302(self, mockserver: MockServer) -> None:
"""Test that calling ``fetch(url)`` follows HTTP redirects by default."""
url = mockserver.url("/redirect-no-meta-refresh")
code = f"fetch('{url}')"
ret, out, err = proc("shell", "-c", code)
assert ret == 0, out
assert "Redirecting (302)" in err
assert "Crawled (200)" in err
def test_fetch_redirect_not_follow_302(self, mockserver: MockServer) -> None:
"""Test that calling ``fetch(url, redirect=False)`` disables automatic redirects."""
url = mockserver.url("/redirect-no-meta-refresh")
code = f"fetch('{url}', redirect=False)"
ret, out, err = proc("shell", "-c", code)
assert ret == 0, out
assert "Crawled (302)" in err
def test_request_replace(self, mockserver: MockServer) -> None:
url = mockserver.url("/text")
code = f"fetch('{url}') or fetch(response.request.replace(method='POST'))"
ret, out, _ = proc("shell", "-c", code)
assert ret == 0, out
def test_scrapy_import(self, mockserver: MockServer) -> None:
url = mockserver.url("/text")
code = f"fetch(scrapy.Request('{url}'))"
ret, out, _ = proc("shell", "-c", code)
assert ret == 0, out
def test_local_file(self) -> None:
filepath = Path(tests_datadir, "test_site", "index.html")
_, out, _ = proc("shell", str(filepath), "-c", "item")
assert "{}" in out
def test_local_nofile(self) -> None:
filepath = "file:///tests/sample_data/test_site/nothinghere.html"
ret, out, err = proc("shell", filepath, "-c", "item")
assert ret == 1, out or err
assert "No such file or directory" in err
def test_dns_failures(self, mockserver: MockServer) -> None:
if NON_EXISTING_RESOLVABLE:
pytest.skip("Non-existing hosts are resolvable")
url = "www.somedomainthatdoesntexi.st"
ret, out, err = proc("shell", url, "-c", "item")
assert ret == 1, out or err
assert "DNS lookup failed" in err
def test_shell_fetch_async(self, mockserver: MockServer) -> None:
url = mockserver.url("/html")
code = f"fetch('{url}')"
ret, _, err = proc(
"shell", "-c", code, "--set", f"TWISTED_REACTOR={_asyncio_reactor_path}"
)
assert ret == 0, err
assert "RuntimeError: There is no current event loop in thread" not in err
class TestInteractiveShell:
def test_fetch(self, mockserver: MockServer) -> None:
args = (
sys.executable,
"-m",
"scrapy.cmdline",
"shell",
)
env = os.environ.copy()
env["SCRAPY_PYTHON_SHELL"] = "python"
logfile = BytesIO()
# https://github.com/python/typeshed/issues/14915
p = PopenSpawn(args, env=cast("os._Environ", env), timeout=5)
p.logfile_read = logfile
p.expect_exact("Available Scrapy objects")
p.sendline(f"fetch('{mockserver.url('/')}')")
p.sendline("type(response)")
p.expect_exact("HtmlResponse")
p.sendeof()
p.wait()
logfile.seek(0)
assert "Traceback" not in logfile.read().decode()
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_exporters.py | tests/test_exporters.py | import dataclasses
import json
import marshal
import pickle
import re
import tempfile
from abc import ABC, abstractmethod
from datetime import datetime
from io import BytesIO
from typing import Any
import lxml.etree
import pytest
from itemadapter import ItemAdapter
from scrapy.exporters import (
BaseItemExporter,
CsvItemExporter,
JsonItemExporter,
JsonLinesItemExporter,
MarshalItemExporter,
PickleItemExporter,
PprintItemExporter,
PythonItemExporter,
XmlItemExporter,
)
from scrapy.item import Field, Item
from scrapy.utils.python import to_unicode
def custom_serializer(value):
return str(int(value) + 2)
class MyItem(Item):
name = Field()
age = Field()
class CustomFieldItem(Item):
name = Field()
age = Field(serializer=custom_serializer)
@dataclasses.dataclass
class MyDataClass:
name: str
age: int
@dataclasses.dataclass
class CustomFieldDataclass:
name: str
age: int = dataclasses.field(metadata={"serializer": custom_serializer})
class TestBaseItemExporter(ABC):
item_class: type = MyItem
custom_field_item_class: type = CustomFieldItem
def setup_method(self):
self.i = self.item_class(name="John\xa3", age="22")
self.output = BytesIO()
self.ie = self._get_exporter()
@abstractmethod
def _get_exporter(self, **kwargs) -> BaseItemExporter:
raise NotImplementedError
def _check_output(self): # noqa: B027
pass
def _assert_expected_item(self, exported_dict):
for k, v in exported_dict.items():
exported_dict[k] = to_unicode(v)
assert self.i == self.item_class(**exported_dict)
def _get_nonstring_types_item(self):
return {
"boolean": False,
"number": 22,
"time": datetime(2015, 1, 1, 1, 1, 1),
"float": 3.14,
}
def assertItemExportWorks(self, item):
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.finish_exporting()
        # Delete the item exporter object. Deleting it must not close the
        # output file handle; if it wrongly does, the follow-up interactions
        # with the handle will surface the issue.
del self.ie
self._check_output()
def test_export_item(self):
self.assertItemExportWorks(self.i)
def test_export_dict_item(self):
self.assertItemExportWorks(ItemAdapter(self.i).asdict())
def test_serialize_field(self):
a = ItemAdapter(self.i)
res = self.ie.serialize_field(a.get_field_meta("name"), "name", a["name"])
assert res == "John\xa3"
res = self.ie.serialize_field(a.get_field_meta("age"), "age", a["age"])
assert res == "22"
def test_fields_to_export(self):
ie = self._get_exporter(fields_to_export=["name"])
assert list(ie._get_serialized_fields(self.i)) == [("name", "John\xa3")]
ie = self._get_exporter(fields_to_export=["name"], encoding="latin-1")
_, name = next(iter(ie._get_serialized_fields(self.i)))
assert isinstance(name, str)
assert name == "John\xa3"
ie = self._get_exporter(fields_to_export={"name": "名稱"})
assert list(ie._get_serialized_fields(self.i)) == [("名稱", "John\xa3")]
def test_field_custom_serializer(self):
i = self.custom_field_item_class(name="John\xa3", age="22")
a = ItemAdapter(i)
ie = self._get_exporter()
assert (
ie.serialize_field(a.get_field_meta("name"), "name", a["name"])
== "John\xa3"
)
assert ie.serialize_field(a.get_field_meta("age"), "age", a["age"]) == "24"
class TestPythonItemExporter(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
return PythonItemExporter(**kwargs)
def test_invalid_option(self):
with pytest.raises(TypeError, match="Unexpected options: invalid_option"):
PythonItemExporter(invalid_option="something")
def test_nested_item(self):
i1 = self.item_class(name="Joseph", age="22")
i2 = {"name": "Maria", "age": i1}
i3 = self.item_class(name="Jesus", age=i2)
ie = self._get_exporter()
exported = ie.export_item(i3)
assert isinstance(exported, dict)
assert exported == {
"age": {"age": {"age": "22", "name": "Joseph"}, "name": "Maria"},
"name": "Jesus",
}
assert isinstance(exported["age"], dict)
assert isinstance(exported["age"]["age"], dict)
def test_export_list(self):
i1 = self.item_class(name="Joseph", age="22")
i2 = self.item_class(name="Maria", age=[i1])
i3 = self.item_class(name="Jesus", age=[i2])
ie = self._get_exporter()
exported = ie.export_item(i3)
assert exported == {
"age": [{"age": [{"age": "22", "name": "Joseph"}], "name": "Maria"}],
"name": "Jesus",
}
assert isinstance(exported["age"][0], dict)
assert isinstance(exported["age"][0]["age"][0], dict)
def test_export_item_dict_list(self):
i1 = self.item_class(name="Joseph", age="22")
i2 = {"name": "Maria", "age": [i1]}
i3 = self.item_class(name="Jesus", age=[i2])
ie = self._get_exporter()
exported = ie.export_item(i3)
assert exported == {
"age": [{"age": [{"age": "22", "name": "Joseph"}], "name": "Maria"}],
"name": "Jesus",
}
assert isinstance(exported["age"][0], dict)
assert isinstance(exported["age"][0]["age"][0], dict)
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
ie = self._get_exporter()
exported = ie.export_item(item)
assert exported == item
class TestPythonItemExporterDataclass(TestPythonItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestPprintItemExporter(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
return PprintItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(
eval(self.output.getvalue()) # pylint: disable=eval-used
)
class TestPprintItemExporterDataclass(TestPprintItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestPickleItemExporter(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
return PickleItemExporter(self.output, **kwargs)
def _check_output(self):
self._assert_expected_item(pickle.loads(self.output.getvalue()))
def test_export_multiple_items(self):
i1 = self.item_class(name="hello", age="world")
i2 = self.item_class(name="bye", age="world")
f = BytesIO()
ie = PickleItemExporter(f)
ie.start_exporting()
ie.export_item(i1)
ie.export_item(i2)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
f.seek(0)
assert self.item_class(**pickle.load(f)) == i1
assert self.item_class(**pickle.load(f)) == i2
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
fp = BytesIO()
ie = PickleItemExporter(fp)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
assert pickle.loads(fp.getvalue()) == item
class TestPickleItemExporterDataclass(TestPickleItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestMarshalItemExporter(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
self.output = tempfile.TemporaryFile()
return MarshalItemExporter(self.output, **kwargs)
def _check_output(self):
self.output.seek(0)
self._assert_expected_item(marshal.load(self.output))
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
item.pop("time") # datetime is not marshallable
fp = tempfile.TemporaryFile()
ie = MarshalItemExporter(fp)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
fp.seek(0)
assert marshal.load(fp) == item
class TestMarshalItemExporterDataclass(TestMarshalItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestCsvItemExporter(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
self.output = tempfile.TemporaryFile()
return CsvItemExporter(self.output, **kwargs)
def assertCsvEqual(self, first, second, msg=None):
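        # split_csv() tokenizes each line and sorts the tokens, so the
        # comparison ignores column order, which is not guaranteed when
        # exporting dict-like items.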
def split_csv(csv):
return [
sorted(re.split(r"(,|\s+)", line))
for line in to_unicode(csv).splitlines(True)
]
assert split_csv(first) == split_csv(second), msg
def _check_output(self):
self.output.seek(0)
self.assertCsvEqual(
to_unicode(self.output.read()), "age,name\r\n22,John\xa3\r\n"
)
def assertExportResult(self, item, expected, **kwargs):
fp = BytesIO()
ie = CsvItemExporter(fp, **kwargs)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
self.assertCsvEqual(fp.getvalue(), expected)
def test_header_export_all(self):
self.assertExportResult(
item=self.i,
fields_to_export=ItemAdapter(self.i).field_names(),
expected=b"age,name\r\n22,John\xc2\xa3\r\n",
)
def test_header_export_all_dict(self):
self.assertExportResult(
item=ItemAdapter(self.i).asdict(),
expected=b"age,name\r\n22,John\xc2\xa3\r\n",
)
def test_header_export_single_field(self):
for item in [self.i, ItemAdapter(self.i).asdict()]:
self.assertExportResult(
item=item,
fields_to_export=["age"],
expected=b"age\r\n22\r\n",
)
def test_header_export_two_items(self):
for item in [self.i, ItemAdapter(self.i).asdict()]:
output = BytesIO()
ie = CsvItemExporter(output)
ie.start_exporting()
ie.export_item(item)
ie.export_item(item)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
self.assertCsvEqual(
output.getvalue(), b"age,name\r\n22,John\xc2\xa3\r\n22,John\xc2\xa3\r\n"
)
def test_header_no_header_line(self):
for item in [self.i, ItemAdapter(self.i).asdict()]:
self.assertExportResult(
item=item,
include_headers_line=False,
expected=b"22,John\xc2\xa3\r\n",
)
def test_join_multivalue(self):
class TestItem2(Item):
name = Field()
friends = Field()
for cls in TestItem2, dict:
self.assertExportResult(
item=cls(name="John", friends=["Mary", "Paul"]),
include_headers_line=False,
expected='"Mary,Paul",John\r\n',
)
def test_join_multivalue_not_strings(self):
self.assertExportResult(
item={"name": "John", "friends": [4, 8]},
include_headers_line=False,
expected='"[4, 8]",John\r\n',
)
def test_nonstring_types_item(self):
self.assertExportResult(
item=self._get_nonstring_types_item(),
include_headers_line=False,
expected="22,False,3.14,2015-01-01 01:01:01\r\n",
)
def test_errors_default(self):
with pytest.raises(UnicodeEncodeError):
self.assertExportResult(
item={"text": "W\u0275\u200brd"},
expected=None,
encoding="windows-1251",
)
def test_errors_xmlcharrefreplace(self):
self.assertExportResult(
item={"text": "W\u0275\u200brd"},
include_headers_line=False,
expected="Wɵ​rd\r\n",
encoding="windows-1251",
errors="xmlcharrefreplace",
)
class TestCsvItemExporterDataclass(TestCsvItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestXmlItemExporter(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
return XmlItemExporter(self.output, **kwargs)
def assertXmlEquivalent(self, first, second, msg=None):
def xmltuple(elem):
children = list(elem.iterchildren())
if children:
return [(child.tag, sorted(xmltuple(child))) for child in children]
return [(elem.tag, [(elem.text, ())])]
def xmlsplit(xmlcontent):
doc = lxml.etree.fromstring(xmlcontent)
return xmltuple(doc)
assert xmlsplit(first) == xmlsplit(second), msg
def assertExportResult(self, item, expected_value):
fp = BytesIO()
ie = XmlItemExporter(fp)
ie.start_exporting()
ie.export_item(item)
ie.finish_exporting()
del ie # See the first “del self.ie” in this file for context.
self.assertXmlEquivalent(fp.getvalue(), expected_value)
def _check_output(self):
expected_value = (
b'<?xml version="1.0" encoding="utf-8"?>\n'
b"<items><item><age>22</age><name>John\xc2\xa3</name></item></items>"
)
self.assertXmlEquivalent(self.output.getvalue(), expected_value)
def test_multivalued_fields(self):
self.assertExportResult(
self.item_class(name=["John\xa3", "Doe"], age=[1, 2, 3]),
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<name><value>John\xc2\xa3</value><value>Doe</value></name>
<age><value>1</value><value>2</value><value>3</value></age>
</item>
</items>
""",
)
def test_nested_item(self):
i1 = {"name": "foo\xa3hoo", "age": "22"}
i2 = {"name": "bar", "age": i1}
i3 = self.item_class(name="buz", age=i2)
self.assertExportResult(
i3,
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<age>
<age>
<age>22</age>
<name>foo\xc2\xa3hoo</name>
</age>
<name>bar</name>
</age>
<name>buz</name>
</item>
</items>
""",
)
def test_nested_list_item(self):
i1 = {"name": "foo"}
i2 = {"name": "bar", "v2": {"egg": ["spam"]}}
i3 = self.item_class(name="buz", age=[i1, i2])
self.assertExportResult(
i3,
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<age>
<value><name>foo</name></value>
<value><name>bar</name><v2><egg><value>spam</value></egg></v2></value>
</age>
<name>buz</name>
</item>
</items>
""",
)
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
self.assertExportResult(
item,
b"""<?xml version="1.0" encoding="utf-8"?>\n
<items>
<item>
<float>3.14</float>
<boolean>False</boolean>
<number>22</number>
<time>2015-01-01 01:01:01</time>
</item>
</items>
""",
)
class TestXmlItemExporterDataclass(TestXmlItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestJsonLinesItemExporter(TestBaseItemExporter):
_expected_nested: Any = {
"name": "Jesus",
"age": {"name": "Maria", "age": {"name": "Joseph", "age": "22"}},
}
def _get_exporter(self, **kwargs):
return JsonLinesItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(to_unicode(self.output.getvalue().strip()))
assert exported == ItemAdapter(self.i).asdict()
def test_nested_item(self):
i1 = self.item_class(name="Joseph", age="22")
i2 = {"name": "Maria", "age": i1}
i3 = self.item_class(name="Jesus", age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
assert exported == self._expected_nested
def test_extra_keywords(self):
self.ie = self._get_exporter(sort_keys=True)
self.test_export_item()
self._check_output()
with pytest.raises(TypeError):
self._get_exporter(foo_unknown_keyword_bar=True)
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
item["time"] = str(item["time"])
assert exported == item
class TestJsonLinesItemExporterDataclass(TestJsonLinesItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestJsonItemExporter(TestJsonLinesItemExporter):
_expected_nested = [TestJsonLinesItemExporter._expected_nested]
def _get_exporter(self, **kwargs):
return JsonItemExporter(self.output, **kwargs)
def _check_output(self):
exported = json.loads(to_unicode(self.output.getvalue().strip()))
assert exported == [ItemAdapter(self.i).asdict()]
def assertTwoItemsExported(self, item):
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.export_item(item)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
assert exported == [ItemAdapter(item).asdict(), ItemAdapter(item).asdict()]
def test_two_items(self):
self.assertTwoItemsExported(self.i)
def test_two_dict_items(self):
self.assertTwoItemsExported(ItemAdapter(self.i).asdict())
def test_two_items_with_failure_between(self):
i1 = MyItem(name="Joseph\xa3", age="22")
        # 1j is not JSON serializable, so export fails reliably; invalid
        # datetimes were not used because they didn't fail consistently
        # across Python versions.
        i2 = MyItem(name="Maria", age=1j)
i3 = MyItem(name="Jesus", age="44")
self.ie.start_exporting()
self.ie.export_item(i1)
with pytest.raises(TypeError):
self.ie.export_item(i2)
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(to_unicode(self.output.getvalue()))
assert exported == [dict(i1), dict(i3)]
def test_nested_item(self):
i1 = self.item_class(name="Joseph\xa3", age="22")
i2 = self.item_class(name="Maria", age=i1)
i3 = self.item_class(name="Jesus", age=i2)
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
expected = {
"name": "Jesus",
"age": {"name": "Maria", "age": ItemAdapter(i1).asdict()},
}
assert exported == [expected]
def test_nested_dict_item(self):
i1 = {"name": "Joseph\xa3", "age": "22"}
i2 = self.item_class(name="Maria", age=i1)
i3 = {"name": "Jesus", "age": i2}
self.ie.start_exporting()
self.ie.export_item(i3)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
expected = {"name": "Jesus", "age": {"name": "Maria", "age": i1}}
assert exported == [expected]
def test_nonstring_types_item(self):
item = self._get_nonstring_types_item()
self.ie.start_exporting()
self.ie.export_item(item)
self.ie.finish_exporting()
del self.ie # See the first “del self.ie” in this file for context.
exported = json.loads(to_unicode(self.output.getvalue()))
item["time"] = str(item["time"])
assert exported == [item]
class TestJsonItemExporterToBytes(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
kwargs["encoding"] = "latin"
return JsonItemExporter(self.output, **kwargs)
def test_two_items_with_failure_between(self):
i1 = MyItem(name="Joseph", age="22")
i2 = MyItem(name="\u263a", age="11")
i3 = MyItem(name="Jesus", age="44")
self.ie.start_exporting()
self.ie.export_item(i1)
with pytest.raises(UnicodeEncodeError):
self.ie.export_item(i2)
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(to_unicode(self.output.getvalue(), encoding="latin"))
assert exported == [dict(i1), dict(i3)]
class TestJsonItemExporterDataclass(TestJsonItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
class TestCustomExporterItem:
item_class: type = MyItem
def setup_method(self):
if self.item_class is None:
pytest.skip("item class is None")
def test_exporter_custom_serializer(self):
class CustomItemExporter(BaseItemExporter):
def serialize_field(self, field, name, value):
if name == "age":
return str(int(value) + 1)
return super().serialize_field(field, name, value)
def export_item(self, item: Any) -> None:
pass
i = self.item_class(name="John", age="22")
a = ItemAdapter(i)
ie = CustomItemExporter()
assert ie.serialize_field(a.get_field_meta("name"), "name", a["name"]) == "John"
assert ie.serialize_field(a.get_field_meta("age"), "age", a["age"]) == "23"
i2 = {"name": "John", "age": "22"}
assert ie.serialize_field({}, "name", i2["name"]) == "John"
assert ie.serialize_field({}, "age", i2["age"]) == "23"
class TestCustomExporterDataclass(TestCustomExporterItem):
item_class = MyDataClass
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_http_request.py | tests/test_http_request.py | import json
import re
import warnings
import xmlrpc.client
from typing import Any
from unittest import mock
from urllib.parse import parse_qs, unquote_to_bytes
import pytest
from scrapy.http import (
FormRequest,
Headers,
HtmlResponse,
JsonRequest,
Request,
XmlRpcRequest,
)
from scrapy.http.request import NO_CALLBACK
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes, to_unicode
class TestRequest:
request_class = Request
default_method = "GET"
default_headers: dict[bytes, list[bytes]] = {}
default_meta: dict[str, Any] = {}
def test_init(self):
# Request requires url in the __init__ method
with pytest.raises(TypeError):
self.request_class()
        # url argument must be a string
with pytest.raises(TypeError):
self.request_class(123)
r = self.request_class("http://www.example.com")
r = self.request_class("http://www.example.com")
assert isinstance(r.url, str)
assert r.url == "http://www.example.com"
assert r.method == self.default_method
assert isinstance(r.headers, Headers)
assert r.headers == self.default_headers
assert r.meta == self.default_meta
meta = {"lala": "lolo"}
headers = {b"caca": b"coco"}
r = self.request_class(
"http://www.example.com", meta=meta, headers=headers, body="a body"
)
assert r.meta is not meta
assert r.meta == meta
assert r.headers is not headers
assert r.headers[b"caca"] == b"coco"
def test_url_scheme(self):
# This test passes by not raising any (ValueError) exception
self.request_class("http://example.org")
self.request_class("https://example.org")
self.request_class("s3://example.org")
self.request_class("ftp://example.org")
self.request_class("about:config")
self.request_class("data:,Hello%2C%20World!")
def test_url_no_scheme(self):
msg = "Missing scheme in request url:"
with pytest.raises(ValueError, match=msg):
self.request_class("foo")
with pytest.raises(ValueError, match=msg):
self.request_class("/foo/")
with pytest.raises(ValueError, match=msg):
self.request_class("/foo:bar")
def test_headers(self):
# Different ways of setting headers attribute
url = "http://www.scrapy.org"
headers = {b"Accept": "gzip", b"Custom-Header": "nothing to tell you"}
r = self.request_class(url=url, headers=headers)
p = self.request_class(url=url, headers=r.headers)
assert r.headers == p.headers
assert r.headers is not headers
assert p.headers is not r.headers
        # header names and values must be stored as bytes, not str
h = Headers({"key1": "val1", "key2": "val2"})
h["newkey"] = "newval"
for k, v in h.items():
assert isinstance(k, bytes)
for s in v:
assert isinstance(s, bytes)
def test_eq(self):
url = "http://www.scrapy.org"
r1 = self.request_class(url=url)
r2 = self.request_class(url=url)
assert r1 != r2
set_ = set()
set_.add(r1)
set_.add(r2)
assert len(set_) == 2
def test_url(self):
r = self.request_class(url="http://www.scrapy.org/path")
assert r.url == "http://www.scrapy.org/path"
def test_url_quoting(self):
r = self.request_class(url="http://www.scrapy.org/blank%20space")
assert r.url == "http://www.scrapy.org/blank%20space"
r = self.request_class(url="http://www.scrapy.org/blank space")
assert r.url == "http://www.scrapy.org/blank%20space"
def test_url_encoding(self):
r = self.request_class(url="http://www.scrapy.org/price/£")
assert r.url == "http://www.scrapy.org/price/%C2%A3"
def test_url_encoding_other(self):
# encoding affects only query part of URI, not path
# path part should always be UTF-8 encoded before percent-escaping
r = self.request_class(url="http://www.scrapy.org/price/£", encoding="utf-8")
assert r.url == "http://www.scrapy.org/price/%C2%A3"
r = self.request_class(url="http://www.scrapy.org/price/£", encoding="latin1")
assert r.url == "http://www.scrapy.org/price/%C2%A3"
def test_url_encoding_query(self):
r1 = self.request_class(url="http://www.scrapy.org/price/£?unit=µ")
assert r1.url == "http://www.scrapy.org/price/%C2%A3?unit=%C2%B5"
# should be same as above
r2 = self.request_class(
url="http://www.scrapy.org/price/£?unit=µ", encoding="utf-8"
)
assert r2.url == "http://www.scrapy.org/price/%C2%A3?unit=%C2%B5"
def test_url_encoding_query_latin1(self):
# encoding is used for encoding query-string before percent-escaping;
# path is still UTF-8 encoded before percent-escaping
r3 = self.request_class(
url="http://www.scrapy.org/price/µ?currency=£", encoding="latin1"
)
assert r3.url == "http://www.scrapy.org/price/%C2%B5?currency=%A3"
def test_url_encoding_nonutf8_untouched(self):
# percent-escaping sequences that do not match valid UTF-8 sequences
# should be kept untouched (just upper-cased perhaps)
#
# See https://datatracker.ietf.org/doc/html/rfc3987#section-3.2
#
# "Conversions from URIs to IRIs MUST NOT use any character encoding
# other than UTF-8 in steps 3 and 4, even if it might be possible to
# guess from the context that another character encoding than UTF-8 was
# used in the URI. For example, the URI
# "http://www.example.org/r%E9sum%E9.html" might with some guessing be
# interpreted to contain two e-acute characters encoded as iso-8859-1.
# It must not be converted to an IRI containing these e-acute
# characters. Otherwise, in the future the IRI will be mapped to
# "http://www.example.org/r%C3%A9sum%C3%A9.html", which is a different
# URI from "http://www.example.org/r%E9sum%E9.html".
r1 = self.request_class(url="http://www.scrapy.org/price/%a3")
assert r1.url == "http://www.scrapy.org/price/%a3"
r2 = self.request_class(url="http://www.scrapy.org/r%C3%A9sum%C3%A9/%a3")
assert r2.url == "http://www.scrapy.org/r%C3%A9sum%C3%A9/%a3"
r3 = self.request_class(url="http://www.scrapy.org/résumé/%a3")
assert r3.url == "http://www.scrapy.org/r%C3%A9sum%C3%A9/%a3"
r4 = self.request_class(url="http://www.example.org/r%E9sum%E9.html")
assert r4.url == "http://www.example.org/r%E9sum%E9.html"
def test_body(self):
r1 = self.request_class(url="http://www.example.com/")
assert r1.body == b""
r2 = self.request_class(url="http://www.example.com/", body=b"")
assert isinstance(r2.body, bytes)
assert r2.encoding == "utf-8" # default encoding
r3 = self.request_class(
url="http://www.example.com/", body="Price: \xa3100", encoding="utf-8"
)
assert isinstance(r3.body, bytes)
assert r3.body == b"Price: \xc2\xa3100"
r4 = self.request_class(
url="http://www.example.com/", body="Price: \xa3100", encoding="latin1"
)
assert isinstance(r4.body, bytes)
assert r4.body == b"Price: \xa3100"
def test_copy(self):
"""Test Request copy"""
def somecallback():
pass
r1 = self.request_class(
"http://www.example.com",
flags=["f1", "f2"],
callback=somecallback,
errback=somecallback,
)
r1.meta["foo"] = "bar"
r1.cb_kwargs["key"] = "value"
r2 = r1.copy()
        # make sure the copy preserves callbacks
        assert r1.callback is somecallback
        assert r1.errback is somecallback
        assert r2.callback is r1.callback
        assert r2.errback is r1.errback
# make sure flags list is shallow copied
assert r1.flags is not r2.flags, "flags must be a shallow copy, not identical"
assert r1.flags == r2.flags
# make sure cb_kwargs dict is shallow copied
assert r1.cb_kwargs is not r2.cb_kwargs, (
"cb_kwargs must be a shallow copy, not identical"
)
assert r1.cb_kwargs == r2.cb_kwargs
# make sure meta dict is shallow copied
assert r1.meta is not r2.meta, "meta must be a shallow copy, not identical"
assert r1.meta == r2.meta
# make sure headers attribute is shallow copied
assert r1.headers is not r2.headers, (
"headers must be a shallow copy, not identical"
)
assert r1.headers == r2.headers
assert r1.encoding == r2.encoding
assert r1.dont_filter == r2.dont_filter
        # Request.body can be shared between copies since it's an immutable
        # bytes object
def test_copy_inherited_classes(self):
"""Test Request children copies preserve their class"""
class CustomRequest(self.request_class):
pass
r1 = CustomRequest("http://www.example.com")
r2 = r1.copy()
assert isinstance(r2, CustomRequest)
def test_replace(self):
"""Test Request.replace() method"""
r1 = self.request_class("http://www.example.com", method="GET")
hdrs = Headers(r1.headers)
hdrs[b"key"] = b"value"
r2 = r1.replace(method="POST", body="New body", headers=hdrs)
assert r1.url == r2.url
assert (r1.method, r2.method) == ("GET", "POST")
assert (r1.body, r2.body) == (b"", b"New body")
assert (r1.headers, r2.headers) == (self.default_headers, hdrs)
# Empty attributes (which may fail if not compared properly)
r3 = self.request_class(
"http://www.example.com", meta={"a": 1}, dont_filter=True
)
r4 = r3.replace(
url="http://www.example.com/2", body=b"", meta={}, dont_filter=False
)
assert r4.url == "http://www.example.com/2"
assert r4.body == b""
assert r4.meta == {}
assert r4.dont_filter is False
def test_method_always_str(self):
r = self.request_class("http://www.example.com", method="POST")
assert isinstance(r.method, str)
def test_immutable_attributes(self):
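        # url and body are read-only properties; Request.replace() is the
        # supported way to derive a modified request.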
r = self.request_class("http://example.com")
with pytest.raises(AttributeError):
r.url = "http://example2.com"
with pytest.raises(AttributeError):
r.body = "xxx"
def test_callback_and_errback(self):
def a_function():
pass
r1 = self.request_class("http://example.com")
assert r1.callback is None
assert r1.errback is None
r2 = self.request_class("http://example.com", callback=a_function)
assert r2.callback is a_function
assert r2.errback is None
r3 = self.request_class("http://example.com", errback=a_function)
assert r3.callback is None
assert r3.errback is a_function
r4 = self.request_class(
url="http://example.com",
callback=a_function,
errback=a_function,
)
assert r4.callback is a_function
assert r4.errback is a_function
r5 = self.request_class(
url="http://example.com",
callback=NO_CALLBACK,
errback=NO_CALLBACK,
)
assert r5.callback is NO_CALLBACK
assert r5.errback is NO_CALLBACK
def test_callback_and_errback_type(self):
with pytest.raises(TypeError):
self.request_class("http://example.com", callback="a_function")
with pytest.raises(TypeError):
self.request_class("http://example.com", errback="a_function")
with pytest.raises(TypeError):
self.request_class(
url="http://example.com",
callback="a_function",
errback="a_function",
)
def test_no_callback(self):
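        # NO_CALLBACK is a sentinel marking requests whose responses are not
        # meant to be processed by a spider callback; calling it is an error.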
with pytest.raises(RuntimeError):
NO_CALLBACK()
def test_from_curl(self):
# Note: more curated tests regarding curl conversion are in
# `test_utils_curl.py`
curl_command = (
"curl 'http://httpbin.org/post' -X POST -H 'Cookie: _gauges_unique"
"_year=1; _gauges_unique=1; _gauges_unique_month=1; _gauges_unique"
"_hour=1; _gauges_unique_day=1' -H 'Origin: http://httpbin.org' -H"
" 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: en-US,en;q"
"=0.9,ru;q=0.8,es;q=0.7' -H 'Upgrade-Insecure-Requests: 1' -H 'Use"
"r-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTM"
"L, like Gecko) Ubuntu Chromium/62.0.3202.75 Chrome/62.0.3202.75 S"
"afari/537.36' -H 'Content-Type: application /x-www-form-urlencode"
"d' -H 'Accept: text/html,application/xhtml+xml,application/xml;q="
"0.9,image/webp,image/apng,*/*;q=0.8' -H 'Cache-Control: max-age=0"
"' -H 'Referer: http://httpbin.org/forms/post' -H 'Connection: kee"
"p-alive' --data 'custname=John+Smith&custtel=500&custemail=jsmith"
"%40example.org&size=small&topping=cheese&topping=onion&delivery=1"
"2%3A15&comments=' --compressed"
)
r = self.request_class.from_curl(curl_command)
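        # from_curl() extracts the Cookie header into request.cookies and
        # keeps the remaining curl headers on the request unchanged.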
assert r.method == "POST"
assert r.url == "http://httpbin.org/post"
assert (
r.body == b"custname=John+Smith&custtel=500&custemail=jsmith%40"
b"example.org&size=small&topping=cheese&topping=onion"
b"&delivery=12%3A15&comments="
)
assert r.cookies == {
"_gauges_unique_year": "1",
"_gauges_unique": "1",
"_gauges_unique_month": "1",
"_gauges_unique_hour": "1",
"_gauges_unique_day": "1",
}
assert r.headers == {
b"Origin": [b"http://httpbin.org"],
b"Accept-Encoding": [b"gzip, deflate"],
b"Accept-Language": [b"en-US,en;q=0.9,ru;q=0.8,es;q=0.7"],
b"Upgrade-Insecure-Requests": [b"1"],
b"User-Agent": [
b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537."
b"36 (KHTML, like Gecko) Ubuntu Chromium/62.0.3202"
b".75 Chrome/62.0.3202.75 Safari/537.36"
],
b"Content-Type": [b"application /x-www-form-urlencoded"],
b"Accept": [
b"text/html,application/xhtml+xml,application/xml;q=0."
b"9,image/webp,image/apng,*/*;q=0.8"
],
b"Cache-Control": [b"max-age=0"],
b"Referer": [b"http://httpbin.org/forms/post"],
b"Connection": [b"keep-alive"],
}
def test_from_curl_with_kwargs(self):
r = self.request_class.from_curl(
'curl -X PATCH "http://example.org"', method="POST", meta={"key": "value"}
)
assert r.method == "POST"
assert r.meta == {"key": "value"}
def test_from_curl_ignore_unknown_options(self):
        # By default, unknown options such as --foo and -z are ignored
with warnings.catch_warnings(): # avoid warning when executing tests
warnings.simplefilter("ignore")
r = self.request_class.from_curl(
'curl -X DELETE "http://example.org" --foo -z',
)
assert r.method == "DELETE"
        # With ignore_unknown_options=False, unknown options such as --foo
        # and -z raise a ValueError instead
with pytest.raises(ValueError, match="Unrecognized options:"):
self.request_class.from_curl(
'curl -X PATCH "http://example.org" --foo -z',
ignore_unknown_options=False,
)
class TestFormRequest(TestRequest):
request_class = FormRequest
def assertQueryEqual(self, first, second, msg=None):
first = to_unicode(first).split("&")
second = to_unicode(second).split("&")
assert sorted(first) == sorted(second), msg
def test_empty_formdata(self):
r1 = self.request_class("http://www.example.com", formdata={})
assert r1.body == b""
def test_formdata_overrides_querystring(self):
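        # With method="GET", formdata becomes the query string, replacing any
        # parameters already present in the URL (here a=0, b=1 and c=3).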
data = (("a", "one"), ("a", "two"), ("b", "2"))
url = self.request_class(
"http://www.example.com/?a=0&b=1&c=3#fragment", method="GET", formdata=data
).url.split("#", maxsplit=1)[0]
fs = _qs(self.request_class(url, method="GET", formdata=data))
assert set(fs[b"a"]) == {b"one", b"two"}
assert fs[b"b"] == [b"2"]
assert fs.get(b"c") is None
data = {"a": "1", "b": "2"}
fs = _qs(
self.request_class("http://www.example.com/", method="GET", formdata=data)
)
assert fs[b"a"] == [b"1"]
assert fs[b"b"] == [b"2"]
def test_default_encoding_bytes(self):
# using default encoding (utf-8)
data = {b"one": b"two", b"price": b"\xc2\xa3 100"}
r2 = self.request_class("http://www.example.com", formdata=data)
assert r2.method == "POST"
assert r2.encoding == "utf-8"
self.assertQueryEqual(r2.body, b"price=%C2%A3+100&one=two")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_default_encoding_textual_data(self):
# using default encoding (utf-8)
data = {"µ one": "two", "price": "£ 100"}
r2 = self.request_class("http://www.example.com", formdata=data)
assert r2.method == "POST"
assert r2.encoding == "utf-8"
self.assertQueryEqual(r2.body, b"price=%C2%A3+100&%C2%B5+one=two")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_default_encoding_mixed_data(self):
# using default encoding (utf-8)
data = {"\u00b5one": b"two", b"price\xc2\xa3": "\u00a3 100"}
r2 = self.request_class("http://www.example.com", formdata=data)
assert r2.method == "POST"
assert r2.encoding == "utf-8"
self.assertQueryEqual(r2.body, b"%C2%B5one=two&price%C2%A3=%C2%A3+100")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_custom_encoding_bytes(self):
data = {b"\xb5 one": b"two", b"price": b"\xa3 100"}
r2 = self.request_class(
"http://www.example.com", formdata=data, encoding="latin1"
)
assert r2.method == "POST"
assert r2.encoding == "latin1"
self.assertQueryEqual(r2.body, b"price=%A3+100&%B5+one=two")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_custom_encoding_textual_data(self):
data = {"price": "£ 100"}
r3 = self.request_class(
"http://www.example.com", formdata=data, encoding="latin1"
)
assert r3.encoding == "latin1"
assert r3.body == b"price=%A3+100"
def test_multi_key_values(self):
        # using multiple values for a single key
data = {"price": "\xa3 100", "colours": ["red", "blue", "green"]}
r3 = self.request_class("http://www.example.com", formdata=data)
self.assertQueryEqual(
r3.body, b"colours=red&colours=blue&colours=green&price=%C2%A3+100"
)
def test_from_response_post(self):
response = _buildresponse(
b"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req)
assert set(fs[b"test"]) == {b"val1", b"val2"}
assert set(fs[b"one"]) == {b"two", b"three"}
assert fs[b"test2"] == [b"xxx"]
assert fs[b"six"] == [b"seven"]
def test_from_response_post_nonascii_bytes_utf8(self):
response = _buildresponse(
b"""<form action="post.php" method="POST">
<input type="hidden" name="test \xc2\xa3" value="val1">
<input type="hidden" name="test \xc2\xa3" value="val2">
<input type="hidden" name="test2" value="xxx \xc2\xb5">
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req, to_unicode=True)
assert set(fs["test £"]) == {"val1", "val2"}
assert set(fs["one"]) == {"two", "three"}
assert fs["test2"] == ["xxx µ"]
assert fs["six"] == ["seven"]
def test_from_response_post_nonascii_bytes_latin1(self):
response = _buildresponse(
b"""<form action="post.php" method="POST">
<input type="hidden" name="test \xa3" value="val1">
<input type="hidden" name="test \xa3" value="val2">
<input type="hidden" name="test2" value="xxx \xb5">
</form>""",
url="http://www.example.com/this/list.html",
encoding="latin1",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req, to_unicode=True, encoding="latin1")
assert set(fs["test £"]) == {"val1", "val2"}
assert set(fs["one"]) == {"two", "three"}
assert fs["test2"] == ["xxx µ"]
assert fs["six"] == ["seven"]
def test_from_response_post_nonascii_unicode(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test £" value="val1">
<input type="hidden" name="test £" value="val2">
<input type="hidden" name="test2" value="xxx µ">
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req, to_unicode=True)
assert set(fs["test £"]) == {"val1", "val2"}
assert set(fs["one"]) == {"two", "three"}
assert fs["test2"] == ["xxx µ"]
assert fs["six"] == ["seven"]
def test_from_response_duplicate_form_key(self):
response = _buildresponse("<form></form>", url="http://www.example.com")
req = self.request_class.from_response(
response=response,
method="GET",
formdata=(("foo", "bar"), ("foo", "baz")),
)
assert urlparse_cached(req).hostname == "www.example.com"
assert urlparse_cached(req).query == "foo=bar&foo=baz"
def test_from_response_override_duplicate_form_key(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>"""
)
req = self.request_class.from_response(
response, formdata=(("two", "2"), ("two", "4"))
)
fs = _qs(req)
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2", b"4"]
def test_from_response_extra_headers(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>"""
)
req = self.request_class.from_response(
response=response,
formdata={"one": ["two", "three"], "six": "seven"},
headers={"Accept-Encoding": "gzip,deflate"},
)
assert req.method == "POST"
assert req.headers["Content-type"] == b"application/x-www-form-urlencoded"
assert req.headers["Accept-Encoding"] == b"gzip,deflate"
def test_from_response_get(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html",
)
r1 = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert r1.method == "GET"
assert urlparse_cached(r1).hostname == "www.example.com"
assert urlparse_cached(r1).path == "/this/get.php"
fs = _qs(r1)
assert set(fs[b"test"]) == {b"val1", b"val2"}
assert set(fs[b"one"]) == {b"two", b"three"}
assert fs[b"test2"] == [b"xxx"]
assert fs[b"six"] == [b"seven"]
def test_from_response_override_params(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>"""
)
req = self.request_class.from_response(response, formdata={"two": "2"})
fs = _qs(req)
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2"]
def test_from_response_drop_params(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>"""
)
req = self.request_class.from_response(response, formdata={"two": None})
fs = _qs(req)
assert fs[b"one"] == [b"1"]
assert b"two" not in fs
def test_from_response_override_method(self):
response = _buildresponse(
"""<html><body>
<form action="/app"></form>
</body></html>"""
)
request = FormRequest.from_response(response)
assert request.method == "GET"
request = FormRequest.from_response(response, method="POST")
assert request.method == "POST"
def test_from_response_override_url(self):
response = _buildresponse(
"""<html><body>
<form action="/app"></form>
</body></html>"""
)
request = FormRequest.from_response(response)
assert request.url == "http://example.com/app"
request = FormRequest.from_response(response, url="http://foo.bar/absolute")
assert request.url == "http://foo.bar/absolute"
request = FormRequest.from_response(response, url="/relative")
assert request.url == "http://example.com/relative"
def test_from_response_case_insensitive(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="SuBmIt" name="clickable1" value="clicked1">
<input type="iMaGe" name="i1" src="http://my.image.org/1.jpg">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
req = self.request_class.from_response(response)
fs = _qs(req)
assert fs[b"clickable1"] == [b"clicked1"]
assert b"i1" not in fs, fs # xpath in _get_inputs()
assert b"clickable2" not in fs, fs # xpath in _get_clickable()
def test_from_response_submit_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
req = self.request_class.from_response(response, formdata={"two": "2"})
fs = _qs(req)
assert fs[b"clickable1"] == [b"clicked1"]
assert b"clickable2" not in fs, fs
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2"]
def test_from_response_submit_not_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
req = self.request_class.from_response(
response, formdata={"two": "2"}, clickdata={"name": "clickable2"}
)
fs = _qs(req)
assert fs[b"clickable2"] == [b"clicked2"]
assert b"clickable1" not in fs, fs
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2"]
def test_from_response_dont_submit_image_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="image" name="i2" src="http://my.image.org/1.jpg">
<input type="submit" name="i3" value="i3v">
</form>"""
)
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"]}
def test_from_response_dont_submit_reset_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="text" name="i2" value="i2v">
<input type="reset" name="resetme">
<input type="submit" name="i3" value="i3v">
</form>"""
)
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"], b"i2": [b"i2v"]}
def test_from_response_clickdata_does_not_ignore_image(self):
response = _buildresponse(
"""<form>
<input type="text" name="i1" value="i1v">
<input id="image" name="i2" type="image" value="i2v" alt="Login" src="http://my.image.org/1.jpg">
</form>"""
)
req = self.request_class.from_response(response)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"], b"i2": [b"i2v"]}
def test_from_response_multiple_clickdata(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable" value="clicked1">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="one" value="clicked1">
<input type="hidden" name="two" value="clicked2">
</form>"""
)
req = self.request_class.from_response(
response, clickdata={"name": "clickable", "value": "clicked2"}
)
fs = _qs(req)
assert fs[b"clickable"] == [b"clicked2"]
assert fs[b"one"] == [b"clicked1"]
assert fs[b"two"] == [b"clicked2"]
def test_from_response_unicode_clickdata(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="price in \u00a3" value="\u00a3 1000">
<input type="submit" name="price in \u20ac" value="\u20ac 2000">
<input type="hidden" name="poundsign" value="\u00a3">
<input type="hidden" name="eurosign" value="\u20ac">
</form>"""
)
req = self.request_class.from_response(
response, clickdata={"name": "price in \u00a3"}
)
fs = _qs(req, to_unicode=True)
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_extension_throttle.py | tests/test_extension_throttle.py | from logging import INFO
from unittest.mock import Mock
import pytest
from scrapy import Request, Spider
from scrapy.exceptions import NotConfigured
from scrapy.extensions.throttle import AutoThrottle
from scrapy.http.response import Response
from scrapy.settings.default_settings import (
AUTOTHROTTLE_MAX_DELAY,
AUTOTHROTTLE_START_DELAY,
DOWNLOAD_DELAY,
)
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler as _get_crawler
UNSET = object()
def get_crawler(settings=None, spidercls=None):
settings = settings or {}
settings["AUTOTHROTTLE_ENABLED"] = True
return _get_crawler(settings_dict=settings, spidercls=spidercls)
@pytest.mark.parametrize(
("value", "expected"),
[
(UNSET, False),
(False, False),
(True, True),
],
)
def test_enabled(value, expected):
settings = {}
if value is not UNSET:
settings["AUTOTHROTTLE_ENABLED"] = value
crawler = _get_crawler(settings_dict=settings)
if expected:
build_from_crawler(AutoThrottle, crawler)
else:
with pytest.raises(NotConfigured):
build_from_crawler(AutoThrottle, crawler)
@pytest.mark.parametrize(
"value",
[
0.0,
-1.0,
],
)
def test_target_concurrency_invalid(value):
settings = {"AUTOTHROTTLE_TARGET_CONCURRENCY": value}
crawler = get_crawler(settings)
with pytest.raises(NotConfigured):
build_from_crawler(AutoThrottle, crawler)
@pytest.mark.parametrize(
("spider", "setting", "expected"),
[
(UNSET, UNSET, DOWNLOAD_DELAY),
(1.0, UNSET, 1.0),
(UNSET, 1.0, 1.0),
(1.0, 2.0, 1.0),
(3.0, 2.0, 3.0),
],
)
def test_mindelay_definition(spider, setting, expected):
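    # A spider-level download_delay attribute takes precedence over the
    # DOWNLOAD_DELAY setting when AutoThrottle picks its minimum delay.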
settings = {}
if setting is not UNSET:
settings["DOWNLOAD_DELAY"] = setting
class _TestSpider(Spider):
name = "test"
if spider is not UNSET:
_TestSpider.download_delay = spider
crawler = get_crawler(settings, _TestSpider)
at = build_from_crawler(AutoThrottle, crawler)
at._spider_opened(_TestSpider())
assert at.mindelay == expected
@pytest.mark.parametrize(
("value", "expected"),
[
(UNSET, AUTOTHROTTLE_MAX_DELAY),
(1.0, 1.0),
],
)
def test_maxdelay_definition(value, expected):
settings = {}
if value is not UNSET:
settings["AUTOTHROTTLE_MAX_DELAY"] = value
crawler = get_crawler(settings)
at = build_from_crawler(AutoThrottle, crawler)
at._spider_opened(DefaultSpider())
assert at.maxdelay == expected
@pytest.mark.parametrize(
("min_spider", "min_setting", "start_setting", "expected"),
[
(UNSET, UNSET, UNSET, AUTOTHROTTLE_START_DELAY),
(AUTOTHROTTLE_START_DELAY - 1.0, UNSET, UNSET, AUTOTHROTTLE_START_DELAY),
(AUTOTHROTTLE_START_DELAY + 1.0, UNSET, UNSET, AUTOTHROTTLE_START_DELAY + 1.0),
(UNSET, AUTOTHROTTLE_START_DELAY - 1.0, UNSET, AUTOTHROTTLE_START_DELAY),
(UNSET, AUTOTHROTTLE_START_DELAY + 1.0, UNSET, AUTOTHROTTLE_START_DELAY + 1.0),
(UNSET, UNSET, AUTOTHROTTLE_START_DELAY - 1.0, AUTOTHROTTLE_START_DELAY - 1.0),
(UNSET, UNSET, AUTOTHROTTLE_START_DELAY + 1.0, AUTOTHROTTLE_START_DELAY + 1.0),
(
AUTOTHROTTLE_START_DELAY + 1.0,
AUTOTHROTTLE_START_DELAY + 2.0,
UNSET,
AUTOTHROTTLE_START_DELAY + 1.0,
),
(
AUTOTHROTTLE_START_DELAY + 2.0,
UNSET,
AUTOTHROTTLE_START_DELAY + 1.0,
AUTOTHROTTLE_START_DELAY + 2.0,
),
(
AUTOTHROTTLE_START_DELAY + 1.0,
UNSET,
AUTOTHROTTLE_START_DELAY + 2.0,
AUTOTHROTTLE_START_DELAY + 2.0,
),
],
)
def test_startdelay_definition(min_spider, min_setting, start_setting, expected):
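    # The effective start delay is the larger of the minimum delay (spider
    # attribute or DOWNLOAD_DELAY setting) and AUTOTHROTTLE_START_DELAY.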
settings = {}
if min_setting is not UNSET:
settings["DOWNLOAD_DELAY"] = min_setting
if start_setting is not UNSET:
settings["AUTOTHROTTLE_START_DELAY"] = start_setting
class _TestSpider(Spider):
name = "test"
if min_spider is not UNSET:
_TestSpider.download_delay = min_spider
crawler = get_crawler(settings, _TestSpider)
at = build_from_crawler(AutoThrottle, crawler)
spider = _TestSpider()
at._spider_opened(spider)
assert spider.download_delay == expected
@pytest.mark.parametrize(
("meta", "slot"),
[
({}, None),
({"download_latency": 1.0}, None),
({"download_slot": "foo"}, None),
({"download_slot": "foo"}, "foo"),
({"download_latency": 1.0, "download_slot": "foo"}, None),
(
{
"download_latency": 1.0,
"download_slot": "foo",
"autothrottle_dont_adjust_delay": True,
},
"foo",
),
],
)
def test_skipped(meta, slot):
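    # Adjustment is skipped when download_latency is missing, when the
    # download slot is unknown, or when autothrottle_dont_adjust_delay is set.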
crawler = get_crawler()
at = build_from_crawler(AutoThrottle, crawler)
spider = DefaultSpider()
at._spider_opened(spider)
request = Request("https://example.com", meta=meta)
crawler.engine = Mock()
crawler.engine.downloader = Mock()
crawler.engine.downloader.slots = {}
if slot is not None:
crawler.engine.downloader.slots[slot] = object()
at._adjust_delay = None # Raise exception if called.
at._response_downloaded(None, request, spider)
@pytest.mark.parametrize(
("download_latency", "target_concurrency", "slot_delay", "expected"),
[
(2.0, 2.0, 1.0, 1.0),
(1.0, 2.0, 1.0, 0.75),
(4.0, 2.0, 1.0, 2.0),
(2.0, 1.0, 1.0, 2.0),
(2.0, 4.0, 1.0, 0.75),
(2.0, 2.0, 0.5, 1.0),
(2.0, 2.0, 2.0, 1.5),
],
)
def test_adjustment(download_latency, target_concurrency, slot_delay, expected):
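    # Expected values follow the documented AutoThrottle algorithm:
    # target_delay = download_latency / target_concurrency, then the new slot
    # delay is max(target_delay, (slot_delay + target_delay) / 2).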
settings = {"AUTOTHROTTLE_TARGET_CONCURRENCY": target_concurrency}
crawler = get_crawler(settings)
at = build_from_crawler(AutoThrottle, crawler)
spider = DefaultSpider()
at._spider_opened(spider)
meta = {"download_latency": download_latency, "download_slot": "foo"}
request = Request("https://example.com", meta=meta)
response = Response(request.url)
crawler.engine = Mock()
crawler.engine.downloader = Mock()
crawler.engine.downloader.slots = {}
slot = Mock()
slot.delay = slot_delay
crawler.engine.downloader.slots["foo"] = slot
at._response_downloaded(response, request, spider)
assert slot.delay == expected, f"{slot.delay} != {expected}"
@pytest.mark.parametrize(
("mindelay", "maxdelay", "expected"),
[
(0.5, 2.0, 1.0),
(0.25, 0.5, 0.5),
(2.0, 4.0, 2.0),
],
)
def test_adjustment_limits(mindelay, maxdelay, expected):
download_latency, target_concurrency, slot_delay = (2.0, 2.0, 1.0)
# expected adjustment without limits with these values: 1.0
settings = {
"AUTOTHROTTLE_MAX_DELAY": maxdelay,
"AUTOTHROTTLE_TARGET_CONCURRENCY": target_concurrency,
"DOWNLOAD_DELAY": mindelay,
}
crawler = get_crawler(settings)
at = build_from_crawler(AutoThrottle, crawler)
spider = DefaultSpider()
at._spider_opened(spider)
meta = {"download_latency": download_latency, "download_slot": "foo"}
request = Request("https://example.com", meta=meta)
response = Response(request.url)
crawler.engine = Mock()
crawler.engine.downloader = Mock()
crawler.engine.downloader.slots = {}
slot = Mock()
slot.delay = slot_delay
crawler.engine.downloader.slots["foo"] = slot
at._response_downloaded(response, request, spider)
assert slot.delay == expected, f"{slot.delay} != {expected}"
@pytest.mark.parametrize(
("download_latency", "target_concurrency", "slot_delay", "expected"),
[
(2.0, 2.0, 1.0, 1.0),
(1.0, 2.0, 1.0, 1.0), # Instead of 0.75
(4.0, 2.0, 1.0, 2.0),
],
)
def test_adjustment_bad_response(
download_latency, target_concurrency, slot_delay, expected
):
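    # A non-200 response may raise the delay but never lower it, hence 1.0
    # in the case where a 200 response would have produced 0.75.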
settings = {"AUTOTHROTTLE_TARGET_CONCURRENCY": target_concurrency}
crawler = get_crawler(settings)
at = build_from_crawler(AutoThrottle, crawler)
spider = DefaultSpider()
at._spider_opened(spider)
meta = {"download_latency": download_latency, "download_slot": "foo"}
request = Request("https://example.com", meta=meta)
response = Response(request.url, status=400)
crawler.engine = Mock()
crawler.engine.downloader = Mock()
crawler.engine.downloader.slots = {}
slot = Mock()
slot.delay = slot_delay
crawler.engine.downloader.slots["foo"] = slot
at._response_downloaded(response, request, spider)
assert slot.delay == expected, f"{slot.delay} != {expected}"
def test_debug(caplog):
settings = {"AUTOTHROTTLE_DEBUG": True}
crawler = get_crawler(settings)
at = build_from_crawler(AutoThrottle, crawler)
spider = DefaultSpider()
at._spider_opened(spider)
meta = {"download_latency": 1.0, "download_slot": "foo"}
request = Request("https://example.com", meta=meta)
response = Response(request.url, body=b"foo")
crawler.engine = Mock()
crawler.engine.downloader = Mock()
crawler.engine.downloader.slots = {}
slot = Mock()
slot.delay = 2.0
slot.transferring = (None, None)
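    # In the debug line, "conc" is the number of transferring requests in the
    # slot and "(-500)" is the delay change in milliseconds.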
crawler.engine.downloader.slots["foo"] = slot
caplog.clear()
with caplog.at_level(INFO):
at._response_downloaded(response, request, spider)
assert caplog.record_tuples == [
(
"scrapy.extensions.throttle",
INFO,
"slot: foo | conc: 2 | delay: 1500 ms (-500) | latency: 1000 ms | size: 3 bytes",
),
]
def test_debug_disabled(caplog):
crawler = get_crawler()
at = build_from_crawler(AutoThrottle, crawler)
spider = DefaultSpider()
at._spider_opened(spider)
meta = {"download_latency": 1.0, "download_slot": "foo"}
request = Request("https://example.com", meta=meta)
response = Response(request.url, body=b"foo")
crawler.engine = Mock()
crawler.engine.downloader = Mock()
crawler.engine.downloader.slots = {}
slot = Mock()
slot.delay = 2.0
slot.transferring = (None, None)
crawler.engine.downloader.slots["foo"] = slot
caplog.clear()
with caplog.at_level(INFO):
at._response_downloaded(response, request, spider)
assert caplog.record_tuples == []
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloader_handlers_http_base.py | tests/test_downloader_handlers_http_base.py | """Base classes for HTTP download handler tests."""
from __future__ import annotations
import gzip
import json
import sys
from abc import ABC, abstractmethod
from contextlib import asynccontextmanager
from http import HTTPStatus
from typing import TYPE_CHECKING, Any
from unittest import mock
import pytest
from testfixtures import LogCapture
from twisted.internet import defer, error
from twisted.web._newclient import ResponseFailed
from twisted.web.http import _DataLoss
from scrapy.http import Headers, HtmlResponse, Request, Response, TextResponse
from scrapy.utils.asyncio import call_later
from scrapy.utils.defer import (
deferred_f_from_coro_f,
deferred_from_coro,
maybe_deferred_to_future,
)
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
from tests import NON_EXISTING_RESOLVABLE
from tests.mockserver.proxy_echo import ProxyEchoMockServer
from tests.mockserver.simple_https import SimpleMockServer
from tests.spiders import SingleRequestSpider
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Generator
from scrapy.core.downloader.handlers import DownloadHandlerProtocol
from tests.mockserver.http import MockServer
class TestHttpBase(ABC):
is_secure = False
@property
@abstractmethod
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
raise NotImplementedError
@asynccontextmanager
async def get_dh(
self, settings_dict: dict[str, Any] | None = None
) -> AsyncGenerator[DownloadHandlerProtocol]:
crawler = get_crawler(DefaultSpider, settings_dict)
crawler.spider = crawler._create_spider()
dh = build_from_crawler(self.download_handler_cls, crawler)
try:
yield dh
finally:
await dh.close()
@deferred_f_from_coro_f
async def test_download(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"Works"
@deferred_f_from_coro_f
async def test_download_head(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/text", is_secure=self.is_secure), method="HEAD"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b""
@pytest.mark.parametrize(
"http_status",
[
pytest.param(http_status, id=f"status={http_status.value}")
for http_status in HTTPStatus
if http_status.value == 200 or http_status.value // 100 in (4, 5)
],
)
@deferred_f_from_coro_f
async def test_download_has_correct_http_status_code(
self, mockserver: MockServer, http_status: HTTPStatus
) -> None:
request = Request(
mockserver.url(f"/status?n={http_status.value}", is_secure=self.is_secure)
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == http_status.value
@deferred_f_from_coro_f
async def test_server_receives_correct_request_headers(
self, mockserver: MockServer
) -> None:
request_headers = {
# common request headers
"Accept": "text/html",
"Accept-Charset": "utf-8",
"Accept-Datetime": "Thu, 31 May 2007 20:35:00 GMT",
"Accept-Encoding": "gzip, deflate",
# custom headers
"X-Custom-Header": "Custom Value",
}
request = Request(
mockserver.url("/echo", is_secure=self.is_secure),
headers=request_headers,
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == HTTPStatus.OK
body = json.loads(response.body.decode("utf-8"))
assert "headers" in body
for header_name, header_value in request_headers.items():
assert header_name in body["headers"]
assert body["headers"][header_name] == [header_value]
@deferred_f_from_coro_f
async def test_server_receives_correct_request_body(
self, mockserver: MockServer
) -> None:
request_body = {
"message": "It works!",
}
request = Request(
mockserver.url("/echo", is_secure=self.is_secure),
body=json.dumps(request_body),
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == HTTPStatus.OK
body = json.loads(response.body.decode("utf-8"))
assert json.loads(body["body"]) == request_body
@deferred_f_from_coro_f
async def test_download_has_correct_response_headers(
self, mockserver: MockServer
) -> None:
# these headers will be set on the response in the resource and returned
response_headers = {
# common response headers
"Access-Control-Allow-Origin": "*",
"Allow": "Get, Head",
"Age": "12",
"Cache-Control": "max-age=3600",
"Content-Encoding": "gzip",
"Content-MD5": "Q2hlY2sgSW50ZWdyaXR5IQ==",
"Content-Type": "text/html; charset=utf-8",
"Date": "Date: Tue, 15 Nov 1994 08:12:31 GMT",
"Pragma": "no-cache",
"Retry-After": "120",
"Set-Cookie": "CookieName=CookieValue; Max-Age=3600; Version=1",
"WWW-Authenticate": "Basic",
# custom headers
"X-Custom-Header": "Custom Header Value",
}
request = Request(
mockserver.url("/response-headers", is_secure=self.is_secure),
headers={"content-type": "application/json"},
body=json.dumps(response_headers),
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 200
for header_name, header_value in response_headers.items():
assert header_name in response.headers, (
f"Response was missing expected header {header_name}"
)
assert response.headers[header_name] == bytes(
header_value, encoding="utf-8"
)
@deferred_f_from_coro_f
async def test_redirect_status(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/redirect", is_secure=self.is_secure))
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 302
@deferred_f_from_coro_f
async def test_redirect_status_head(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/redirect", is_secure=self.is_secure), method="HEAD"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 302
@deferred_f_from_coro_f
async def test_timeout_download_from_spider_nodata_rcvd(
self, mockserver: MockServer, reactor_pytest: str
) -> None:
if reactor_pytest == "asyncio" and sys.platform == "win32":
# https://twistedmatrix.com/trac/ticket/10279
pytest.skip(
"This test produces DirtyReactorAggregateError on Windows with asyncio"
)
# client connects but no data is received
meta = {"download_timeout": 0.5}
request = Request(mockserver.url("/wait", is_secure=self.is_secure), meta=meta)
async with self.get_dh() as download_handler:
d = deferred_from_coro(download_handler.download_request(request))
with pytest.raises((defer.TimeoutError, error.TimeoutError)):
await maybe_deferred_to_future(d)
@deferred_f_from_coro_f
async def test_timeout_download_from_spider_server_hangs(
self,
mockserver: MockServer,
reactor_pytest: str,
) -> None:
if reactor_pytest == "asyncio" and sys.platform == "win32":
# https://twistedmatrix.com/trac/ticket/10279
pytest.skip(
"This test produces DirtyReactorAggregateError on Windows with asyncio"
)
# client connects, server send headers and some body bytes but hangs
meta = {"download_timeout": 0.5}
request = Request(
mockserver.url("/hang-after-headers", is_secure=self.is_secure), meta=meta
)
async with self.get_dh() as download_handler:
d = deferred_from_coro(download_handler.download_request(request))
with pytest.raises((defer.TimeoutError, error.TimeoutError)):
await maybe_deferred_to_future(d)
@pytest.mark.parametrize("send_header", [True, False])
@deferred_f_from_coro_f
async def test_host_header(self, send_header: bool, mockserver: MockServer) -> None:
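        # Without an explicit Host header the handler derives one from the
        # URL for the wire request, leaving request.headers untouched.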
host_port = f"{mockserver.host}:{mockserver.port(is_secure=self.is_secure)}"
request = Request(
mockserver.url("/host", is_secure=self.is_secure),
headers={"Host": host_port} if send_header else {},
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == host_port.encode()
if send_header:
assert request.headers.get("Host") == host_port.encode()
else:
assert not request.headers
@deferred_f_from_coro_f
async def test_content_length_zero_bodyless_post_request_headers(
self, mockserver: MockServer
) -> None:
"""Tests if "Content-Length: 0" is sent for bodyless POST requests.
This is not strictly required by HTTP RFCs but can cause trouble
for some web servers.
See:
https://github.com/scrapy/scrapy/issues/823
https://issues.apache.org/jira/browse/TS-2902
https://github.com/kennethreitz/requests/issues/405
https://bugs.python.org/issue14721
"""
request = Request(
mockserver.url("/contentlength", is_secure=self.is_secure), method="POST"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"0"
@deferred_f_from_coro_f
async def test_content_length_zero_bodyless_post_only_one(
self, mockserver: MockServer
) -> None:
request = Request(
mockserver.url("/echo", is_secure=self.is_secure), method="POST"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
headers = Headers(json.loads(response.text)["headers"])
contentlengths = headers.getlist("Content-Length")
assert len(contentlengths) == 1
assert contentlengths == [b"0"]
@deferred_f_from_coro_f
async def test_payload(self, mockserver: MockServer) -> None:
body = b"1" * 100 # PayloadResource requires body length to be 100
request = Request(
mockserver.url("/payload", is_secure=self.is_secure),
method="POST",
body=body,
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == body
@deferred_f_from_coro_f
async def test_response_header_content_length(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/text", is_secure=self.is_secure), method="GET"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.headers[b"content-length"] == b"5"
@pytest.mark.parametrize(
("filename", "body", "response_class"),
[
("foo.html", b"", HtmlResponse),
("foo", b"<!DOCTYPE html>\n<title>.</title>", HtmlResponse),
],
)
@deferred_f_from_coro_f
async def test_response_class(
self,
filename: str,
body: bytes,
response_class: type[Response],
mockserver: MockServer,
) -> None:
request = Request(
mockserver.url(f"/{filename}", is_secure=self.is_secure), body=body
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert type(response) is response_class # pylint: disable=unidiomatic-typecheck
@deferred_f_from_coro_f
async def test_get_duplicate_header(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/duplicate-header", is_secure=self.is_secure))
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.headers.getlist(b"Set-Cookie") == [b"a=b", b"c=d"]
@deferred_f_from_coro_f
async def test_download_is_not_automatically_gzip_decoded(
self, mockserver: MockServer
) -> None:
"""Test download handler does not automatically decode content using the scheme provided in Content-Encoding header"""
data = "compress-me"
# send a request to mock resource that gzip encodes the "data" url parameter
request = Request(
mockserver.url(f"/compress?data={data}", is_secure=self.is_secure),
headers={
"accept-encoding": "gzip",
},
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 200
# check that the Content-Encoding header is gzip
content_encoding = response.headers[b"Content-Encoding"]
assert content_encoding == b"gzip"
# check that the response is still encoded
# by checking for the magic number that is always included at the start of a gzip encoding
# see https://datatracker.ietf.org/doc/html/rfc1952#page-5 section 2.3.1
GZIP_MAGIC = b"\x1f\x8b"
assert response.body[:2] == GZIP_MAGIC, "Response body was not in gzip format"
# check that a gzip decoding matches the data sent in the request
expected_decoding = bytes(data, encoding="utf-8")
assert gzip.decompress(response.body) == expected_decoding
@deferred_f_from_coro_f
async def test_no_cookie_processing_or_persistence(
self, mockserver: MockServer
) -> None:
cookie_name = "foo"
cookie_value = "bar"
# check that cookies are not modified
request = Request(
mockserver.url(
f"/set-cookie?{cookie_name}={cookie_value}", is_secure=self.is_secure
)
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 200
set_cookie = response.headers.get(b"Set-Cookie")
assert set_cookie == f"{cookie_name}={cookie_value}".encode()
# check that cookies are not sent in the next request
request = Request(mockserver.url("/echo", is_secure=self.is_secure))
response = await download_handler.download_request(request)
assert response.status == 200
headers = Headers(json.loads(response.text)["headers"])
assert "Cookie" not in headers
assert "cookie" not in headers
class TestHttp11Base(TestHttpBase):
"""HTTP 1.1 test case"""
@deferred_f_from_coro_f
async def test_download_without_maxsize_limit(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"Works"
@deferred_f_from_coro_f
async def test_response_class_choosing_request(
self, mockserver: MockServer
) -> None:
"""Tests choosing of correct response type
in case of Content-Type is empty but body contains text.
"""
body = b"Some plain text\ndata with tabs\t and null bytes\0"
request = Request(
mockserver.url("/nocontenttype", is_secure=self.is_secure), body=body
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert type(response) is TextResponse # pylint: disable=unidiomatic-typecheck
@deferred_f_from_coro_f
async def test_download_with_maxsize(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
        # 5 is the exact body size for this request ("Works"); the limit is
        # only counted on the response body, regardless of headers.
async with self.get_dh({"DOWNLOAD_MAXSIZE": 5}) as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"Works"
async with self.get_dh({"DOWNLOAD_MAXSIZE": 4}) as download_handler:
with pytest.raises((defer.CancelledError, error.ConnectionAborted)):
await download_handler.download_request(request)
@deferred_f_from_coro_f
async def test_download_with_maxsize_very_large_file(
self, mockserver: MockServer
) -> None:
# TODO: the logger check is specific to scrapy.core.downloader.handlers.http11
with mock.patch("scrapy.core.downloader.handlers.http11.logger") as logger:
request = Request(
mockserver.url("/largechunkedfile", is_secure=self.is_secure)
)
def check(logger: mock.Mock) -> None:
logger.warning.assert_called_once_with(mock.ANY, mock.ANY)
async with self.get_dh({"DOWNLOAD_MAXSIZE": 1_500}) as download_handler:
with pytest.raises((defer.CancelledError, error.ConnectionAborted)):
await download_handler.download_request(request)
# As the error message is logged in the dataReceived callback, we
# have to give a bit of time to the reactor to process the queue
# after closing the connection.
d: defer.Deferred[mock.Mock] = defer.Deferred()
d.addCallback(check)
call_later(0.1, d.callback, logger)
await maybe_deferred_to_future(d)
@deferred_f_from_coro_f
async def test_download_with_maxsize_per_req(self, mockserver: MockServer) -> None:
meta = {"download_maxsize": 2}
request = Request(mockserver.url("/text", is_secure=self.is_secure), meta=meta)
async with self.get_dh() as download_handler:
with pytest.raises((defer.CancelledError, error.ConnectionAborted)):
await download_handler.download_request(request)
@deferred_f_from_coro_f
async def test_download_with_small_maxsize_via_setting(
self, mockserver: MockServer
) -> None:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
async with self.get_dh({"DOWNLOAD_MAXSIZE": 2}) as download_handler:
with pytest.raises((defer.CancelledError, error.ConnectionAborted)):
await download_handler.download_request(request)
@deferred_f_from_coro_f
async def test_download_with_large_maxsize_via_setting(
self, mockserver: MockServer
) -> None:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
async with self.get_dh({"DOWNLOAD_MAXSIZE": 100}) as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"Works"
@deferred_f_from_coro_f
async def test_download_chunked_content(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/chunked", is_secure=self.is_secure))
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"chunked content\n"
@pytest.mark.parametrize("url", ["broken", "broken-chunked"])
@deferred_f_from_coro_f
async def test_download_cause_data_loss(
self, url: str, mockserver: MockServer
) -> None:
# TODO: this one checks for Twisted-specific exceptions
request = Request(mockserver.url(f"/{url}", is_secure=self.is_secure))
async with self.get_dh() as download_handler:
with pytest.raises(ResponseFailed) as exc_info:
await download_handler.download_request(request)
assert any(r.check(_DataLoss) for r in exc_info.value.reasons)
@pytest.mark.parametrize("url", ["broken", "broken-chunked"])
@deferred_f_from_coro_f
async def test_download_allow_data_loss(
self, url: str, mockserver: MockServer
) -> None:
request = Request(
mockserver.url(f"/{url}", is_secure=self.is_secure),
meta={"download_fail_on_dataloss": False},
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.flags == ["dataloss"]
@pytest.mark.parametrize("url", ["broken", "broken-chunked"])
@deferred_f_from_coro_f
async def test_download_allow_data_loss_via_setting(
self, url: str, mockserver: MockServer
) -> None:
request = Request(mockserver.url(f"/{url}", is_secure=self.is_secure))
async with self.get_dh(
{"DOWNLOAD_FAIL_ON_DATALOSS": False}
) as download_handler:
response = await download_handler.download_request(request)
assert response.flags == ["dataloss"]
@deferred_f_from_coro_f
async def test_protocol(self, mockserver: MockServer) -> None:
request = Request(
mockserver.url("/host", is_secure=self.is_secure), method="GET"
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.protocol == "HTTP/1.1"
class TestHttps11Base(TestHttp11Base):
is_secure = True
tls_log_message = (
'SSL connection certificate: issuer "/C=IE/O=Scrapy/CN=localhost", '
'subject "/C=IE/O=Scrapy/CN=localhost"'
)
@deferred_f_from_coro_f
async def test_tls_logging(self, mockserver: MockServer) -> None:
request = Request(mockserver.url("/text", is_secure=self.is_secure))
async with self.get_dh(
{"DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING": True}
) as download_handler:
with LogCapture() as log_capture:
response = await download_handler.download_request(request)
assert response.body == b"Works"
log_capture.check_present(
("scrapy.core.downloader.tls", "DEBUG", self.tls_log_message)
)
class TestSimpleHttpsBase(ABC):
"""Base class for special cases tested with just one simple request"""
keyfile = "keys/localhost.key"
certfile = "keys/localhost.crt"
host = "localhost"
cipher_string: str | None = None
@pytest.fixture(scope="class")
def simple_mockserver(self) -> Generator[SimpleMockServer]:
with SimpleMockServer(
self.keyfile, self.certfile, self.cipher_string
) as simple_mockserver:
yield simple_mockserver
@pytest.fixture(scope="class")
def url(self, simple_mockserver: SimpleMockServer) -> str:
# need to use self.host instead of what mockserver returns
return f"https://{self.host}:{simple_mockserver.port(is_secure=True)}/file"
@property
@abstractmethod
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
raise NotImplementedError
@asynccontextmanager
async def get_dh(self) -> AsyncGenerator[DownloadHandlerProtocol]:
if self.cipher_string is not None:
settings_dict = {"DOWNLOADER_CLIENT_TLS_CIPHERS": self.cipher_string}
else:
settings_dict = None
crawler = get_crawler(DefaultSpider, settings_dict=settings_dict)
crawler.spider = crawler._create_spider()
dh = build_from_crawler(self.download_handler_cls, crawler)
try:
yield dh
finally:
await dh.close()
@deferred_f_from_coro_f
async def test_download(self, url: str) -> None:
request = Request(url)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.body == b"0123456789"
class TestHttpsWrongHostnameBase(TestSimpleHttpsBase):
# above tests use a server certificate for "localhost",
# client connection to "localhost" too.
# here we test that even if the server certificate is for another domain,
# "www.example.com" in this case,
# the tests still pass
keyfile = "keys/example-com.key.pem"
certfile = "keys/example-com.cert.pem"
class TestHttpsInvalidDNSIdBase(TestSimpleHttpsBase):
"""Connect to HTTPS hosts with IP while certificate uses domain names IDs."""
host = "127.0.0.1"
class TestHttpsInvalidDNSPatternBase(TestSimpleHttpsBase):
"""Connect to HTTPS hosts where the certificate are issued to an ip instead of a domain."""
keyfile = "keys/localhost.ip.key"
certfile = "keys/localhost.ip.crt"
class TestHttpsCustomCiphersBase(TestSimpleHttpsBase):
cipher_string = "CAMELLIA256-SHA"
class TestHttpWithCrawlerBase(ABC):
@property
@abstractmethod
def settings_dict(self) -> dict[str, Any] | None:
raise NotImplementedError
is_secure = False
@deferred_f_from_coro_f
async def test_download_with_content_length(self, mockserver: MockServer) -> None:
crawler = get_crawler(SingleRequestSpider, self.settings_dict)
        # The /partial resource sets Content-Length to 1024; download_maxsize
        # is set to 1000 so the download is aborted before completion.
await maybe_deferred_to_future(
crawler.crawl(
seed=Request(
url=mockserver.url("/partial", is_secure=self.is_secure),
meta={"download_maxsize": 1000},
)
)
)
assert crawler.spider
failure = crawler.spider.meta["failure"] # type: ignore[attr-defined]
assert isinstance(failure.value, defer.CancelledError)
@deferred_f_from_coro_f
async def test_download(self, mockserver: MockServer) -> None:
crawler = get_crawler(SingleRequestSpider, self.settings_dict)
await maybe_deferred_to_future(
crawler.crawl(
seed=Request(url=mockserver.url("", is_secure=self.is_secure))
)
)
assert crawler.spider
failure = crawler.spider.meta.get("failure") # type: ignore[attr-defined]
assert failure is None
reason = crawler.spider.meta["close_reason"] # type: ignore[attr-defined]
assert reason == "finished"
class TestHttpProxyBase(ABC):
is_secure = False
expected_http_proxy_request_body = b"http://example.com"
@property
@abstractmethod
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
raise NotImplementedError
@pytest.fixture(scope="session")
def proxy_mockserver(self) -> Generator[ProxyEchoMockServer]:
with ProxyEchoMockServer() as proxy:
yield proxy
@asynccontextmanager
async def get_dh(self) -> AsyncGenerator[DownloadHandlerProtocol]:
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
dh = build_from_crawler(self.download_handler_cls, crawler)
try:
yield dh
finally:
await dh.close()
@deferred_f_from_coro_f
async def test_download_with_proxy(
self, proxy_mockserver: ProxyEchoMockServer
) -> None:
http_proxy = proxy_mockserver.url("", is_secure=self.is_secure)
request = Request("http://example.com", meta={"proxy": http_proxy})
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 200
assert response.url == request.url
assert response.body == self.expected_http_proxy_request_body
@deferred_f_from_coro_f
async def test_download_without_proxy(
self, proxy_mockserver: ProxyEchoMockServer
) -> None:
request = Request(
proxy_mockserver.url("/path/to/resource", is_secure=self.is_secure)
)
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 200
assert response.url == request.url
assert response.body == b"/path/to/resource"
@deferred_f_from_coro_f
async def test_download_with_proxy_https_timeout(
self, proxy_mockserver: ProxyEchoMockServer
) -> None:
if NON_EXISTING_RESOLVABLE:
pytest.skip("Non-existing hosts are resolvable")
http_proxy = proxy_mockserver.url("", is_secure=self.is_secure)
domain = "https://no-such-domain.nosuch"
request = Request(domain, meta={"proxy": http_proxy, "download_timeout": 0.2})
async with self.get_dh() as download_handler:
with pytest.raises(error.TimeoutError) as exc_info:
await download_handler.download_request(request)
assert domain in exc_info.value.osError
@deferred_f_from_coro_f
async def test_download_with_proxy_without_http_scheme(
self, proxy_mockserver: ProxyEchoMockServer
) -> None:
http_proxy = f"{proxy_mockserver.host}:{proxy_mockserver.port()}"
request = Request("http://example.com", meta={"proxy": http_proxy})
async with self.get_dh() as download_handler:
response = await download_handler.download_request(request)
assert response.status == 200
assert response.url == request.url
assert response.body == self.expected_http_proxy_request_body
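class TestHttp11Proxy(TestHttpProxyBase):
    # Wiring sketch (an illustrative addition): HTTP11DownloadHandler is the
    # stock Twisted-based handler, but any DownloadHandlerProtocol
    # implementation could be returned here instead.
    @property
    def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
        from scrapy.core.downloader.handlers.http11 import (  # noqa: PLC0415
            HTTP11DownloadHandler,
        )
        return HTTP11DownloadHandler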
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloader_handler_twisted_ftp.py | tests/test_downloader_handler_twisted_ftp.py | from __future__ import annotations
import os
import sys
from abc import ABC, abstractmethod
from pathlib import Path
from tempfile import mkstemp
from typing import TYPE_CHECKING, Any
import pytest
from pytest_twisted import async_yield_fixture
from twisted.cred import checkers, credentials, portal
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
from scrapy.http import HtmlResponse, Request, Response
from scrapy.http.response.text import TextResponse
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.misc import build_from_crawler
from scrapy.utils.python import to_bytes
from scrapy.utils.test import get_crawler
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Generator
from twisted.protocols.ftp import FTPFactory
class TestFTPBase(ABC):
username = "scrapy"
password = "passwd"
req_meta: dict[str, Any] = {"ftp_user": username, "ftp_password": password}
test_files = (
("file.txt", b"I have the power!"),
("file with spaces.txt", b"Moooooooooo power!"),
("html-file-without-extension", b"<!DOCTYPE html>\n<title>.</title>"),
)
@abstractmethod
def _create_files(self, root: Path) -> None:
raise NotImplementedError
@abstractmethod
def _get_factory(self, tmp_path: Path) -> FTPFactory:
raise NotImplementedError
@async_yield_fixture
async def server_url(self, tmp_path: Path) -> AsyncGenerator[str]:
from twisted.internet import reactor
self._create_files(tmp_path)
factory = self._get_factory(tmp_path)
port = reactor.listenTCP(0, factory, interface="127.0.0.1")
portno = port.getHost().port
yield f"https://127.0.0.1:{portno}/"
await port.stopListening()
@staticmethod
@pytest.fixture
def dh() -> Generator[FTPDownloadHandler]:
crawler = get_crawler()
dh = build_from_crawler(FTPDownloadHandler, crawler)
yield dh
# if the test was skipped, there will be no client attribute
if hasattr(dh, "client"):
assert dh.client.transport
dh.client.transport.loseConnection()
@deferred_f_from_coro_f
async def test_ftp_download_success(
self, server_url: str, dh: FTPDownloadHandler
) -> None:
request = Request(url=server_url + "file.txt", meta=self.req_meta)
r = await dh.download_request(request)
assert r.status == 200
assert r.body == b"I have the power!"
assert r.headers == {b"Local Filename": [b""], b"Size": [b"17"]}
assert r.protocol is None
@deferred_f_from_coro_f
async def test_ftp_download_path_with_spaces(
self, server_url: str, dh: FTPDownloadHandler
) -> None:
request = Request(
url=server_url + "file with spaces.txt",
meta=self.req_meta,
)
r = await dh.download_request(request)
assert r.status == 200
assert r.body == b"Moooooooooo power!"
assert r.headers == {b"Local Filename": [b""], b"Size": [b"18"]}
@deferred_f_from_coro_f
async def test_ftp_download_nonexistent(
self, server_url: str, dh: FTPDownloadHandler
) -> None:
request = Request(url=server_url + "nonexistent.txt", meta=self.req_meta)
r = await dh.download_request(request)
assert r.status == 404
assert r.body == b"['550 nonexistent.txt: No such file or directory.']"
@deferred_f_from_coro_f
async def test_ftp_local_filename(
self, server_url: str, dh: FTPDownloadHandler
) -> None:
f, local_fname = mkstemp()
fname_bytes = to_bytes(local_fname)
local_path = Path(local_fname)
os.close(f)
meta = {"ftp_local_filename": fname_bytes}
meta.update(self.req_meta)
request = Request(url=server_url + "file.txt", meta=meta)
r = await dh.download_request(request)
assert r.body == fname_bytes
assert r.headers == {b"Local Filename": [fname_bytes], b"Size": [b"17"]}
assert local_path.exists()
assert local_path.read_bytes() == b"I have the power!"
local_path.unlink()
@pytest.mark.parametrize(
("filename", "response_class"),
[
("file.txt", TextResponse),
("html-file-without-extension", HtmlResponse),
],
)
@deferred_f_from_coro_f
async def test_response_class(
self,
filename: str,
response_class: type[Response],
server_url: str,
dh: FTPDownloadHandler,
) -> None:
f, local_fname = mkstemp()
local_fname_path = Path(local_fname)
os.close(f)
meta = {}
meta.update(self.req_meta)
request = Request(url=server_url + filename, meta=meta)
r = await dh.download_request(request)
assert type(r) is response_class # pylint: disable=unidiomatic-typecheck
local_fname_path.unlink()
class TestFTP(TestFTPBase):
def _create_files(self, root: Path) -> None:
userdir = root / self.username
userdir.mkdir()
for filename, content in self.test_files:
(userdir / filename).write_bytes(content)
    def _get_factory(self, tmp_path: Path) -> FTPFactory:
        from twisted.protocols.ftp import FTPFactory, FTPRealm
        realm = FTPRealm(anonymousRoot=str(tmp_path), userHome=str(tmp_path))
p = portal.Portal(realm)
users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
users_checker.addUser(self.username, self.password)
p.registerChecker(users_checker, credentials.IUsernamePassword)
return FTPFactory(portal=p)
@deferred_f_from_coro_f
async def test_invalid_credentials(
self, server_url: str, dh: FTPDownloadHandler, reactor_pytest: str
) -> None:
if reactor_pytest == "asyncio" and sys.platform == "win32":
pytest.skip(
"This test produces DirtyReactorAggregateError on Windows with asyncio"
)
from twisted.protocols.ftp import ConnectionLost
meta = dict(self.req_meta)
meta.update({"ftp_password": "invalid"})
request = Request(url=server_url + "file.txt", meta=meta)
with pytest.raises(ConnectionLost):
await dh.download_request(request)
class TestAnonymousFTP(TestFTPBase):
username = "anonymous"
req_meta = {}
def _create_files(self, root: Path) -> None:
for filename, content in self.test_files:
(root / filename).write_bytes(content)
    def _get_factory(self, tmp_path: Path) -> FTPFactory:
from twisted.protocols.ftp import FTPFactory, FTPRealm
realm = FTPRealm(anonymousRoot=str(tmp_path))
p = portal.Portal(realm)
p.registerChecker(checkers.AllowAnonymousAccess(), credentials.IAnonymous)
return FTPFactory(portal=p, userAnonymous=self.username)
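def _example_authenticated_request(server_url: str) -> Request:
    # Usage sketch (an illustrative addition, unused by the tests):
    # FTPDownloadHandler reads credentials from request.meta, exactly like
    # req_meta does in TestFTPBase above.
    return Request(
        url=server_url + "file.txt",
        meta={"ftp_user": "scrapy", "ftp_password": "passwd"},
    )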
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_pipeline_media.py | tests/test_pipeline_media.py | from __future__ import annotations
import warnings
from unittest.mock import MagicMock
import pytest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.http import Request, Response
from scrapy.http.request import NO_CALLBACK
from scrapy.pipelines.files import FileException
from scrapy.pipelines.media import MediaPipeline
from scrapy.utils.defer import _defer_sleep_async, deferred_f_from_coro_f
from scrapy.utils.log import failure_to_exc_info
from scrapy.utils.signal import disconnect_all
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
async def _mocked_download_func(request):
assert request.callback is NO_CALLBACK
response = request.meta.get("response")
if callable(response):
response = await response()
if isinstance(response, Exception):
raise response
return response
class UserDefinedPipeline(MediaPipeline):
def media_to_download(self, request, info, *, item=None):
pass
def get_media_requests(self, item, info):
pass
def media_downloaded(self, response, request, info, *, item=None):
return {}
def media_failed(self, failure, request, info):
failure.raiseException()
def file_path(self, request, response=None, info=None, *, item=None):
return ""
class TestBaseMediaPipeline:
pipeline_class = UserDefinedPipeline
settings = None
def setup_method(self):
crawler = get_crawler(DefaultSpider, self.settings)
crawler.spider = crawler._create_spider()
crawler.engine = MagicMock(download_async=_mocked_download_func)
self.pipe = self.pipeline_class.from_crawler(crawler)
self.pipe.open_spider()
self.info = self.pipe.spiderinfo
self.fingerprint = crawler.request_fingerprinter.fingerprint
def teardown_method(self):
for name, signal in vars(signals).items():
if not name.startswith("_"):
disconnect_all(signal)
def test_modify_media_request(self):
request = Request("http://url")
self.pipe._modify_media_request(request)
assert request.meta == {"handle_httpstatus_all": True}
def test_should_remove_req_res_references_before_caching_the_results(self):
"""Regression test case to prevent a memory leak in the Media Pipeline.
The memory leak is triggered when an exception is raised when a Response
scheduled by the Media Pipeline is being returned. For example, when a
FileException('download-error') is raised because the Response status
code is not 200 OK.
It happens because we are keeping a reference to the Response object
        inside the FileException context. This is caused by the way Twisted
        returns values from inline callbacks: it raises a custom exception
encapsulating the original return value.
The solution is to remove the exception context when this context is a
_DefGen_Return instance, the BaseException used by Twisted to pass the
returned value from those inline callbacks.
Maybe there's a better and more reliable way to test the case described
here, but it would be more complicated and involve running - or at least
mocking - some async steps from the Media Pipeline. The current test
case is simple and detects the problem very fast. On the other hand, it
would not detect another kind of leak happening due to old object
references being kept inside the Media Pipeline cache.
This problem does not occur in Python 2.7 since we don't have Exception
Chaining (https://www.python.org/dev/peps/pep-3134/).
"""
# Create sample pair of Request and Response objects
request = Request("http://url")
response = Response("http://url", body=b"", request=request)
# Simulate the Media Pipeline behavior to produce a Twisted Failure
try:
# Simulate a Twisted inline callback returning a Response
raise StopIteration(response)
except StopIteration as exc:
def_gen_return_exc = exc
try:
# Simulate the media_downloaded callback raising a FileException
# This usually happens when the status code is not 200 OK
raise FileException("download-error")
except Exception as exc:
file_exc = exc
# Simulate Twisted capturing the FileException
# It encapsulates the exception inside a Twisted Failure
failure = Failure(file_exc)
# The Failure should encapsulate a FileException ...
assert failure.value == file_exc
# ... and it should have the StopIteration exception set as its context
assert failure.value.__context__ == def_gen_return_exc
# Let's calculate the request fingerprint and fake some runtime data...
fp = self.fingerprint(request)
info = self.pipe.spiderinfo
info.downloading.add(fp)
info.waiting[fp] = []
# When calling the method that caches the Request's result ...
self.pipe._cache_result_and_execute_waiters(failure, fp, info)
# ... it should store the Twisted Failure ...
assert info.downloaded[fp] == failure
# ... encapsulating the original FileException ...
assert info.downloaded[fp].value == file_exc
# ... but it should not store the StopIteration exception on its context
context = getattr(info.downloaded[fp].value, "__context__", None)
assert context is None
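    def test_context_removal_mechanism(self):
        # Miniature of the mechanism above (an illustrative addition): once
        # __context__ is severed, nothing reachable from the caught exception
        # keeps the chained StopIteration (and its Response payload) alive.
        try:
            try:
                raise StopIteration(Response("http://url"))
            except StopIteration:
                raise FileException("download-error")
        except FileException as exc:
            assert isinstance(exc.__context__, StopIteration)
            exc.__context__ = None
            assert exc.__context__ is None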
def test_default_item_completed(self):
item = {"name": "name"}
assert self.pipe.item_completed([], item, self.info) is item
# Check that failures are logged by default
fail = Failure(Exception())
results = [(True, 1), (False, fail)]
with LogCapture() as log:
new_item = self.pipe.item_completed(results, item, self.info)
assert new_item is item
assert len(log.records) == 1
record = log.records[0]
assert record.levelname == "ERROR"
assert record.exc_info == failure_to_exc_info(fail)
# disable failure logging and check again
self.pipe.LOG_FAILED_RESULTS = False
with LogCapture() as log:
new_item = self.pipe.item_completed(results, item, self.info)
assert new_item is item
assert len(log.records) == 0
@deferred_f_from_coro_f
async def test_default_process_item(self):
item = {"name": "name"}
new_item = await self.pipe.process_item(item)
assert new_item is item
class MockedMediaPipeline(UserDefinedPipeline):
def __init__(self, *args, crawler=None, **kwargs):
super().__init__(*args, crawler=crawler, **kwargs)
self._mockcalled = []
def download(self, request, info):
self._mockcalled.append("download")
return super().download(request, info)
def media_to_download(self, request, info, *, item=None):
self._mockcalled.append("media_to_download")
if "result" in request.meta:
return request.meta.get("result")
return super().media_to_download(request, info)
def get_media_requests(self, item, info):
self._mockcalled.append("get_media_requests")
return item.get("requests")
def media_downloaded(self, response, request, info, *, item=None):
self._mockcalled.append("media_downloaded")
return super().media_downloaded(response, request, info)
def media_failed(self, failure, request, info):
self._mockcalled.append("media_failed")
return super().media_failed(failure, request, info)
def item_completed(self, results, item, info):
self._mockcalled.append("item_completed")
item = super().item_completed(results, item, info)
item["results"] = results
return item
class TestMediaPipeline(TestBaseMediaPipeline):
pipeline_class = MockedMediaPipeline
def _errback(self, result):
self.pipe._mockcalled.append("request_errback")
return result
@deferred_f_from_coro_f
async def test_result_succeed(self):
rsp = Response("http://url1")
req = Request(
"http://url1",
meta={"response": rsp},
errback=self._errback,
)
item = {"requests": req}
new_item = await self.pipe.process_item(item)
assert new_item["results"] == [(True, {})]
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"media_downloaded",
"item_completed",
]
@deferred_f_from_coro_f
async def test_result_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
exc = Exception("foo")
req = Request(
"http://url1",
meta={"response": exc},
errback=self._errback,
)
item = {"requests": req}
new_item = await self.pipe.process_item(item)
assert len(new_item["results"]) == 1
assert new_item["results"][0][0] is False
assert isinstance(new_item["results"][0][1], Failure)
assert new_item["results"][0][1].value == exc
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"media_failed",
"request_errback",
"item_completed",
]
@deferred_f_from_coro_f
async def test_mix_of_success_and_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
rsp1 = Response("http://url1")
req1 = Request("http://url1", meta={"response": rsp1})
exc = Exception("foo")
req2 = Request("http://url2", meta={"response": exc})
item = {"requests": [req1, req2]}
new_item = await self.pipe.process_item(item)
assert len(new_item["results"]) == 2
assert new_item["results"][0] == (True, {})
assert new_item["results"][1][0] is False
assert isinstance(new_item["results"][1][1], Failure)
assert new_item["results"][1][1].value == exc
m = self.pipe._mockcalled
# only once
assert m[0] == "get_media_requests" # first hook called
assert m.count("get_media_requests") == 1
assert m.count("item_completed") == 1
assert m[-1] == "item_completed" # last hook called
# twice, one per request
assert m.count("media_to_download") == 2
        # one for the success and the other for the failure
assert m.count("media_downloaded") == 1
assert m.count("media_failed") == 1
@deferred_f_from_coro_f
async def test_get_media_requests(self):
# returns single Request (without callback)
req = Request("http://url")
item = {"requests": req} # pass a single item
new_item = await self.pipe.process_item(item)
assert new_item is item
assert self.fingerprint(req) in self.info.downloaded
# returns iterable of Requests
req1 = Request("http://url1")
req2 = Request("http://url2")
item = {"requests": iter([req1, req2])}
new_item = await self.pipe.process_item(item)
assert new_item is item
assert self.fingerprint(req1) in self.info.downloaded
assert self.fingerprint(req2) in self.info.downloaded
@deferred_f_from_coro_f
async def test_results_are_cached_across_multiple_items(self):
rsp1 = Response("http://url1")
req1 = Request("http://url1", meta={"response": rsp1})
item = {"requests": req1}
new_item = await self.pipe.process_item(item)
assert new_item is item
assert new_item["results"] == [(True, {})]
# rsp2 is ignored, rsp1 must be in results because request fingerprints are the same
req2 = Request(
req1.url, meta={"response": Response("http://donot.download.me")}
)
item = {"requests": req2}
new_item = await self.pipe.process_item(item)
assert new_item is item
assert self.fingerprint(req1) == self.fingerprint(req2)
assert new_item["results"] == [(True, {})]
@deferred_f_from_coro_f
async def test_results_are_cached_for_requests_of_single_item(self):
rsp1 = Response("http://url1")
req1 = Request("http://url1", meta={"response": rsp1})
req2 = Request(
req1.url, meta={"response": Response("http://donot.download.me")}
)
item = {"requests": [req1, req2]}
new_item = await self.pipe.process_item(item)
assert new_item is item
assert new_item["results"] == [(True, {}), (True, {})]
@deferred_f_from_coro_f
async def test_wait_if_request_is_downloading(self):
def _check_downloading(response):
fp = self.fingerprint(req1)
assert fp in self.info.downloading
assert fp in self.info.waiting
assert fp not in self.info.downloaded
assert len(self.info.waiting[fp]) == 2
return response
rsp1 = Response("http://url")
async def rsp1_func():
await _defer_sleep_async()
_check_downloading(rsp1)
async def rsp2_func():
pytest.fail("it must cache rsp1 result and must not try to redownload")
req1 = Request("http://url", meta={"response": rsp1_func})
req2 = Request(req1.url, meta={"response": rsp2_func})
item = {"requests": [req1, req2]}
new_item = await self.pipe.process_item(item)
assert new_item["results"] == [(True, {}), (True, {})]
@deferred_f_from_coro_f
async def test_use_media_to_download_result(self):
req = Request("http://url", meta={"result": "ITSME"})
item = {"requests": req}
new_item = await self.pipe.process_item(item)
assert new_item["results"] == [(True, "ITSME")]
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"item_completed",
]
def test_key_for_pipe(self):
assert (
self.pipe._key_for_pipe("IMAGES", base_class_name="MediaPipeline")
== "MOCKEDMEDIAPIPELINE_IMAGES"
)
class TestMediaPipelineAllowRedirectSettings:
def _assert_request_no3xx(self, pipeline_class, settings):
pipe = pipeline_class(crawler=get_crawler(None, settings))
request = Request("http://url")
pipe._modify_media_request(request)
assert "handle_httpstatus_list" in request.meta
for status, check in [
(200, True),
# These are the status codes we want
# the downloader to handle itself
(301, False),
(302, False),
            (303, False),
(307, False),
(308, False),
# we still want to get 4xx and 5xx
(400, True),
(404, True),
(500, True),
]:
if check:
assert status in request.meta["handle_httpstatus_list"]
else:
assert status not in request.meta["handle_httpstatus_list"]
def test_subclass_standard_setting(self):
self._assert_request_no3xx(UserDefinedPipeline, {"MEDIA_ALLOW_REDIRECTS": True})
def test_subclass_specific_setting(self):
self._assert_request_no3xx(
UserDefinedPipeline, {"USERDEFINEDPIPELINE_MEDIA_ALLOW_REDIRECTS": True}
)
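    def test_subclass_custom_name_setting(self):
        # Naming sketch (an illustrative addition): the settings prefix tracks
        # the subclass name uppercased, as test_key_for_pipe() shows elsewhere
        # in this module, so a renamed subclass reads a renamed setting.
        class MyPipeline(UserDefinedPipeline):
            pass
        self._assert_request_no3xx(
            MyPipeline, {"MYPIPELINE_MEDIA_ALLOW_REDIRECTS": True}
        )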
class TestBuildFromCrawler:
def setup_method(self):
self.crawler = get_crawler(None, {"FILES_STORE": "/foo"})
def test_simple(self):
class Pipeline(UserDefinedPipeline):
pass
with warnings.catch_warnings(record=True) as w:
pipe = Pipeline.from_crawler(self.crawler)
assert pipe.crawler == self.crawler
assert pipe._fingerprinter
assert len(w) == 0
def test_has_from_crawler_and_init(self):
class Pipeline(UserDefinedPipeline):
_from_crawler_called = False
def __init__(self, store_uri, settings, *, crawler):
super().__init__(crawler=crawler)
self._init_called = True
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
store_uri = settings["FILES_STORE"]
o = cls(store_uri, settings=settings, crawler=crawler)
o._from_crawler_called = True
return o
with warnings.catch_warnings(record=True) as w:
pipe = Pipeline.from_crawler(self.crawler)
assert pipe.crawler == self.crawler
assert pipe._fingerprinter
assert len(w) == 0
assert pipe._from_crawler_called
assert pipe._init_called
def test_has_from_crawler(self):
class Pipeline(UserDefinedPipeline):
_from_crawler_called = False
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
o = super().from_crawler(crawler)
o._from_crawler_called = True
o.store_uri = settings["FILES_STORE"]
return o
with warnings.catch_warnings(record=True) as w:
pipe = Pipeline.from_crawler(self.crawler)
            # crawler and _fingerprinter are set because super().from_crawler() was called
assert pipe.crawler == self.crawler
assert pipe._fingerprinter
assert len(w) == 0
assert pipe._from_crawler_called
class MediaFailedFailurePipeline(MockedMediaPipeline):
def media_failed(self, failure, request, info):
self._mockcalled.append("media_failed")
return failure # deprecated
class TestMediaFailedFailure(TestBaseMediaPipeline):
"""Test that media_failed() can return a failure instead of raising."""
pipeline_class = MediaFailedFailurePipeline
def _errback(self, result):
self.pipe._mockcalled.append("request_errback")
return result
@deferred_f_from_coro_f
async def test_result_failure(self):
self.pipe.LOG_FAILED_RESULTS = False
exc = Exception("foo")
req = Request(
"http://url1",
meta={"response": exc},
errback=self._errback,
)
item = {"requests": req}
with pytest.warns(
ScrapyDeprecationWarning, match="media_failed returned a Failure instance"
):
new_item = await self.pipe.process_item(item)
assert len(new_item["results"]) == 1
assert new_item["results"][0][0] is False
assert isinstance(new_item["results"][0][1], Failure)
assert new_item["results"][0][1].value == exc
assert self.pipe._mockcalled == [
"get_media_requests",
"media_to_download",
"media_failed",
"request_errback",
"item_completed",
]
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_asyncio.py | tests/test_utils_asyncio.py | from __future__ import annotations
import asyncio
import random
from typing import TYPE_CHECKING
from unittest import mock
import pytest
from twisted.internet.defer import Deferred
from scrapy.utils.asyncgen import as_async_generator
from scrapy.utils.asyncio import (
AsyncioLoopingCall,
_parallel_asyncio,
is_asyncio_available,
)
from scrapy.utils.defer import deferred_f_from_coro_f
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
class TestAsyncio:
def test_is_asyncio_available(self, reactor_pytest: str) -> None:
# the result should depend only on the pytest --reactor argument
assert is_asyncio_available() == (reactor_pytest == "asyncio")
@pytest.mark.only_asyncio
class TestParallelAsyncio:
"""Test for scrapy.utils.asyncio.parallel_asyncio(), based on tests.test_utils_defer.TestParallelAsync."""
CONCURRENT_ITEMS = 50
@staticmethod
async def callable(o: int, results: list[int]) -> None:
if random.random() < 0.4:
# simulate async processing
await asyncio.sleep(random.random() / 8)
# simulate trivial sync processing
results.append(o)
async def callable_wrapped(
self,
o: int,
results: list[int],
parallel_count: list[int],
max_parallel_count: list[int],
) -> None:
parallel_count[0] += 1
max_parallel_count[0] = max(max_parallel_count[0], parallel_count[0])
await self.callable(o, results)
assert parallel_count[0] > 0, parallel_count[0]
parallel_count[0] -= 1
@staticmethod
def get_async_iterable(length: int) -> AsyncGenerator[int, None]:
# simulate a simple callback without delays between results
return as_async_generator(range(length))
@staticmethod
async def get_async_iterable_with_delays(length: int) -> AsyncGenerator[int, None]:
# simulate a callback with delays between some of the results
for i in range(length):
if random.random() < 0.1:
await asyncio.sleep(random.random() / 20)
yield i
@deferred_f_from_coro_f
async def test_simple(self):
for length in [20, 50, 100]:
parallel_count = [0]
max_parallel_count = [0]
results = []
ait = self.get_async_iterable(length)
await _parallel_asyncio(
ait,
self.CONCURRENT_ITEMS,
self.callable_wrapped,
results,
parallel_count,
max_parallel_count,
)
assert list(range(length)) == sorted(results)
assert max_parallel_count[0] <= self.CONCURRENT_ITEMS
@deferred_f_from_coro_f
async def test_delays(self):
for length in [20, 50, 100]:
parallel_count = [0]
max_parallel_count = [0]
results = []
ait = self.get_async_iterable_with_delays(length)
await _parallel_asyncio(
ait,
self.CONCURRENT_ITEMS,
self.callable_wrapped,
results,
parallel_count,
max_parallel_count,
)
assert list(range(length)) == sorted(results)
assert max_parallel_count[0] <= self.CONCURRENT_ITEMS
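    @deferred_f_from_coro_f
    async def test_single_worker(self):
        # Degenerate-case sketch (an illustrative addition): a concurrency
        # limit of 1 must still drain the whole iterable, one item at a time.
        parallel_count = [0]
        max_parallel_count = [0]
        results: list[int] = []
        await _parallel_asyncio(
            self.get_async_iterable(10),
            1,
            self.callable_wrapped,
            results,
            parallel_count,
            max_parallel_count,
        )
        assert sorted(results) == list(range(10))
        assert max_parallel_count[0] == 1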
@pytest.mark.only_asyncio
class TestAsyncioLoopingCall:
def test_looping_call(self):
func = mock.MagicMock()
looping_call = AsyncioLoopingCall(func)
looping_call.start(1, now=False)
assert looping_call.running
looping_call.stop()
assert not looping_call.running
assert not func.called
def test_looping_call_now(self):
func = mock.MagicMock()
looping_call = AsyncioLoopingCall(func)
looping_call.start(1)
looping_call.stop()
assert func.called
def test_looping_call_already_running(self):
looping_call = AsyncioLoopingCall(lambda: None)
looping_call.start(1)
with pytest.raises(RuntimeError):
looping_call.start(1)
looping_call.stop()
def test_looping_call_interval(self):
looping_call = AsyncioLoopingCall(lambda: None)
with pytest.raises(ValueError, match="Interval must be greater than 0"):
looping_call.start(0)
with pytest.raises(ValueError, match="Interval must be greater than 0"):
looping_call.start(-1)
assert not looping_call.running
def test_looping_call_bad_function(self):
looping_call = AsyncioLoopingCall(Deferred)
with pytest.raises(TypeError):
looping_call.start(0.1)
assert not looping_call.running
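def _example_looping_call() -> AsyncioLoopingCall:
    # Usage sketch (an illustrative addition, mirroring the tests above): the
    # caller owns the lifecycle and must call stop() once the periodic work is
    # no longer needed.
    call = AsyncioLoopingCall(lambda: None)
    call.start(60.0, now=True)
    return call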
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_signals.py | tests/test_signals.py | import pytest
from twisted.internet.defer import inlineCallbacks
from scrapy import Request, Spider, signals
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.test import get_crawler, get_from_asyncio_queue
from tests.mockserver.http import MockServer
class ItemSpider(Spider):
name = "itemspider"
async def start(self):
for index in range(10):
yield Request(
self.mockserver.url(f"/status?n=200&id={index}"), meta={"index": index}
)
def parse(self, response):
return {"index": response.meta["index"]}
class TestMain:
@deferred_f_from_coro_f
async def test_scheduler_empty(self):
crawler = get_crawler()
calls = []
def track_call():
calls.append(object())
crawler.signals.connect(track_call, signals.scheduler_empty)
await crawler.crawl_async()
assert len(calls) >= 1
class TestMockServer:
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
self.items = []
async def _on_item_scraped(self, item):
item = await get_from_asyncio_queue(item)
self.items.append(item)
@pytest.mark.only_asyncio
@inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
assert len(self.items) == 10
for index in range(10):
assert {"index": index} in self.items
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_core_scraper.py | tests/test_core_scraper.py | from __future__ import annotations
from typing import TYPE_CHECKING
from scrapy.utils.defer import deferred_f_from_coro_f
from scrapy.utils.test import get_crawler
from tests.spiders import SimpleSpider
if TYPE_CHECKING:
import pytest
from tests.mockserver.http import MockServer
@deferred_f_from_coro_f
async def test_scraper_exception(
mockserver: MockServer,
caplog: pytest.LogCaptureFixture,
monkeypatch: pytest.MonkeyPatch,
) -> None:
crawler = get_crawler(SimpleSpider)
monkeypatch.setattr(
"scrapy.core.engine.Scraper.handle_spider_output_async",
lambda *args, **kwargs: 1 / 0,
)
await crawler.crawl_async(url=mockserver.url("/"))
assert "Scraper bug processing" in caplog.text
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_httpcompression.py | tests/test_downloadermiddleware_httpcompression.py | from gzip import GzipFile
from io import BytesIO
from logging import WARNING
from pathlib import Path
import pytest
from testfixtures import LogCapture
from w3lib.encoding import resolve_encoding
from scrapy.downloadermiddlewares.httpcompression import (
ACCEPTED_ENCODINGS,
HttpCompressionMiddleware,
)
from scrapy.exceptions import IgnoreRequest, NotConfigured
from scrapy.http import HtmlResponse, Request, Response
from scrapy.responsetypes import responsetypes
from scrapy.spiders import Spider
from scrapy.utils.gz import gunzip
from scrapy.utils.test import get_crawler
from tests import tests_datadir
SAMPLEDIR = Path(tests_datadir, "compressed")
FORMAT = {
"gzip": ("html-gzip.bin", "gzip"),
"x-gzip": ("html-gzip.bin", "x-gzip"),
"rawdeflate": ("html-rawdeflate.bin", "deflate"),
"zlibdeflate": ("html-zlibdeflate.bin", "deflate"),
"gzip-deflate": ("html-gzip-deflate.bin", "gzip, deflate"),
"gzip-deflate-gzip": ("html-gzip-deflate-gzip.bin", "gzip, deflate, gzip"),
"br": ("html-br.bin", "br"),
# $ zstd raw.html --content-size -o html-zstd-static-content-size.bin
"zstd-static-content-size": ("html-zstd-static-content-size.bin", "zstd"),
# $ zstd raw.html --no-content-size -o html-zstd-static-no-content-size.bin
"zstd-static-no-content-size": ("html-zstd-static-no-content-size.bin", "zstd"),
# $ cat raw.html | zstd -o html-zstd-streaming-no-content-size.bin
"zstd-streaming-no-content-size": (
"html-zstd-streaming-no-content-size.bin",
"zstd",
),
**{
f"bomb-{format_id}": (f"bomb-{format_id}.bin", format_id)
for format_id in (
"br", # 34 → 11 511 612
"deflate", # 27 968 → 11 511 612
"gzip", # 27 988 → 11 511 612
"zstd", # 1 096 → 11 511 612
)
},
}
def _skip_if_no_br() -> None:
try:
try:
import brotli # noqa: PLC0415
brotli.Decompressor.can_accept_more_data
except (ImportError, AttributeError):
import brotlicffi # noqa: PLC0415
brotlicffi.Decompressor.can_accept_more_data
except (ImportError, AttributeError):
pytest.skip("no brotli support")
def _skip_if_no_zstd() -> None:
try:
import zstandard # noqa: F401,PLC0415
except ImportError:
pytest.skip("no zstd support (zstandard)")
class TestHttpCompression:
def setup_method(self):
self.crawler = get_crawler(Spider)
self.mw = HttpCompressionMiddleware.from_crawler(self.crawler)
self.crawler.stats.open_spider()
def _getresponse(self, coding):
if coding not in FORMAT:
raise ValueError
samplefile, contentencoding = FORMAT[coding]
body = (SAMPLEDIR / samplefile).read_bytes()
headers = {
"Server": "Yaws/1.49 Yet Another Web Server",
"Date": "Sun, 08 Mar 2009 00:41:03 GMT",
"Content-Length": len(body),
"Content-Type": "text/html",
"Content-Encoding": contentencoding,
}
response = Response("http://scrapytest.org/", body=body, headers=headers)
response.request = Request(
"http://scrapytest.org", headers={"Accept-Encoding": "gzip, deflate"}
)
return response
def assertStatsEqual(self, key, value):
assert self.crawler.stats.get_value(key) == value, str(
self.crawler.stats.get_stats()
)
def test_setting_false_compression_enabled(self):
with pytest.raises(NotConfigured):
HttpCompressionMiddleware.from_crawler(
get_crawler(settings_dict={"COMPRESSION_ENABLED": False})
)
def test_setting_default_compression_enabled(self):
assert isinstance(
HttpCompressionMiddleware.from_crawler(get_crawler()),
HttpCompressionMiddleware,
)
def test_setting_true_compression_enabled(self):
assert isinstance(
HttpCompressionMiddleware.from_crawler(
get_crawler(settings_dict={"COMPRESSION_ENABLED": True})
),
HttpCompressionMiddleware,
)
def test_process_request(self):
request = Request("http://scrapytest.org")
assert "Accept-Encoding" not in request.headers
self.mw.process_request(request)
assert request.headers.get("Accept-Encoding") == b", ".join(ACCEPTED_ENCODINGS)
def test_process_response_gzip(self):
response = self._getresponse("gzip")
request = response.request
assert response.headers["Content-Encoding"] == b"gzip"
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74837)
def test_process_response_br(self):
_skip_if_no_br()
response = self._getresponse("br")
request = response.request
assert response.headers["Content-Encoding"] == b"br"
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74837)
def test_process_response_br_unsupported(self):
try:
try:
import brotli # noqa: F401,PLC0415
pytest.skip("Requires not having brotli support")
except ImportError:
import brotlicffi # noqa: F401,PLC0415
pytest.skip("Requires not having brotli support")
except ImportError:
pass
response = self._getresponse("br")
request = response.request
assert response.headers["Content-Encoding"] == b"br"
with LogCapture(
"scrapy.downloadermiddlewares.httpcompression",
propagate=False,
level=WARNING,
) as log:
newresponse = self.mw.process_response(request, response)
log.check(
(
"scrapy.downloadermiddlewares.httpcompression",
"WARNING",
(
"HttpCompressionMiddleware cannot decode the response for "
"http://scrapytest.org/ from unsupported encoding(s) 'br'. "
"You need to install brotli or brotlicffi >= 1.2.0 to decode 'br'."
),
),
)
assert newresponse is not response
assert newresponse.headers.getlist("Content-Encoding") == [b"br"]
def test_process_response_zstd(self):
_skip_if_no_zstd()
raw_content = None
for check_key in FORMAT:
if not check_key.startswith("zstd-"):
continue
response = self._getresponse(check_key)
request = response.request
assert response.headers["Content-Encoding"] == b"zstd"
newresponse = self.mw.process_response(request, response)
if raw_content is None:
raw_content = newresponse.body
else:
assert raw_content == newresponse.body
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
def test_process_response_zstd_unsupported(self):
try:
import zstandard # noqa: F401,PLC0415
pytest.skip("Requires not having zstandard support")
except ImportError:
pass
response = self._getresponse("zstd-static-content-size")
request = response.request
assert response.headers["Content-Encoding"] == b"zstd"
with LogCapture(
"scrapy.downloadermiddlewares.httpcompression",
propagate=False,
level=WARNING,
) as log:
newresponse = self.mw.process_response(request, response)
log.check(
(
"scrapy.downloadermiddlewares.httpcompression",
"WARNING",
(
"HttpCompressionMiddleware cannot decode the response for"
" http://scrapytest.org/ from unsupported encoding(s) 'zstd'."
" You need to install zstandard to decode 'zstd'."
),
),
)
assert newresponse is not response
assert newresponse.headers.getlist("Content-Encoding") == [b"zstd"]
def test_process_response_rawdeflate(self):
response = self._getresponse("rawdeflate")
request = response.request
assert response.headers["Content-Encoding"] == b"deflate"
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74840)
    def test_process_response_zlibdeflate(self):
response = self._getresponse("zlibdeflate")
request = response.request
assert response.headers["Content-Encoding"] == b"deflate"
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74840)
def test_process_response_plain(self):
response = Response("http://scrapytest.org", body=b"<!DOCTYPE...")
request = Request("http://scrapytest.org")
assert not response.headers.get("Content-Encoding")
newresponse = self.mw.process_response(request, response)
assert newresponse is response
assert newresponse.body.startswith(b"<!DOCTYPE")
self.assertStatsEqual("httpcompression/response_count", None)
self.assertStatsEqual("httpcompression/response_bytes", None)
def test_multipleencodings(self):
response = self._getresponse("gzip")
response.headers["Content-Encoding"] = ["uuencode", "gzip"]
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.headers.getlist("Content-Encoding") == [b"uuencode"]
def test_multi_compression_single_header(self):
response = self._getresponse("gzip-deflate")
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert "Content-Encoding" not in newresponse.headers
assert newresponse.body.startswith(b"<!DOCTYPE")
def test_multi_compression_single_header_invalid_compression(self):
response = self._getresponse("gzip-deflate")
response.headers["Content-Encoding"] = [b"gzip, foo, deflate"]
request = response.request
with LogCapture(
"scrapy.downloadermiddlewares.httpcompression",
propagate=False,
level=WARNING,
) as log:
newresponse = self.mw.process_response(request, response)
log.check(
(
"scrapy.downloadermiddlewares.httpcompression",
"WARNING",
(
"HttpCompressionMiddleware cannot decode the response for"
" http://scrapytest.org/ from unsupported encoding(s) 'gzip,foo'."
),
),
)
assert newresponse is not response
assert newresponse.headers.getlist("Content-Encoding") == [b"gzip", b"foo"]
def test_multi_compression_multiple_header(self):
response = self._getresponse("gzip-deflate")
response.headers["Content-Encoding"] = ["gzip", "deflate"]
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert "Content-Encoding" not in newresponse.headers
assert newresponse.body.startswith(b"<!DOCTYPE")
def test_multi_compression_multiple_header_invalid_compression(self):
response = self._getresponse("gzip-deflate")
response.headers["Content-Encoding"] = ["gzip", "foo", "deflate"]
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.headers.getlist("Content-Encoding") == [b"gzip", b"foo"]
def test_multi_compression_single_and_multiple_header(self):
response = self._getresponse("gzip-deflate-gzip")
response.headers["Content-Encoding"] = ["gzip", "deflate, gzip"]
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert "Content-Encoding" not in newresponse.headers
assert newresponse.body.startswith(b"<!DOCTYPE")
def test_multi_compression_single_and_multiple_header_invalid_compression(self):
response = self._getresponse("gzip-deflate")
response.headers["Content-Encoding"] = ["gzip", "foo,deflate"]
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.headers.getlist("Content-Encoding") == [b"gzip", b"foo"]
def test_process_response_encoding_inside_body(self):
headers = {
"Content-Type": "text/html",
"Content-Encoding": "gzip",
}
f = BytesIO()
plainbody = (
b"<html><head><title>Some page</title>"
b'<meta http-equiv="Content-Type" content="text/html; charset=gb2312">'
)
zf = GzipFile(fileobj=f, mode="wb")
zf.write(plainbody)
zf.close()
response = Response(
"http;//www.example.com/", headers=headers, body=f.getvalue()
)
request = Request("http://www.example.com/")
newresponse = self.mw.process_response(request, response)
assert isinstance(newresponse, HtmlResponse)
assert newresponse.body == plainbody
assert newresponse.encoding == resolve_encoding("gb2312")
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", len(plainbody))
def test_process_response_force_recalculate_encoding(self):
headers = {
"Content-Type": "text/html",
"Content-Encoding": "gzip",
}
f = BytesIO()
plainbody = (
b"<html><head><title>Some page</title>"
b'<meta http-equiv="Content-Type" content="text/html; charset=gb2312">'
)
zf = GzipFile(fileobj=f, mode="wb")
zf.write(plainbody)
zf.close()
response = HtmlResponse(
"http;//www.example.com/page.html", headers=headers, body=f.getvalue()
)
request = Request("http://www.example.com/")
newresponse = self.mw.process_response(request, response)
assert isinstance(newresponse, HtmlResponse)
assert newresponse.body == plainbody
assert newresponse.encoding == resolve_encoding("gb2312")
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", len(plainbody))
def test_process_response_no_content_type_header(self):
headers = {
"Content-Encoding": "identity",
}
plainbody = (
b"<html><head><title>Some page</title>"
b'<meta http-equiv="Content-Type" content="text/html; charset=gb2312">'
)
respcls = responsetypes.from_args(
url="http://www.example.com/index", headers=headers, body=plainbody
)
response = respcls(
"http://www.example.com/index", headers=headers, body=plainbody
)
request = Request("http://www.example.com/index")
newresponse = self.mw.process_response(request, response)
assert isinstance(newresponse, respcls)
assert newresponse.body == plainbody
assert newresponse.encoding == resolve_encoding("gb2312")
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", len(plainbody))
def test_process_response_gzipped_contenttype(self):
response = self._getresponse("gzip")
response.headers["Content-Type"] = "application/gzip"
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74837)
def test_process_response_gzip_app_octetstream_contenttype(self):
response = self._getresponse("gzip")
response.headers["Content-Type"] = "application/octet-stream"
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74837)
def test_process_response_gzip_binary_octetstream_contenttype(self):
response = self._getresponse("x-gzip")
response.headers["Content-Type"] = "binary/octet-stream"
request = response.request
newresponse = self.mw.process_response(request, response)
assert newresponse is not response
assert newresponse.body.startswith(b"<!DOCTYPE")
assert "Content-Encoding" not in newresponse.headers
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 74837)
def test_process_response_gzipped_gzip_file(self):
"""Test that a gzip Content-Encoded .gz file is gunzipped
only once by the middleware, leaving gunzipping of the file
to upper layers.
"""
headers = {
"Content-Type": "application/gzip",
"Content-Encoding": "gzip",
}
# build a gzipped file (here, a sitemap)
f = BytesIO()
plainbody = b"""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">
<url>
<loc>http://www.example.com/</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>daily</changefreq>
<priority>1</priority>
</url>
<url>
<loc>http://www.example.com/Special-Offers.html</loc>
<lastmod>2009-08-16</lastmod>
<changefreq>weekly</changefreq>
<priority>0.8</priority>
</url>
</urlset>"""
gz_file = GzipFile(fileobj=f, mode="wb")
gz_file.write(plainbody)
gz_file.close()
# build a gzipped response body containing this gzipped file
r = BytesIO()
gz_resp = GzipFile(fileobj=r, mode="wb")
gz_resp.write(f.getvalue())
gz_resp.close()
response = Response(
"http;//www.example.com/", headers=headers, body=r.getvalue()
)
request = Request("http://www.example.com/")
newresponse = self.mw.process_response(request, response)
assert gunzip(newresponse.body) == plainbody
self.assertStatsEqual("httpcompression/response_count", 1)
self.assertStatsEqual("httpcompression/response_bytes", 230)
def test_process_response_head_request_no_decode_required(self):
response = self._getresponse("gzip")
response.headers["Content-Type"] = "application/gzip"
request = response.request
request.method = "HEAD"
response = response.replace(body=None)
newresponse = self.mw.process_response(request, response)
assert newresponse is response
assert response.body == b""
self.assertStatsEqual("httpcompression/response_count", None)
self.assertStatsEqual("httpcompression/response_bytes", None)
def _test_compression_bomb_setting(self, compression_id):
settings = {"DOWNLOAD_MAXSIZE": 1_000_000}
crawler = get_crawler(Spider, settings_dict=settings)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(f"bomb-{compression_id}") # 11_511_612 B
with pytest.raises(IgnoreRequest) as exc_info:
mw.process_response(response.request, response)
assert exc_info.value.__cause__.decompressed_size < 1_100_000
def test_compression_bomb_setting_br(self):
_skip_if_no_br()
self._test_compression_bomb_setting("br")
def test_compression_bomb_setting_deflate(self):
self._test_compression_bomb_setting("deflate")
def test_compression_bomb_setting_gzip(self):
self._test_compression_bomb_setting("gzip")
def test_compression_bomb_setting_zstd(self):
_skip_if_no_zstd()
self._test_compression_bomb_setting("zstd")
def _test_compression_bomb_spider_attr(self, compression_id):
class DownloadMaxSizeSpider(Spider):
download_maxsize = 1_000_000
crawler = get_crawler(DownloadMaxSizeSpider)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(f"bomb-{compression_id}")
with pytest.raises(IgnoreRequest) as exc_info:
mw.process_response(response.request, response)
assert exc_info.value.__cause__.decompressed_size < 1_100_000
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_compression_bomb_spider_attr_br(self):
_skip_if_no_br()
self._test_compression_bomb_spider_attr("br")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_compression_bomb_spider_attr_deflate(self):
self._test_compression_bomb_spider_attr("deflate")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_compression_bomb_spider_attr_gzip(self):
self._test_compression_bomb_spider_attr("gzip")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_compression_bomb_spider_attr_zstd(self):
_skip_if_no_zstd()
self._test_compression_bomb_spider_attr("zstd")
def _test_compression_bomb_request_meta(self, compression_id):
crawler = get_crawler(Spider)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(f"bomb-{compression_id}")
response.meta["download_maxsize"] = 1_000_000
with pytest.raises(IgnoreRequest) as exc_info:
mw.process_response(response.request, response)
assert exc_info.value.__cause__.decompressed_size < 1_100_000
def test_compression_bomb_request_meta_br(self):
_skip_if_no_br()
self._test_compression_bomb_request_meta("br")
def test_compression_bomb_request_meta_deflate(self):
self._test_compression_bomb_request_meta("deflate")
def test_compression_bomb_request_meta_gzip(self):
self._test_compression_bomb_request_meta("gzip")
def test_compression_bomb_request_meta_zstd(self):
_skip_if_no_zstd()
self._test_compression_bomb_request_meta("zstd")
def _test_download_warnsize_setting(self, compression_id):
settings = {"DOWNLOAD_WARNSIZE": 10_000_000}
crawler = get_crawler(Spider, settings_dict=settings)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(f"bomb-{compression_id}")
with LogCapture(
"scrapy.downloadermiddlewares.httpcompression",
propagate=False,
level=WARNING,
) as log:
mw.process_response(response.request, response)
log.check(
(
"scrapy.downloadermiddlewares.httpcompression",
"WARNING",
(
"<200 http://scrapytest.org/> body size after "
"decompression (11511612 B) is larger than the download "
"warning size (10000000 B)."
),
),
)
def test_download_warnsize_setting_br(self):
_skip_if_no_br()
self._test_download_warnsize_setting("br")
def test_download_warnsize_setting_deflate(self):
self._test_download_warnsize_setting("deflate")
def test_download_warnsize_setting_gzip(self):
self._test_download_warnsize_setting("gzip")
def test_download_warnsize_setting_zstd(self):
_skip_if_no_zstd()
self._test_download_warnsize_setting("zstd")
def _test_download_warnsize_spider_attr(self, compression_id):
class DownloadWarnSizeSpider(Spider):
download_warnsize = 10_000_000
crawler = get_crawler(DownloadWarnSizeSpider)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(f"bomb-{compression_id}")
with LogCapture(
"scrapy.downloadermiddlewares.httpcompression",
propagate=False,
level=WARNING,
) as log:
mw.process_response(response.request, response)
log.check(
(
"scrapy.downloadermiddlewares.httpcompression",
"WARNING",
(
"<200 http://scrapytest.org/> body size after "
"decompression (11511612 B) is larger than the download "
"warning size (10000000 B)."
),
),
)
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_download_warnsize_spider_attr_br(self):
_skip_if_no_br()
self._test_download_warnsize_spider_attr("br")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_download_warnsize_spider_attr_deflate(self):
self._test_download_warnsize_spider_attr("deflate")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_download_warnsize_spider_attr_gzip(self):
self._test_download_warnsize_spider_attr("gzip")
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
def test_download_warnsize_spider_attr_zstd(self):
_skip_if_no_zstd()
self._test_download_warnsize_spider_attr("zstd")
def _test_download_warnsize_request_meta(self, compression_id):
crawler = get_crawler(Spider)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(f"bomb-{compression_id}")
response.meta["download_warnsize"] = 10_000_000
with LogCapture(
"scrapy.downloadermiddlewares.httpcompression",
propagate=False,
level=WARNING,
) as log:
mw.process_response(response.request, response)
log.check(
(
"scrapy.downloadermiddlewares.httpcompression",
"WARNING",
(
"<200 http://scrapytest.org/> body size after "
"decompression (11511612 B) is larger than the download "
"warning size (10000000 B)."
),
),
)
def test_download_warnsize_request_meta_br(self):
_skip_if_no_br()
self._test_download_warnsize_request_meta("br")
def test_download_warnsize_request_meta_deflate(self):
self._test_download_warnsize_request_meta("deflate")
def test_download_warnsize_request_meta_gzip(self):
self._test_download_warnsize_request_meta("gzip")
def test_download_warnsize_request_meta_zstd(self):
_skip_if_no_zstd()
self._test_download_warnsize_request_meta("zstd")
def _get_truncated_response(self, compression_id):
crawler = get_crawler(Spider)
spider = crawler._create_spider("scrapytest.org")
mw = HttpCompressionMiddleware.from_crawler(crawler)
mw.open_spider(spider)
response = self._getresponse(compression_id)
truncated_body = response.body[: len(response.body) // 2]
response = response.replace(body=truncated_body)
return mw.process_response(response.request, response)
def test_process_truncated_response_br(self):
_skip_if_no_br()
resp = self._get_truncated_response("br")
assert resp.body.startswith(b"<!DOCTYPE")
def test_process_truncated_response_zlibdeflate(self):
resp = self._get_truncated_response("zlibdeflate")
assert resp.body.startswith(b"<!DOCTYPE")
def test_process_truncated_response_gzip(self):
resp = self._get_truncated_response("gzip")
assert resp.body.startswith(b"<!DOCTYPE")
def test_process_truncated_response_zstd(self):
_skip_if_no_zstd()
for check_key in FORMAT:
if not check_key.startswith("zstd-"):
continue
resp = self._get_truncated_response(check_key)
assert len(resp.body) == 0
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_response.py | tests/test_utils_response.py | from pathlib import Path
from time import process_time
from urllib.parse import urlparse
import pytest
from scrapy.http import HtmlResponse, Response
from scrapy.utils.python import to_bytes
from scrapy.utils.response import (
_remove_html_comments,
get_base_url,
get_meta_refresh,
open_in_browser,
response_status_message,
)
def test_open_in_browser():
url = "http:///www.example.com/some/page.html"
body = (
b"<html> <head> <title>test page</title> </head> <body>test body</body> </html>"
)
def browser_open(burl: str) -> bool:
path = urlparse(burl).path
if not path or not Path(path).exists():
path = burl.replace("file://", "")
bbody = Path(path).read_bytes()
assert b'<base href="' + to_bytes(url) + b'">' in bbody
return True
response = HtmlResponse(url, body=body)
assert open_in_browser(response, _openfunc=browser_open), "Browser not called"
resp = Response(url, body=body)
with pytest.raises(TypeError):
open_in_browser(resp, debug=True) # pylint: disable=unexpected-keyword-arg
def test_get_meta_refresh():
r1 = HtmlResponse(
"http://www.example.com",
body=b"""
<html>
<head><title>Dummy</title><meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
<body>blahablsdfsal&</body>
</html>""",
)
r2 = HtmlResponse(
"http://www.example.com",
body=b"""
<html>
<head><title>Dummy</title><noScript>
<meta http-equiv="refresh" content="5;url=http://example.org/newpage" /></head>
</noSCRIPT>
<body>blahablsdfsal&</body>
</html>""",
)
r3 = HtmlResponse(
"http://www.example.com",
body=b"""
<noscript><meta http-equiv="REFRESH" content="0;url=http://www.example.com/newpage</noscript>
<script type="text/javascript">
if(!checkCookies()){
document.write('<meta http-equiv="REFRESH" content="0;url=http://www.example.com/newpage">');
}
</script>
""",
)
r4 = HtmlResponse(
"http://www.example.com",
body=b"""
<html>
<head><title>Dummy</title>
<base href="http://www.another-domain.com/base/path/">
<meta http-equiv="refresh" content="5;url=target.html"</head>
<body>blahablsdfsal&</body>
</html>""",
)
assert get_meta_refresh(r1) == (5.0, "http://example.org/newpage")
assert get_meta_refresh(r2) == (None, None)
assert get_meta_refresh(r3) == (None, None)
assert get_meta_refresh(r4) == (
5.0,
"http://www.another-domain.com/base/path/target.html",
)
def test_get_base_url():
resp = HtmlResponse(
"http://www.example.com",
body=b"""
<html>
<head><base href="http://www.example.com/img/" target="_blank"></head>
<body>blahablsdfsal&</body>
</html>""",
)
assert get_base_url(resp) == "http://www.example.com/img/"
resp2 = HtmlResponse(
"http://www.example.com",
body=b"""
<html><body>blahablsdfsal&</body></html>""",
)
assert get_base_url(resp2) == "http://www.example.com"
def test_response_status_message():
assert response_status_message(200) == "200 OK"
assert response_status_message(404) == "404 Not Found"
assert response_status_message(573) == "573 Unknown Status"
def test_inject_base_url():
url = "http://www.example.com"
def check_base_url(burl):
path = urlparse(burl).path
if not path or not Path(path).exists():
path = burl.replace("file://", "")
bbody = Path(path).read_bytes()
assert bbody.count(b'<base href="' + to_bytes(url) + b'">') == 1
return True
r1 = HtmlResponse(
url,
body=b"""
<html>
<head><title>Dummy</title></head>
<body><p>Hello world.</p></body>
</html>""",
)
r2 = HtmlResponse(
url,
body=b"""
<html>
<head id="foo"><title>Dummy</title></head>
<body>Hello world.</body>
</html>""",
)
r3 = HtmlResponse(
url,
body=b"""
<html>
<head><title>Dummy</title></head>
<body>
<header>Hello header</header>
<p>Hello world.</p>
</body>
</html>""",
)
r4 = HtmlResponse(
url,
body=b"""
<html>
<!-- <head>Dummy comment</head> -->
<head><title>Dummy</title></head>
<body><p>Hello world.</p></body>
</html>""",
)
r5 = HtmlResponse(
url,
body=b"""
<html>
<!--[if IE]>
<head><title>IE head</title></head>
<![endif]-->
<!--[if !IE]>-->
<head><title>Standard head</title></head>
<!--<![endif]-->
<body><p>Hello world.</p></body>
</html>""",
)
assert open_in_browser(r1, _openfunc=check_base_url), "Inject base url"
assert open_in_browser(r2, _openfunc=check_base_url), (
"Inject base url with argumented head"
)
assert open_in_browser(r3, _openfunc=check_base_url), (
"Inject unique base url with misleading tag"
)
assert open_in_browser(r4, _openfunc=check_base_url), (
"Inject unique base url with misleading comment"
)
assert open_in_browser(r5, _openfunc=check_base_url), (
"Inject unique base url with conditional comment"
)
def test_open_in_browser_redos_comment():
MAX_CPU_TIME = 0.02
# Exploit input from
# https://makenowjust-labs.github.io/recheck/playground/
# for /<!--.*?-->/ (old pattern to remove comments).
body = b"-><!--\x00" * 25_000 + b"->\n<!---->"
response = HtmlResponse("https://example.com", body=body)
start_time = process_time()
open_in_browser(response, lambda url: True)
end_time = process_time()
assert end_time - start_time < MAX_CPU_TIME
def test_open_in_browser_redos_head():
MAX_CPU_TIME = 0.02
# Exploit input from
# https://makenowjust-labs.github.io/recheck/playground/
# for /(<head(?:>|\s.*?>))/ (old pattern to find the head element).
body = b"<head\t" * 8_000
response = HtmlResponse("https://example.com", body=body)
start_time = process_time()
open_in_browser(response, lambda url: True)
end_time = process_time()
assert end_time - start_time < MAX_CPU_TIME
@pytest.mark.parametrize(
("input_body", "output_body"),
[
(b"a<!--", b"a"),
(b"a<!---->b", b"ab"),
(b"a<!--b-->c", b"ac"),
(b"a<!--b-->c<!--", b"ac"),
(b"a<!--b-->c<!--d", b"ac"),
(b"a<!--b-->c<!---->d", b"acd"),
(b"a<!--b--><!--c-->d", b"ad"),
],
)
def test_remove_html_comments(input_body, output_body):
assert _remove_html_comments(input_body) == output_body
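# ---------------------------------------------------------------------
# Illustrative sketch (an analogue written for this document, not
# Scrapy's actual implementation): the contract pinned down by the
# parametrized cases above can be met in linear time with find() scans
# instead of a backtracking regex -- an unterminated "<!--" swallows
# the rest of the body, matching the (b"a<!--", b"a") case.
def _sketch_remove_comments(body: bytes) -> bytes:
    out = bytearray()
    pos = 0
    while (start := body.find(b"<!--", pos)) != -1:
        out += body[pos:start]
        end = body.find(b"-->", start + 4)
        if end == -1:  # unterminated comment: drop everything after it
            return bytes(out)
        pos = end + 3
    out += body[pos:]
    return bytes(out)
assert _sketch_remove_comments(b"a<!--b-->c<!--d") == b"ac"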
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_deprecate.py | tests/test_utils_deprecate.py | import inspect
import warnings
from unittest import mock
from warnings import WarningMessage
import pytest
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import create_deprecated_class, update_classpath
class MyWarning(UserWarning):
pass
class SomeBaseClass:
pass
class NewName(SomeBaseClass):
pass
class TestWarnWhenSubclassed:
def _mywarnings(
self, w: list[WarningMessage], category: type[Warning] = MyWarning
) -> list[WarningMessage]:
        return [x for x in w if x.category is category]
def test_no_warning_on_definition(self):
with warnings.catch_warnings(record=True) as w:
create_deprecated_class("Deprecated", NewName)
w = self._mywarnings(w)
assert w == []
def test_subclassing_warning_message(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
w = self._mywarnings(w)
assert len(w) == 1
assert (
str(w[0].message) == "tests.test_utils_deprecate.UserClass inherits from "
"deprecated class tests.test_utils_deprecate.Deprecated, "
"please inherit from tests.test_utils_deprecate.NewName."
" (warning only on first subclass, there may be others)"
)
assert w[0].lineno == inspect.getsourcelines(UserClass)[1]
def test_custom_class_paths(self):
Deprecated = create_deprecated_class(
"Deprecated",
NewName,
new_class_path="foo.NewClass",
old_class_path="bar.OldClass",
warn_category=MyWarning,
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
_ = Deprecated()
w = self._mywarnings(w)
assert len(w) == 2
assert "foo.NewClass" in str(w[0].message)
assert "bar.OldClass" in str(w[0].message)
assert "foo.NewClass" in str(w[1].message)
assert "bar.OldClass" in str(w[1].message)
def test_subclassing_warns_only_on_direct_children(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_once=False, warn_category=MyWarning
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class NoWarnOnMe(UserClass):
pass
w = self._mywarnings(w)
assert len(w) == 1
assert "UserClass" in str(w[0].message)
def test_subclassing_warns_once_by_default(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class FooClass(Deprecated):
pass
class BarClass(Deprecated):
pass
w = self._mywarnings(w)
assert len(w) == 1
assert "UserClass" in str(w[0].message)
def test_warning_on_instance(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
# ignore subclassing warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", MyWarning)
class UserClass(Deprecated):
pass
with warnings.catch_warnings(record=True) as w:
_, lineno = Deprecated(), inspect.getlineno(inspect.currentframe())
_ = UserClass() # subclass instances don't warn
w = self._mywarnings(w)
assert len(w) == 1
assert (
str(w[0].message) == "tests.test_utils_deprecate.Deprecated is deprecated, "
"instantiate tests.test_utils_deprecate.NewName instead."
)
assert w[0].lineno == lineno
def test_warning_auto_message(self):
with warnings.catch_warnings(record=True) as w:
Deprecated = create_deprecated_class("Deprecated", NewName)
class UserClass2(Deprecated):
pass
msg = str(w[0].message)
assert "tests.test_utils_deprecate.NewName" in msg
assert "tests.test_utils_deprecate.Deprecated" in msg
def test_issubclass(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class("DeprecatedName", NewName)
class UpdatedUserClass1(NewName):
pass
class UpdatedUserClass1a(NewName):
pass
class OutdatedUserClass1(DeprecatedName):
pass
class OutdatedUserClass1a(DeprecatedName):
pass
class UnrelatedClass:
pass
assert issubclass(UpdatedUserClass1, NewName)
assert issubclass(UpdatedUserClass1a, NewName)
assert issubclass(UpdatedUserClass1, DeprecatedName)
assert issubclass(UpdatedUserClass1a, DeprecatedName)
assert issubclass(OutdatedUserClass1, DeprecatedName)
assert not issubclass(UnrelatedClass, DeprecatedName)
assert not issubclass(OutdatedUserClass1, OutdatedUserClass1a)
assert not issubclass(OutdatedUserClass1a, OutdatedUserClass1)
with pytest.raises(TypeError):
issubclass(object(), DeprecatedName)
def test_isinstance(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class("DeprecatedName", NewName)
class UpdatedUserClass2(NewName):
pass
class UpdatedUserClass2a(NewName):
pass
class OutdatedUserClass2(DeprecatedName):
pass
class OutdatedUserClass2a(DeprecatedName):
pass
class UnrelatedClass:
pass
assert isinstance(UpdatedUserClass2(), NewName)
assert isinstance(UpdatedUserClass2a(), NewName)
assert isinstance(UpdatedUserClass2(), DeprecatedName)
assert isinstance(UpdatedUserClass2a(), DeprecatedName)
assert isinstance(OutdatedUserClass2(), DeprecatedName)
assert isinstance(OutdatedUserClass2a(), DeprecatedName)
assert not isinstance(OutdatedUserClass2a(), OutdatedUserClass2)
assert not isinstance(OutdatedUserClass2(), OutdatedUserClass2a)
assert not isinstance(UnrelatedClass(), DeprecatedName)
def test_clsdict(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
Deprecated = create_deprecated_class("Deprecated", NewName, {"foo": "bar"})
assert Deprecated.foo == "bar"
def test_deprecate_a_class_with_custom_metaclass(self):
Meta1 = type("Meta1", (type,), {})
New = Meta1("New", (), {})
create_deprecated_class("Deprecated", New)
def test_deprecate_subclass_of_deprecated_class(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
AlsoDeprecated = create_deprecated_class(
"AlsoDeprecated",
Deprecated,
new_class_path="foo.Bar",
warn_category=MyWarning,
)
w = self._mywarnings(w)
assert len(w) == 0, [str(warning) for warning in w]
with warnings.catch_warnings(record=True) as w:
AlsoDeprecated()
class UserClass(AlsoDeprecated):
pass
w = self._mywarnings(w)
assert len(w) == 2
assert "AlsoDeprecated" in str(w[0].message)
assert "foo.Bar" in str(w[0].message)
assert "AlsoDeprecated" in str(w[1].message)
assert "foo.Bar" in str(w[1].message)
def test_inspect_stack(self):
with (
mock.patch("inspect.stack", side_effect=IndexError),
warnings.catch_warnings(record=True) as w,
):
DeprecatedName = create_deprecated_class("DeprecatedName", NewName)
class SubClass(DeprecatedName):
pass
assert "Error detecting parent module" in str(w[0].message)
@mock.patch(
"scrapy.utils.deprecate.DEPRECATION_RULES",
[
("scrapy.contrib.pipeline.", "scrapy.pipelines."),
("scrapy.contrib.", "scrapy.extensions."),
],
)
class TestUpdateClassPath:
def test_old_path_gets_fixed(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath("scrapy.contrib.debug.Debug")
assert output == "scrapy.extensions.debug.Debug"
assert len(w) == 1
assert "scrapy.contrib.debug.Debug" in str(w[0].message)
assert "scrapy.extensions.debug.Debug" in str(w[0].message)
def test_sorted_replacement(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
output = update_classpath("scrapy.contrib.pipeline.Pipeline")
assert output == "scrapy.pipelines.Pipeline"
def test_unmatched_path_stays_the_same(self):
with warnings.catch_warnings(record=True) as w:
output = update_classpath("scrapy.unmatched.Path")
assert output == "scrapy.unmatched.Path"
assert len(w) == 0
def test_returns_nonstring(self):
for notastring in [None, True, [1, 2, 3], object()]:
assert update_classpath(notastring) == notastring
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_pqueues.py | tests/test_pqueues.py | import tempfile
import pytest
import queuelib
from scrapy.http.request import Request
from scrapy.pqueues import DownloaderAwarePriorityQueue, ScrapyPriorityQueue
from scrapy.spiders import Spider
from scrapy.squeues import FifoMemoryQueue
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.test import get_crawler
from tests.test_scheduler import MockDownloader, MockEngine
class TestPriorityQueue:
def setup_method(self):
self.crawler = get_crawler(Spider)
self.spider = self.crawler._create_spider("foo")
def test_queue_push_pop_one(self):
temp_dir = tempfile.mkdtemp()
queue = ScrapyPriorityQueue.from_crawler(
self.crawler, FifoMemoryQueue, temp_dir
)
assert queue.pop() is None
assert len(queue) == 0
req1 = Request("https://example.org/1", priority=1)
queue.push(req1)
assert len(queue) == 1
dequeued = queue.pop()
assert len(queue) == 0
assert dequeued.url == req1.url
assert dequeued.priority == req1.priority
assert not queue.close()
def test_no_peek_raises(self):
if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
pytest.skip("queuelib.queue.FifoMemoryQueue.peek is defined")
temp_dir = tempfile.mkdtemp()
queue = ScrapyPriorityQueue.from_crawler(
self.crawler, FifoMemoryQueue, temp_dir
)
queue.push(Request("https://example.org"))
with pytest.raises(
NotImplementedError,
match="The underlying queue class does not implement 'peek'",
):
queue.peek()
queue.close()
def test_peek(self):
if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
pytest.skip("queuelib.queue.FifoMemoryQueue.peek is undefined")
temp_dir = tempfile.mkdtemp()
queue = ScrapyPriorityQueue.from_crawler(
self.crawler, FifoMemoryQueue, temp_dir
)
assert len(queue) == 0
assert queue.peek() is None
req1 = Request("https://example.org/1")
req2 = Request("https://example.org/2")
req3 = Request("https://example.org/3")
queue.push(req1)
queue.push(req2)
queue.push(req3)
assert len(queue) == 3
assert queue.peek().url == req1.url
assert queue.pop().url == req1.url
assert len(queue) == 2
assert queue.peek().url == req2.url
assert queue.pop().url == req2.url
assert len(queue) == 1
assert queue.peek().url == req3.url
assert queue.pop().url == req3.url
assert not queue.close()
def test_queue_push_pop_priorities(self):
temp_dir = tempfile.mkdtemp()
queue = ScrapyPriorityQueue.from_crawler(
self.crawler, FifoMemoryQueue, temp_dir, [-1, -2, -3]
)
assert queue.pop() is None
assert len(queue) == 0
req1 = Request("https://example.org/1", priority=1)
req2 = Request("https://example.org/2", priority=2)
req3 = Request("https://example.org/3", priority=3)
queue.push(req1)
queue.push(req2)
queue.push(req3)
assert len(queue) == 3
dequeued = queue.pop()
assert len(queue) == 2
assert dequeued.url == req3.url
assert dequeued.priority == req3.priority
assert set(queue.close()) == {-1, -2}
class TestDownloaderAwarePriorityQueue:
def setup_method(self):
crawler = get_crawler(Spider)
crawler.engine = MockEngine(downloader=MockDownloader())
self.queue = DownloaderAwarePriorityQueue.from_crawler(
crawler=crawler,
downstream_queue_cls=FifoMemoryQueue,
key="foo/bar",
)
def teardown_method(self):
self.queue.close()
def test_push_pop(self):
assert len(self.queue) == 0
assert self.queue.pop() is None
req1 = Request("http://www.example.com/1")
req2 = Request("http://www.example.com/2")
req3 = Request("http://www.example.com/3")
self.queue.push(req1)
self.queue.push(req2)
self.queue.push(req3)
assert len(self.queue) == 3
assert self.queue.pop().url == req1.url
assert len(self.queue) == 2
assert self.queue.pop().url == req2.url
assert len(self.queue) == 1
assert self.queue.pop().url == req3.url
assert len(self.queue) == 0
assert self.queue.pop() is None
def test_no_peek_raises(self):
if hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
pytest.skip("queuelib.queue.FifoMemoryQueue.peek is defined")
self.queue.push(Request("https://example.org"))
with pytest.raises(
NotImplementedError,
match="The underlying queue class does not implement 'peek'",
):
self.queue.peek()
def test_peek(self):
if not hasattr(queuelib.queue.FifoMemoryQueue, "peek"):
pytest.skip("queuelib.queue.FifoMemoryQueue.peek is undefined")
assert len(self.queue) == 0
req1 = Request("https://example.org/1")
req2 = Request("https://example.org/2")
req3 = Request("https://example.org/3")
self.queue.push(req1)
self.queue.push(req2)
self.queue.push(req3)
assert len(self.queue) == 3
assert self.queue.peek().url == req1.url
assert self.queue.pop().url == req1.url
assert len(self.queue) == 2
assert self.queue.peek().url == req2.url
assert self.queue.pop().url == req2.url
assert len(self.queue) == 1
assert self.queue.peek().url == req3.url
assert self.queue.pop().url == req3.url
assert self.queue.peek() is None
@pytest.mark.parametrize(
("input_", "output"),
[
# By default, start requests are FIFO, other requests are LIFO.
([{}, {}], [2, 1]),
([{"start": True}, {"start": True}], [1, 2]),
# Priority matters.
([{"priority": 1}, {"start": True}], [1, 2]),
([{}, {"start": True, "priority": 1}], [2, 1]),
# For the same priority, start requests pop last.
([{}, {"start": True}], [1, 2]),
([{"start": True}, {}], [2, 1]),
],
)
def test_pop_order(input_, output):
def make_url(index):
return f"https://toscrape.com/{index}"
def make_request(index, data):
meta = {}
if data.get("start", False):
meta["is_start_request"] = True
return Request(
url=make_url(index),
priority=data.get("priority", 0),
meta=meta,
)
input_requests = [
make_request(index, data) for index, data in enumerate(input_, start=1)
]
expected_output_urls = [make_url(index) for index in output]
crawler = get_crawler(Spider)
settings = crawler.settings
queue = build_from_crawler(
ScrapyPriorityQueue,
crawler,
downstream_queue_cls=load_object(settings["SCHEDULER_MEMORY_QUEUE"]),
key="",
start_queue_cls=load_object(settings["SCHEDULER_START_MEMORY_QUEUE"]),
)
for request in input_requests:
queue.push(request)
actual_output_urls = []
while request := queue.pop():
actual_output_urls.append(request.url)
assert actual_output_urls == expected_output_urls
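# ---------------------------------------------------------------------
# Illustrative sketch (an assumption-level model, not the real
# ScrapyPriorityQueue internals): the pop order asserted above follows
# three rules -- higher priority first; within a priority, regular
# requests are LIFO while start requests are FIFO; at equal priority,
# start requests pop last.
from collections import defaultdict, deque
class _TinyPriorityQueue:
    def __init__(self):
        self._lifo = defaultdict(deque)  # regular requests, newest first
        self._fifo = defaultdict(deque)  # start requests, oldest first
    def push(self, request):
        slot = -request.priority  # smaller slot == higher priority
        if request.meta.get("is_start_request"):
            self._fifo[slot].append(request)
        else:
            self._lifo[slot].appendleft(request)
    def pop(self):
        slots = set(self._lifo) | set(self._fifo)
        if not slots:
            return None
        slot = min(slots)
        queue = self._lifo[slot] or self._fifo[slot]  # start requests pop last
        request = queue.popleft()
        for queues in (self._lifo, self._fifo):
            if not queues[slot]:
                del queues[slot]
        return request
_tq = _TinyPriorityQueue()
_tq.push(Request("https://toscrape.com/1"))
_tq.push(Request("https://toscrape.com/2", meta={"is_start_request": True}))
assert _tq.pop().url == "https://toscrape.com/1"  # same as ([{}, {"start": True}], [1, 2])
assert _tq.pop().url == "https://toscrape.com/2"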
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_utils_signal.py | tests/test_utils_signal.py | import asyncio
import pytest
from pydispatch import dispatcher
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.python.failure import Failure
from scrapy.utils.asyncio import call_later
from scrapy.utils.defer import deferred_from_coro
from scrapy.utils.signal import (
send_catch_log,
send_catch_log_async,
send_catch_log_deferred,
)
from scrapy.utils.test import get_from_asyncio_queue
class TestSendCatchLog:
# whether the function being tested returns exceptions or failures
returns_exceptions: bool = False
@inlineCallbacks
def test_send_catch_log(self):
test_signal = object()
handlers_called = set()
dispatcher.connect(self.error_handler, signal=test_signal)
dispatcher.connect(self.ok_handler, signal=test_signal)
with LogCapture() as log:
result = yield defer.maybeDeferred(
self._get_result,
test_signal,
arg="test",
handlers_called=handlers_called,
)
assert self.error_handler in handlers_called
assert self.ok_handler in handlers_called
assert len(log.records) == 1
record = log.records[0]
assert "error_handler" in record.getMessage()
assert record.levelname == "ERROR"
assert result[0][0] == self.error_handler # pylint: disable=comparison-with-callable
assert isinstance(
result[0][1], Exception if self.returns_exceptions else Failure
)
assert result[1] == (self.ok_handler, "OK")
dispatcher.disconnect(self.error_handler, signal=test_signal)
dispatcher.disconnect(self.ok_handler, signal=test_signal)
def _get_result(self, signal, *a, **kw):
return send_catch_log(signal, *a, **kw)
def error_handler(self, arg, handlers_called):
handlers_called.add(self.error_handler)
1 / 0
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
return "OK"
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestSendCatchLogDeferred(TestSendCatchLog):
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class TestSendCatchLogDeferred2(TestSendCatchLogDeferred):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
d = defer.Deferred()
call_later(0, d.callback, "OK")
return d
class TestSendCatchLogDeferredAsyncDef(TestSendCatchLogDeferred):
async def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
await defer.succeed(42)
return "OK"
@pytest.mark.only_asyncio
class TestSendCatchLogDeferredAsyncio(TestSendCatchLogDeferred):
async def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
await asyncio.sleep(0.2)
return await get_from_asyncio_queue("OK")
class TestSendCatchLogAsync(TestSendCatchLog):
returns_exceptions = True
def _get_result(self, signal, *a, **kw):
return deferred_from_coro(send_catch_log_async(signal, *a, **kw))
@pytest.mark.filterwarnings("ignore::scrapy.exceptions.ScrapyDeprecationWarning")
class TestSendCatchLogAsync2(TestSendCatchLogAsync):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
d = defer.Deferred()
call_later(0, d.callback, "OK")
return d
class TestSendCatchLogAsyncAsyncDef(TestSendCatchLogAsync):
async def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
await defer.succeed(42)
return "OK"
@pytest.mark.only_asyncio
class TestSendCatchLogAsyncAsyncio(TestSendCatchLogAsync):
async def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == "test"
await asyncio.sleep(0.2)
return await get_from_asyncio_queue("OK")
class TestSendCatchLog2:
def test_error_logged_if_deferred_not_supported(self):
def test_handler():
return defer.Deferred()
test_signal = object()
dispatcher.connect(test_handler, test_signal)
with LogCapture() as log:
send_catch_log(test_signal)
assert len(log.records) == 1
assert "Cannot return deferreds from signal handler" in str(log)
dispatcher.disconnect(test_handler, test_signal)
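# ---------------------------------------------------------------------
# Illustrative sketch (signal and handler names are local assumptions):
# a minimal synchronous round trip through send_catch_log, producing
# the same (receiver, result) pairs the tests above assert on.
_sketch_signal = object()
def _sketch_handler(arg):
    return arg.upper()
dispatcher.connect(_sketch_handler, signal=_sketch_signal)
assert send_catch_log(_sketch_signal, arg="ok") == [(_sketch_handler, "OK")]
dispatcher.disconnect(_sketch_handler, signal=_sketch_signal)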
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_pipeline_crawl.py | tests/test_pipeline_crawl.py | from __future__ import annotations
import shutil
from pathlib import Path
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any
import pytest
from testfixtures import LogCapture
from twisted.internet.defer import inlineCallbacks
from w3lib.url import add_or_replace_parameter
from scrapy import Spider, signals
from scrapy.utils.misc import load_object
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
from tests.spiders import SimpleSpider
if TYPE_CHECKING:
from scrapy.crawler import Crawler
class MediaDownloadSpider(SimpleSpider):
name = "mediadownload"
def _process_url(self, url):
return url
def parse(self, response):
self.logger.info(response.headers)
self.logger.info(response.text)
item = {
self.media_key: [],
self.media_urls_key: [
self._process_url(response.urljoin(href))
for href in response.xpath(
'//table[thead/tr/th="Filename"]/tbody//a/@href'
).getall()
],
}
yield item
class BrokenLinksMediaDownloadSpider(MediaDownloadSpider):
name = "brokenmedia"
def _process_url(self, url):
return url + ".foo"
class RedirectedMediaDownloadSpider(MediaDownloadSpider):
name = "redirectedmedia"
def _process_url(self, url):
return add_or_replace_parameter(
self.mockserver.url("/redirect-to"), "goto", url
)
class TestFileDownloadCrawl:
pipeline_class = "scrapy.pipelines.files.FilesPipeline"
store_setting_key = "FILES_STORE"
media_key = "files"
media_urls_key = "file_urls"
expected_checksums: set[str] | None = {
"5547178b89448faf0015a13f904c936e",
"c2281c83670e31d8aaab7cb642b824db",
"ed3f6538dc15d4d9179dae57319edc5f",
}
@classmethod
def setup_class(cls):
cls.mockserver = MockServer()
cls.mockserver.__enter__()
@classmethod
def teardown_class(cls):
cls.mockserver.__exit__(None, None, None)
def setup_method(self):
# prepare a directory for storing files
self.tmpmediastore = Path(mkdtemp())
self.settings = {
"ITEM_PIPELINES": {self.pipeline_class: 1},
self.store_setting_key: str(self.tmpmediastore),
}
self.items = []
def teardown_method(self):
shutil.rmtree(self.tmpmediastore)
self.items = []
def _on_item_scraped(self, item):
self.items.append(item)
def _create_crawler(
self, spider_class: type[Spider], settings: dict[str, Any] | None = None
) -> Crawler:
if settings is None:
settings = self.settings
crawler = get_crawler(spider_class, settings)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
return crawler
def _assert_files_downloaded(self, items, logs):
assert len(items) == 1
assert self.media_key in items[0]
# check that logs show the expected number of successful file downloads
file_dl_success = "File (downloaded): Downloaded file from"
assert logs.count(file_dl_success) == 3
# check that the images/files status is `downloaded`
for item in items:
for i in item[self.media_key]:
assert i["status"] == "downloaded"
# check that the images/files checksums are what we know they should be
if self.expected_checksums is not None:
checksums = {i["checksum"] for item in items for i in item[self.media_key]}
assert checksums == self.expected_checksums
        # check that the image files were actually written to the media store
for item in items:
for i in item[self.media_key]:
assert (self.tmpmediastore / i["path"]).exists()
def _assert_files_download_failure(self, crawler, items, code, logs):
# check that the item does NOT have the "images/files" field populated
assert len(items) == 1
assert self.media_key in items[0]
assert not items[0][self.media_key]
# check that there was 1 successful fetch and 3 other responses with non-200 code
assert crawler.stats.get_value("downloader/request_method_count/GET") == 4
assert crawler.stats.get_value("downloader/response_count") == 4
assert crawler.stats.get_value("downloader/response_status_count/200") == 1
assert crawler.stats.get_value(f"downloader/response_status_count/{code}") == 3
# check that logs do show the failure on the file downloads
file_dl_failure = f"File (code: {code}): Error downloading file from"
assert logs.count(file_dl_failure) == 3
# check that no files were written to the media store
assert not list(self.tmpmediastore.iterdir())
@inlineCallbacks
def test_download_media(self):
crawler = self._create_crawler(MediaDownloadSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/static/files/images/"),
media_key=self.media_key,
media_urls_key=self.media_urls_key,
)
self._assert_files_downloaded(self.items, str(log))
@inlineCallbacks
def test_download_media_wrong_urls(self):
crawler = self._create_crawler(BrokenLinksMediaDownloadSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/static/files/images/"),
media_key=self.media_key,
media_urls_key=self.media_urls_key,
)
self._assert_files_download_failure(crawler, self.items, 404, str(log))
@inlineCallbacks
def test_download_media_redirected_default_failure(self):
crawler = self._create_crawler(RedirectedMediaDownloadSpider)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/static/files/images/"),
media_key=self.media_key,
media_urls_key=self.media_urls_key,
mockserver=self.mockserver,
)
self._assert_files_download_failure(crawler, self.items, 302, str(log))
@inlineCallbacks
def test_download_media_redirected_allowed(self):
settings = {
**self.settings,
"MEDIA_ALLOW_REDIRECTS": True,
}
crawler = self._create_crawler(RedirectedMediaDownloadSpider, settings)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/static/files/images/"),
media_key=self.media_key,
media_urls_key=self.media_urls_key,
mockserver=self.mockserver,
)
self._assert_files_downloaded(self.items, str(log))
assert crawler.stats.get_value("downloader/response_status_count/302") == 3
@inlineCallbacks
def test_download_media_file_path_error(self):
cls = load_object(self.pipeline_class)
class ExceptionRaisingMediaPipeline(cls):
def file_path(self, request, response=None, info=None, *, item=None):
return 1 / 0
settings = {
**self.settings,
"ITEM_PIPELINES": {ExceptionRaisingMediaPipeline: 1},
}
crawler = self._create_crawler(MediaDownloadSpider, settings)
with LogCapture() as log:
yield crawler.crawl(
self.mockserver.url("/static/files/images/"),
media_key=self.media_key,
media_urls_key=self.media_urls_key,
mockserver=self.mockserver,
)
assert "ZeroDivisionError" in str(log)
pillow_available: bool
try:
from PIL import Image # noqa: F401
except ImportError:
pillow_available = False
else:
pillow_available = True
@pytest.mark.skipif(
not pillow_available,
reason="Missing Python Imaging Library, install https://pypi.org/pypi/Pillow",
)
class TestImageDownloadCrawl(TestFileDownloadCrawl):
pipeline_class = "scrapy.pipelines.images.ImagesPipeline"
store_setting_key = "IMAGES_STORE"
media_key = "images"
media_urls_key = "image_urls"
    # image checksums depend on the Pillow version used to re-encode
    # them, so they are not asserted here
expected_checksums = None
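# ---------------------------------------------------------------------
# Illustrative sketch (the store path is an assumption): the crawls
# above boil down to this two-setting contract, plus an item exposing
# the *_urls field.
_SKETCH_SETTINGS = {
    "ITEM_PIPELINES": {"scrapy.pipelines.files.FilesPipeline": 1},
    "FILES_STORE": "/tmp/scrapy-files",  # any writable directory
}
# The pipeline fills item["files"] with dicts carrying "url", "path",
# "checksum" and "status" keys, which is what the assertions above read.
_sketch_item = {"file_urls": ["https://example.com/a.pdf"], "files": []}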
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_downloadermiddleware_redirect.py | tests/test_downloadermiddleware_redirect.py | from itertools import chain, product
import pytest
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
from scrapy.downloadermiddlewares.redirect import (
MetaRefreshMiddleware,
RedirectMiddleware,
)
from scrapy.exceptions import IgnoreRequest
from scrapy.http import HtmlResponse, Request, Response
from scrapy.spiders import Spider
from scrapy.utils.misc import set_environ
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.test import get_crawler
class Base:
class Test:
def test_priority_adjust(self):
req = Request("http://a.com")
rsp = self.get_response(req, "http://a.com/redirected")
req2 = self.mw.process_response(req, rsp)
assert req2.priority > req.priority
def test_dont_redirect(self):
url = "http://www.example.com/301"
url2 = "http://www.example.com/redirected"
req = Request(url, meta={"dont_redirect": True})
rsp = self.get_response(req, url2)
r = self.mw.process_response(req, rsp)
assert isinstance(r, Response)
assert r is rsp
# Test that it redirects when dont_redirect is False
req = Request(url, meta={"dont_redirect": False})
rsp = self.get_response(req, url2)
r = self.mw.process_response(req, rsp)
assert isinstance(r, Request)
def test_post(self):
url = "http://www.example.com/302"
url2 = "http://www.example.com/redirected2"
req = Request(
url,
method="POST",
body="test",
headers={"Content-Type": "text/plain", "Content-length": "4"},
)
rsp = self.get_response(req, url2)
req2 = self.mw.process_response(req, rsp)
assert isinstance(req2, Request)
assert req2.url == url2
assert req2.method == "GET"
assert "Content-Type" not in req2.headers, (
"Content-Type header must not be present in redirected request"
)
assert "Content-Length" not in req2.headers, (
"Content-Length header must not be present in redirected request"
)
assert not req2.body, f"Redirected body must be empty, not '{req2.body}'"
def test_max_redirect_times(self):
self.mw.max_redirect_times = 1
req = Request("http://scrapytest.org/302")
rsp = self.get_response(req, "/redirected")
req = self.mw.process_response(req, rsp)
assert isinstance(req, Request)
assert "redirect_times" in req.meta
assert req.meta["redirect_times"] == 1
with pytest.raises(IgnoreRequest):
self.mw.process_response(req, rsp)
def test_ttl(self):
self.mw.max_redirect_times = 100
req = Request("http://scrapytest.org/302", meta={"redirect_ttl": 1})
rsp = self.get_response(req, "/a")
req = self.mw.process_response(req, rsp)
assert isinstance(req, Request)
with pytest.raises(IgnoreRequest):
self.mw.process_response(req, rsp)
def test_redirect_urls(self):
req1 = Request("http://scrapytest.org/first")
rsp1 = self.get_response(req1, "/redirected")
req2 = self.mw.process_response(req1, rsp1)
rsp2 = self.get_response(req1, "/redirected2")
req3 = self.mw.process_response(req2, rsp2)
assert req2.url == "http://scrapytest.org/redirected"
assert req2.meta["redirect_urls"] == ["http://scrapytest.org/first"]
assert req3.url == "http://scrapytest.org/redirected2"
assert req3.meta["redirect_urls"] == [
"http://scrapytest.org/first",
"http://scrapytest.org/redirected",
]
def test_redirect_reasons(self):
req1 = Request("http://scrapytest.org/first")
rsp1 = self.get_response(req1, "/redirected1")
req2 = self.mw.process_response(req1, rsp1)
rsp2 = self.get_response(req2, "/redirected2")
req3 = self.mw.process_response(req2, rsp2)
assert req2.meta["redirect_reasons"] == [self.reason]
assert req3.meta["redirect_reasons"] == [self.reason, self.reason]
def test_cross_origin_header_dropping(self):
safe_headers = {"A": "B"}
cookie_header = {"Cookie": "a=b"}
authorization_header = {"Authorization": "Bearer 123456"}
original_request = Request(
"https://example.com",
headers={**safe_headers, **cookie_header, **authorization_header},
)
# Redirects to the same origin (same scheme, same domain, same port)
# keep all headers.
internal_response = self.get_response(
original_request, "https://example.com/a"
)
internal_redirect_request = self.mw.process_response(
original_request, internal_response
)
assert isinstance(internal_redirect_request, Request)
assert original_request.headers == internal_redirect_request.headers
# Redirects to the same origin (same scheme, same domain, same port)
# keep all headers also when the scheme is http.
http_request = Request(
"http://example.com",
headers={**safe_headers, **cookie_header, **authorization_header},
)
http_response = self.get_response(http_request, "http://example.com/a")
http_redirect_request = self.mw.process_response(
http_request, http_response
)
assert isinstance(http_redirect_request, Request)
assert http_request.headers == http_redirect_request.headers
            # Making a default port explicit (e.g. :443 for https) does not
            # affect the outcome; it is still the same origin.
to_explicit_port_response = self.get_response(
original_request, "https://example.com:443/a"
)
to_explicit_port_redirect_request = self.mw.process_response(
original_request, to_explicit_port_response
)
assert isinstance(to_explicit_port_redirect_request, Request)
assert original_request.headers == to_explicit_port_redirect_request.headers
            # Likewise, making an explicit default port implicit does not
            # affect the outcome; it is still the same origin.
to_implicit_port_response = self.get_response(
original_request, "https://example.com/a"
)
to_implicit_port_redirect_request = self.mw.process_response(
original_request, to_implicit_port_response
)
assert isinstance(to_implicit_port_redirect_request, Request)
assert original_request.headers == to_implicit_port_redirect_request.headers
# A port change drops the Authorization header because the origin
# changes, but keeps the Cookie header because the domain remains the
# same.
different_port_response = self.get_response(
original_request, "https://example.com:8080/a"
)
different_port_redirect_request = self.mw.process_response(
original_request, different_port_response
)
assert isinstance(different_port_redirect_request, Request)
assert {
**safe_headers,
**cookie_header,
} == different_port_redirect_request.headers.to_unicode_dict()
# A domain change drops both the Authorization and the Cookie header.
external_response = self.get_response(
original_request, "https://example.org/a"
)
external_redirect_request = self.mw.process_response(
original_request, external_response
)
assert isinstance(external_redirect_request, Request)
assert safe_headers == external_redirect_request.headers.to_unicode_dict()
# A scheme upgrade (http → https) drops the Authorization header
# because the origin changes, but keeps the Cookie header because the
# domain remains the same.
upgrade_response = self.get_response(http_request, "https://example.com/a")
upgrade_redirect_request = self.mw.process_response(
http_request, upgrade_response
)
assert isinstance(upgrade_redirect_request, Request)
assert {
**safe_headers,
**cookie_header,
} == upgrade_redirect_request.headers.to_unicode_dict()
# A scheme downgrade (https → http) drops the Authorization header
# because the origin changes, and the Cookie header because its value
# cannot indicate whether the cookies were secure (HTTPS-only) or not.
#
# Note: If the Cookie header is set by the cookie management
# middleware, as recommended in the docs, the dropping of Cookie on
# scheme downgrade is not an issue, because the cookie management
# middleware will add again the Cookie header to the new request if
# appropriate.
downgrade_response = self.get_response(
original_request, "http://example.com/a"
)
downgrade_redirect_request = self.mw.process_response(
original_request, downgrade_response
)
assert isinstance(downgrade_redirect_request, Request)
assert safe_headers == downgrade_redirect_request.headers.to_unicode_dict()
def test_meta_proxy_http_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("http://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_http_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("http://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_https_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("https://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_https_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("https://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_http_to_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("http://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_meta_proxy_https_to_http(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
meta = {"proxy": "https://a:@a.example"}
request1 = Request("https://example.com", meta=meta)
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_http_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "http://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_http_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_https_absolute(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"https_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "https://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_https_relative(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"https_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("https://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "/a")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic YTo="
assert request2.meta["_auth_proxy"] == "https://a.example"
assert request2.meta["proxy"] == "https://a.example"
response2 = self.get_response(request2, "/a")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_proxied_http_to_proxied_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
"https_proxy": "https://b:@b.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert request2.headers["Proxy-Authorization"] == b"Basic Yjo="
assert request2.meta["_auth_proxy"] == "https://b.example"
assert request2.meta["proxy"] == "https://b.example"
response2 = self.get_response(request2, "http://example.com")
request3 = redirect_mw.process_response(request2, response2)
assert isinstance(request3, Request)
assert "Proxy-Authorization" not in request3.headers
assert "_auth_proxy" not in request3.meta
assert "proxy" not in request3.meta
proxy_mw.process_request(request3)
assert request3.headers["Proxy-Authorization"] == b"Basic YTo="
assert request3.meta["_auth_proxy"] == "https://a.example"
assert request3.meta["proxy"] == "https://a.example"
def test_system_proxy_proxied_http_to_unproxied_https(self):
crawler = get_crawler()
redirect_mw = self.mwcls.from_crawler(crawler)
env = {
"http_proxy": "https://a:@a.example",
}
with set_environ(**env):
proxy_mw = HttpProxyMiddleware.from_crawler(crawler)
request1 = Request("http://example.com")
proxy_mw.process_request(request1)
assert request1.headers["Proxy-Authorization"] == b"Basic YTo="
assert request1.meta["_auth_proxy"] == "https://a.example"
assert request1.meta["proxy"] == "https://a.example"
response1 = self.get_response(request1, "https://example.com")
request2 = redirect_mw.process_response(request1, response1)
assert isinstance(request2, Request)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
proxy_mw.process_request(request2)
assert "Proxy-Authorization" not in request2.headers
assert "_auth_proxy" not in request2.meta
assert "proxy" not in request2.meta
response2 = self.get_response(request2, "http://example.com")
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | true |
scrapy/scrapy | https://github.com/scrapy/scrapy/blob/d1bd8eb49f7aba9289e4ff692006cead8bcd9080/tests/test_contracts.py | tests/test_contracts.py | from unittest import TextTestResult
import pytest
from twisted.internet.defer import inlineCallbacks
from twisted.python import failure
from scrapy import FormRequest
from scrapy.contracts import Contract, ContractsManager
from scrapy.contracts.default import (
CallbackKeywordArgumentsContract,
MetadataContract,
ReturnsContract,
ScrapesContract,
UrlContract,
)
from scrapy.http import Request
from scrapy.item import Field, Item
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from tests.mockserver.http import MockServer
class DemoItem(Item):
name = Field()
url = Field()
class ResponseMock:
url = "http://scrapy.org"
class ResponseMetaMock(ResponseMock):
meta = None
class CustomSuccessContract(Contract):
name = "custom_success_contract"
def adjust_request_args(self, args):
args["url"] = "http://scrapy.org"
return args
class CustomFailContract(Contract):
name = "custom_fail_contract"
def adjust_request_args(self, args):
raise TypeError("Error in adjust_request_args")
class CustomFormContract(Contract):
name = "custom_form"
request_cls = FormRequest
def adjust_request_args(self, args):
args["formdata"] = {"name": "scrapy"}
return args
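# Spider whose callback docstrings carry the contract annotations
# (@url, @cb_kwargs, @meta, @returns, @scrapes) parsed by ContractsManager.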
class DemoSpider(Spider):
name = "demo_spider"
def returns_request(self, response):
"""method which returns request
@url http://scrapy.org
@returns requests 1
"""
return Request("http://scrapy.org", callback=self.returns_item)
async def returns_request_async(self, response):
"""async method which returns request
@url http://scrapy.org
@returns requests 1
"""
return Request("http://scrapy.org", callback=self.returns_item)
def returns_item(self, response):
"""method which returns item
@url http://scrapy.org
@returns items 1 1
"""
return DemoItem(url=response.url)
def returns_request_cb_kwargs(self, response, url):
"""method which returns request
@url https://example.org
@cb_kwargs {"url": "http://scrapy.org"}
@returns requests 1
"""
return Request(url, callback=self.returns_item_cb_kwargs)
def returns_item_cb_kwargs(self, response, name):
"""method which returns item
@url http://scrapy.org
@cb_kwargs {"name": "Scrapy"}
@returns items 1 1
"""
return DemoItem(name=name, url=response.url)
def returns_item_cb_kwargs_error_unexpected_keyword(self, response):
"""method which returns item
@url http://scrapy.org
@cb_kwargs {"arg": "value"}
@returns items 1 1
"""
return DemoItem(url=response.url)
def returns_item_cb_kwargs_error_missing_argument(self, response, arg):
"""method which returns item
@url http://scrapy.org
@returns items 1 1
"""
return DemoItem(url=response.url)
def returns_dict_item(self, response):
"""method which returns item
@url http://scrapy.org
@returns items 1 1
"""
return {"url": response.url}
def returns_fail(self, response):
"""method which returns item
@url http://scrapy.org
@returns items 0 0
"""
return DemoItem(url=response.url)
def returns_dict_fail(self, response):
"""method which returns item
@url http://scrapy.org
@returns items 0 0
"""
return {"url": response.url}
def scrapes_item_ok(self, response):
"""returns item with name and url
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return DemoItem(name="test", url=response.url)
def scrapes_dict_item_ok(self, response):
"""returns item with name and url
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return {"name": "test", "url": response.url}
def scrapes_item_fail(self, response):
"""returns item with no name
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return DemoItem(url=response.url)
def scrapes_dict_item_fail(self, response):
"""returns item with no name
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return {"url": response.url}
def scrapes_multiple_missing_fields(self, response):
"""returns item with no name
@url http://scrapy.org
@returns items 1 1
@scrapes name url
"""
return {}
def parse_no_url(self, response):
"""method with no url
@returns items 1 1
"""
def custom_form(self, response):
"""
@url http://scrapy.org
@custom_form
"""
def invalid_regex(self, response):
"""method with invalid regex
@ Scrapy is awesome
"""
def invalid_regex_with_valid_contract(self, response):
"""method with invalid regex
@ scrapy is awesome
@url http://scrapy.org
"""
def returns_request_meta(self, response):
"""method which returns request
@url https://example.org
@meta {"cookiejar": "session1"}
@returns requests 1
"""
return Request(
"https://example.org", meta=response.meta, callback=self.returns_item_meta
)
def returns_item_meta(self, response):
"""method which returns item
@url http://scrapy.org
@meta {"key": "example"}
@returns items 1 1
"""
return DemoItem(name="example", url=response.url)
def returns_error_missing_meta(self, response):
"""method which depends of metadata be defined
@url http://scrapy.org
@returns items 1
"""
key = response.meta["key"]
yield {key: "value"}
class CustomContractSuccessSpider(Spider):
name = "custom_contract_success_spider"
def parse(self, response):
"""
@custom_success_contract
"""
class CustomContractFailSpider(Spider):
name = "custom_contract_fail_spider"
def parse(self, response):
"""
@custom_fail_contract
"""
class InheritsDemoSpider(DemoSpider):
name = "inherits_demo_spider"
class TestContractsManager:
contracts = [
UrlContract,
CallbackKeywordArgumentsContract,
MetadataContract,
ReturnsContract,
ScrapesContract,
CustomFormContract,
CustomSuccessContract,
CustomFailContract,
]
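# A fresh manager and result collector for every test method.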
def setup_method(self):
self.conman = ContractsManager(self.contracts)
self.results = TextTestResult(stream=None, descriptions=False, verbosity=0)
def should_succeed(self):
assert not self.results.failures
assert not self.results.errors
def should_fail(self):
assert self.results.failures
assert not self.results.errors
def should_error(self):
assert self.results.errors
def test_contracts(self):
spider = DemoSpider()
# extract contracts correctly
contracts = self.conman.extract_contracts(spider.returns_request)
assert len(contracts) == 2
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, ReturnsContract]
)
# returns request for valid method
request = self.conman.from_method(spider.returns_request, self.results)
assert request is not None
# no request for missing url
request = self.conman.from_method(spider.parse_no_url, self.results)
assert request is None
def test_cb_kwargs(self):
spider = DemoSpider()
response = ResponseMock()
# extract contracts correctly
contracts = self.conman.extract_contracts(spider.returns_request_cb_kwargs)
assert len(contracts) == 3
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, CallbackKeywordArgumentsContract, ReturnsContract]
)
contracts = self.conman.extract_contracts(spider.returns_item_cb_kwargs)
assert len(contracts) == 3
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, CallbackKeywordArgumentsContract, ReturnsContract]
)
contracts = self.conman.extract_contracts(
spider.returns_item_cb_kwargs_error_unexpected_keyword
)
assert len(contracts) == 3
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, CallbackKeywordArgumentsContract, ReturnsContract]
)
contracts = self.conman.extract_contracts(
spider.returns_item_cb_kwargs_error_missing_argument
)
assert len(contracts) == 2
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, ReturnsContract]
)
# returns_request
request = self.conman.from_method(
spider.returns_request_cb_kwargs, self.results
)
request.callback(response, **request.cb_kwargs)
self.should_succeed()
# returns_item
request = self.conman.from_method(spider.returns_item_cb_kwargs, self.results)
request.callback(response, **request.cb_kwargs)
self.should_succeed()
# returns_item (error, callback doesn't take keyword arguments)
request = self.conman.from_method(
spider.returns_item_cb_kwargs_error_unexpected_keyword, self.results
)
request.callback(response, **request.cb_kwargs)
self.should_error()
# returns_item (error, contract doesn't provide keyword arguments)
request = self.conman.from_method(
spider.returns_item_cb_kwargs_error_missing_argument, self.results
)
request.callback(response, **request.cb_kwargs)
self.should_error()
def test_meta(self):
spider = DemoSpider()
# extract contracts correctly
contracts = self.conman.extract_contracts(spider.returns_request_meta)
assert len(contracts) == 3
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, MetadataContract, ReturnsContract]
)
contracts = self.conman.extract_contracts(spider.returns_item_meta)
assert len(contracts) == 3
assert frozenset(type(x) for x in contracts) == frozenset(
[UrlContract, MetadataContract, ReturnsContract]
)
response = ResponseMetaMock()
# returns_request
request = self.conman.from_method(spider.returns_request_meta, self.results)
assert request.meta["cookiejar"] == "session1"
response.meta = request.meta
request.callback(response)
assert response.meta["cookiejar"] == "session1"
self.should_succeed()
response = ResponseMetaMock()
# returns_item
request = self.conman.from_method(spider.returns_item_meta, self.results)
assert request.meta["key"] == "example"
response.meta = request.meta
request.callback(response)
assert response.meta["key"] == "example"
self.should_succeed()
response = ResponseMetaMock()
request = self.conman.from_method(
spider.returns_error_missing_meta, self.results
)
request.callback(response)
self.should_error()
def test_returns(self):
spider = DemoSpider()
response = ResponseMock()
# returns_item
request = self.conman.from_method(spider.returns_item, self.results)
request.callback(response)
self.should_succeed()
# returns_dict_item
request = self.conman.from_method(spider.returns_dict_item, self.results)
request.callback(response)
self.should_succeed()
# returns_request
request = self.conman.from_method(spider.returns_request, self.results)
request.callback(response)
self.should_succeed()
# returns_fail
request = self.conman.from_method(spider.returns_fail, self.results)
request.callback(response)
self.should_fail()
# returns_dict_fail
request = self.conman.from_method(spider.returns_dict_fail, self.results)
request.callback(response)
self.should_fail()
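# Coroutine callbacks are not supported by contracts: invoking one records an
# error rather than running the @returns checks.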
def test_returns_async(self):
spider = DemoSpider()
response = ResponseMock()
request = self.conman.from_method(spider.returns_request_async, self.results)
request.callback(response)
self.should_error()
def test_scrapes(self):
spider = DemoSpider()
response = ResponseMock()
# scrapes_item_ok
request = self.conman.from_method(spider.scrapes_item_ok, self.results)
request.callback(response)
self.should_succeed()
# scrapes_dict_item_ok
request = self.conman.from_method(spider.scrapes_dict_item_ok, self.results)
request.callback(response)
self.should_succeed()
# scrapes_item_fail
request = self.conman.from_method(spider.scrapes_item_fail, self.results)
request.callback(response)
self.should_fail()
# scrapes_dict_item_fail
request = self.conman.from_method(spider.scrapes_dict_item_fail, self.results)
request.callback(response)
self.should_fail()
# scrapes_multiple_missing_fields
request = self.conman.from_method(
spider.scrapes_multiple_missing_fields, self.results
)
request.callback(response)
self.should_fail()
message = "ContractFail: Missing fields: name, url"
assert message in self.results.failures[-1][-1]
def test_regex(self):
spider = DemoSpider()
response = ResponseMock()
# invalid regex
request = self.conman.from_method(spider.invalid_regex, self.results)
self.should_succeed()
# invalid regex with valid contract
request = self.conman.from_method(
spider.invalid_regex_with_valid_contract, self.results
)
self.should_succeed()
request.callback(response)
def test_custom_contracts(self):
self.conman.from_spider(CustomContractSuccessSpider(), self.results)
self.should_succeed()
self.conman.from_spider(CustomContractFailSpider(), self.results)
self.should_error()
def test_errback(self):
spider = DemoSpider()
response = ResponseMock()
try:
raise HttpError(response, "Ignoring non-200 response")
except HttpError:
failure_mock = failure.Failure()
request = self.conman.from_method(spider.returns_request, self.results)
request.errback(failure_mock)
assert not self.results.failures
assert self.results.errors
@inlineCallbacks
def test_same_url(self):
class TestSameUrlSpider(Spider):
name = "test_same_url"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.visited = 0
async def start(self_): # pylint: disable=no-self-argument
for item_or_request in self.conman.from_spider(self_, self.results):
yield item_or_request
def parse_first(self, response):
self.visited += 1
return DemoItem()
def parse_second(self, response):
self.visited += 1
return DemoItem()
with MockServer() as mockserver:
contract_doc = f"@url {mockserver.url('/status?n=200')}"
TestSameUrlSpider.parse_first.__doc__ = contract_doc
TestSameUrlSpider.parse_second.__doc__ = contract_doc
crawler = get_crawler(TestSameUrlSpider)
yield crawler.crawl()
assert crawler.spider.visited == 2
def test_form_contract(self):
spider = DemoSpider()
request = self.conman.from_method(spider.custom_form, self.results)
assert request.method == "POST"
assert isinstance(request, FormRequest)
def test_inherited_contracts(self):
spider = InheritsDemoSpider()
requests = self.conman.from_spider(spider, self.results)
assert requests
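# pre_process/post_process hooks wrap the callback; KeyboardInterrupt must
# propagate out instead of being swallowed as a contract failure or error.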
class CustomFailContractPreProcess(Contract):
name = "test_contract"
def pre_process(self, response):
raise KeyboardInterrupt("Pre-process exception")
class CustomFailContractPostProcess(Contract):
name = "test_contract"
def post_process(self, response):
raise KeyboardInterrupt("Post-process exception")
class TestCustomContractPrePostProcess:
def setup_method(self):
self.results = TextTestResult(stream=None, descriptions=False, verbosity=0)
def test_pre_hook_keyboard_interrupt(self):
spider = DemoSpider()
response = ResponseMock()
contract = CustomFailContractPreProcess(spider.returns_request)
conman = ContractsManager([contract])
request = conman.from_method(spider.returns_request, self.results)
contract.add_pre_hook(request, self.results)
with pytest.raises(KeyboardInterrupt, match="Pre-process exception"):
request.callback(response, **request.cb_kwargs)
assert not self.results.failures
assert not self.results.errors
def test_post_hook_keyboard_interrupt(self):
spider = DemoSpider()
response = ResponseMock()
contract = CustomFailContractPostProcess(spider.returns_request)
conman = ContractsManager([contract])
request = conman.from_method(spider.returns_request, self.results)
contract.add_post_hook(request, self.results)
with pytest.raises(KeyboardInterrupt, match="Post-process exception"):
request.callback(response, **request.cb_kwargs)
assert not self.results.failures
assert not self.results.errors
| python | BSD-3-Clause | d1bd8eb49f7aba9289e4ff692006cead8bcd9080 | 2026-01-04T14:38:41.023839Z | false |