repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/test_commands.py | test/mitmproxy/proxy/test_commands.py | from dataclasses import dataclass
import pytest
from mitmproxy import connection
from mitmproxy.hooks import all_hooks
from mitmproxy.proxy import commands
@pytest.fixture
def tconn() -> connection.Server:
return connection.Server(address=None)
def test_dataclasses(tconn):
assert repr(commands.RequestWakeup(58))
assert repr(commands.SendData(tconn, b"foo"))
assert repr(commands.OpenConnection(tconn))
assert repr(commands.CloseConnection(tconn))
assert repr(commands.CloseTcpConnection(tconn, half_close=True))
assert repr(commands.Log("hello"))
def test_start_hook():
with pytest.raises(TypeError):
commands.StartHook()
@dataclass
class TestHook(commands.StartHook):
data: bytes
f = TestHook(b"foo")
assert f.args() == [b"foo"]
assert TestHook in all_hooks.values()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/test_events.py | test/mitmproxy/proxy/test_events.py | from unittest.mock import Mock
import pytest
from mitmproxy import connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
@pytest.fixture
def tconn() -> connection.Server:
return connection.Server(address=None)
def test_dataclasses(tconn):
assert repr(events.Start())
assert repr(events.DataReceived(tconn, b"foo"))
assert repr(events.ConnectionClosed(tconn))
def test_command_completed():
with pytest.raises(TypeError):
events.CommandCompleted()
assert repr(events.HookCompleted(Mock(), None))
class FooCommand(commands.Command):
pass
with pytest.warns(RuntimeWarning, match="properly annotated"):
class FooCompleted(events.CommandCompleted):
pass
class FooCompleted1(events.CommandCompleted):
command: FooCommand
with pytest.warns(RuntimeWarning, match="conflicting subclasses"):
class FooCompleted2(events.CommandCompleted):
command: FooCommand
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_dns.py | test/mitmproxy/proxy/layers/test_dns.py | import struct
import time
import pytest
from hypothesis import given
from hypothesis import HealthCheck
from hypothesis import settings
from hypothesis import strategies as st
from ..tutils import Placeholder
from ..tutils import Playbook
from ..tutils import reply
from mitmproxy.dns import DNSFlow
from mitmproxy.net.dns import response_codes
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import Log
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import dns
from mitmproxy.test.tflow import tdnsreq
from mitmproxy.test.tflow import tdnsresp
from mitmproxy.test.tflow import terr
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(st.binary())
def test_fuzz_unpack_tcp_message(tctx, data):
layer = dns.DNSLayer(tctx)
try:
layer.unpack_message(data, True)
except struct.error:
pass
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(st.binary())
def test_fuzz_unpack_udp_message(tctx, data):
tctx.client.transport_protocol = "udp"
tctx.server.transport_protocol = "udp"
layer = dns.DNSLayer(tctx)
try:
layer.unpack_message(data, True)
except struct.error:
pass
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_invalid_and_dummy_end(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
data = b"Not a DNS packet"
if tctx.client.transport_protocol == "tcp":
data = struct.pack("!H", len(data)) + data
assert (
Playbook(dns.DNSLayer(tctx))
>> DataReceived(tctx.client, data)
<< Log(
"Client(client:1234, state=open) sent an invalid message: question #0: unpack encountered a label of length 99"
)
<< CloseConnection(tctx.client)
>> ConnectionClosed(tctx.client)
)
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_regular(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
f = Placeholder(DNSFlow)
req = tdnsreq()
resp = tdnsresp()
def resolve(flow: DNSFlow):
nonlocal req, resp
assert flow.request
req.timestamp = flow.request.timestamp
assert flow.request == req
resp.timestamp = time.time()
flow.response = resp
assert (
Playbook(dns.DNSLayer(tctx))
>> DataReceived(tctx.client, dns.pack_message(req, transport_protocol))
<< dns.DnsRequestHook(f)
>> reply(side_effect=resolve)
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, dns.pack_message(resp, transport_protocol))
>> ConnectionClosed(tctx.client)
<< None
)
assert f().request == req
assert f().response == resp
assert not f().live
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_regular_mode_no_hook(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = None
req = tdnsreq()
def no_resolve(flow: DNSFlow):
nonlocal req
assert flow.request
req.timestamp = flow.request.timestamp
assert flow.request == req
assert (
Playbook(layer)
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply(side_effect=no_resolve)
<< dns.DnsErrorHook(f)
>> reply()
<< SendData(
tctx.client,
dns.pack_message(
req.fail(response_codes.SERVFAIL), tctx.client.transport_protocol
),
)
>> ConnectionClosed(tctx.client)
<< None
)
assert f().request == req
assert not f().response
assert not f().live
assert (
f().error.msg == "No hook has set a response and there is no upstream server."
)
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_reverse_premature_close(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
assert (
Playbook(layer)
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, dns.pack_message(req, tctx.server.transport_protocol))
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
assert f().request
assert not f().response
assert not f().live
req.timestamp = f().request.timestamp
assert f().request == req
def test_regular_hook_err(tctx):
f = Placeholder(DNSFlow)
req = tdnsreq()
def err(flow: DNSFlow):
flow.error = terr()
assert (
Playbook(dns.DNSLayer(tctx))
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply(side_effect=err)
<< dns.DnsErrorHook(f)
>> reply()
<< SendData(
tctx.client,
dns.pack_message(
req.fail(response_codes.SERVFAIL), tctx.client.transport_protocol
),
)
>> ConnectionClosed(tctx.client)
<< None
)
assert f().error
assert not f().live
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_reverse(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
resp = tdnsresp()
assert (
Playbook(layer)
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, dns.pack_message(req, tctx.server.transport_protocol))
>> DataReceived(
tctx.server, dns.pack_message(resp, tctx.server.transport_protocol)
)
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, dns.pack_message(resp, tctx.client.transport_protocol))
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
assert f().request
assert f().response
assert not f().live
req.timestamp = f().request.timestamp
resp.timestamp = f().response.timestamp
assert f().request == req and f().response == resp
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_reverse_fail_connection(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
assert (
Playbook(layer)
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply("UDP no likey today.")
<< dns.DnsErrorHook(f)
>> reply()
<< SendData(
tctx.client,
dns.pack_message(
req.fail(response_codes.SERVFAIL), tctx.client.transport_protocol
),
)
<< None
)
assert f().request
assert not f().response
assert f().error.msg == "UDP no likey today."
req.timestamp = f().request.timestamp
assert f().request == req
@pytest.mark.parametrize("transport_protocol", ["tcp", "udp"])
def test_reverse_with_query_resend(tctx, transport_protocol):
tctx.client.transport_protocol = transport_protocol
tctx.server.transport_protocol = transport_protocol
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
req2 = tdnsreq()
req2.reserved = 4
resp = tdnsresp()
assert (
Playbook(layer)
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, dns.pack_message(req, tctx.server.transport_protocol))
>> DataReceived(
tctx.client, dns.pack_message(req2, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply()
<< SendData(tctx.server, dns.pack_message(req2, tctx.server.transport_protocol))
>> DataReceived(
tctx.server, dns.pack_message(resp, tctx.server.transport_protocol)
)
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, dns.pack_message(resp, tctx.client.transport_protocol))
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
assert f().request
assert f().response
assert not f().live
req2.timestamp = f().request.timestamp
resp.timestamp = f().response.timestamp
assert f().request == req2
assert f().response == resp
def test_tcp_message_over_multiple_events(tctx):
tctx.client.transport_protocol = "tcp"
tctx.server.transport_protocol = "tcp"
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
f = Placeholder(DNSFlow)
req = tdnsreq()
resp = tdnsresp()
resp_bytes = dns.pack_message(resp, tctx.client.transport_protocol)
split = len(resp_bytes) // 2
assert (
Playbook(layer)
>> DataReceived(
tctx.client, dns.pack_message(req, tctx.client.transport_protocol)
)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, dns.pack_message(req, tctx.server.transport_protocol))
>> DataReceived(tctx.server, resp_bytes[:split])
>> DataReceived(tctx.server, resp_bytes[split:])
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, dns.pack_message(resp, tctx.client.transport_protocol))
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
def test_query_pipelining_same_event(tctx):
tctx.client.transport_protocol = "tcp"
tctx.server.transport_protocol = "tcp"
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
f1 = Placeholder(DNSFlow)
f2 = Placeholder(DNSFlow)
req1 = tdnsreq(id=1)
req2 = tdnsreq(id=2)
resp1 = tdnsresp(id=1)
resp2 = tdnsresp(id=2)
req_bytes = dns.pack_message(
req1, tctx.client.transport_protocol
) + dns.pack_message(req2, tctx.client.transport_protocol)
assert (
Playbook(layer)
>> DataReceived(tctx.client, req_bytes)
<< dns.DnsRequestHook(f1)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, dns.pack_message(req1, tctx.server.transport_protocol))
<< dns.DnsRequestHook(f2)
>> reply()
<< SendData(tctx.server, dns.pack_message(req2, tctx.server.transport_protocol))
>> DataReceived(
tctx.server, dns.pack_message(resp1, tctx.server.transport_protocol)
)
<< dns.DnsResponseHook(f1)
>> reply()
<< SendData(
tctx.client, dns.pack_message(resp1, tctx.server.transport_protocol)
)
>> DataReceived(
tctx.server, dns.pack_message(resp2, tctx.server.transport_protocol)
)
<< dns.DnsResponseHook(f2)
>> reply()
<< SendData(
tctx.client, dns.pack_message(resp2, tctx.server.transport_protocol)
)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
def test_query_pipelining_multiple_events(tctx):
tctx.client.transport_protocol = "tcp"
tctx.server.transport_protocol = "tcp"
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
f1 = Placeholder(DNSFlow)
f2 = Placeholder(DNSFlow)
req1 = tdnsreq(id=1)
req2 = tdnsreq(id=2)
resp1 = tdnsresp(id=1)
resp2 = tdnsresp(id=2)
req_bytes = dns.pack_message(
req1, tctx.client.transport_protocol
) + dns.pack_message(req2, tctx.client.transport_protocol)
split = len(req_bytes) * 3 // 4
assert (
Playbook(layer)
>> DataReceived(tctx.client, req_bytes[:split])
<< dns.DnsRequestHook(f1)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, dns.pack_message(req1, tctx.server.transport_protocol))
>> DataReceived(
tctx.server, dns.pack_message(resp1, tctx.server.transport_protocol)
)
<< dns.DnsResponseHook(f1)
>> reply()
<< SendData(
tctx.client, dns.pack_message(resp1, tctx.server.transport_protocol)
)
>> DataReceived(tctx.client, req_bytes[split:])
<< dns.DnsRequestHook(f2)
>> reply()
<< SendData(tctx.server, dns.pack_message(req2, tctx.server.transport_protocol))
>> DataReceived(
tctx.server, dns.pack_message(resp2, tctx.server.transport_protocol)
)
<< dns.DnsResponseHook(f2)
>> reply()
<< SendData(
tctx.client, dns.pack_message(resp2, tctx.server.transport_protocol)
)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
def test_invalid_tcp_message_length(tctx):
tctx.client.transport_protocol = "tcp"
tctx.server.transport_protocol = "tcp"
assert (
Playbook(dns.DNSLayer(tctx))
>> DataReceived(tctx.client, b"\x00\x00")
<< Log(
"Client(client:1234, state=open) sent an invalid message: Message length field cannot be zero"
)
<< CloseConnection(tctx.client)
>> ConnectionClosed(tctx.client)
)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_udp.py | test/mitmproxy/proxy/layers/test_udp.py | import pytest
from ..tutils import Placeholder
from ..tutils import Playbook
from ..tutils import reply
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import udp
from mitmproxy.proxy.layers.udp import UdpMessageInjected
from mitmproxy.udp import UDPFlow
from mitmproxy.udp import UDPMessage
def test_open_connection(tctx):
"""
If there is no server connection yet, establish one,
because the server may send data first.
"""
assert Playbook(udp.UDPLayer(tctx, True)) << OpenConnection(tctx.server)
tctx.server.timestamp_start = 1624544785
assert Playbook(udp.UDPLayer(tctx, True)) << None
def test_open_connection_err(tctx):
f = Placeholder(UDPFlow)
assert (
Playbook(udp.UDPLayer(tctx))
<< udp.UdpStartHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply("Connect call failed")
<< udp.UdpErrorHook(f)
>> reply()
<< CloseConnection(tctx.client)
)
def test_simple(tctx):
"""open connection, receive data, send it to peer"""
f = Placeholder(UDPFlow)
assert (
Playbook(udp.UDPLayer(tctx))
<< udp.UdpStartHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"hello!")
<< udp.UdpMessageHook(f)
>> reply()
<< SendData(tctx.server, b"hello!")
>> DataReceived(tctx.server, b"hi")
<< udp.UdpMessageHook(f)
>> reply()
<< SendData(tctx.client, b"hi")
>> ConnectionClosed(tctx.server)
<< CloseConnection(tctx.client)
<< udp.UdpEndHook(f)
>> reply()
>> DataReceived(tctx.server, b"ignored")
<< None
)
assert len(f().messages) == 2
def test_receive_data_before_server_connected(tctx):
"""
assert that data received before a server connection is established
will still be forwarded.
"""
assert (
Playbook(udp.UDPLayer(tctx), hooks=False)
<< OpenConnection(tctx.server)
>> DataReceived(tctx.client, b"hello!")
>> reply(None, to=-2)
<< SendData(tctx.server, b"hello!")
)
@pytest.mark.parametrize("ignore", [True, False])
def test_ignore(tctx, ignore):
"""
no flow hooks when we set ignore.
"""
def no_flow_hooks():
assert (
Playbook(udp.UDPLayer(tctx, ignore=ignore), hooks=True)
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"hello!")
<< SendData(tctx.server, b"hello!")
)
if ignore:
no_flow_hooks()
else:
with pytest.raises(AssertionError):
no_flow_hooks()
def test_inject(tctx):
"""inject data into an open connection."""
f = Placeholder(UDPFlow)
assert (
Playbook(udp.UDPLayer(tctx))
<< udp.UdpStartHook(f)
>> UdpMessageInjected(f, UDPMessage(True, b"hello!"))
>> reply(to=-2)
<< OpenConnection(tctx.server)
>> reply(None)
<< udp.UdpMessageHook(f)
>> reply()
<< SendData(tctx.server, b"hello!")
# and the other way...
>> UdpMessageInjected(
f, UDPMessage(False, b"I have already done the greeting for you.")
)
<< udp.UdpMessageHook(f)
>> reply()
<< SendData(tctx.client, b"I have already done the greeting for you.")
<< None
)
assert len(f().messages) == 2
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_tls_fuzz.py | test/mitmproxy/proxy/layers/test_tls_fuzz.py | from hypothesis import example
from hypothesis import given
from hypothesis.strategies import binary
from hypothesis.strategies import integers
from mitmproxy.proxy.layers.tls import parse_client_hello
from mitmproxy.tls import ClientHello
client_hello_with_extensions = bytes.fromhex(
"16030300bb" # record layer
"010000b7" # handshake layer
"03033b70638d2523e1cba15f8364868295305e9c52aceabda4b5147210abc783e6e1000022c02bc02fc02cc030"
"cca9cca8cc14cc13c009c013c00ac014009c009d002f0035000a0100006cff0100010000000010000e00000b65"
"78616d706c652e636f6d0017000000230000000d00120010060106030501050304010403020102030005000501"
"00000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a00080006001d00"
"170018"
)
@given(i=integers(0, len(client_hello_with_extensions)), data=binary())
@example(i=183, data=b"\x00\x00\x00\x00\x00\x00\x00\x00\x00")
def test_fuzz_parse_client_hello(i, data):
try:
ch = parse_client_hello(client_hello_with_extensions[:i] + data)
except ValueError:
pass
else:
assert ch is None or isinstance(ch, ClientHello)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_tcp.py | test/mitmproxy/proxy/layers/test_tcp.py | import pytest
from ..tutils import Placeholder
from ..tutils import Playbook
from ..tutils import reply
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import CloseTcpConnection
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import tcp
from mitmproxy.proxy.layers.tcp import TcpMessageInjected
from mitmproxy.tcp import TCPFlow
from mitmproxy.tcp import TCPMessage
def test_open_connection(tctx):
"""
If there is no server connection yet, establish one,
because the server may send data first.
"""
assert Playbook(tcp.TCPLayer(tctx, True)) << OpenConnection(tctx.server)
tctx.server.timestamp_start = 1624544785
assert Playbook(tcp.TCPLayer(tctx, True)) << None
def test_open_connection_err(tctx):
f = Placeholder(TCPFlow)
assert (
Playbook(tcp.TCPLayer(tctx))
<< tcp.TcpStartHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply("Connect call failed")
<< tcp.TcpErrorHook(f)
>> reply()
<< CloseConnection(tctx.client)
)
def test_simple(tctx):
"""open connection, receive data, send it to peer"""
f = Placeholder(TCPFlow)
assert (
Playbook(tcp.TCPLayer(tctx))
<< tcp.TcpStartHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"hello!")
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.server, b"hello!")
>> DataReceived(tctx.server, b"hi")
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.client, b"hi")
>> ConnectionClosed(tctx.server)
<< CloseTcpConnection(tctx.client, half_close=True)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< tcp.TcpEndHook(f)
>> reply()
>> ConnectionClosed(tctx.client)
<< None
)
assert len(f().messages) == 2
def test_receive_data_before_server_connected(tctx):
"""
assert that data received before a server connection is established
will still be forwarded.
"""
assert (
Playbook(tcp.TCPLayer(tctx), hooks=False)
<< OpenConnection(tctx.server)
>> DataReceived(tctx.client, b"hello!")
>> reply(None, to=-2)
<< SendData(tctx.server, b"hello!")
)
def test_receive_data_after_half_close(tctx):
"""
data received after the other connection has been half-closed should still be forwarded.
"""
assert (
Playbook(tcp.TCPLayer(tctx), hooks=False)
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"eof-delimited-request")
<< SendData(tctx.server, b"eof-delimited-request")
>> ConnectionClosed(tctx.client)
<< CloseTcpConnection(tctx.server, half_close=True)
>> DataReceived(tctx.server, b"i'm late")
<< SendData(tctx.client, b"i'm late")
>> ConnectionClosed(tctx.server)
<< CloseConnection(tctx.client)
)
@pytest.mark.parametrize("ignore", [True, False])
def test_ignore(tctx, ignore):
"""
no flow hooks when we set ignore.
"""
def no_flow_hooks():
assert (
Playbook(tcp.TCPLayer(tctx, ignore=ignore), hooks=True)
<< OpenConnection(tctx.server)
>> reply(None)
>> DataReceived(tctx.client, b"hello!")
<< SendData(tctx.server, b"hello!")
)
if ignore:
no_flow_hooks()
else:
with pytest.raises(AssertionError):
no_flow_hooks()
def test_inject(tctx):
"""inject data into an open connection."""
f = Placeholder(TCPFlow)
assert (
Playbook(tcp.TCPLayer(tctx))
<< tcp.TcpStartHook(f)
>> TcpMessageInjected(f, TCPMessage(True, b"hello!"))
>> reply(to=-2)
<< OpenConnection(tctx.server)
>> reply(None)
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.server, b"hello!")
# and the other way...
>> TcpMessageInjected(
f, TCPMessage(False, b"I have already done the greeting for you.")
)
<< tcp.TcpMessageHook(f)
>> reply()
<< SendData(tctx.client, b"I have already done the greeting for you.")
<< None
)
assert len(f().messages) == 2
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/__init__.py | test/mitmproxy/proxy/layers/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_websocket.py | test/mitmproxy/proxy/layers/test_websocket.py | import secrets
from dataclasses import dataclass
import pytest
import wsproto.events
from wsproto.frame_protocol import Opcode
from mitmproxy.connection import ConnectionState
from mitmproxy.http import HTTPFlow
from mitmproxy.http import Request
from mitmproxy.http import Response
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import Log
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers import websocket
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.layers.websocket import WebSocketMessageInjected
from mitmproxy.websocket import WebSocketData
from mitmproxy.websocket import WebSocketMessage
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
from test.mitmproxy.proxy.tutils import reply
@dataclass
class _Masked:
unmasked: bytes
def __eq__(self, other):
other = bytearray(other)
assert other[1] & 0b1000_0000 # assert this is actually masked
other[1] &= 0b0111_1111 # remove mask bit
assert other[1] < 126 # (we don't support extended payload length here)
mask = other[2:6]
payload = bytes(x ^ mask[i % 4] for i, x in enumerate(other[6:]))
return self.unmasked == other[:2] + payload
# noinspection PyTypeChecker
def masked(unmasked: bytes) -> bytes:
return _Masked(unmasked) # type: ignore
def masked_bytes(unmasked: bytes) -> bytes:
header = bytearray(unmasked[:2])
assert header[1] < 126 # assert that this is neither masked nor extended payload
header[1] |= 0b1000_0000
mask = secrets.token_bytes(4)
masked = bytes(x ^ mask[i % 4] for i, x in enumerate(unmasked[2:]))
return bytes(header + mask + masked)
def test_masking():
m = masked(b"\x02\x03foo")
assert m == b"\x02\x83\x1c\x96\xd4\rz\xf9\xbb"
assert m == masked_bytes(b"\x02\x03foo")
def test_upgrade(tctx):
"""Test a HTTP -> WebSocket upgrade"""
tctx.server.address = ("example.com", 80)
tctx.server.state = ConnectionState.OPEN
flow = Placeholder(HTTPFlow)
assert (
Playbook(http.HttpLayer(tctx, HTTPMode.transparent))
>> DataReceived(
tctx.client,
b"GET / HTTP/1.1\r\n"
b"Connection: upgrade\r\n"
b"Upgrade: websocket\r\n"
b"Sec-WebSocket-Version: 13\r\n"
b"\r\n",
)
<< http.HttpRequestHeadersHook(flow)
>> reply()
<< http.HttpRequestHook(flow)
>> reply()
<< SendData(
tctx.server,
b"GET / HTTP/1.1\r\n"
b"Connection: upgrade\r\n"
b"Upgrade: websocket\r\n"
b"Sec-WebSocket-Version: 13\r\n"
b"\r\n",
)
>> DataReceived(
tctx.server,
b"HTTP/1.1 101 Switching Protocols\r\n"
b"Upgrade: websocket\r\n"
b"Connection: Upgrade\r\n"
b"\r\n",
)
<< http.HttpResponseHeadersHook(flow)
>> reply()
<< http.HttpResponseHook(flow)
>> reply()
<< SendData(
tctx.client,
b"HTTP/1.1 101 Switching Protocols\r\n"
b"Upgrade: websocket\r\n"
b"Connection: Upgrade\r\n"
b"\r\n",
)
<< websocket.WebsocketStartHook(flow)
>> reply()
>> DataReceived(tctx.client, masked_bytes(b"\x81\x0bhello world"))
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.server, masked(b"\x81\x0bhello world"))
>> DataReceived(tctx.server, b"\x82\nhello back")
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.client, b"\x82\nhello back")
>> DataReceived(tctx.client, masked_bytes(b"\x81\x0bhello again"))
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.server, masked(b"\x81\x0bhello again"))
)
assert len(flow().websocket.messages) == 3
assert flow().websocket.messages[0].content == b"hello world"
assert flow().websocket.messages[0].from_client
assert flow().websocket.messages[0].type == Opcode.TEXT
assert flow().websocket.messages[1].content == b"hello back"
assert flow().websocket.messages[1].from_client is False
assert flow().websocket.messages[1].type == Opcode.BINARY
assert flow().live
def test_upgrade_streamed(tctx):
"""
Test that streaming the response does not change behavior.
"""
tctx.server.address = ("example.com", 80)
tctx.server.state = ConnectionState.OPEN
flow = Placeholder(HTTPFlow)
def enable_streaming(flow: HTTPFlow):
flow.response.stream = True
assert (
Playbook(http.HttpLayer(tctx, HTTPMode.transparent))
>> DataReceived(
tctx.client,
b"GET / HTTP/1.1\r\n"
b"Connection: upgrade\r\n"
b"Upgrade: websocket\r\n"
b"Sec-WebSocket-Version: 13\r\n"
b"\r\n",
)
<< http.HttpRequestHeadersHook(flow)
>> reply()
<< http.HttpRequestHook(flow)
>> reply()
<< SendData(
tctx.server,
b"GET / HTTP/1.1\r\n"
b"Connection: upgrade\r\n"
b"Upgrade: websocket\r\n"
b"Sec-WebSocket-Version: 13\r\n"
b"\r\n",
)
>> DataReceived(
tctx.server,
b"HTTP/1.1 101 Switching Protocols\r\n"
b"Upgrade: websocket\r\n"
b"Connection: Upgrade\r\n"
b"\r\n",
)
<< http.HttpResponseHeadersHook(flow)
>> reply(side_effect=enable_streaming)
# Current implementation: We know that body size for 101 responses must be zero,
# so we never trigger streaming logic in the first place.
<< http.HttpResponseHook(flow)
>> reply()
<< SendData(
tctx.client,
b"HTTP/1.1 101 Switching Protocols\r\n"
b"Upgrade: websocket\r\n"
b"Connection: Upgrade\r\n"
b"\r\n",
)
<< websocket.WebsocketStartHook(flow)
>> DataReceived(tctx.client, masked_bytes(b"\x81\x0bhello world")) # early data
>> reply(to=-2)
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.server, masked(b"\x81\x0bhello world"))
>> DataReceived(tctx.server, b"\x82\nhello back")
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.client, b"\x82\nhello back")
>> DataReceived(tctx.client, masked_bytes(b"\x81\x0bhello again"))
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.server, masked(b"\x81\x0bhello again"))
)
@pytest.fixture()
def ws_testdata(tctx):
tctx.server.address = ("example.com", 80)
tctx.server.state = ConnectionState.OPEN
flow = HTTPFlow(tctx.client, tctx.server)
flow.request = Request.make(
"GET",
"http://example.com/",
headers={
"Connection": "upgrade",
"Upgrade": "websocket",
"Sec-WebSocket-Version": "13",
},
)
flow.response = Response.make(
101,
headers={
"Connection": "upgrade",
"Upgrade": "websocket",
},
)
flow.websocket = WebSocketData()
return tctx, Playbook(websocket.WebsocketLayer(tctx, flow)), flow
def test_modify_message(ws_testdata):
tctx, playbook, flow = ws_testdata
assert (
playbook
<< websocket.WebsocketStartHook(flow)
>> reply()
>> DataReceived(tctx.server, b"\x81\x03foo")
<< websocket.WebsocketMessageHook(flow)
)
flow.websocket.messages[-1].content = flow.websocket.messages[-1].content.replace(
b"foo", b"foobar"
)
playbook >> reply()
playbook << SendData(tctx.client, b"\x81\x06foobar")
assert playbook
def test_empty_message(ws_testdata):
tctx, playbook, flow = ws_testdata
assert (
playbook
<< websocket.WebsocketStartHook(flow)
>> reply()
>> DataReceived(tctx.server, b"\x81\x00")
<< websocket.WebsocketMessageHook(flow)
)
assert flow.websocket.messages[-1].content == b""
playbook >> reply()
playbook << SendData(tctx.client, b"\x81\x00")
assert playbook
def test_drop_message(ws_testdata):
tctx, playbook, flow = ws_testdata
assert (
playbook
<< websocket.WebsocketStartHook(flow)
>> reply()
>> DataReceived(tctx.server, b"\x81\x03foo")
<< websocket.WebsocketMessageHook(flow)
)
flow.websocket.messages[-1].drop()
playbook >> reply()
playbook << None
assert playbook
def test_fragmented(ws_testdata):
tctx, playbook, flow = ws_testdata
assert (
playbook
<< websocket.WebsocketStartHook(flow)
>> reply()
>> DataReceived(tctx.server, b"\x01\x03foo")
>> DataReceived(tctx.server, b"\x80\x03bar")
<< websocket.WebsocketMessageHook(flow)
>> reply()
<< SendData(tctx.client, b"\x01\x03foo")
<< SendData(tctx.client, b"\x80\x03bar")
)
assert flow.websocket.messages[-1].content == b"foobar"
def test_unfragmented(ws_testdata):
    """A single frame split across two TCP segments is reassembled into one message."""
    tctx, playbook, flow = ws_testdata
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> DataReceived(tctx.server, b"\x81\x06foo")
    )
    # This already triggers wsproto to emit a wsproto.events.Message, see
    # https://github.com/mitmproxy/mitmproxy/issues/4701
    assert (
        playbook
        >> DataReceived(tctx.server, b"bar")
        << websocket.WebsocketMessageHook(flow)
        >> reply()
        << SendData(tctx.client, b"\x81\x06foobar")
    )
    assert flow.websocket.messages[-1].content == b"foobar"
def test_protocol_error(ws_testdata):
    """A protocol violation (BINARY where CONTINUATION is required) sends a
    close frame to both peers and tears both connections down."""
    tctx, playbook, flow = ws_testdata
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> DataReceived(tctx.server, b"\x01\x03foo")
        >> DataReceived(tctx.server, b"\x02\x03bar")
        << SendData(
            tctx.server,
            masked(b"\x88/\x03\xeaexpected CONTINUATION, got <Opcode.BINARY: 2>"),
        )
        << CloseConnection(tctx.server)
        << SendData(
            tctx.client, b"\x88/\x03\xeaexpected CONTINUATION, got <Opcode.BINARY: 2>"
        )
        << CloseConnection(tctx.client)
        << websocket.WebsocketEndHook(flow)
        >> reply()
    )
    assert not flow.websocket.messages
    assert not flow.live
def test_ping(ws_testdata):
    """Pings and pongs are logged and forwarded but do not become messages."""
    tctx, playbook, flow = ws_testdata
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> DataReceived(tctx.client, masked_bytes(b"\x89\x11ping-with-payload"))
        << Log("Received WebSocket ping from client (payload: b'ping-with-payload')")
        << SendData(tctx.server, masked(b"\x89\x11ping-with-payload"))
        >> DataReceived(tctx.server, b"\x8a\x11pong-with-payload")
        << Log("Received WebSocket pong from server (payload: b'pong-with-payload')")
        << SendData(tctx.client, b"\x8a\x11pong-with-payload")
    )
    assert not flow.websocket.messages
def test_close_normal(ws_testdata):
    """A client-initiated close is mirrored to both peers and ends the flow."""
    tctx, playbook, flow = ws_testdata
    masked_close = Placeholder(bytes)
    close = Placeholder(bytes)
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> DataReceived(tctx.client, masked_bytes(b"\x88\x00"))
        << SendData(tctx.server, masked_close)
        << CloseConnection(tctx.server)
        << SendData(tctx.client, close)
        << CloseConnection(tctx.client)
        << websocket.WebsocketEndHook(flow)
        >> reply()
    )
    # wsproto currently handles this inconsistently, see
    # https://github.com/python-hyper/wsproto/pull/153/files
    assert masked_close() == masked(b"\x88\x02\x03\xe8") or masked_close() == masked(
        b"\x88\x00"
    )
    assert close() == b"\x88\x02\x03\xe8" or close() == b"\x88\x00"
    assert flow.websocket.close_code == 1005
    assert not flow.live
def test_close_disconnect(ws_testdata):
    """An abrupt server disconnect is recorded as close code 1006, not 1000."""
    tctx, playbook, flow = ws_testdata
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> ConnectionClosed(tctx.server)
        << CloseConnection(tctx.server)
        << SendData(tctx.client, b"\x88\x02\x03\xe8")
        << CloseConnection(tctx.client)
        << websocket.WebsocketEndHook(flow)
        >> reply()
        >> ConnectionClosed(tctx.client)
    )
    # The \x03\xe8 above is code 1000 (normal closure).
    # But 1006 (ABNORMAL_CLOSURE) is expected, because the connection was already closed.
    assert flow.websocket.close_code == 1006
    assert not flow.live
def test_close_code(ws_testdata):
    """The application close code from the wire (0x0fa0 == 4000) is recorded."""
    tctx, playbook, flow = ws_testdata
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> DataReceived(tctx.server, b"\x88\x02\x0f\xa0")
        << SendData(tctx.server, masked(b"\x88\x02\x0f\xa0"))
        << CloseConnection(tctx.server)
        << SendData(tctx.client, b"\x88\x02\x0f\xa0")
        << CloseConnection(tctx.client)
        << websocket.WebsocketEndHook(flow)
        >> reply()
    )
    assert flow.websocket.close_code == 4000
    assert not flow.live
def test_deflate(ws_testdata):
    """permessage-deflate payloads are decompressed for the hook; the forwarded
    bytes stay compressed (identical to the input here)."""
    tctx, playbook, flow = ws_testdata
    flow.response.headers["Sec-WebSocket-Extensions"] = (
        "permessage-deflate; server_max_window_bits=10"
    )
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        # https://tools.ietf.org/html/rfc7692#section-7.2.3.1
        >> DataReceived(tctx.server, bytes.fromhex("c1 07 f2 48 cd c9 c9 07 00"))
        << websocket.WebsocketMessageHook(flow)
        >> reply()
        << SendData(tctx.client, bytes.fromhex("c1 07 f2 48 cd c9 c9 07 00"))
    )
    assert flow.websocket.messages[0].content == b"Hello"
def test_unknown_ext(ws_testdata):
    """Unknown WebSocket extensions are ignored with a log message."""
    tctx, playbook, flow = ws_testdata
    flow.response.headers["Sec-WebSocket-Extensions"] = "funky-bits; param=42"
    assert (
        playbook
        << Log("Ignoring unknown WebSocket extension 'funky-bits'.")
        << websocket.WebsocketStartHook(flow)
        >> reply()
    )
def test_websocket_connection_repr(tctx):
    """repr() of a WebsocketConnection must not raise and must be truthy."""
    conn = websocket.WebsocketConnection(
        wsproto.ConnectionType.SERVER, conn=tctx.client
    )
    assert repr(conn)
class TestFragmentizer:
    """Exercise websocket.Fragmentizer's re-fragmentation behavior."""

    def test_empty(self):
        # An empty payload yields a single, finished, empty frame.
        frag = websocket.Fragmentizer([b"foo"], False)
        expected = [wsproto.events.BytesMessage(b"", message_finished=True)]
        assert list(frag(b"")) == expected

    def test_keep_sizes(self):
        # Original fragment sizes (3 + 3) are reused for the new payload.
        frag = websocket.Fragmentizer([b"foo", b"bar"], True)
        expected = [
            wsproto.events.TextMessage("foo", message_finished=False),
            wsproto.events.TextMessage("baz", message_finished=True),
        ]
        assert list(frag(b"foobaz")) == expected

    def test_rechunk(self):
        # With a single original fragment, the payload is split at FRAGMENT_SIZE.
        frag = websocket.Fragmentizer([b"foo"], False)
        frag.FRAGMENT_SIZE = 4
        expected = [
            wsproto.events.BytesMessage(b"foob", message_finished=False),
            wsproto.events.BytesMessage(b"ar", message_finished=True),
        ]
        assert list(frag(b"foobar")) == expected
def test_inject_message(ws_testdata):
    """An injected server-to-client message runs the hook and reaches the client."""
    tctx, playbook, flow = ws_testdata
    assert (
        playbook
        << websocket.WebsocketStartHook(flow)
        >> reply()
        >> WebSocketMessageInjected(
            flow, WebSocketMessage(Opcode.TEXT, False, b"hello")
        )
        << websocket.WebsocketMessageHook(flow)
    )
    assert flow.websocket.messages[-1].content == b"hello"
    assert flow.websocket.messages[-1].from_client is False
    assert flow.websocket.messages[-1].injected is True
    assert playbook >> reply() << SendData(tctx.client, b"\x81\x05hello")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
import ssl
import sys
import time
from logging import DEBUG
from logging import WARNING
import pytest
from OpenSSL import SSL
from mitmproxy import connection
from mitmproxy.connection import ConnectionState
from mitmproxy.connection import Server
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.layers import tls
from mitmproxy.tls import ClientHelloData
from mitmproxy.tls import TlsData
from mitmproxy.utils import data
from test.mitmproxy.proxy import tutils
from test.mitmproxy.proxy.tutils import BytesMatching
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import StrMatching
# Resolves test fixture paths (certificates/keys) relative to this module.
tlsdata = data.Data(__name__)
def test_record_contents():
    """Two complete TLS handshake records parse; truncated prefixes yield nothing."""
    raw = bytes.fromhex("1603010002beef1603010001ff")
    assert [b"\xbe\xef", b"\xff"] == list(tls.handshake_record_contents(raw))
    for cut in range(6):
        assert list(tls.handshake_record_contents(raw[:cut])) == []
def test_record_contents_err():
    """Non-TLS bytes and zero-length records are rejected with ValueError."""
    bad_stream = tls.handshake_record_contents(b"GET /error")
    with pytest.raises(ValueError, match="Expected TLS record"):
        next(bad_stream)
    zero_length = bytes.fromhex("1603010000")
    with pytest.raises(ValueError, match="Record must not be empty"):
        next(tls.handshake_record_contents(zero_length))
# A bare ClientHello handshake message (no record header) without any extensions;
# record headers are prepended in test_get_client_hello below.
client_hello_no_extensions = bytes.fromhex(
    "0100006103015658a756ab2c2bff55f636814deac086b7ca56b65058c7893ffc6074f5245f70205658a75475103a152637"
    "78e1bb6d22e8bbd5b6b0a3a59760ad354e91ba20d353001a0035002f000a000500040009000300060008006000"
    "61006200640100"
)
# A complete ClientHello record (record + handshake layer) carrying extensions;
# its SNI is example.com (asserted in test_parse_client_hello).
client_hello_with_extensions = bytes.fromhex(
    "16030300bb" # record layer
    "010000b7" # handshake layer
    "03033b70638d2523e1cba15f8364868295305e9c52aceabda4b5147210abc783e6e1000022c02bc02fc02cc030"
    "cca9cca8cc14cc13c009c013c00ac014009c009d002f0035000a0100006cff0100010000000010000e00000b65"
    "78616d706c652e636f6d0017000000230000000d00120010060106030501050304010403020102030005000501"
    "00000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a00080006001d00"
    "170018"
)
def test_get_client_hello():
    """get_client_hello reassembles a hello from one or two records; None if incomplete."""
    one_record = bytes.fromhex("1603010065") + client_hello_no_extensions
    assert tls.get_client_hello(one_record) == client_hello_no_extensions
    head, tail = client_hello_no_extensions[:32], client_hello_no_extensions[32:]
    two_records = (
        bytes.fromhex("1603010020") + head + bytes.fromhex("1603010045") + tail
    )
    assert tls.get_client_hello(two_records) == client_hello_no_extensions
    # A truncated stream must not yield a partial ClientHello.
    assert tls.get_client_hello(two_records[:42]) is None
def test_parse_client_hello():
    """A full hello parses (SNI), a partial one returns None, a corrupt one raises."""
    assert tls.parse_client_hello(client_hello_with_extensions).sni == "example.com"
    assert tls.parse_client_hello(client_hello_with_extensions[:50]) is None
    with pytest.raises(ValueError):
        tls.parse_client_hello(
            client_hello_with_extensions[:183] + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
class SSLTest:
    """Helper container for Python's builtin SSL object.

    Wraps an ``ssl`` memory-BIO pair so tests can drive a TLS endpoint
    byte-by-byte against the proxy layers without real sockets.
    """

    def __init__(
        self,
        server_side: bool = False,
        alpn: list[str] | None = None,
        sni: bytes | None = b"example.mitmproxy.org",
        max_ver: ssl.TLSVersion | None = None,
        post_handshake_auth: bool = False,
    ):
        self.inc = ssl.MemoryBIO()
        self.out = ssl.MemoryBIO()
        self.ctx = ssl.SSLContext(
            ssl.PROTOCOL_TLS_SERVER if server_side else ssl.PROTOCOL_TLS_CLIENT
        )
        self.ctx.verify_mode = ssl.CERT_OPTIONAL
        self.ctx.post_handshake_auth = post_handshake_auth
        self.ctx.load_verify_locations(
            cafile=tlsdata.path("../../net/data/verificationcerts/trusted-root.crt"),
        )
        if alpn:
            self.ctx.set_alpn_protocols(alpn)
        if server_side:
            # Pick the leaf certificate fixture by SNI; "trusted-leaf-ip" is
            # presumably issued for the IP literal — verify against fixture data.
            if sni == b"192.0.2.42":
                filename = "trusted-leaf-ip"
            else:
                filename = "trusted-leaf"
            # Bug fix: the f-strings previously contained a literal placeholder
            # instead of interpolating `filename`, which was computed but unused.
            self.ctx.load_cert_chain(
                certfile=tlsdata.path(
                    f"../../net/data/verificationcerts/{filename}.crt"
                ),
                keyfile=tlsdata.path(
                    f"../../net/data/verificationcerts/{filename}.key"
                ),
            )
        if max_ver:
            self.ctx.maximum_version = max_ver
        self.obj = self.ctx.wrap_bio(
            self.inc,
            self.out,
            server_hostname=None if server_side else sni,
            server_side=server_side,
        )

    def bio_write(self, buf: bytes) -> int:
        """Feed TLS bytes received from the peer into the SSL object."""
        return self.inc.write(buf)

    def bio_read(self, bufsize: int = 2**16) -> bytes:
        """Drain TLS bytes the SSL object wants to send to the peer."""
        return self.out.read(bufsize)

    def do_handshake(self) -> None:
        """Advance the handshake; raises SSLWantReadError until complete."""
        return self.obj.do_handshake()
def _test_echo(
    playbook: tutils.Playbook, tssl: SSLTest, conn: connection.Connection
) -> None:
    """Round-trip b"Hello World" over the TLS connection; the echo layer
    lowercases it, so decrypting b"hello world" proves both directions work."""
    tssl.obj.write(b"Hello World")
    data = tutils.Placeholder(bytes)
    assert (
        playbook
        >> events.DataReceived(conn, tssl.bio_read())
        << commands.SendData(conn, data)
    )
    tssl.bio_write(data())
    assert tssl.obj.read() == b"hello world"
class TlsEchoLayer(tutils.EchoLayer):
    """Echo layer that can additionally be told to open the server connection."""
    err: str | None = None
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        # b"open-connection" is a magic payload: instead of echoing, open the
        # upstream connection and report a failure back to the sender.
        if isinstance(event, events.DataReceived) and event.data == b"open-connection":
            err = yield commands.OpenConnection(self.context.server)
            if err:
                yield commands.SendData(
                    event.connection, f"open-connection failed: {err}".encode()
                )
        else:
            yield from super()._handle_event(event)
def finish_handshake(
    playbook: tutils.Playbook, conn: connection.Connection, tssl: SSLTest
):
    """Feed the local SSL object's pending bytes into the layer, expect the
    matching TlsEstablished*Hook (with no error), and relay the reply back."""
    data = tutils.Placeholder(bytes)
    tls_hook_data = tutils.Placeholder(TlsData)
    if isinstance(conn, connection.Client):
        established_hook = tls.TlsEstablishedClientHook(tls_hook_data)
    else:
        established_hook = tls.TlsEstablishedServerHook(tls_hook_data)
    assert (
        playbook
        >> events.DataReceived(conn, tssl.bio_read())
        << established_hook
        >> tutils.reply()
        << commands.SendData(conn, data)
    )
    assert tls_hook_data().conn.error is None
    tssl.bio_write(data())
def reply_tls_start_client(alpn: bytes | None = None, *args, **kwargs) -> tutils.reply:
    """
    Helper function to simplify the syntax for tls_start_client hooks.
    """
    def make_client_conn(tls_start: TlsData) -> None:
        # ssl_context = SSL.Context(Method.TLS_METHOD)
        # ssl_context.set_min_proto_version(SSL.TLS1_3_VERSION)
        ssl_context = SSL.Context(SSL.SSLv23_METHOD)
        # Disable all protocol versions below TLS 1.3.
        ssl_context.set_options(
            SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1 | SSL.OP_NO_TLSv1_2
        )
        ssl_context.use_privatekey_file(
            tlsdata.path("../../net/data/verificationcerts/trusted-leaf.key")
        )
        ssl_context.use_certificate_chain_file(
            tlsdata.path("../../net/data/verificationcerts/trusted-leaf.crt")
        )
        if alpn is not None:
            ssl_context.set_alpn_select_callback(lambda conn, protos: alpn)
        tls_start.ssl_conn = SSL.Connection(ssl_context)
        # Server role towards the real client: accept its handshake.
        tls_start.ssl_conn.set_accept_state()
    return tutils.reply(*args, side_effect=make_client_conn, **kwargs)
def reply_tls_start_server(
    alpn: bytes | None = None, client_cert: bool = False, *args, **kwargs
) -> tutils.reply:
    """
    Helper function to simplify the syntax for tls_start_server hooks.
    """
    def make_server_conn(tls_start: TlsData) -> None:
        # ssl_context = SSL.Context(Method.TLS_METHOD)
        # ssl_context.set_min_proto_version(SSL.TLS1_3_VERSION)
        ssl_context = SSL.Context(SSL.SSLv23_METHOD)
        # Disable all protocol versions below TLS 1.3.
        ssl_context.set_options(
            SSL.OP_NO_SSLv3 | SSL.OP_NO_TLSv1 | SSL.OP_NO_TLSv1_1 | SSL.OP_NO_TLSv1_2
        )
        ssl_context.load_verify_locations(
            cafile=tlsdata.path("../../net/data/verificationcerts/trusted-root.crt")
        )
        if alpn is not None:
            ssl_context.set_alpn_protos([alpn])
        ssl_context.set_verify(SSL.VERIFY_PEER)
        if client_cert:
            ssl_context.use_privatekey_file(
                tlsdata.path("../../net/data/verificationcerts/trusted-client-cert.pem")
            )
            ssl_context.use_certificate_file(
                tlsdata.path("../../net/data/verificationcerts/trusted-client-cert.pem")
            )
            # Not exposed by pyOpenSSL's high-level API, hence the raw _lib call.
            SSL._lib.SSL_CTX_set_post_handshake_auth(ssl_context._context, 1)
        tls_start.ssl_conn = SSL.Connection(ssl_context)
        tls_start.ssl_conn.set_connect_state()
        # Set SNI
        tls_start.ssl_conn.set_tlsext_host_name(tls_start.conn.sni.encode())
        # Manually enable hostname verification.
        # Recent OpenSSL versions provide slightly nicer ways to do this, but they are not exposed in
        # cryptography and likely a PITA to add.
        # https://wiki.openssl.org/index.php/Hostname_validation
        param = SSL._lib.SSL_get0_param(tls_start.ssl_conn._ssl)
        # Common Name matching is disabled in both Chrome and Firefox, so we should disable it, too.
        # https://www.chromestatus.com/feature/4981025180483584
        SSL._lib.X509_VERIFY_PARAM_set_hostflags(
            param,
            SSL._lib.X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS
            | SSL._lib.X509_CHECK_FLAG_NEVER_CHECK_SUBJECT,
        )
        SSL._openssl_assert(
            SSL._lib.X509_VERIFY_PARAM_set1_host(param, tls_start.conn.sni.encode(), 0)
            == 1
        )
    return tutils.reply(*args, side_effect=make_server_conn, **kwargs)
class TestServerTLS:
    """Scenarios for ServerTLSLayer: successful handshakes and failure modes."""
    def test_repr(self, tctx):
        """repr() must not raise."""
        assert repr(tls.ServerTLSLayer(tctx))
    def test_not_connected(self, tctx: context.Context):
        """Test that we don't do anything if no server connection exists."""
        layer = tls.ServerTLSLayer(tctx)
        layer.child_layer = TlsEchoLayer(tctx)
        assert (
            tutils.Playbook(layer)
            >> events.DataReceived(tctx.client, b"Hello World")
            << commands.SendData(tctx.client, b"hello world")
        )
    def test_simple(self, tctx):
        """Happy path: full server-side handshake, echo traffic, orderly shutdown."""
        playbook = tutils.Playbook(tls.ServerTLSLayer(tctx))
        tctx.server.address = ("example.mitmproxy.org", 443)
        tctx.server.state = ConnectionState.OPEN
        tctx.server.sni = "example.mitmproxy.org"
        tssl = SSLTest(server_side=True)
        # send ClientHello, receive ClientHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            << tls.TlsStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server()
            << commands.SendData(tctx.server, data)
        )
        tssl.bio_write(data())
        with pytest.raises(ssl.SSLWantReadError):
            tssl.do_handshake()
        # finish handshake (mitmproxy)
        finish_handshake(playbook, tctx.server, tssl)
        # finish handshake (locally)
        tssl.do_handshake()
        playbook >> events.DataReceived(tctx.server, tssl.bio_read())
        playbook << None
        assert playbook
        assert tctx.server.tls_established
        # Echo
        assert (
            playbook
            >> events.DataReceived(tctx.client, b"foo")
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(TlsEchoLayer)
            << commands.SendData(tctx.client, b"foo")
        )
        _test_echo(playbook, tssl, tctx.server)
        # The local endpoint starts closure (unwrap); mitmproxy closes in turn.
        with pytest.raises(ssl.SSLWantReadError):
            tssl.obj.unwrap()
        assert (
            playbook
            >> events.DataReceived(tctx.server, tssl.bio_read())
            << commands.CloseConnection(tctx.server)
            >> events.ConnectionClosed(tctx.server)
            << None
        )
    def test_untrusted_cert(self, tctx):
        """If the certificate is not trusted, we should fail."""
        playbook = tutils.Playbook(tls.ServerTLSLayer(tctx))
        tctx.server.address = ("wrong.host.mitmproxy.org", 443)
        tctx.server.sni = "wrong.host.mitmproxy.org"
        tssl = SSLTest(server_side=True)
        # send ClientHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            >> events.DataReceived(tctx.client, b"open-connection")
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(TlsEchoLayer)
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            << tls.TlsStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server()
            << commands.SendData(tctx.server, data)
        )
        # receive ServerHello, finish client handshake
        tssl.bio_write(data())
        with pytest.raises(ssl.SSLWantReadError):
            tssl.do_handshake()
        tls_hook_data = tutils.Placeholder(TlsData)
        assert (
            playbook
            >> events.DataReceived(tctx.server, tssl.bio_read())
            << commands.Log(
                # different casing in OpenSSL < 3.0
                StrMatching(
                    "Server TLS handshake failed. Certificate verify failed: [Hh]ostname mismatch"
                ),
                WARNING,
            )
            << tls.TlsFailedServerHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.server)
            << commands.SendData(
                tctx.client,
                # different casing in OpenSSL < 3.0
                BytesMatching(
                    b"open-connection failed: Certificate verify failed: [Hh]ostname mismatch"
                ),
            )
        )
        assert (
            tls_hook_data().conn.error.lower()
            == "Certificate verify failed: Hostname mismatch".lower()
        )
        assert not tctx.server.tls_established
    def test_remote_speaks_no_tls(self, tctx):
        """Non-TLS bytes from the server abort the handshake with a log and a close."""
        playbook = tutils.Playbook(tls.ServerTLSLayer(tctx))
        tctx.server.state = ConnectionState.OPEN
        tctx.server.sni = "example.mitmproxy.org"
        # send ClientHello, receive random garbage back
        data = tutils.Placeholder(bytes)
        tls_hook_data = tutils.Placeholder(TlsData)
        assert (
            playbook
            << tls.TlsStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server()
            << commands.SendData(tctx.server, data)
            >> events.DataReceived(tctx.server, b"HTTP/1.1 404 Not Found\r\n")
            << commands.Log(
                "Server TLS handshake failed. The remote server does not speak TLS.",
                WARNING,
            )
            << tls.TlsFailedServerHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.server)
        )
        assert tls_hook_data().conn.error == "The remote server does not speak TLS."
    def test_unsupported_protocol(self, tctx: context.Context):
        """Test the scenario where the server only supports an outdated TLS version by default."""
        playbook = tutils.Playbook(tls.ServerTLSLayer(tctx))
        tctx.server.address = ("example.mitmproxy.org", 443)
        tctx.server.state = ConnectionState.OPEN
        tctx.server.sni = "example.mitmproxy.org"
        # noinspection PyTypeChecker
        tssl = SSLTest(server_side=True, max_ver=ssl.TLSVersion.TLSv1_2)
        # send ClientHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            << tls.TlsStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server()
            << commands.SendData(tctx.server, data)
        )
        # receive ServerHello
        tssl.bio_write(data())
        with pytest.raises(ssl.SSLError):
            tssl.do_handshake()
        # send back error
        tls_hook_data = tutils.Placeholder(TlsData)
        assert (
            playbook
            >> events.DataReceived(tctx.server, tssl.bio_read())
            << commands.Log(
                "Server TLS handshake failed. The remote server and mitmproxy cannot agree on a TLS version"
                " to use. You may need to adjust mitmproxy's tls_version_server_min option.",
                WARNING,
            )
            << tls.TlsFailedServerHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.server)
        )
        assert tls_hook_data().conn.error
    def test_post_handshake_authentication(self, tctx):
        """Post-handshake client authentication: the server requests a certificate
        after the handshake and mitmproxy supplies the configured client cert."""
        playbook = tutils.Playbook(tls.ServerTLSLayer(tctx))
        tctx.server.address = ("example.mitmproxy.org", 443)
        tctx.server.state = ConnectionState.OPEN
        tctx.server.sni = "example.mitmproxy.org"
        tssl = SSLTest(server_side=True, post_handshake_auth=True)
        # send ClientHello, receive ClientHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            << tls.TlsStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server(client_cert=True)
            << commands.SendData(tctx.server, data)
        )
        tssl.bio_write(data())
        with pytest.raises(ssl.SSLWantReadError):
            tssl.do_handshake()
        # finish handshake (mitmproxy)
        finish_handshake(playbook, tctx.server, tssl)
        # finish handshake (locally)
        tssl.do_handshake()
        playbook >> events.DataReceived(tctx.server, tssl.bio_read())
        playbook << None
        assert playbook
        assert tctx.server.tls_established
        if sys.version_info >= (3, 13):
            assert not tssl.obj.get_verified_chain()
        tssl.obj.verify_client_post_handshake()
        with pytest.raises(ssl.SSLWantReadError):
            tssl.obj.read(42)
        cert_request = tssl.bio_read()
        assert cert_request
        client_cert = Placeholder(bytes)
        assert (
            playbook
            >> events.DataReceived(tctx.server, cert_request)
            << commands.SendData(tctx.server, client_cert)
        )
        tssl.bio_write(client_cert())
        with pytest.raises(ssl.SSLWantReadError):
            tssl.obj.read(42)
        if sys.version_info >= (3, 13):
            assert tssl.obj.get_verified_chain()
def make_client_tls_layer(
    tctx: context.Context, **kwargs
) -> tuple[tutils.Playbook, tls.ClientTLSLayer, SSLTest]:
    """Set up a ClientTLSLayer playbook plus a local SSL client that has already
    produced its ClientHello; kwargs are forwarded to SSLTest."""
    # This is a bit contrived as the client layer expects a server layer as parent.
    # We also set child layers manually to avoid NextLayer noise.
    server_layer = tls.ServerTLSLayer(tctx)
    client_layer = tls.ClientTLSLayer(tctx)
    server_layer.child_layer = client_layer
    client_layer.child_layer = TlsEchoLayer(tctx)
    playbook = tutils.Playbook(server_layer)
    # Add some server config, this is needed anyways.
    tctx.server.__dict__["address"] = (
        "example.mitmproxy.org",
        443,
    ) # .address fails because connection is open
    tctx.server.sni = "example.mitmproxy.org"
    tssl_client = SSLTest(**kwargs)
    # Start handshake.
    with pytest.raises(ssl.SSLWantReadError):
        tssl_client.do_handshake()
    return playbook, client_layer, tssl_client
class TestClientTLS:
    """Scenarios for ClientTLSLayer: client-only TLS, server-first TLS,
    passthrough, and the various client handshake failure modes."""
    def test_client_only(self, tctx: context.Context):
        """Test TLS with client only"""
        playbook, client_layer, tssl_client = make_client_tls_layer(tctx)
        client_layer.debug = " "
        assert not tctx.client.tls_established
        # Send ClientHello, receive ServerHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            >> events.DataReceived(tctx.client, tssl_client.bio_read())
            << tls.TlsClienthelloHook(tutils.Placeholder())
            >> tutils.reply()
            << tls.TlsStartClientHook(tutils.Placeholder())
            >> reply_tls_start_client()
            << commands.SendData(tctx.client, data)
        )
        tssl_client.bio_write(data())
        tssl_client.do_handshake()
        # Finish Handshake
        finish_handshake(playbook, tctx.client, tssl_client)
        assert tssl_client.obj.getpeercert(True)
        assert tctx.client.tls_established
        # Echo
        _test_echo(playbook, tssl_client, tctx.client)
        # Traffic on an unrelated connection must bypass TLS entirely.
        other_server = Server(address=None)
        assert (
            playbook
            >> events.DataReceived(other_server, b"Plaintext")
            << commands.SendData(other_server, b"plaintext")
        )
    @pytest.mark.parametrize("server_state", ["open", "closed"])
    def test_server_required(self, tctx, server_state):
        """
        Test the scenario where a server connection is required (for example, because of an unknown ALPN)
        to establish TLS with the client.
        """
        if server_state == "open":
            tctx.server.state = ConnectionState.OPEN
        tssl_server = SSLTest(server_side=True, alpn=["quux"])
        playbook, client_layer, tssl_client = make_client_tls_layer(tctx, alpn=["quux"])
        # We should now get instructed to open a server connection.
        data = tutils.Placeholder(bytes)
        def require_server_conn(client_hello: ClientHelloData) -> None:
            client_hello.establish_server_tls_first = True
        (
            playbook
            >> events.DataReceived(tctx.client, tssl_client.bio_read())
            << tls.TlsClienthelloHook(tutils.Placeholder())
            >> tutils.reply(side_effect=require_server_conn)
        )
        if server_state == "closed":
            playbook << commands.OpenConnection(tctx.server)
            playbook >> tutils.reply(None)
        assert (
            playbook
            << tls.TlsStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server(alpn=b"quux")
            << commands.SendData(tctx.server, data)
        )
        # Establish TLS with the server...
        tssl_server.bio_write(data())
        with pytest.raises(ssl.SSLWantReadError):
            tssl_server.do_handshake()
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            >> events.DataReceived(tctx.server, tssl_server.bio_read())
            << tls.TlsEstablishedServerHook(tutils.Placeholder())
            >> tutils.reply()
            << commands.SendData(tctx.server, data)
            << tls.TlsStartClientHook(tutils.Placeholder())
        )
        tssl_server.bio_write(data())
        assert tctx.server.tls_established
        # Server TLS is established, we can now reply to the client handshake...
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            >> reply_tls_start_client(alpn=b"quux")
            << commands.SendData(tctx.client, data)
        )
        tssl_client.bio_write(data())
        tssl_client.do_handshake()
        finish_handshake(playbook, tctx.client, tssl_client)
        # Both handshakes completed!
        assert tctx.client.tls_established
        assert tctx.server.tls_established
        assert tctx.server.sni == tctx.client.sni
        assert tctx.client.alpn == b"quux"
        assert tctx.server.alpn == b"quux"
        _test_echo(playbook, tssl_server, tctx.server)
        _test_echo(playbook, tssl_client, tctx.client)
    @pytest.mark.parametrize("server_state", ["open", "closed"])
    def test_passthrough_from_clienthello(self, tctx, server_state):
        """
        Test the scenario where the connection is moved to passthrough mode in the tls_clienthello hook.
        """
        if server_state == "open":
            tctx.server.timestamp_start = time.time()
            tctx.server.state = ConnectionState.OPEN
        playbook, client_layer, tssl_client = make_client_tls_layer(tctx, alpn=["quux"])
        def make_passthrough(client_hello: ClientHelloData) -> None:
            client_hello.ignore_connection = True
        client_hello = tssl_client.bio_read()
        (
            playbook
            >> events.DataReceived(tctx.client, client_hello)
            << tls.TlsClienthelloHook(tutils.Placeholder())
            >> tutils.reply(side_effect=make_passthrough)
        )
        if server_state == "closed":
            playbook << commands.OpenConnection(tctx.server)
            playbook >> tutils.reply(None)
        assert (
            playbook
            << commands.SendData(tctx.server, client_hello) # passed through unmodified
            >> events.DataReceived(
                tctx.server, b"ServerHello"
            ) # and the same for the serverhello.
            << commands.SendData(tctx.client, b"ServerHello")
        )
    def test_cannot_parse_clienthello(self, tctx: context.Context):
        """Test the scenario where we cannot parse the ClientHello"""
        playbook, client_layer, tssl_client = make_client_tls_layer(tctx)
        tls_hook_data = tutils.Placeholder(TlsData)
        invalid = b"\x16\x03\x01\x00\x00"
        assert (
            playbook
            >> events.DataReceived(tctx.client, invalid)
            << commands.Log(
                f"Client TLS handshake failed. Cannot parse ClientHello: {invalid.hex()}",
                level=WARNING,
            )
            << tls.TlsFailedClientHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.client)
        )
        assert tls_hook_data().conn.error
        assert not tctx.client.tls_established
        # Make sure that an active server connection does not cause child layers to spawn.
        client_layer.debug = ""
        assert (
            playbook
            >> events.DataReceived(Server(address=None), b"data on other stream")
            << commands.Log(">> DataReceived(server, b'data on other stream')", DEBUG)
            << commands.Log(
                "[tls] Swallowing DataReceived(server, b'data on other stream') as handshake failed.",
                DEBUG,
            )
        )
    def test_mitmproxy_ca_is_untrusted(self, tctx: context.Context):
        """Test the scenario where the client doesn't trust the mitmproxy CA."""
        playbook, client_layer, tssl_client = make_client_tls_layer(
            tctx, sni=b"wrong.host.mitmproxy.org"
        )
        playbook.logs = True
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            >> events.DataReceived(tctx.client, tssl_client.bio_read())
            << tls.TlsClienthelloHook(tutils.Placeholder())
            >> tutils.reply()
            << tls.TlsStartClientHook(tutils.Placeholder())
            >> reply_tls_start_client()
            << commands.SendData(tctx.client, data)
        )
        tssl_client.bio_write(data())
        with pytest.raises(ssl.SSLCertVerificationError):
            tssl_client.do_handshake()
        # Finish Handshake
        tls_hook_data = tutils.Placeholder(TlsData)
        assert (
            playbook
            >> events.DataReceived(tctx.client, tssl_client.bio_read())
            << commands.Log(
                tutils.StrMatching(
                    "Client TLS handshake failed. The client does not trust the proxy's certificate "
                    "for wrong.host.mitmproxy.org"
                ),
                WARNING,
            )
            << tls.TlsFailedClientHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.client)
            >> events.ConnectionClosed(tctx.client)
        )
        assert not tctx.client.tls_established
        assert tls_hook_data().conn.error
    @pytest.mark.parametrize(
        "close_at", ["tls_clienthello", "tls_start_client", "handshake"]
    )
    def test_immediate_disconnect(self, tctx: context.Context, close_at):
        """Test the scenario where the client is disconnecting during the handshake.
        This may happen because they are not interested in the connection anymore, or because they do not like
        the proxy certificate."""
        playbook, client_layer, tssl_client = make_client_tls_layer(
            tctx, sni=b"wrong.host.mitmproxy.org"
        )
        playbook.logs = True
        tls_hook_data = tutils.Placeholder(TlsData)
        playbook >> events.DataReceived(tctx.client, tssl_client.bio_read())
        playbook << tls.TlsClienthelloHook(tutils.Placeholder())
        if close_at == "tls_clienthello":
            assert (
                playbook
                >> events.ConnectionClosed(tctx.client)
                >> tutils.reply(to=-2)
                << tls.TlsStartClientHook(tutils.Placeholder())
                >> reply_tls_start_client()
                << tls.TlsFailedClientHook(tls_hook_data)
                >> tutils.reply()
                << commands.CloseConnection(tctx.client)
            )
            assert tls_hook_data().conn.error
            return
        playbook >> tutils.reply()
        playbook << tls.TlsStartClientHook(tutils.Placeholder())
        if close_at == "tls_start_client":
            assert (
                playbook
                >> events.ConnectionClosed(tctx.client)
                >> reply_tls_start_client(to=-2)
                << tls.TlsFailedClientHook(tls_hook_data)
                >> tutils.reply()
                << commands.CloseConnection(tctx.client)
            )
            assert tls_hook_data().conn.error
            return
        assert (
            playbook
            >> reply_tls_start_client()
            << commands.SendData(tctx.client, tutils.Placeholder())
            >> events.ConnectionClosed(tctx.client)
            << commands.Log(
                "Client TLS handshake failed. The client disconnected during the handshake. "
                "If this happens consistently for wrong.host.mitmproxy.org, this may indicate that the "
                "client does not trust the proxy's certificate."
            )
            << tls.TlsFailedClientHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.client)
        )
        assert tls_hook_data().conn.error
    def test_unsupported_protocol(self, tctx: context.Context):
        """Test the scenario where the client only supports an outdated TLS version by default."""
        playbook, client_layer, tssl_client = make_client_tls_layer(
            tctx, max_ver=ssl.TLSVersion.TLSv1_2
        )
        playbook.logs = True
        tls_hook_data = tutils.Placeholder(TlsData)
        assert (
            playbook
            >> events.DataReceived(tctx.client, tssl_client.bio_read())
            << tls.TlsClienthelloHook(tutils.Placeholder())
            >> tutils.reply()
            << tls.TlsStartClientHook(tutils.Placeholder())
            >> reply_tls_start_client()
            << commands.Log(
                "Client TLS handshake failed. Client and mitmproxy cannot agree on a TLS version to "
                "use. You may need to adjust mitmproxy's tls_version_client_min option.",
                WARNING,
            )
            << tls.TlsFailedClientHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.client)
        )
        assert tls_hook_data().conn.error
def test_dtls_record_contents():
    """Two complete DTLS records parse; truncated prefixes yield nothing."""
    raw = bytes.fromhex("16fefd00000000000000000002beef16fefd00000000000000000001ff")
    assert [b"\xbe\xef", b"\xff"] == list(tls.dtls_handshake_record_contents(raw))
    for cut in range(12):
        assert list(tls.dtls_handshake_record_contents(raw[:cut])) == []
def test__dtls_record_contents_err():
    """Non-DTLS bytes and zero-length DTLS records are rejected with ValueError."""
    bad_stream = tls.dtls_handshake_record_contents(b"GET /this-will-cause-error")
    with pytest.raises(ValueError, match="Expected DTLS record"):
        next(bad_stream)
    zero_length = bytes.fromhex("16fefd00000000000000000000")
    with pytest.raises(ValueError, match="Record must not be empty"):
        next(tls.dtls_handshake_record_contents(zero_length))
# A bare DTLS ClientHello handshake message (no record header) without extensions.
dtls_client_hello_no_extensions = bytes.fromhex(
    "010000360000000000000036fefd62be32f048777da890ddd213b0cb8dc3e2903f88dda1cd5f67808e1169110e840000000"
    "cc02bc02fc00ac014c02cc03001000000"
)
# A complete DTLS ClientHello record with extensions; the trailing hex
# (6578...6f6d) spells the SNI "example.com".
dtls_client_hello_with_extensions = bytes.fromhex(
    "16fefd00000000000000000085" # record layer
    "010000790000000000000079" # handshake layer
    "fefd62bf0e0bf809df43e7669197be831919878b1a72c07a584d3c0a8ca6665878010000000cc02bc02fc00ac014c02cc0"
    "3001000043000d0010000e0403050306030401050106010807ff01000100000a00080006001d00170018000b00020100001"
    "7000000000010000e00000b6578616d706c652e636f6d"
)
def test_dtls_get_client_hello():
single_record = (
bytes.fromhex("16fefd00000000000000000042") + dtls_client_hello_no_extensions
)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | true |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_modes.py | test/mitmproxy/proxy/layers/test_modes.py | import copy
import pytest
from mitmproxy import dns
from mitmproxy.addons.proxyauth import ProxyAuth
from mitmproxy.connection import Client
from mitmproxy.connection import ConnectionState
from mitmproxy.connection import Server
from mitmproxy.proxy import layers
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import Log
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import RequestWakeup
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layer import NextLayer
from mitmproxy.proxy.layer import NextLayerHook
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.layers import quic
from mitmproxy.proxy.layers import tcp
from mitmproxy.proxy.layers import tls
from mitmproxy.proxy.layers import udp
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.layers.tcp import TcpMessageHook
from mitmproxy.proxy.layers.tcp import TcpStartHook
from mitmproxy.proxy.layers.tls import ClientTLSLayer
from mitmproxy.proxy.layers.tls import TlsStartClientHook
from mitmproxy.proxy.layers.tls import TlsStartServerHook
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.tcp import TCPFlow
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.udp import UDPFlow
from test.mitmproxy.proxy.layers.test_tls import reply_tls_start_client
from test.mitmproxy.proxy.layers.test_tls import reply_tls_start_server
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
from test.mitmproxy.proxy.tutils import reply
from test.mitmproxy.proxy.tutils import reply_next_layer
def test_upstream_https(tctx):
    """
    Test mitmproxy in HTTPS upstream mode with another mitmproxy instance upstream.
    In other words:

    mitmdump --mode upstream:https://localhost:8081 --ssl-insecure
    mitmdump -p 8081
    curl -x localhost:8080 -k http://example.com
    """
    # proxy1: the downstream instance in upstream mode; proxy2: the upstream instance.
    tctx1 = Context(
        Client(
            peername=("client", 1234),
            sockname=("127.0.0.1", 8080),
            timestamp_start=1605699329,
            state=ConnectionState.OPEN,
        ),
        copy.deepcopy(tctx.options),
    )
    tctx1.client.proxy_mode = ProxyMode.parse(
        "upstream:https://example.mitmproxy.org:8081"
    )
    tctx2 = Context(
        Client(
            peername=("client", 4321),
            sockname=("127.0.0.1", 8080),
            timestamp_start=1605699329,
            state=ConnectionState.OPEN,
        ),
        copy.deepcopy(tctx.options),
    )
    assert tctx2.client.proxy_mode == ProxyMode.parse("regular")
    del tctx  # guard against accidentally using the fixture context below
    proxy1 = Playbook(modes.HttpUpstreamProxy(tctx1), hooks=False)
    proxy2 = Playbook(modes.HttpProxy(tctx2), hooks=False)
    upstream = Placeholder(Server)
    server = Placeholder(Server)
    clienthello = Placeholder(bytes)
    serverhello = Placeholder(bytes)
    request = Placeholder(bytes)
    tls_finished = Placeholder(bytes)
    response = Placeholder(bytes)
    # Step 1: proxy1 receives a plain HTTP request and starts a TLS handshake upstream.
    assert (
        proxy1
        >> DataReceived(
            tctx1.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << NextLayerHook(Placeholder(NextLayer))
        >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.upstream))
        << OpenConnection(upstream)
        >> reply(None)
        << TlsStartServerHook(Placeholder())
        >> reply_tls_start_server(alpn=b"http/1.1")
        << SendData(upstream, clienthello)
    )
    assert upstream().address == ("example.mitmproxy.org", 8081)
    assert upstream().sni == "example.mitmproxy.org"
    # Step 2: proxy2 answers the TLS handshake.
    assert (
        proxy2
        >> DataReceived(tctx2.client, clienthello())
        << NextLayerHook(Placeholder(NextLayer))
        >> reply_next_layer(ClientTLSLayer)
        << TlsStartClientHook(Placeholder())
        >> reply_tls_start_client(alpn=b"http/1.1")
        << SendData(tctx2.client, serverhello)
    )
    assert (
        proxy1
        # forward serverhello to proxy1
        >> DataReceived(upstream, serverhello())
        << SendData(upstream, request)
    )
    # Step 3: proxy2 decrypts the request and proxies it to the real server.
    assert (
        proxy2
        >> DataReceived(tctx2.client, request())
        << SendData(tctx2.client, tls_finished)
        << NextLayerHook(Placeholder(NextLayer))
        >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.regular))
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
        << SendData(tctx2.client, response)
    )
    assert server().address == ("example.com", 80)
    # Step 4: proxy1 decrypts the response and relays it to the original client.
    assert (
        proxy1
        >> DataReceived(upstream, tls_finished() + response())
        << SendData(tctx1.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
    )
@pytest.mark.parametrize("keep_host_header", [True, False])
def test_reverse_proxy(tctx, keep_host_header):
    """Test mitmproxy in reverse proxy mode.

    - make sure that we connect to the right host
    - make sure that we respect keep_host_header
    - make sure that we include non-standard ports in the host header (#4280)
    """
    server = Placeholder(Server)
    tctx.client.proxy_mode = ProxyMode.parse("reverse:http://localhost:8000")
    tctx.options.connection_strategy = "lazy"
    tctx.options.keep_host_header = keep_host_header
    assert (
        Playbook(modes.ReverseProxy(tctx), hooks=False)
        >> DataReceived(tctx.client, b"GET /foo HTTP/1.1\r\nHost: example.com\r\n\r\n")
        << NextLayerHook(Placeholder(NextLayer))
        >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))
        << OpenConnection(server)
        >> reply(None)
        # The Host header is rewritten to the reverse-proxy target (with its
        # non-standard port) unless keep_host_header is set.
        << SendData(
            server,
            b"GET /foo HTTP/1.1\r\n"
            b"Host: "
            + (b"example.com" if keep_host_header else b"localhost:8000")
            + b"\r\n\r\n",
        )
        >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
        << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
    )
    assert server().address == ("localhost", 8000)
def test_reverse_dns(tctx):
    """DNS queries over UDP are forwarded to the configured reverse-proxy target."""
    tctx.client.transport_protocol = "udp"
    tctx.server.transport_protocol = "udp"
    f = Placeholder(dns.DNSFlow)
    server = Placeholder(Server)
    tctx.client.proxy_mode = ProxyMode.parse("reverse:dns://8.8.8.8:53")
    tctx.options.connection_strategy = "lazy"
    assert (
        Playbook(modes.ReverseProxy(tctx), hooks=False)
        >> DataReceived(tctx.client, tflow.tdnsreq().packed)
        << NextLayerHook(Placeholder(NextLayer))
        >> reply_next_layer(layers.DNSLayer)
        << layers.dns.DnsRequestHook(f)
        >> reply(None)
        # lazy strategy: the upstream connection is only opened once a request exists.
        << OpenConnection(server)
        >> reply(None)
        << SendData(tctx.server, tflow.tdnsreq().packed)
    )
    assert server().address == ("8.8.8.8", 53)
@pytest.mark.parametrize("keep_host_header", [True, False])
def test_quic(tctx: Context, keep_host_header: bool):
    """Reverse proxying to a QUIC target: the outgoing SNI follows keep_host_header."""
    with taddons.context():
        tctx.options.keep_host_header = keep_host_header
        tctx.server.sni = "other.example.com"
        tctx.client.proxy_mode = ProxyMode.parse("reverse:quic://example.org:443")
        client_hello = Placeholder(bytes)

        def set_settings(data: quic.QuicTlsData) -> None:
            # Provide minimal TLS settings so the QUIC handshake can start.
            data.settings = quic.QuicTlsSettings()

        assert (
            Playbook(modes.ReverseProxy(tctx))
            << OpenConnection(tctx.server)
            >> reply(None)
            >> DataReceived(tctx.client, b"\x00")
            << NextLayerHook(Placeholder(NextLayer))
            >> reply_next_layer(layers.ServerQuicLayer)
            << quic.QuicStartServerHook(Placeholder(quic.QuicTlsData))
            >> reply(side_effect=set_settings)
            << SendData(tctx.server, client_hello)
            << RequestWakeup(Placeholder(float))
        )
        assert tctx.server.address == ("example.org", 443)
        # With keep_host_header the pre-set server SNI is kept; otherwise the
        # reverse-proxy target's hostname is used.
        assert quic.quic_parse_client_hello_from_datagrams([client_hello()]).sni == (
            "other.example.com" if keep_host_header else "example.org"
        )
def test_udp(tctx: Context):
    """UDP reverse proxying: a datagram is recorded on the flow and forwarded upstream."""
    tctx.client.proxy_mode = ProxyMode.parse("reverse:udp://1.2.3.4:5")
    flow = Placeholder(UDPFlow)
    assert (
        Playbook(modes.ReverseProxy(tctx))
        << OpenConnection(tctx.server)
        >> reply(None)
        >> DataReceived(tctx.client, b"test-input")
        << NextLayerHook(Placeholder(NextLayer))
        >> reply_next_layer(layers.UDPLayer)
        << udp.UdpStartHook(flow)
        >> reply()
        << udp.UdpMessageHook(flow)
        >> reply()
        << SendData(tctx.server, b"test-input")
    )
    assert tctx.server.address == ("1.2.3.4", 5)
    # Exactly one message must have been recorded, with the original payload.
    assert len(flow().messages) == 1
    assert flow().messages[0].content == b"test-input"
@pytest.mark.parametrize("patch", [True, False])
@pytest.mark.parametrize("connection_strategy", ["eager", "lazy"])
def test_reverse_proxy_tcp_over_tls(
    tctx: Context, monkeypatch, patch, connection_strategy
):
    """
    Test
        client --TCP-- mitmproxy --TCP over TLS-- server
    reverse proxying.
    """
    flow = Placeholder(TCPFlow)
    data = Placeholder(bytes)
    tctx.client.proxy_mode = ProxyMode.parse("reverse:https://localhost:8000")
    tctx.options.connection_strategy = connection_strategy
    playbook = Playbook(modes.ReverseProxy(tctx))
    if connection_strategy == "eager":
        # eager: the upstream connection is opened before any client data arrives.
        (
            playbook
            << OpenConnection(tctx.server)
            >> DataReceived(tctx.client, b"\x01\x02\x03")
            >> reply(None, to=OpenConnection(tctx.server))
        )
    else:
        (playbook >> DataReceived(tctx.client, b"\x01\x02\x03"))
    if patch:
        # patch=True: the next layer is plain TCP (no TLS layer interposed).
        (
            playbook
            << NextLayerHook(Placeholder(NextLayer))
            >> reply_next_layer(tcp.TCPLayer)
            << TcpStartHook(flow)
            >> reply()
        )
        if connection_strategy == "lazy":
            (
                playbook
                # only now we open a connection
                << OpenConnection(tctx.server)
                >> reply(None)
            )
        assert (
            playbook << TcpMessageHook(flow) >> reply() << SendData(tctx.server, data)
        )
        assert data() == b"\x01\x02\x03"
    else:
        # patch=False: a server-side TLS layer wraps the TCP layer.
        (
            playbook
            << NextLayerHook(Placeholder(NextLayer))
            >> reply_next_layer(tls.ServerTLSLayer)
        )
        if connection_strategy == "lazy":
            (
                playbook
                << NextLayerHook(Placeholder(NextLayer))
                >> reply_next_layer(tcp.TCPLayer)
                << TcpStartHook(flow)
                >> reply()
                << OpenConnection(tctx.server)
                >> reply(None)
            )
        assert (
            playbook
            << TlsStartServerHook(Placeholder())
            >> reply_tls_start_server()
            << SendData(tctx.server, data)
        )
        # The outgoing TLS ClientHello carries the reverse-proxy target as SNI.
        assert tls.parse_client_hello(data()).sni == "localhost"
@pytest.mark.parametrize("connection_strategy", ["eager", "lazy"])
def test_transparent_tcp(tctx: Context, connection_strategy):
    """Transparent mode: server-initiated data reaches the client (eager only)."""
    flow = Placeholder(TCPFlow)
    tctx.options.connection_strategy = connection_strategy
    tctx.server.address = ("address", 22)
    playbook = Playbook(modes.TransparentProxy(tctx))
    if connection_strategy == "lazy":
        # lazy: nothing happens until the client sends data.
        assert playbook
    else:
        assert (
            playbook
            << OpenConnection(tctx.server)
            >> reply(None)
            >> DataReceived(tctx.server, b"hello")
            << NextLayerHook(Placeholder(NextLayer))
            >> reply_next_layer(tcp.TCPLayer)
            << TcpStartHook(flow)
            >> reply()
            << TcpMessageHook(flow)
            >> reply()
            << SendData(tctx.client, b"hello")
        )
        assert flow().messages[0].content == b"hello"
        assert not flow().messages[0].from_client
    assert tctx.server.address == ("address", 22)
def test_reverse_eager_connect_failure(tctx: Context):
    """
    Test that the client connection is closed when an eager connect
    to the reverse-proxy target fails.
    """
    tctx.client.proxy_mode = ProxyMode.parse("reverse:https://localhost:8000")
    tctx.options.connection_strategy = "eager"
    playbook = Playbook(modes.ReverseProxy(tctx))
    assert (
        playbook
        << OpenConnection(tctx.server)
        >> reply("IPoAC unstable")
        << CloseConnection(tctx.client)
        >> ConnectionClosed(tctx.client)
    )
def test_transparent_eager_connect_failure(tctx: Context):
    """Test that we recover from a transparent mode connect error."""
    tctx.options.connection_strategy = "eager"
    tctx.server.address = ("address", 22)
    assert (
        Playbook(modes.TransparentProxy(tctx), logs=True)
        << OpenConnection(tctx.server)
        >> reply("something something")
        # A failed eager connect closes the client connection instead of crashing.
        << CloseConnection(tctx.client)
        >> ConnectionClosed(tctx.client)
    )
# SOCKS5 greeting: version 5, one auth method offered ("no authentication").
CLIENT_HELLO = b"\x05\x01\x00"
# SOCKS5 server choice: version 5, "no authentication" selected.
SERVER_HELLO = b"\x05\x00"
@pytest.mark.parametrize(
    "address,packed",
    [
        # IPv4, IPv6, and domain-name address types.
        ("127.0.0.1", b"\x01\x7f\x00\x00\x01"),
        (
            "::1",
            b"\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01",
        ),
        ("example.com", b"\x03\x0bexample.com"),
    ],
)
def test_socks5_success(address: str, packed: bytes, tctx: Context):
    """A SOCKS5 CONNECT request opens a connection to the requested target."""
    tctx.options.connection_strategy = "eager"
    playbook = Playbook(modes.Socks5Proxy(tctx))
    server = Placeholder(Server)
    nextlayer = Placeholder(NextLayer)
    assert (
        playbook
        >> DataReceived(tctx.client, CLIENT_HELLO)
        << SendData(tctx.client, SERVER_HELLO)
        >> DataReceived(
            tctx.client, b"\x05\x01\x00" + packed + b"\x12\x34applicationdata"
        )
        << OpenConnection(server)
        >> reply(None)
        << SendData(tctx.client, b"\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00")
        << NextLayerHook(nextlayer)
    )
    assert server().address == (address, 0x1234)
    # Bytes trailing the CONNECT request are handed to the next layer untouched.
    assert nextlayer().data_client() == b"applicationdata"
def _valid_socks_auth(data: modes.Socks5AuthData):
    """Hook side effect: mark the supplied SOCKS5 credentials as accepted."""
    setattr(data, "valid", True)
def test_socks5_trickle(tctx: Context):
    """The SOCKS5 handshake also works when data arrives one byte at a time."""
    ProxyAuth().load(tctx.options)
    tctx.options.proxyauth = "user:password"
    tctx.options.connection_strategy = "lazy"
    playbook = Playbook(modes.Socks5Proxy(tctx))
    # Greeting: version 5, one method, user/password auth.
    for x in b"\x05\x01\x02":
        playbook >> DataReceived(tctx.client, bytes([x]))
    playbook << SendData(tctx.client, b"\x05\x02")
    # Username/password sub-negotiation, trickled byte by byte.
    for x in b"\x01\x04user\x08password":
        playbook >> DataReceived(tctx.client, bytes([x]))
    playbook << modes.Socks5AuthHook(Placeholder())
    playbook >> reply(side_effect=_valid_socks_auth)
    playbook << SendData(tctx.client, b"\x01\x00")
    # CONNECT request to 127.0.0.1:0x1234.
    for x in b"\x05\x01\x00\x01\x7f\x00\x00\x01\x12\x34":
        playbook >> DataReceived(tctx.client, bytes([x]))
    assert playbook << SendData(
        tctx.client, b"\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00"
    )
@pytest.mark.parametrize(
    "data,err,msg",
    [
        (
            b"GET / HTTP/1.1",
            None,
            "Probably not a SOCKS request but a regular HTTP request. Invalid SOCKS version. Expected 0x05, got 0x47",
        ),
        (b"abcd", None, "Invalid SOCKS version. Expected 0x05, got 0x61"),
        (
            CLIENT_HELLO + b"\x05\x02\x00\x01\x7f\x00\x00\x01\x12\x34",
            SERVER_HELLO + b"\x05\x07\x00\x01\x00\x00\x00\x00\x00\x00",
            r"Unsupported SOCKS5 request: b'\x05\x02\x00\x01\x7f\x00\x00\x01\x124'",
        ),
        (
            CLIENT_HELLO + b"\x05\x01\x00\xff\x00\x00",
            SERVER_HELLO + b"\x05\x08\x00\x01\x00\x00\x00\x00\x00\x00",
            r"Unknown address type: 255",
        ),
    ],
)
def test_socks5_err(data: bytes, err: bytes | None, msg: str, tctx: Context):
    """Invalid SOCKS5 input yields an optional error reply, a close, and a log entry."""
    playbook = Playbook(modes.Socks5Proxy(tctx), logs=True) >> DataReceived(
        tctx.client, data
    )
    # Version errors close immediately; protocol errors send a SOCKS5 error reply first.
    if err:
        playbook << SendData(tctx.client, err)
    playbook << CloseConnection(tctx.client)
    playbook << Log(msg)
    assert playbook
@pytest.mark.parametrize(
    "client_greeting,server_choice,client_auth,server_resp,address,packed",
    [
        (
            b"\x05\x01\x02",
            b"\x05\x02",
            b"\x01\x04user\x08password",
            b"\x01\x00",
            "127.0.0.1",
            b"\x01\x7f\x00\x00\x01",
        ),
        (
            b"\x05\x02\x01\x02",
            b"\x05\x02",
            b"\x01\x04user\x08password",
            b"\x01\x00",
            "127.0.0.1",
            b"\x01\x7f\x00\x00\x01",
        ),
    ],
)
def test_socks5_auth_success(
    client_greeting: bytes,
    server_choice: bytes,
    client_auth: bytes,
    server_resp: bytes,
    address: str,  # was annotated `bytes`, but the parametrized values are strings
    packed: bytes,
    tctx: Context,
):
    """SOCKS5 with username/password authentication completes and connects."""
    ProxyAuth().load(tctx.options)
    tctx.options.proxyauth = "user:password"
    server = Placeholder(Server)
    nextlayer = Placeholder(NextLayer)
    playbook = (
        Playbook(modes.Socks5Proxy(tctx), logs=True)
        >> DataReceived(tctx.client, client_greeting)
        << SendData(tctx.client, server_choice)
        >> DataReceived(tctx.client, client_auth)
        << modes.Socks5AuthHook(Placeholder(modes.Socks5AuthData))
        >> reply(side_effect=_valid_socks_auth)
        << SendData(tctx.client, server_resp)
        >> DataReceived(
            tctx.client, b"\x05\x01\x00" + packed + b"\x12\x34applicationdata"
        )
        << OpenConnection(server)
        >> reply(None)
        << SendData(tctx.client, b"\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00")
        << NextLayerHook(nextlayer)
    )
    assert playbook
    assert server().address == (address, 0x1234)
    assert nextlayer().data_client() == b"applicationdata"
@pytest.mark.parametrize(
    "client_greeting,server_choice,client_auth,err,msg",
    [
        # Client does not offer user/password auth at all.
        (
            b"\x05\x01\x00",
            None,
            None,
            b"\x05\xff\x00\x01\x00\x00\x00\x00\x00\x00",
            "Client does not support SOCKS5 with user/password authentication.",
        ),
        # Client offers it but supplies wrong credentials.
        (
            b"\x05\x02\x00\x02",
            b"\x05\x02",
            b"\x01\x04" + b"user" + b"\x07" + b"errcode",
            b"\x01\x01",
            "authentication failed",
        ),
    ],
)
def test_socks5_auth_fail(
    client_greeting: bytes,
    server_choice: bytes | None,
    client_auth: bytes | None,
    err: bytes,
    msg: str,
    tctx: Context,
):
    """Authentication failures produce an error reply, close the connection, and log."""
    ProxyAuth().load(tctx.options)
    tctx.options.proxyauth = "user:password"
    playbook = Playbook(modes.Socks5Proxy(tctx), logs=True) >> DataReceived(
        tctx.client, client_greeting
    )
    if server_choice is None:
        # No acceptable method offered: reject during the greeting already.
        playbook << SendData(tctx.client, err)
    else:
        playbook << SendData(tctx.client, server_choice)
        playbook >> DataReceived(tctx.client, client_auth)
        playbook << modes.Socks5AuthHook(Placeholder(modes.Socks5AuthData))
        playbook >> reply()
        playbook << SendData(tctx.client, err)
    playbook << CloseConnection(tctx.client)
    playbook << Log(msg)
    assert playbook
def test_socks5_eager_err(tctx: Context):
    """With eager connects, a failed upstream connection yields a SOCKS5 error reply."""
    tctx.options.connection_strategy = "eager"
    server = Placeholder(Server)
    assert (
        Playbook(modes.Socks5Proxy(tctx))
        >> DataReceived(tctx.client, CLIENT_HELLO)
        << SendData(tctx.client, SERVER_HELLO)
        >> DataReceived(tctx.client, b"\x05\x01\x00\x01\x7f\x00\x00\x01\x12\x34")
        << OpenConnection(server)
        >> reply("out of socks")
        # 0x04 = "Host unreachable" reply, then the client connection is closed.
        << SendData(tctx.client, b"\x05\x04\x00\x01\x00\x00\x00\x00\x00\x00")
        << CloseConnection(tctx.client)
    )
def test_socks5_premature_close(tctx: Context):
    """A client disconnect mid-handshake is logged and cleaned up."""
    assert (
        Playbook(modes.Socks5Proxy(tctx), logs=True)
        >> DataReceived(tctx.client, b"\x05")
        >> ConnectionClosed(tctx.client)
        << Log(r"Client closed connection before completing SOCKS5 handshake: b'\x05'")
        << CloseConnection(tctx.client)
    )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/test_socks5_fuzz.py | test/mitmproxy/proxy/layers/test_socks5_fuzz.py | from hypothesis import given
from hypothesis.strategies import binary
from mitmproxy import options
from mitmproxy.connection import Client
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers.modes import Socks5Proxy
# Module-level options/context shared by all hypothesis examples;
# the fuzz target below creates a fresh Socks5Proxy layer per example.
opts = options.Options()
tctx = Context(
    Client(
        peername=("client", 1234),
        sockname=("127.0.0.1", 8080),
        timestamp_start=1605699329,
    ),
    opts,
)
@given(binary())
def test_socks5_fuzz(data):
    """Feed arbitrary bytes into a fresh SOCKS5 layer; it must never raise."""
    proxy = Socks5Proxy(tctx)
    # Drain the command generator so the handler code actually executes.
    for _ in proxy.handle_event(DataReceived(tctx.client, data)):
        pass
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/test__client_hello_parser.py | test/mitmproxy/proxy/layers/quic/test__client_hello_parser.py | import pytest
from aioquic.quic.connection import QuicConnection
from aioquic.quic.connection import QuicConnectionError
from mitmproxy.proxy.layers.quic import _client_hello_parser
from mitmproxy.proxy.layers.quic._client_hello_parser import (
quic_parse_client_hello_from_datagrams,
)
from test.mitmproxy.proxy.layers.quic.test__stream_layers import client_hello
class TestParseClientHello:
    """Tests for quic_parse_client_hello_from_datagrams."""

    def test_input(self):
        # A valid captured datagram parses and exposes the SNI.
        assert (
            quic_parse_client_hello_from_datagrams([client_hello]).sni == "example.com"
        )
        # Corrupting bytes inside the packet must raise.
        with pytest.raises(ValueError):
            quic_parse_client_hello_from_datagrams(
                [client_hello[:183] + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00"]
            )
        # A packet that is not a QUIC initial packet is rejected.
        with pytest.raises(ValueError, match="not initial"):
            quic_parse_client_hello_from_datagrams(
                [
                    b"\\s\xd8\xd8\xa5dT\x8bc\xd3\xae\x1c\xb2\x8a7-\x1d\x19j\x85\xb0~\x8c\x80\xa5\x8cY\xac\x0ecK\x7fC2f\xbcm\x1b\xac~"
                ]
            )

    def test_invalid(self, monkeypatch):
        # XXX: This test is terrible, it should use actual invalid data.
        class InvalidClientHello(Exception):
            @property
            def data(self):
                raise EOFError()

        monkeypatch.setattr(_client_hello_parser, "QuicClientHello", InvalidClientHello)
        with pytest.raises(ValueError, match="Invalid ClientHello"):
            quic_parse_client_hello_from_datagrams([client_hello])

    def test_connection_error(self, monkeypatch):
        # Simulate aioquic raising while processing the received datagram.
        def raise_conn_err(self, data, addr, now):
            raise QuicConnectionError(0, 0, "Conn err")

        monkeypatch.setattr(QuicConnection, "receive_datagram", raise_conn_err)
        with pytest.raises(ValueError, match="Conn err"):
            quic_parse_client_hello_from_datagrams([client_hello])

    def test_no_return(self):
        # Inserting a byte makes payload decryption fail without another error.
        with pytest.raises(
            ValueError, match="Invalid ClientHello packet: payload_decrypt_error"
        ):
            quic_parse_client_hello_from_datagrams(
                [client_hello[0:1200] + b"\x00" + client_hello[1200:]]
            )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/test__hooks.py | test/mitmproxy/proxy/layers/quic/test__hooks.py | from mitmproxy.options import Options
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layers.quic._hooks import QuicStartServerHook
from mitmproxy.proxy.layers.quic._hooks import QuicTlsData
from mitmproxy.proxy.layers.quic._hooks import QuicTlsSettings
from mitmproxy.test.tflow import tclient_conn
def test_reprs():
    """Smoke test: QuicStartServerHook (and its nested data) has a usable repr."""
    client = tclient_conn()
    tls_data = QuicTlsData(
        conn=client,
        context=Context(client, Options()),
        settings=QuicTlsSettings(),
    )
    hook = QuicStartServerHook(data=tls_data)
    assert repr(hook)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/test__stream_layers.py | test/mitmproxy/proxy/layers/quic/test__stream_layers.py | import ssl
import time
from logging import DEBUG
from logging import ERROR
from logging import WARNING
from typing import Literal
from typing import TypeVar
from unittest.mock import MagicMock
import pytest
from aioquic.buffer import Buffer as QuicBuffer
from aioquic.quic import events as quic_events
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import pull_quic_header
from aioquic.quic.connection import QuicConnection
from mitmproxy import connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.layers import tls
from mitmproxy.proxy.layers.quic._commands import CloseQuicConnection
from mitmproxy.proxy.layers.quic._commands import QuicStreamCommand
from mitmproxy.proxy.layers.quic._commands import ResetQuicStream
from mitmproxy.proxy.layers.quic._commands import SendQuicStreamData
from mitmproxy.proxy.layers.quic._commands import StopSendingQuicStream
from mitmproxy.proxy.layers.quic._events import QuicConnectionClosed
from mitmproxy.proxy.layers.quic._events import QuicStreamDataReceived
from mitmproxy.proxy.layers.quic._events import QuicStreamReset
from mitmproxy.proxy.layers.quic._events import QuicStreamStopSending
from mitmproxy.proxy.layers.quic._hooks import QuicStartClientHook
from mitmproxy.proxy.layers.quic._hooks import QuicStartServerHook
from mitmproxy.proxy.layers.quic._hooks import QuicTlsData
from mitmproxy.proxy.layers.quic._hooks import QuicTlsSettings
from mitmproxy.proxy.layers.quic._stream_layers import ClientQuicLayer
from mitmproxy.proxy.layers.quic._stream_layers import error_code_to_str
from mitmproxy.proxy.layers.quic._stream_layers import is_success_error_code
from mitmproxy.proxy.layers.quic._stream_layers import QuicLayer
from mitmproxy.proxy.layers.quic._stream_layers import QuicSecretsLogger
from mitmproxy.proxy.layers.quic._stream_layers import ServerQuicLayer
from mitmproxy.proxy.layers.quic._stream_layers import tls_settings_to_configuration
from mitmproxy.utils import data
from test.mitmproxy.proxy import tutils
# Data helper rooted at the "test" package (mitmproxy.utils.data.Data).
tdata = data.Data("test")
# Type variable bound to Layer for generically-typed helpers.
T = TypeVar("T", bound=layer.Layer)
class DummyLayer(layer.Layer):
    """Minimal layer that blindly forwards every event to its child layer."""

    child_layer: layer.Layer | None

    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        child = self.child_layer
        assert child
        return child.handle_event(event)
class TlsEchoLayer(tutils.EchoLayer):
    """Echo layer with magic byte-strings that trigger connection/stream commands.

    Also records the QuicConnectionClosed event for later inspection by tests.
    """

    # Optional error message a test may record.
    err: str | None = None
    # The QuicConnectionClosed event observed, if any.
    closed: QuicConnectionClosed | None = None

    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        if isinstance(event, events.DataReceived) and event.data == b"open-connection":
            err = yield commands.OpenConnection(self.context.server)
            if err:
                yield commands.SendData(
                    event.connection, f"open-connection failed: {err}".encode()
                )
        elif (
            isinstance(event, events.DataReceived) and event.data == b"close-connection"
        ):
            yield commands.CloseConnection(event.connection)
        elif (
            isinstance(event, events.DataReceived)
            and event.data == b"close-connection-error"
        ):
            yield CloseQuicConnection(event.connection, 123, None, "error")
        elif (
            isinstance(event, events.DataReceived) and event.data == b"invalid-command"
        ):
            # Emit a command type the layer under test cannot know about.
            class InvalidConnectionCommand(commands.ConnectionCommand):
                pass

            yield InvalidConnectionCommand(event.connection)
        elif (
            isinstance(event, events.DataReceived)
            and event.data == b"invalid-stream-command"
        ):
            # Same, but for the stream-command hierarchy.
            class InvalidStreamCommand(QuicStreamCommand):
                pass

            yield InvalidStreamCommand(event.connection, 42)
        elif isinstance(event, QuicConnectionClosed):
            self.closed = event
        elif isinstance(event, QuicStreamDataReceived):
            # Echo stream data back on the same stream.
            yield SendQuicStreamData(
                event.connection, event.stream_id, event.data, event.end_stream
            )
        elif isinstance(event, QuicStreamReset):
            yield ResetQuicStream(event.connection, event.stream_id, event.error_code)
        elif isinstance(event, QuicStreamStopSending):
            yield StopSendingQuicStream(
                event.connection, event.stream_id, event.error_code
            )
        else:
            yield from super()._handle_event(event)
# Raw QUIC initial datagram containing a complete ClientHello
# (parses to SNI "example.com", see TestParseClientHello).
client_hello = bytes.fromhex(
    "ca0000000108c0618c84b54541320823fcce946c38d8210044e6a93bbb283593f75ffb6f2696b16cfdcb5b1255"
    "577b2af5fc5894188c9568bc65eef253faf7f0520e41341cfa81d6aae573586665ce4e1e41676364820402feec"
    "a81f3d22dbb476893422069066104a43e121c951a08c53b83f960becf99cf5304d5bc5346f52f472bd1a04d192"
    "0bae025064990d27e5e4c325ac46121d3acadebe7babdb96192fb699693d65e2b2e21c53beeb4f40b50673a2f6"
    "c22091cb7c76a845384fedee58df862464d1da505a280bfef91ca83a10bebbcb07855219dbc14aecf8a48da049"
    "d03c77459b39d5355c95306cd03d6bdb471694fa998ca3b1f875ce87915b88ead15c5d6313a443f39aad808922"
    "57ddfa6b4a898d773bb6fb520ede47ebd59d022431b1054a69e0bbbdf9f0fb32fc8bcc4b6879dd8cd5389474b1"
    "99e18333e14d0347740a11916429a818bb8d93295d36e99840a373bb0e14c8b3adcf5e2165e70803f15316fd5e"
    "5eeec04ae68d98f1adb22c54611c80fcd8ece619dbdf97b1510032ec374b7a71f94d9492b8b8cb56f56556dd97"
    "edf1e50fa90e868ff93636a365678bdf3ee3f8e632588cd506b6f44fbfd4d99988238fbd5884c98f6a124108c1"
    "878970780e42b111e3be6215776ef5be5a0205915e6d720d22c6a81a475c9e41ba94e4983b964cb5c8e1f40607"
    "76d1d8d1adcef7587ea084231016bd6ee2643d11a3a35eb7fe4cca2b3f1a4b21e040b0d426412cca6c4271ea63"
    "fb54ed7f57b41cd1af1be5507f87ea4f4a0c997367e883291de2f1b8a49bdaa52bae30064351b1139703400730"
    "18a4104344ec6b4454b50a42e804bc70e78b9b3c82497273859c82ed241b643642d76df6ceab8f916392113a62"
    "b231f228c7300624d74a846bec2f479ab8a8c3461f91c7bf806236e3bd2f54ba1ef8e2a1e0bfdde0c5ad227f7d"
    "364c52510b1ade862ce0c8d7bd24b6d7d21c99b34de6d177eb3d575787b2af55060d76d6c2060befbb7953a816"
    "6f66ad88ecf929dbb0ad3a16cf7dfd39d925e0b4b649c6d0c07ad46ed0229c17fb6a1395f16e1b138aab3af760"
    "2b0ac762c4f611f7f3468997224ffbe500a7c53f92f65e41a3765a9f1d7e3f78208f5b4e147962d8c97d6c1a80"
    "91ffc36090b2043d71853616f34c2185dc883c54ab6d66e10a6c18e0b9a4742597361f8554a42da3373241d0c8"
    "54119bfadccffaf2335b2d97ffee627cb891bda8140a39399f853da4859f7e19682e152243efbaffb662edd19b"
    "3819a74107c7dbe05ecb32e79dcdb1260f153b1ef133e978ccca3d9e400a7ed6c458d77e2956d2cb897b7a298b"
    "fe144b5defdc23dfd2adf69f1fb0917840703402d524987ae3b1dcb85229843c9a419ef46e1ba0ba7783f2a2ec"
    "d057a57518836aef2a7839ebd3688da98b54c942941f642e434727108d59ea25875b3050ca53d4637c76cbcbb9"
    "e972c2b0b781131ee0a1403138b55486fe86bbd644920ee6aa578e3bab32d7d784b5c140295286d90c99b14823"
    "1487f7ea64157001b745aa358c9ea6bec5a8d8b67a7534ec1f7648ff3b435911dfc3dff798d32fbf2efe2c1fcc"
    "278865157590572387b76b78e727d3e7682cb501cdcdf9a0f17676f99d9aa67f10edccc9a92080294e88bf28c2"
    "a9f32ae535fdb27fff7706540472abb9eab90af12b2bea005da189874b0ca69e6ae1690a6f2adf75be3853c94e"
    "fd8098ed579c20cb37be6885d8d713af4ba52958cee383089b98ed9cb26e11127cf88d1b7d254f15f7903dd7ed"
    "297c0013924e88248684fe8f2098326ce51aa6e5"
)
# First datagram of a ClientHello that is fragmented across two datagrams.
fragmented_client_hello1 = bytes.fromhex(
    "c20000000108d520c3803f5de4d3000044bcb607af28f41aef1616d37bdc7697d73d7963a2d622e7ccddfb4859"
    "f369d840f949a29bb19ad7264728eb31eada17a4e1ba666bba67868cf2c30ca4e1d41f67d392c296787a50615a"
    "1caf4282f9cc59c98816e1734b57ba4dedf02c225a3f57163bb77703299fafb46d09a4d281eb44f988edd28984"
    "04a7161cf7454d8e184f87ae9be1f3bd2c2ae04ba14233ec92960a75a4201bc114070ecfd4c10a4fb0c72749ee"
    "b5fa0e52b53dc0da6a485eb8bb467e7a1972c4e1c3a38622857b44eb94d653ee2f2e1fa3bf3f01cacd17b2668a"
    "8578e04da4181f3d6ad4031e4f7adec95d015d4f275505ae14fa03154b18c3b838143fac06cb2c8b395effa47c"
    "08923e352d1c4beff9e228760f5a80e6214485c7e53efd8d649492aafb3a9c9472335569c2d7971c86f319069e"
    "c6ccd13b0b8f517c51fc2e42dc5e7bc3434f306955cf1dc575ea9e18617699045b92b006599afd94abb25018ea"
    "f63cfcc247f76b728c4fc4e663dff64b90059d1d27f8ecd63bb548862b88bcd52e0711f222b15c022d214a2cc3"
    "93e537e32d149c67aa84692f1a204475a7acceaa0ab5f823ea90af601bdfb7f4036971e1c786fca7fa7e8ab042"
    "24307bcc3093886b54e4c9e6b7cb286d6259a8231ffae0f589f687f92232ac5384988631efb70dc85fc594bb3c"
    "1c0ceebc08b37d8989da0ae786e30d1278ffddbac47484346afd8439495aa1d392ce76f8ebc8d3d1870a0698ca"
    "b133cabbacae924013025e7bce5ac6aaf87684b6409a6a58e8bb294c249a5c7ca9b2961c57dc031485e3a000ec"
    "ea4e908cf9f33e86f0fd5d4ca5996b73c273dfda3fe68aa6a385984cb7fd2bf5f69d997580b407d48845215422"
    "c3c9fe52e7aa4b4e11c067db3e7c87c55f3b1400f796a4b873b666b7027c33138c1f310f65e20b53bcba019f1e"
    "08aee1a89430744c8bd2dd3a788410caa4356099b87cab2463da107a6919af38c159a258ff6693dd71f1941a52"
    "01d6a0b2fc52cfab6e0ba2c84c6231bc2a54fe1b6af1641e1168599ea03da913e537880f13128515085fd17b47"
    "fe202b82152d1c7df2e66788a2d0e0aab0e6375d368f8064e29912f32d4c509408a642a597bcf39c3e6fe31873"
    "e6173067cf3fc65702152e43a9d2cc7262e69550bd3c10e833c3c5ec48878b214426eca9cdc169f59cbc2c93dc"
    "94562e05d94761c9f76191b505097dca964d56b9889f904347f6b250f5a1f2bf3c9e9f4370a164a4185e0d83c0"
    "96e1799b8d950535cf96eec690fa765e9e74baea45f3157ba8c78158d365acc1a5abb358093cca6afcde287096"
    "ba74b4238789ede0947083facfc9bb3129361a283d72fe860c9666877fb263650410ae5af9fd48e9a2214f9f0a"
    "39f3b55edca84c836a745f8fc294d176b878fede1e375358d2e63bbbc0632752b19afda03e527b6e9deb32b0a8"
    "e617f5396312b7769ccd164e43ba1ada90d97005ab8e4eda57d3a953b5cf5fac9676fc64dd7163bdb6b17f6984"
    "f70070f2eadace62317215f240100db10283cd4b7c62f2ba1191c0feee9e6fc6026dcaec12ecb2329221130aac"
    "18f08b091f5292e51c0ca35cfefabf9b86d8478f7cc9f2983260e6cec537081684119a02d51e0895d9ee9294cf"
    "a6f695173fa816f168751cf1d79730ded3e7e97325d2582a6516436aa165260f576f330535cf28d6f9c26a6f7d"
    "dd74b60e702826392ac9f16a1ccdb5"
)
# Second datagram of the fragmented ClientHello above.
fragmented_client_hello2 = bytes.fromhex(
    "cd0000000108d520c3803f5de4d3000044bceadc93d1a46ab45f934299ba642a3a4bdac62cf80981105cc546c2"
    "96b78fda0acdec8e8cb8a69e4d3446033f3edd0f52fe02d99c841336402b9c2419852414b9bc6b17128b1c198e"
    "0f2a709895cddef029b738c7a8bf7917162e5709f7fa4933e6a9db5da418db8794e8458dd699eb31752c402cbf"
    "3b6f0d7e6983dba686285a49b4c8724f9653ed4430667a242f4b0613aa37b039226b1c42a1cfaeab40cabbf6be"
    "d7d49cbca3a10e8aced1560e44a22073a9432f39e16d177ecf89c4b3807ac748fed84d9811fe91aad76bf85bc8"
    "c8b1def2985b8cce6226ce441924418f0c4c6895918e86065a3143dda8afce756c7318b3d861a1f0160d0814ef"
    "118389f55198b0c5da4ed6d95a72b6f2a35ffc56bda85753dd146dd6eb29f64b51f7ca7e4e0bf7de82a5041e1f"
    "a4dc7303f5b7dc31901185f787876ce81213a587cbe42bdcab63be1c146798641664fecc477b8112109cb317f6"
    "f6fe1f3e36c2e843ec875ed8631ac7527817ab928c68a16ae672ca56464556a8c4c700c4f40920a028207911f3"
    "2cd2840fce3504ca29f25524b1e9108dbded72ff0364443da17573badc99ad33f6c91baeca3c933258500e7b78"
    "347ce76cf893a85f163698edd6209ac5d990f092cc609ff7faa6a0c2e5f57e4154bec72e2441028ad00cdce202"
    "a07e0e9696578e0c17c152b2880874cad11631db5210efaf260d18dccecd04987f8ceea7e534c381d9aed5be28"
    "8b2086103bd84fd6150037cb0bc1abacd3c2b3f1db213998b4b36e86e46264809fa2757f2b2764c0b94dc222ac"
    "674a9f5c183fb40ef52e7b36ed8f3aaf1fe776643d819fb55284387b83f0ad688461ae8612784b36494585caa5"
    "f05fb391216bedc23b00e759bebe0cd19f1d514b5faba8a061d36204dd7c4e8daf1150ae8441aadbbeff7735ff"
    "613ecb2d1dabf256ffcee5b2ba07f0d2d53c7e98691b261cdadf5fac8ed2985720f7f460a8140fdb094870cc65"
    "4656779bbfc095cd5bc666f18e44e86d765004b16a763c330fc165fdb604038067288d56fbd2e6ead2a7352406"
    "4f6995a54ef529990239065ccf33ab5fa3e56ec2ff15b6981bab32658c5d4184407865f3a0e7c37d8d53ac4850"
    "cfdb16887e04eea4284517b2141c1824babae24207ba14e91eb6a30735f33f664d7fefde94d582c06dd26922a6"
    "6e4657c144ee9f99b7985ba1fd7dceb700cecdcb8950a57fc3b239709e84a4616d8e0f7865025b37d27e5cc7c2"
    "b24b02745a89e12315ff4c4e87ea0d4ff90018f4243de3668b22547ba3a147540582b28152ad9412f0c2aea0c1"
    "c0bf71c4176fed4c1d96853ef1d5db80ce4ba66d67c6998c052ebb2cf05511c54d233c24c2f9ed1ea14c305eba"
    "9aed02ad0f1c48772646bfc4edc3f735cd3c16c885e1c54918e0070e1bcc68d835097fe43183e3ef26ab3d1993"
    "dca6960b6ca0ffb1b90417114e55364211c1bd9688adfbb77ebfd7b7ffe47c45f3813390aeb5020fb63c018641"
    "5a260ae26fab479e170843936d8e786120afa6edacecb32abfbe180237b0684507636fe221b2b980683a9f3610"
    "8619c5ab4e271dd450d855f0085814750347da051a903bfa251b395cdc59356c68a7dae062e770c37f4d14f8b9"
    "dd989248e7449e9b581ef9925d85e06372dd61bcbde872791e71855a5aa0c734d387731dde31d02500e1cd5f51"
    "954f0e999398e6b0762bf6bb6bef9a"
)
def test_error_code_to_str():
    """error_code_to_str maps known QUIC/H3 codes to names, else a hex fallback."""
    assert error_code_to_str(0x6) == "FINAL_SIZE_ERROR"
    assert error_code_to_str(0x104) == "H3_CLOSED_CRITICAL_STREAM"
    # Plain string literal: the previous f-prefix was redundant (no placeholders).
    assert error_code_to_str(0xDEAD) == "unknown error (0xdead)"
def test_is_success_error_code():
    """Only NO_ERROR (0x0) and H3_NO_ERROR (0x100) count as success codes."""
    for success_code in (0x0, 0x100):
        assert is_success_error_code(success_code)
    for failure_code in (0x6, 0x104, 0xDEAD):
        assert not is_success_error_code(failure_code)
@pytest.mark.parametrize("value", ["s1 s2\n", "s1 s2"])
def test_secrets_logger(value: str):
    """QuicSecretsLogger strips an optional trailing newline and forwards once."""
    sink = MagicMock()
    secrets_logger = QuicSecretsLogger(sink)
    written = secrets_logger.write(value)
    assert written == 6
    secrets_logger.flush()
    sink.assert_called_once_with(None, b"s1 s2")
class MockQuic(QuicConnection):
    """QuicConnection stub that replays exactly one canned event.

    Outgoing datagrams and timers are suppressed so the layer under test
    produces no I/O of its own.
    """

    def __init__(self, event) -> None:
        super().__init__(configuration=QuicConfiguration(is_client=True))
        # The single event to hand out; cleared after the first next_event().
        self.event = event

    def next_event(self):
        # Pop-once semantics: return the stored event, then always None.
        event = self.event
        self.event = None
        return event

    def datagrams_to_send(self, now: float):
        # Never emit datagrams, so the layer sends no data during tests.
        return []

    def get_timer(self):
        # No timer => the layer schedules no wakeups.
        return None
def make_mock_quic(
    tctx: context.Context,
    event: quic_events.QuicEvent | None = None,
    established: bool = True,
) -> tuple[tutils.Playbook, MockQuic]:
    """Build a QuicLayer playbook whose aioquic connection is a MockQuic.

    The client connection is marked CLOSED so the layer performs no real
    handshake; `established` selects whether the tunnel starts out OPEN or
    still ESTABLISHING.
    """
    tctx.client.state = connection.ConnectionState.CLOSED
    quic_layer = QuicLayer(tctx, tctx.client, time=lambda: 0)
    quic_layer.child_layer = TlsEchoLayer(tctx)
    mock = MockQuic(event)
    quic_layer.quic = mock
    quic_layer.tunnel_state = (
        tls.tunnel.TunnelState.OPEN
        if established
        else tls.tunnel.TunnelState.ESTABLISHING
    )
    return tutils.Playbook(quic_layer), mock
class TestQuicLayer:
    """Tests for QuicLayer's translation of individual aioquic events.

    Each test injects one canned event via MockQuic and feeds an empty
    datagram to trigger event processing.
    """

    @pytest.mark.parametrize("established", [True, False])
    def test_invalid_event(self, tctx: context.Context, established: bool):
        # Unknown aioquic event types must be rejected loudly.
        class InvalidEvent(quic_events.QuicEvent):
            pass

        playbook, conn = make_mock_quic(
            tctx, event=InvalidEvent(), established=established
        )
        with pytest.raises(AssertionError, match="Unexpected event"):
            assert playbook >> events.DataReceived(tctx.client, b"")

    def test_invalid_stream_command(self, tctx: context.Context):
        # TlsEchoLayer rejects datagram payloads it does not understand.
        playbook, conn = make_mock_quic(
            tctx, quic_events.DatagramFrameReceived(b"invalid-stream-command")
        )
        with pytest.raises(AssertionError, match="Unexpected stream command"):
            assert playbook >> events.DataReceived(tctx.client, b"")

    def test_close(self, tctx: context.Context):
        # "close-connection" leads to a close with the default error code 0.
        playbook, conn = make_mock_quic(
            tctx, quic_events.DatagramFrameReceived(b"close-connection")
        )
        assert not conn._close_event
        assert (
            playbook
            >> events.DataReceived(tctx.client, b"")
            << commands.CloseConnection(tctx.client)
        )
        assert conn._close_event
        assert conn._close_event.error_code == 0

    def test_close_error(self, tctx: context.Context):
        # "close-connection-error" closes with a custom error code instead.
        playbook, conn = make_mock_quic(
            tctx, quic_events.DatagramFrameReceived(b"close-connection-error")
        )
        assert not conn._close_event
        assert (
            playbook
            >> events.DataReceived(tctx.client, b"")
            << CloseQuicConnection(tctx.client, 123, None, "error")
        )
        assert conn._close_event
        assert conn._close_event.error_code == 123

    def test_datagram(self, tctx: context.Context):
        # Echoed datagram payloads are queued on the (mock) QUIC connection.
        playbook, conn = make_mock_quic(
            tctx, quic_events.DatagramFrameReceived(b"packet")
        )
        assert not conn._datagrams_pending
        assert playbook >> events.DataReceived(tctx.client, b"")
        assert len(conn._datagrams_pending) == 1
        assert conn._datagrams_pending[0] == b"packet"

    def test_stream_data(self, tctx: context.Context):
        # Stream data ends up buffered on the same stream id's send side.
        playbook, conn = make_mock_quic(
            tctx, quic_events.StreamDataReceived(b"packet", False, 42)
        )
        assert 42 not in conn._streams
        assert playbook >> events.DataReceived(tctx.client, b"")
        assert b"packet" == conn._streams[42].sender._buffer

    def test_stream_reset(self, tctx: context.Context):
        # A peer reset is mirrored onto the send side with the same code.
        playbook, conn = make_mock_quic(tctx, quic_events.StreamReset(123, 42))
        assert 42 not in conn._streams
        assert playbook >> events.DataReceived(tctx.client, b"")
        assert conn._streams[42].sender.reset_pending
        assert conn._streams[42].sender._reset_error_code == 123

    def test_stream_stop(self, tctx: context.Context):
        # STOP_SENDING is answered by stopping the receive side of the stream.
        playbook, conn = make_mock_quic(tctx, quic_events.StopSendingReceived(123, 24))
        assert 24 not in conn._streams
        conn._get_or_create_stream_for_send(24)
        assert playbook >> events.DataReceived(tctx.client, b"")
        assert conn._streams[24].receiver.stop_pending
        assert conn._streams[24].receiver._stop_error_code == 123
class SSLTest:
    """Helper container for QuicConnection object.

    Drives a real aioquic connection locally so tests can exchange datagrams
    with the mitmproxy layer under test. When ``settings`` is given, the
    configuration is derived from it; otherwise a test configuration using
    the repo's verification certificates is built.
    """

    def __init__(
        self,
        server_side: bool = False,
        alpn: list[str] | None = None,
        sni: str | None = "example.mitmproxy.org",
        version: int | None = None,
        settings: QuicTlsSettings | None = None,
    ):
        if settings is None:
            self.ctx = QuicConfiguration(
                is_client=not server_side,
                max_datagram_frame_size=65536,
            )
            self.ctx.verify_mode = ssl.CERT_OPTIONAL
            self.ctx.load_verify_locations(
                cafile=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-root.crt"
                ),
            )
            if alpn:
                self.ctx.alpn_protocols = alpn
            if server_side:
                # Pick the IP-SAN certificate when addressed by IP literal.
                if sni == "192.0.2.42":
                    filename = "trusted-leaf-ip"
                else:
                    filename = "trusted-leaf"
                # Fix: interpolate the selected certificate name into the
                # path; the previous literal ignored `filename` entirely.
                self.ctx.load_cert_chain(
                    certfile=tdata.path(
                        f"mitmproxy/net/data/verificationcerts/{filename}.crt"
                    ),
                    keyfile=tdata.path(
                        f"mitmproxy/net/data/verificationcerts/{filename}.key"
                    ),
                )
            self.ctx.server_name = None if server_side else sni
            if version is not None:
                self.ctx.supported_versions = [version]
        else:
            # A caller-provided QuicTlsSettings is mutually exclusive with
            # the alpn/version shortcuts above.
            assert alpn is None
            assert version is None
            self.ctx = tls_settings_to_configuration(
                settings=settings,
                is_client=not server_side,
                server_name=sni,
            )
        self.now = 0.0
        self.address = (sni, 443)
        # Server-side connections are created lazily on the first datagram,
        # because aioquic needs the client's destination connection id first.
        self.quic = None if server_side else QuicConnection(configuration=self.ctx)
        if not server_side:
            self.quic.connect(self.address, now=self.now)

    def write(self, buf: bytes):
        """Feed a datagram from the peer into the local connection."""
        self.now = self.now + 0.1
        if self.quic is None:
            quic_buf = QuicBuffer(data=buf)
            header = pull_quic_header(quic_buf, host_cid_length=8)
            self.quic = QuicConnection(
                configuration=self.ctx,
                original_destination_connection_id=header.destination_cid,
            )
        self.quic.receive_datagram(buf, self.address, self.now)

    def read(self) -> bytes:
        """Collect all pending outgoing datagrams into one buffer.

        Raises AssertionError if the connection has nothing to send.
        """
        self.now = self.now + 0.1
        buf = b""
        has_data = False
        for datagram, addr in self.quic.datagrams_to_send(self.now):
            assert addr == self.address
            buf += datagram
            has_data = True
        if not has_data:
            raise AssertionError("no datagrams to send")
        return buf

    def handshake_completed(self) -> bool:
        """Drain pending events; True iff a HandshakeCompleted was seen."""
        while event := self.quic.next_event():
            if isinstance(event, quic_events.HandshakeCompleted):
                return True
        return False
def _test_echo(
    playbook: tutils.Playbook, tssl: SSLTest, conn: connection.Connection
) -> None:
    """Send a datagram through the layer and expect it echoed back lowercased."""
    tssl.quic.send_datagram_frame(b"Hello World")
    data = tutils.Placeholder(bytes)
    assert (
        playbook
        >> events.DataReceived(conn, tssl.read())
        << commands.SendData(conn, data)
    )
    tssl.write(data())
    # Drain events until the echoed datagram arrives; if the connection runs
    # out of events without one, the echo never happened.
    while event := tssl.quic.next_event():
        if isinstance(event, quic_events.DatagramFrameReceived):
            assert event.data == b"hello world"
            break
    else:
        raise AssertionError()
def finish_handshake(
    playbook: tutils.Playbook,
    conn: connection.Connection,
    tssl: SSLTest,
    child_layer: type[T],
) -> T:
    """Complete the QUIC handshake on the mitmproxy side.

    Replies to the TLS-established hook, forwards the final handshake bytes
    to `tssl`, and installs a fresh `child_layer` instance as the next
    layer, which is returned.
    """
    result: T | None = None

    def set_layer(next_layer: layer.NextLayer) -> None:
        # Capture the instantiated child layer so the caller can inspect it.
        nonlocal result
        result = child_layer(next_layer.context)
        next_layer.layer = result

    data = tutils.Placeholder(bytes)
    tls_hook_data = tutils.Placeholder(tls.TlsData)
    # Client and server connections fire different established hooks.
    if isinstance(conn, connection.Client):
        established_hook = tls.TlsEstablishedClientHook(tls_hook_data)
    else:
        established_hook = tls.TlsEstablishedServerHook(tls_hook_data)
    assert (
        playbook
        >> events.DataReceived(conn, tssl.read())
        << established_hook
        >> tutils.reply()
        << commands.SendData(conn, data)
        << layer.NextLayerHook(tutils.Placeholder())
        >> tutils.reply(side_effect=set_layer)
    )
    assert tls_hook_data().conn.error is None
    tssl.write(data())
    assert result
    return result
def reply_tls_start_client(alpn: str | None = None, *args, **kwargs) -> tutils.reply:
    """
    Helper function to simplify the syntax for quic_start_client hooks.
    """

    def make_client_conn(tls_start: QuicTlsData) -> None:
        # Answer the hook with the repo's trusted leaf certificate.
        config = QuicConfiguration()
        config.load_cert_chain(
            tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.crt"),
            tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.key"),
        )
        tls_start.settings = QuicTlsSettings(
            certificate=config.certificate,
            certificate_chain=config.certificate_chain,
            certificate_private_key=config.private_key,
        )
        if alpn is not None:
            tls_start.settings.alpn_protocols = [alpn]

    return tutils.reply(*args, side_effect=make_client_conn, **kwargs)
def reply_tls_start_server(alpn: str | None = None, *args, **kwargs) -> tutils.reply:
    """
    Helper function to simplify the syntax for quic_start_server hooks.
    """

    def make_server_conn(tls_start: QuicTlsData) -> None:
        # Require the upstream certificate to chain to the test root CA.
        tls_start.settings = QuicTlsSettings(
            ca_file=tdata.path("mitmproxy/net/data/verificationcerts/trusted-root.crt"),
            verify_mode=ssl.CERT_REQUIRED,
        )
        if alpn is not None:
            tls_start.settings.alpn_protocols = [alpn]

    return tutils.reply(*args, side_effect=make_server_conn, **kwargs)
class TestServerQuic:
    """Tests for ServerQuicLayer (QUIC towards the upstream server)."""

    def test_repr(self, tctx: context.Context):
        assert repr(ServerQuicLayer(tctx, time=lambda: 0))

    def test_not_connected(self, tctx: context.Context):
        """Test that we don't do anything if no server connection exists."""
        layer = ServerQuicLayer(tctx, time=lambda: 0)
        layer.child_layer = TlsEchoLayer(tctx)
        assert (
            tutils.Playbook(layer)
            >> events.DataReceived(tctx.client, b"Hello World")
            << commands.SendData(tctx.client, b"hello world")
        )

    def test_simple(self, tctx: context.Context):
        # Full happy path: handshake, echo, then a clean close by the peer.
        tssl = SSLTest(server_side=True)
        playbook = tutils.Playbook(ServerQuicLayer(tctx, time=lambda: tssl.now))
        tctx.server.address = ("example.mitmproxy.org", 443)
        tctx.server.state = connection.ConnectionState.OPEN
        tctx.server.sni = "example.mitmproxy.org"

        # send ClientHello, receive ClientHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            << QuicStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server()
            << commands.SendData(tctx.server, data)
            << commands.RequestWakeup(0.2)
        )
        tssl.write(data())
        assert not tssl.handshake_completed()

        # finish handshake (mitmproxy)
        echo = finish_handshake(playbook, tctx.server, tssl, TlsEchoLayer)

        # finish handshake (locally)
        assert tssl.handshake_completed()
        playbook >> events.DataReceived(tctx.server, tssl.read())
        playbook << None
        assert playbook
        assert tctx.server.tls_established

        # Echo
        assert (
            playbook
            >> events.DataReceived(tctx.client, b"foo")
            << commands.SendData(tctx.client, b"foo")
        )
        _test_echo(playbook, tssl, tctx.server)

        # Peer closes; the close surfaces once the wakeup timer fires.
        tssl.quic.close(42, None, "goodbye from simple")
        playbook >> events.DataReceived(tctx.server, tssl.read())
        playbook << None
        assert playbook
        tssl.now = tssl.now + 60
        assert (
            playbook
            >> tutils.reply(to=commands.RequestWakeup)
            << commands.CloseConnection(tctx.server)
            >> events.ConnectionClosed(tctx.server)
            << None
        )
        assert echo.closed
        assert echo.closed.error_code == 42
        assert echo.closed.reason_phrase == "goodbye from simple"

    def test_untrusted_cert(self, tctx: context.Context):
        """If the certificate is not trusted, we should fail."""
        tssl = SSLTest(server_side=True)
        playbook = tutils.Playbook(ServerQuicLayer(tctx, time=lambda: tssl.now))
        # The leaf cert is not valid for this hostname.
        tctx.server.address = ("wrong.host.mitmproxy.org", 443)
        tctx.server.sni = "wrong.host.mitmproxy.org"

        # send ClientHello
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(TlsEchoLayer)
            >> events.DataReceived(tctx.client, b"open-connection")
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            << QuicStartServerHook(tutils.Placeholder())
            >> reply_tls_start_server()
            << commands.SendData(tctx.server, data)
            << commands.RequestWakeup(0.2)
        )

        # receive ServerHello, finish client handshake
        tssl.write(data())
        assert not tssl.handshake_completed()

        # exchange termination data
        data = tutils.Placeholder(bytes)
        assert (
            playbook
            >> events.DataReceived(tctx.server, tssl.read())
            << commands.SendData(tctx.server, data)
        )
        tssl.write(data())
        tssl.now = tssl.now + 60
        tls_hook_data = tutils.Placeholder(QuicTlsData)
        # The handshake failure is logged, the failed hook fires, and the
        # pending open-connection request is answered with the error.
        assert (
            playbook
            >> tutils.reply(to=commands.RequestWakeup)
            << commands.Log(
                tutils.StrMatching(
                    "Server QUIC handshake failed. hostname 'wrong.host.mitmproxy.org' doesn't match"
                ),
                WARNING,
            )
            << tls.TlsFailedServerHook(tls_hook_data)
            >> tutils.reply()
            << commands.CloseConnection(tctx.server)
            << commands.SendData(
                tctx.client,
                tutils.BytesMatching(
                    b"open-connection failed: hostname 'wrong.host.mitmproxy.org' doesn't match"
                ),
            )
        )
        assert tls_hook_data().conn.error.startswith(
            "hostname 'wrong.host.mitmproxy.org' doesn't match"
        )
        assert not tctx.server.tls_established
def make_client_tls_layer(
    tctx: context.Context, no_server: bool = False, **kwargs
) -> tuple[tutils.Playbook, ClientQuicLayer, SSLTest]:
    """Set up a ClientQuicLayer playbook plus a local SSLTest QUIC client.

    `kwargs` are forwarded to SSLTest; with `no_server` the parent layer is
    a DummyLayer instead of a ServerQuicLayer.
    """
    tssl_client = SSLTest(**kwargs)
    # This is a bit contrived as the client layer expects a server layer as parent.
    # We also set child layers manually to avoid NextLayer noise.
    server_layer = (
        DummyLayer(tctx)
        if no_server
        else ServerQuicLayer(tctx, time=lambda: tssl_client.now)
    )
    client_layer = ClientQuicLayer(tctx, time=lambda: tssl_client.now)
    server_layer.child_layer = client_layer
    playbook = tutils.Playbook(server_layer)
    # Add some server config, this is needed anyways.
    tctx.server.__dict__["address"] = (
        "example.mitmproxy.org",
        443,
    )  # .address fails because connection is open
    tctx.server.sni = "example.mitmproxy.org"
    # Start handshake.
    assert not tssl_client.handshake_completed()
    return playbook, client_layer, tssl_client
class TestClientQuic:
def test_http3_disabled(self, tctx: context.Context):
"""Test that we swallow QUIC packets if QUIC and HTTP/3 are disabled."""
tctx.options.http3 = False
assert (
tutils.Playbook(ClientQuicLayer(tctx, time=time.time), logs=True)
>> events.DataReceived(tctx.client, client_hello)
<< commands.Log(
"Swallowing QUIC handshake because HTTP/3 is disabled.", DEBUG
)
<< None
)
def test_client_only(self, tctx: context.Context):
"""Test QUIC with client only"""
playbook, client_layer, tssl_client = make_client_tls_layer(tctx)
client_layer.debug = " "
assert not tctx.client.tls_established
# Send ClientHello, receive ServerHello
data = tutils.Placeholder(bytes)
assert (
playbook
>> events.DataReceived(tctx.client, tssl_client.read())
<< tls.TlsClienthelloHook(tutils.Placeholder())
>> tutils.reply()
<< QuicStartClientHook(tutils.Placeholder())
>> reply_tls_start_client()
<< commands.SendData(tctx.client, data)
<< commands.RequestWakeup(tutils.Placeholder())
)
tssl_client.write(data())
assert tssl_client.handshake_completed()
# Finish Handshake
finish_handshake(playbook, tctx.client, tssl_client, TlsEchoLayer)
assert tssl_client.quic.tls._peer_certificate
assert tctx.client.tls_established
# Echo
_test_echo(playbook, tssl_client, tctx.client)
other_server = connection.Server(address=None)
assert (
playbook
>> events.DataReceived(other_server, b"Plaintext")
<< commands.SendData(other_server, b"plaintext")
)
# test the close log
tssl_client.now = tssl_client.now + 60
assert (
playbook
>> tutils.reply(to=commands.RequestWakeup)
<< commands.Log(
tutils.StrMatching(
r" >> Wakeup\(command=RequestWakeup\({'delay': [.\d]+}\)\)"
),
DEBUG,
)
<< commands.Log(
" [quic] close_notify Client(client:1234, state=open, tls) (reason=Idle timeout)",
DEBUG,
)
<< commands.CloseConnection(tctx.client)
)
@pytest.mark.parametrize("server_state", ["open", "closed"])
def test_server_required(
self, tctx: context.Context, server_state: Literal["open", "closed"]
):
"""
Test the scenario where a server connection is required (for example, because of an unknown ALPN)
to establish TLS with the client.
"""
if server_state == "open":
tctx.server.state = connection.ConnectionState.OPEN
tssl_server = SSLTest(server_side=True, alpn=["quux"])
playbook, client_layer, tssl_client = make_client_tls_layer(tctx, alpn=["quux"])
# We should now get instructed to open a server connection.
data = tutils.Placeholder(bytes)
def require_server_conn(client_hello: tls.ClientHelloData) -> None:
client_hello.establish_server_tls_first = True
(
playbook
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | true |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/__init__.py | test/mitmproxy/proxy/layers/quic/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/test__events.py | test/mitmproxy/proxy/layers/quic/test__events.py | from mitmproxy.proxy.layers.quic._events import QuicConnectionClosed
from mitmproxy.proxy.layers.quic._events import QuicStreamDataReceived
from mitmproxy.test.tflow import tclient_conn
def test_reprs():
    """Smoke-test that QUIC event reprs can be rendered without raising."""
    conn = tclient_conn()
    quic_events = (
        QuicStreamDataReceived(conn, 42, b"data", end_stream=False),
        QuicConnectionClosed(conn, 0xFF, None, "reason"),
    )
    for event in quic_events:
        assert repr(event)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/test__commands.py | test/mitmproxy/proxy/layers/quic/test__commands.py | from mitmproxy.proxy.layers.quic._commands import CloseQuicConnection
from mitmproxy.proxy.layers.quic._commands import QuicStreamCommand
from mitmproxy.proxy.layers.quic._commands import ResetQuicStream
from mitmproxy.proxy.layers.quic._commands import SendQuicStreamData
from mitmproxy.proxy.layers.quic._commands import StopSendingQuicStream
from mitmproxy.test.tflow import tclient_conn
def test_reprs():
    """Smoke-test that QUIC command reprs can be rendered without raising."""
    conn = tclient_conn()
    quic_commands = (
        QuicStreamCommand(conn, 42),
        SendQuicStreamData(conn, 42, b"data"),
        ResetQuicStream(conn, 42, 0xFF),
        StopSendingQuicStream(conn, 42, 0xFF),
        CloseQuicConnection(conn, 0xFF, None, "reason"),
    )
    for command in quic_commands:
        assert repr(command)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/quic/test__raw_layers.py | test/mitmproxy/proxy/layers/quic/test__raw_layers.py | import pytest
from mitmproxy import connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy import layers
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.layers import tcp
from mitmproxy.proxy.layers import udp
from mitmproxy.proxy.layers.quic._commands import CloseQuicConnection
from mitmproxy.proxy.layers.quic._commands import ResetQuicStream
from mitmproxy.proxy.layers.quic._commands import SendQuicStreamData
from mitmproxy.proxy.layers.quic._commands import StopSendingQuicStream
from mitmproxy.proxy.layers.quic._events import QuicConnectionClosed
from mitmproxy.proxy.layers.quic._events import QuicStreamDataReceived
from mitmproxy.proxy.layers.quic._events import QuicStreamEvent
from mitmproxy.proxy.layers.quic._events import QuicStreamReset
from mitmproxy.proxy.layers.quic._raw_layers import QuicStreamLayer
from mitmproxy.proxy.layers.quic._raw_layers import RawQuicLayer
from mitmproxy.tcp import TCPFlow
from mitmproxy.udp import UDPFlow
from mitmproxy.udp import UDPMessage
from test.mitmproxy.proxy import tutils
from test.mitmproxy.proxy.layers.quic.test__stream_layers import TlsEchoLayer
class TestQuicStreamLayer:
    """Tests for QuicStreamLayer's flow-metadata bookkeeping."""

    def test_force_raw(self, tctx: context.Context):
        # force_raw=True wires a TCPLayer directly as the child layer.
        quic_layer = QuicStreamLayer(tctx, True, 1)
        assert isinstance(quic_layer.child_layer, layers.TCPLayer)
        quic_layer.child_layer.flow = TCPFlow(tctx.client, tctx.server)
        quic_layer.refresh_metadata()
        assert quic_layer.child_layer.flow.metadata["quic_is_unidirectional"] is False
        assert quic_layer.child_layer.flow.metadata["quic_initiator"] == "server"
        assert quic_layer.child_layer.flow.metadata["quic_stream_id_client"] == 1
        # No server stream has been opened, so the server id is unset.
        assert quic_layer.child_layer.flow.metadata["quic_stream_id_server"] is None
        assert quic_layer.stream_id(True) == 1
        assert quic_layer.stream_id(False) is None

    def test_simple(self, tctx: context.Context):
        # Without force_raw a NextLayer child is created; wire up a tunnel +
        # TCP layer manually and verify metadata after opening a server stream.
        quic_layer = QuicStreamLayer(tctx, False, 2)
        assert isinstance(quic_layer.child_layer, layer.NextLayer)
        tunnel_layer = tunnel.TunnelLayer(tctx, tctx.client, tctx.server)
        quic_layer.child_layer.layer = tunnel_layer
        tcp_layer = layers.TCPLayer(tctx)
        tunnel_layer.child_layer = tcp_layer
        quic_layer.open_server_stream(3)
        assert tcp_layer.flow.metadata["quic_is_unidirectional"] is True
        assert tcp_layer.flow.metadata["quic_initiator"] == "client"
        assert tcp_layer.flow.metadata["quic_stream_id_client"] == 2
        assert tcp_layer.flow.metadata["quic_stream_id_server"] == 3
        assert quic_layer.stream_id(True) == 2
        assert quic_layer.stream_id(False) == 3
class TestRawQuicLayer:
    """End-to-end tests for RawQuicLayer's command/event routing."""

    @pytest.mark.parametrize("force_raw", [True, False])
    def test_error(self, tctx: context.Context, force_raw: bool):
        """A failed OpenConnection closes the client and finishes the layer."""
        quic_layer = RawQuicLayer(tctx, force_raw=force_raw)
        assert (
            tutils.Playbook(quic_layer)
            << commands.OpenConnection(tctx.server)
            >> tutils.reply("failed to open")
            << commands.CloseConnection(tctx.client)
        )
        assert quic_layer._handle_event == quic_layer.done

    def test_force_raw(self, tctx: context.Context):
        """With force_raw, datagrams and streams pass through unmodified."""
        quic_layer = RawQuicLayer(tctx, force_raw=True)
        assert (
            tutils.Playbook(quic_layer, hooks=False)
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> events.DataReceived(tctx.client, b"msg1")
            << commands.SendData(tctx.server, b"msg1")
            >> events.DataReceived(tctx.server, b"msg2")
            << commands.SendData(tctx.client, b"msg2")
            >> QuicStreamDataReceived(tctx.client, 0, b"msg3", end_stream=False)
            << SendQuicStreamData(tctx.server, 0, b"msg3", end_stream=False)
            >> QuicStreamDataReceived(tctx.client, 6, b"msg4", end_stream=False)
            << SendQuicStreamData(tctx.server, 2, b"msg4", end_stream=False)
            >> QuicStreamDataReceived(tctx.server, 9, b"msg5", end_stream=False)
            << SendQuicStreamData(tctx.client, 1, b"msg5", end_stream=False)
            >> QuicStreamDataReceived(tctx.client, 0, b"", end_stream=True)
            << SendQuicStreamData(tctx.server, 0, b"", end_stream=True)
            >> QuicStreamReset(tctx.client, 6, 142)
            << ResetQuicStream(tctx.server, 2, 142)
            >> QuicConnectionClosed(tctx.client, 42, None, "closed")
            << CloseQuicConnection(tctx.server, 42, None, "closed")
            >> QuicConnectionClosed(tctx.server, 42, None, "closed")
            << None
        )
        assert quic_layer._handle_event == quic_layer.done

    def test_msg_inject(self, tctx: context.Context):
        """Injected UDP messages are routed by flow; foreign flows fail."""
        udpflow = tutils.Placeholder(UDPFlow)
        playbook = tutils.Playbook(RawQuicLayer(tctx))
        assert (
            playbook
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> events.DataReceived(tctx.client, b"msg1")
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(udp.UDPLayer)
            << udp.UdpStartHook(udpflow)
            >> tutils.reply()
            << udp.UdpMessageHook(udpflow)
            >> tutils.reply()
            << commands.SendData(tctx.server, b"msg1")
            >> udp.UdpMessageInjected(udpflow, UDPMessage(True, b"msg2"))
            << udp.UdpMessageHook(udpflow)
            >> tutils.reply()
            << commands.SendData(tctx.server, b"msg2")
            >> udp.UdpMessageInjected(
                UDPFlow(("other", 80), tctx.server), UDPMessage(True, b"msg3")
            )
            << udp.UdpMessageHook(udpflow)
            >> tutils.reply()
            << commands.SendData(tctx.server, b"msg3")
        )
        # Injecting into a flow that belongs to no known stream must fail.
        with pytest.raises(AssertionError, match="not associated"):
            playbook >> udp.UdpMessageInjected(
                UDPFlow(("notfound", 0), ("noexist", 0)), UDPMessage(True, b"msg2")
            )
            assert playbook

    def test_reset_with_end_hook(self, tctx: context.Context):
        """A stream reset forwards the reset and fires the TCP end hook."""
        tcpflow = tutils.Placeholder(TCPFlow)
        assert (
            tutils.Playbook(RawQuicLayer(tctx))
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> QuicStreamDataReceived(tctx.client, 2, b"msg1", end_stream=False)
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(tcp.TCPLayer)
            << tcp.TcpStartHook(tcpflow)
            >> tutils.reply()
            << tcp.TcpMessageHook(tcpflow)
            >> tutils.reply()
            << SendQuicStreamData(tctx.server, 2, b"msg1", end_stream=False)
            >> QuicStreamReset(tctx.client, 2, 42)
            << ResetQuicStream(tctx.server, 2, 42)
            << tcp.TcpEndHook(tcpflow)
            >> tutils.reply()
        )

    def test_close_with_end_hooks(self, tctx: context.Context):
        """Closing the connection fires end hooks for all open child flows."""
        udpflow = tutils.Placeholder(UDPFlow)
        tcpflow = tutils.Placeholder(TCPFlow)
        assert (
            tutils.Playbook(RawQuicLayer(tctx))
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> events.DataReceived(tctx.client, b"msg1")
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(udp.UDPLayer)
            << udp.UdpStartHook(udpflow)
            >> tutils.reply()
            << udp.UdpMessageHook(udpflow)
            >> tutils.reply()
            << commands.SendData(tctx.server, b"msg1")
            >> QuicStreamDataReceived(tctx.client, 2, b"msg2", end_stream=False)
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(tcp.TCPLayer)
            << tcp.TcpStartHook(tcpflow)
            >> tutils.reply()
            << tcp.TcpMessageHook(tcpflow)
            >> tutils.reply()
            << SendQuicStreamData(tctx.server, 2, b"msg2", end_stream=False)
            >> QuicConnectionClosed(tctx.client, 42, None, "bye")
            << CloseQuicConnection(tctx.server, 42, None, "bye")
            << udp.UdpEndHook(udpflow)
            << tcp.TcpEndHook(tcpflow)
            >> tutils.reply(to=-2)
            >> tutils.reply(to=-2)
            >> QuicConnectionClosed(tctx.server, 42, None, "bye")
        )

    def test_invalid_stream_event(self, tctx: context.Context):
        """Unknown QuicStreamEvent subclasses are rejected with an assertion."""
        playbook = tutils.Playbook(RawQuicLayer(tctx))
        # Fix: drive the setup through `playbook` (a second, throwaway
        # Playbook was previously constructed here, so `playbook` itself
        # never processed the connection setup it relies on below).
        assert (
            playbook
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
        )
        with pytest.raises(AssertionError, match="Unexpected stream event"):

            class InvalidStreamEvent(QuicStreamEvent):
                pass

            playbook >> InvalidStreamEvent(tctx.client, 0)
            assert playbook

    def test_invalid_event(self, tctx: context.Context):
        """Unknown event types are rejected with an assertion."""
        playbook = tutils.Playbook(RawQuicLayer(tctx))
        # Fix: same throwaway-Playbook issue as in test_invalid_stream_event.
        assert (
            playbook
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
        )
        with pytest.raises(AssertionError, match="Unexpected event"):

            class InvalidEvent(events.Event):
                pass

            playbook >> InvalidEvent()
            assert playbook

    def test_full_close(self, tctx: context.Context):
        """end_stream on an ignored stream is flushed and fully closed."""
        assert (
            tutils.Playbook(RawQuicLayer(tctx))
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> QuicStreamDataReceived(tctx.client, 0, b"msg1", end_stream=True)
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(lambda ctx: udp.UDPLayer(ctx, ignore=True))
            << SendQuicStreamData(tctx.server, 0, b"msg1", end_stream=False)
            << SendQuicStreamData(tctx.server, 0, b"", end_stream=True)
            << StopSendingQuicStream(tctx.server, 0, 0)
        )

    def test_open_connection(self, tctx: context.Context):
        """A child layer's OpenConnection failure is reported on the stream."""
        server = connection.Server(address=("other", 80))

        def echo_new_server(ctx: context.Context):
            echo_layer = TlsEchoLayer(ctx)
            echo_layer.context.server = server
            return echo_layer

        assert (
            tutils.Playbook(RawQuicLayer(tctx))
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> QuicStreamDataReceived(
                tctx.client, 0, b"open-connection", end_stream=False
            )
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(echo_new_server)
            << commands.OpenConnection(server)
            >> tutils.reply("uhoh")
            << SendQuicStreamData(
                tctx.client, 0, b"open-connection failed: uhoh", end_stream=False
            )
        )

    def test_invalid_connection_command(self, tctx: context.Context):
        """Stream layers may only issue commands for their own connections."""
        playbook = tutils.Playbook(RawQuicLayer(tctx))
        assert (
            playbook
            << commands.OpenConnection(tctx.server)
            >> tutils.reply(None)
            >> QuicStreamDataReceived(tctx.client, 0, b"msg1", end_stream=False)
            << layer.NextLayerHook(tutils.Placeholder())
            >> tutils.reply_next_layer(TlsEchoLayer)
            << SendQuicStreamData(tctx.client, 0, b"msg1", end_stream=False)
        )
        with pytest.raises(
            AssertionError, match="Unexpected stream connection command"
        ):
            playbook >> QuicStreamDataReceived(
                tctx.client, 0, b"invalid-command", end_stream=False
            )
            assert playbook
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/test_http3.py | test/mitmproxy/proxy/layers/http/test_http3.py | from collections.abc import Callable
from collections.abc import Iterable
import pylsqpack
import pytest
from aioquic._buffer import Buffer
from aioquic.h3.connection import encode_frame
from aioquic.h3.connection import encode_settings
from aioquic.h3.connection import encode_uint_var
from aioquic.h3.connection import ErrorCode
from aioquic.h3.connection import FrameType
from aioquic.h3.connection import Headers as H3Headers
from aioquic.h3.connection import parse_settings
from aioquic.h3.connection import Setting
from aioquic.h3.connection import StreamType
from aioquic.quic.packet import QuicErrorCode
from mitmproxy import connection
from mitmproxy import version
from mitmproxy.flow import Error
from mitmproxy.http import Headers
from mitmproxy.http import HTTPFlow
from mitmproxy.http import Request
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layers
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers import quic
from mitmproxy.proxy.layers.http._http3 import Http3Client
from test.mitmproxy.proxy import tutils
# Canonical HTTP/3 header and trailer lists used as fixtures in these tests.
example_request_headers = [
    (b":method", b"GET"),
    (b":scheme", b"http"),
    (b":path", b"/"),
    (b":authority", b"example.com"),
]
example_response_headers = [(b":status", b"200")]
example_request_trailers = [(b"req-trailer-a", b"a"), (b"req-trailer-b", b"b")]
example_response_trailers = [(b"resp-trailer-a", b"a"), (b"resp-trailer-b", b"b")]
def decode_frame(frame_type: int, frame_data: bytes) -> bytes:
    """Strip the frame header, asserting its type, and return the payload."""
    buf = Buffer(data=frame_data)
    actual_type = buf.pull_uint_var()
    assert actual_type == frame_type
    payload_length = buf.pull_uint_var()
    return buf.pull_bytes(payload_length)
class CallbackPlaceholder(tutils._Placeholder[bytes]):
    """Data placeholder that invokes a callback once its bytes get set."""

    def __init__(self, cb: Callable[[bytes], None]):
        super().__init__(bytes)
        # Invoked exactly once, on the first assignment of a value.
        self._cb = cb

    def setdefault(self, value: bytes) -> bytes:
        # Only fire the callback the first time a value is recorded.
        if self._obj is None:
            self._cb(value)
        return super().setdefault(value)
class DelayedPlaceholder(tutils._Placeholder[bytes]):
    """Data placeholder that resolves its bytes when needed."""

    def __init__(self, resolve: Callable[[], bytes]):
        super().__init__(bytes)
        # Called lazily, on first read of the placeholder's value.
        self._resolve = resolve

    def __call__(self) -> bytes:
        if self._obj is None:
            self._obj = self._resolve()
        return super().__call__()
class FrameFactory:
    """Helper class for generating QUIC stream events and commands.

    Keeps a QPACK encoder/decoder pair in sync with the proxy under test and
    tracks the ids of the unidirectional control/QPACK streams, so tests can
    express HTTP/3 traffic as symbolic send/receive playbook steps.
    """

    def __init__(self, conn: connection.Connection, is_client: bool) -> None:
        self.conn = conn
        self.is_client = is_client
        # decodes header blocks the proxy sends to this peer
        self.decoder = pylsqpack.Decoder(
            max_table_capacity=4096,
            blocked_streams=16,
        )
        # pending placeholders for bytes produced by our decoder (feed_header)
        self.decoder_placeholders: list[tutils.Placeholder[bytes]] = []
        # encodes header blocks this peer sends to the proxy
        self.encoder = pylsqpack.Encoder()
        # set by send_settings(), resolved once the proxy's SETTINGS arrive
        self.encoder_placeholder: tutils.Placeholder[bytes] | None = None
        # unidirectional stream ids: initiated by the proxy vs. by this peer
        self.peer_stream_id: dict[StreamType, int] = {}
        self.local_stream_id: dict[StreamType, int] = {}

    def get_default_stream_id(self, stream_type: StreamType, for_local: bool) -> int:
        """Return the default id for a unidirectional stream of `stream_type`.

        Base ids are 2/6/10; the id is offset by one when the stream belongs
        to the opposite side of the connection.
        """
        if stream_type == StreamType.CONTROL:
            stream_id = 2
        elif stream_type == StreamType.QPACK_ENCODER:
            stream_id = 6
        elif stream_type == StreamType.QPACK_DECODER:
            stream_id = 10
        else:
            raise AssertionError(stream_type)
        if self.is_client is not for_local:
            stream_id = stream_id + 1
        return stream_id

    def send_stream_type(
        self,
        stream_type: StreamType,
        stream_id: int | None = None,
    ) -> quic.SendQuicStreamData:
        """Expect the proxy to open a unidirectional stream of `stream_type`."""
        assert stream_type not in self.peer_stream_id
        if stream_id is None:
            stream_id = self.get_default_stream_id(stream_type, for_local=False)
        self.peer_stream_id[stream_type] = stream_id
        return quic.SendQuicStreamData(
            connection=self.conn,
            stream_id=stream_id,
            data=encode_uint_var(stream_type),
            end_stream=False,
        )

    def receive_stream_type(
        self,
        stream_type: StreamType,
        stream_id: int | None = None,
    ) -> quic.QuicStreamDataReceived:
        """Open a unidirectional stream of `stream_type` towards the proxy."""
        assert stream_type not in self.local_stream_id
        if stream_id is None:
            stream_id = self.get_default_stream_id(stream_type, for_local=True)
        self.local_stream_id[stream_type] = stream_id
        return quic.QuicStreamDataReceived(
            connection=self.conn,
            stream_id=stream_id,
            data=encode_uint_var(stream_type),
            end_stream=False,
        )

    def send_settings(self) -> quic.SendQuicStreamData:
        """Expect the proxy's SETTINGS frame and apply it to our encoder.

        The encoder bytes produced by `apply_settings` are captured in
        `encoder_placeholder` and delivered later via `receive_encoder`.
        """
        assert self.encoder_placeholder is None
        placeholder = tutils.Placeholder(bytes)
        self.encoder_placeholder = placeholder

        def cb(data: bytes) -> None:
            buf = Buffer(data=data)
            assert buf.pull_uint_var() == FrameType.SETTINGS
            settings = parse_settings(buf.pull_bytes(buf.pull_uint_var()))
            placeholder.setdefault(
                self.encoder.apply_settings(
                    max_table_capacity=settings[Setting.QPACK_MAX_TABLE_CAPACITY],
                    blocked_streams=settings[Setting.QPACK_BLOCKED_STREAMS],
                )
            )

        return quic.SendQuicStreamData(
            connection=self.conn,
            stream_id=self.peer_stream_id[StreamType.CONTROL],
            data=CallbackPlaceholder(cb),
            end_stream=False,
        )

    def receive_settings(
        self,
        settings: dict[int, int] | None = None,
    ) -> quic.QuicStreamDataReceived:
        """Deliver a SETTINGS frame to the proxy on our control stream.

        `settings` defaults to a sane QPACK/CONNECT configuration; the default
        is built per call to avoid a shared mutable default argument.
        """
        if settings is None:
            settings = {
                Setting.QPACK_MAX_TABLE_CAPACITY: 4096,
                Setting.QPACK_BLOCKED_STREAMS: 16,
                Setting.ENABLE_CONNECT_PROTOCOL: 1,
                Setting.DUMMY: 1,
            }
        return quic.QuicStreamDataReceived(
            connection=self.conn,
            stream_id=self.local_stream_id[StreamType.CONTROL],
            data=encode_frame(FrameType.SETTINGS, encode_settings(settings)),
            end_stream=False,
        )

    def send_encoder(self) -> quic.SendQuicStreamData:
        """Expect QPACK encoder bytes from the proxy; feed them to our decoder."""

        def cb(data: bytes) -> bytes:
            self.decoder.feed_encoder(data)
            return data

        return quic.SendQuicStreamData(
            connection=self.conn,
            stream_id=self.peer_stream_id[StreamType.QPACK_ENCODER],
            data=CallbackPlaceholder(cb),
            end_stream=False,
        )

    def receive_encoder(self) -> quic.QuicStreamDataReceived:
        """Deliver the encoder bytes captured by `send_settings` to the proxy."""
        assert self.encoder_placeholder is not None
        placeholder = self.encoder_placeholder
        self.encoder_placeholder = None
        return quic.QuicStreamDataReceived(
            connection=self.conn,
            stream_id=self.local_stream_id[StreamType.QPACK_ENCODER],
            data=placeholder,
            end_stream=False,
        )

    def send_decoder(self) -> quic.SendQuicStreamData:
        """Expect QPACK decoder bytes from the proxy; feed them to our encoder."""

        def cb(data: bytes) -> None:
            self.encoder.feed_decoder(data)

        return quic.SendQuicStreamData(
            self.conn,
            stream_id=self.peer_stream_id[StreamType.QPACK_DECODER],
            data=CallbackPlaceholder(cb),
            end_stream=False,
        )

    def receive_decoder(self) -> quic.QuicStreamDataReceived:
        """Deliver the oldest pending decoder bytes (from `send_headers`)."""
        assert self.decoder_placeholders
        placeholder = self.decoder_placeholders.pop(0)
        return quic.QuicStreamDataReceived(
            self.conn,
            stream_id=self.local_stream_id[StreamType.QPACK_DECODER],
            data=placeholder,
            end_stream=False,
        )

    def send_headers(
        self,
        headers: H3Headers,
        stream_id: int = 0,
        end_stream: bool = False,
    ) -> Iterable[quic.SendQuicStreamData]:
        """Expect a HEADERS frame from the proxy and verify its contents.

        The decoder bytes produced by `feed_header` are queued for a later
        `receive_decoder` step.
        """
        placeholder = tutils.Placeholder(bytes)
        self.decoder_placeholders.append(placeholder)

        def decode(data: bytes) -> None:
            buf = Buffer(data=data)
            assert buf.pull_uint_var() == FrameType.HEADERS
            frame_data = buf.pull_bytes(buf.pull_uint_var())
            decoder, actual_headers = self.decoder.feed_header(stream_id, frame_data)
            placeholder.setdefault(decoder)
            assert headers == actual_headers

        # the proxy first emits encoder bytes, then the HEADERS frame itself
        yield self.send_encoder()
        yield quic.SendQuicStreamData(
            connection=self.conn,
            stream_id=stream_id,
            data=CallbackPlaceholder(decode),
            end_stream=end_stream,
        )

    def receive_headers(
        self,
        headers: H3Headers,
        stream_id: int = 0,
        end_stream: bool = False,
    ) -> Iterable[quic.QuicStreamDataReceived]:
        """Send a HEADERS frame, preceded by the matching QPACK encoder bytes."""
        data = tutils.Placeholder(bytes)

        def encode() -> bytes:
            encoder, frame_data = self.encoder.encode(stream_id, headers)
            data.setdefault(encode_frame(FrameType.HEADERS, frame_data))
            return encoder

        yield quic.QuicStreamDataReceived(
            connection=self.conn,
            stream_id=self.local_stream_id[StreamType.QPACK_ENCODER],
            data=DelayedPlaceholder(encode),
            end_stream=False,
        )
        yield quic.QuicStreamDataReceived(
            connection=self.conn,
            stream_id=stream_id,
            data=data,
            end_stream=end_stream,
        )

    def send_data(
        self,
        data: bytes,
        stream_id: int = 0,
        end_stream: bool = False,
    ) -> quic.SendQuicStreamData:
        """Expect a DATA frame from the proxy."""
        return quic.SendQuicStreamData(
            self.conn,
            stream_id=stream_id,
            data=encode_frame(FrameType.DATA, data),
            end_stream=end_stream,
        )

    def receive_data(
        self,
        data: bytes,
        stream_id: int = 0,
        end_stream: bool = False,
    ) -> quic.QuicStreamDataReceived:
        """Deliver a DATA frame to the proxy."""
        return quic.QuicStreamDataReceived(
            connection=self.conn,
            stream_id=stream_id,
            data=encode_frame(FrameType.DATA, data),
            end_stream=end_stream,
        )

    def send_reset(
        self, error_code: ErrorCode, stream_id: int = 0
    ) -> quic.ResetQuicStream:
        """Expect the proxy to reset the given stream."""
        return quic.ResetQuicStream(
            connection=self.conn,
            stream_id=stream_id,
            error_code=int(error_code),
        )

    def receive_reset(
        self, error_code: ErrorCode, stream_id: int = 0
    ) -> quic.QuicStreamReset:
        """Deliver a stream reset to the proxy."""
        return quic.QuicStreamReset(
            connection=self.conn,
            stream_id=stream_id,
            error_code=int(error_code),
        )

    def send_stop(
        self, error_code: ErrorCode, stream_id: int = 0
    ) -> quic.StopSendingQuicStream:
        """Expect the proxy to send STOP_SENDING on the given stream."""
        return quic.StopSendingQuicStream(
            connection=self.conn,
            stream_id=stream_id,
            error_code=int(error_code),
        )

    def receive_stop(
        self, error_code: ErrorCode, stream_id: int = 0
    ) -> quic.QuicStreamStopSending:
        """Deliver STOP_SENDING to the proxy."""
        return quic.QuicStreamStopSending(
            connection=self.conn,
            stream_id=stream_id,
            error_code=int(error_code),
        )

    def send_init(self) -> Iterable[quic.SendQuicStreamData]:
        """Expect the proxy's initial control/QPACK stream setup."""
        yield self.send_stream_type(StreamType.CONTROL)
        yield self.send_settings()
        yield self.send_stream_type(StreamType.QPACK_ENCODER)
        yield self.send_stream_type(StreamType.QPACK_DECODER)

    def receive_init(self) -> Iterable[quic.QuicStreamDataReceived]:
        """Perform our side of the initial control/QPACK stream setup."""
        yield self.receive_stream_type(StreamType.CONTROL)
        yield self.receive_stream_type(StreamType.QPACK_ENCODER)
        yield self.receive_stream_type(StreamType.QPACK_DECODER)
        yield self.receive_settings()

    @property
    def is_done(self) -> bool:
        # true once all queued encoder/decoder bytes have been delivered
        return self.encoder_placeholder is None and not self.decoder_placeholders
@pytest.fixture
def open_h3_server_conn():
    """An already-open UDP server connection with ALPN ``h3``.

    Slightly fake (port 80 with ALPN but no TLS) so that tests don't have to
    run through a full TLS handshake first.
    """
    conn = connection.Server(
        address=("example.com", 80),
        transport_protocol="udp",
    )
    conn.alpn = b"h3"
    conn.state = connection.ConnectionState.OPEN
    return conn
def start_h3_proxy(tctx: context.Context) -> tuple[tutils.Playbook, FrameFactory]:
    """Set up an HTTP/3 client against a regular-mode proxy.

    Performs the initial control/QPACK stream exchange and returns the
    playbook together with the client-side frame factory.
    """
    tctx.client.alpn = b"h3"
    tctx.client.transport_protocol = "udp"
    tctx.server.transport_protocol = "udp"
    frames = FrameFactory(conn=tctx.client, is_client=True)
    playbook = tutils.Playbook(layers.HttpLayer(tctx, layers.http.HTTPMode.regular))
    playbook << frames.send_init()
    playbook >> frames.receive_init()
    playbook << frames.send_encoder()
    playbook >> frames.receive_encoder()
    assert playbook
    return playbook, frames
def make_h3(open_connection: commands.OpenConnection) -> None:
    """OpenConnection side effect: pretend the server negotiated ALPN h3."""
    conn = open_connection.connection
    conn.alpn = b"h3"
def test_ignore_push(tctx: context.Context):
    """Only exercises the connection setup performed by start_h3_proxy."""
    _playbook, _cff = start_h3_proxy(tctx)
def test_fail_without_header(tctx: context.Context):
    """A protocol error before any response headers exist resets the stream."""
    playbook = tutils.Playbook(layers.http.Http3Server(tctx))
    cff = FrameFactory(tctx.client, is_client=True)
    assert (
        playbook
        << cff.send_init()
        >> cff.receive_init()
        << cff.send_encoder()
        >> cff.receive_encoder()
        # inject a protocol error as the very first message on stream 0
        >> http.ResponseProtocolError(0, "first message", http.ErrorCode.KILL)
        << cff.send_reset(ErrorCode.H3_INTERNAL_ERROR)
        << cff.send_stop(ErrorCode.H3_INTERNAL_ERROR)
    )
    assert cff.is_done
def test_invalid_header(tctx: context.Context):
    """Invalid request headers (missing :scheme) close the whole connection."""
    playbook, cff = start_h3_proxy(tctx)
    assert (
        playbook
        # CONNECT without :scheme is invalid for a regular-mode proxy
        >> cff.receive_headers(
            [
                (b":method", b"CONNECT"),
                (b":path", b"/"),
                (b":authority", b"example.com"),
            ],
            end_stream=True,
        )
        << cff.send_decoder()  # for receive_headers
        << quic.CloseQuicConnection(
            tctx.client,
            error_code=ErrorCode.H3_GENERAL_PROTOCOL_ERROR.value,
            frame_type=None,
            reason_phrase="Invalid HTTP/3 request headers: Required pseudo header is missing: b':scheme'",
        )
        # ensure that once we close, we don't process messages anymore
        >> cff.receive_headers(
            [
                (b":method", b"CONNECT"),
                (b":path", b"/"),
                (b":authority", b"example.com"),
            ],
            end_stream=True,
        )
    )
def test_simple(tctx: context.Context):
    """A complete HTTP/3 request/response roundtrip through the proxy."""
    playbook, cff = start_h3_proxy(tctx)
    flow = tutils.Placeholder(HTTPFlow)
    server = tutils.Placeholder(connection.Server)
    sff = FrameFactory(server, is_client=False)
    assert (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers, end_stream=True)
        << (request := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        # request server
        << commands.OpenConnection(server)
        >> tutils.reply(None, side_effect=make_h3)
        << sff.send_init()
        << sff.send_headers(example_request_headers, end_stream=True)
        >> sff.receive_init()
        << sff.send_encoder()
        >> sff.receive_encoder()
        >> sff.receive_decoder()  # for send_headers
        # response server
        >> sff.receive_headers(example_response_headers)
        << (response := http.HttpResponseHeadersHook(flow))
        << sff.send_decoder()  # for receive_headers
        >> tutils.reply(to=response)
        >> sff.receive_data(b"Hello, World!", end_stream=True)
        << http.HttpResponseHook(flow)
        >> tutils.reply()
        # response client
        << cff.send_headers(example_response_headers)
        << cff.send_data(b"Hello, World!")
        << cff.send_data(b"", end_stream=True)
        >> cff.receive_decoder()  # for send_headers
    )
    assert cff.is_done and sff.is_done
    assert flow().request.url == "http://example.com/"
    assert flow().response.text == "Hello, World!"
@pytest.mark.parametrize("stream", ["stream", ""])
def test_response_trailers(
    tctx: context.Context,
    open_h3_server_conn: connection.Server,
    stream: str,
):
    """Response trailers are forwarded (and can be modified) with and without
    response streaming enabled."""
    playbook, cff = start_h3_proxy(tctx)
    tctx.server = open_h3_server_conn
    sff = FrameFactory(tctx.server, is_client=False)

    def enable_streaming(flow: HTTPFlow):
        flow.response.stream = stream

    flow = tutils.Placeholder(HTTPFlow)
    (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers, end_stream=True)
        << (request := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        # request server
        << sff.send_init()
        << sff.send_headers(example_request_headers, end_stream=True)
        >> sff.receive_init()
        << sff.send_encoder()
        >> sff.receive_encoder()
        >> sff.receive_decoder()  # for send_headers
        # response server
        >> sff.receive_headers(example_response_headers)
        << (response_headers := http.HttpResponseHeadersHook(flow))
        << sff.send_decoder()  # for receive_headers
        >> tutils.reply(to=response_headers, side_effect=enable_streaming)
    )
    if stream:
        # streaming: body bytes are forwarded to the client immediately
        (
            playbook
            << cff.send_headers(example_response_headers)
            >> cff.receive_decoder()  # for send_headers
            >> sff.receive_data(b"Hello, World!")
            << cff.send_data(b"Hello, World!")
        )
    else:
        playbook >> sff.receive_data(b"Hello, World!")
    assert (
        playbook
        >> sff.receive_headers(example_response_trailers, end_stream=True)
        << (response := http.HttpResponseHook(flow))
        << sff.send_decoder()  # for receive_headers
    )
    assert flow().response.trailers
    # drop one trailer in the response hook; only the rest reaches the client
    del flow().response.trailers["resp-trailer-a"]
    if stream:
        assert (
            playbook
            >> tutils.reply(to=response)
            << cff.send_headers(example_response_trailers[1:], end_stream=True)
            >> cff.receive_decoder()  # for send_headers
        )
    else:
        assert (
            playbook
            >> tutils.reply(to=response)
            << cff.send_headers(example_response_headers)
            << cff.send_data(b"Hello, World!")
            << cff.send_headers(example_response_trailers[1:], end_stream=True)
            >> cff.receive_decoder()  # for send_headers
            >> cff.receive_decoder()  # for send_headers
        )
    assert cff.is_done and sff.is_done
@pytest.mark.parametrize("stream", ["stream", ""])
def test_request_trailers(
    tctx: context.Context,
    open_h3_server_conn: connection.Server,
    stream: str,
):
    """Request trailers are forwarded to the server with and without request
    streaming enabled."""
    playbook, cff = start_h3_proxy(tctx)
    tctx.server = open_h3_server_conn
    sff = FrameFactory(tctx.server, is_client=False)

    def enable_streaming(flow: HTTPFlow):
        flow.request.stream = stream

    flow = tutils.Placeholder(HTTPFlow)
    (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers)
        << (request_headers := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> cff.receive_data(b"Hello World!")
        >> tutils.reply(to=request_headers, side_effect=enable_streaming)
    )
    if not stream:
        # non-streaming: trailers arrive before the request hook fires
        (
            playbook
            >> cff.receive_headers(example_request_trailers, end_stream=True)
            << (request := http.HttpRequestHook(flow))
            << cff.send_decoder()  # for receive_headers
            >> tutils.reply(to=request)
        )
    (
        playbook
        # request server
        << sff.send_init()
        << sff.send_headers(example_request_headers)
        << sff.send_data(b"Hello World!")
    )
    if not stream:
        playbook << sff.send_headers(example_request_trailers, end_stream=True)
    (
        playbook
        >> sff.receive_init()
        << sff.send_encoder()
        >> sff.receive_encoder()
        >> sff.receive_decoder()  # for send_headers
    )
    if stream:
        # streaming: trailers arrive after headers/body were already forwarded
        (
            playbook
            >> cff.receive_headers(example_request_trailers, end_stream=True)
            << (request := http.HttpRequestHook(flow))
            << cff.send_decoder()  # for receive_headers
            >> tutils.reply(to=request)
            << sff.send_headers(example_request_trailers, end_stream=True)
        )
    assert playbook >> sff.receive_decoder()  # for send_headers
    assert cff.is_done and sff.is_done
def test_upstream_error(tctx: context.Context):
    """A failed upstream connect yields a 502 error response to the client."""
    playbook, cff = start_h3_proxy(tctx)
    flow = tutils.Placeholder(HTTPFlow)
    server = tutils.Placeholder(connection.Server)
    err = tutils.Placeholder(bytes)
    assert (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers, end_stream=True)
        << (request := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        # request server: the connect attempt fails
        << commands.OpenConnection(server)
        >> tutils.reply("oops server <> error")
        << http.HttpErrorHook(flow)
        >> tutils.reply()
        << cff.send_headers(
            [
                (b":status", b"502"),
                (b"server", version.MITMPROXY.encode()),
                (b"content-type", b"text/html"),
            ]
        )
        << quic.SendQuicStreamData(
            tctx.client,
            stream_id=0,
            data=err,
            end_stream=True,
        )
        >> cff.receive_decoder()  # for send_headers
    )
    assert cff.is_done
    # the error body is a DATA frame containing the HTML error page
    data = decode_frame(FrameType.DATA, err())
    assert b"502 Bad Gateway" in data
    assert b"server <> error" in data
@pytest.mark.parametrize("stream", ["stream", ""])
@pytest.mark.parametrize("when", ["request", "response"])
@pytest.mark.parametrize("how", ["RST", "disconnect", "RST+disconnect"])
def test_http3_client_aborts(tctx: context.Context, stream: str, when: str, how: str):
    """
    Test handling of the case where a client aborts during request or response transmission.

    If the client aborts the request transmission, we must trigger an error hook,
    if the client disconnects during response transmission, no error hook is triggered.
    """
    server = tutils.Placeholder(connection.Server)
    flow = tutils.Placeholder(HTTPFlow)
    playbook, cff = start_h3_proxy(tctx)

    def enable_request_streaming(flow: HTTPFlow):
        flow.request.stream = True

    def enable_response_streaming(flow: HTTPFlow):
        flow.response.stream = True

    assert (
        playbook
        >> cff.receive_headers(example_request_headers)
        << (request_headers := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
    )
    if stream and when == "request":
        # with request streaming the server connection opens immediately
        assert (
            playbook
            >> tutils.reply(side_effect=enable_request_streaming, to=request_headers)
            << commands.OpenConnection(server)
            >> tutils.reply(None)
            << commands.SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        )
    else:
        assert playbook >> tutils.reply(to=request_headers)

    if when == "request":
        # abort before the request body completed
        if "RST" in how:
            playbook >> cff.receive_reset(ErrorCode.H3_REQUEST_CANCELLED)
        else:
            playbook >> quic.QuicConnectionClosed(
                tctx.client,
                error_code=ErrorCode.H3_REQUEST_CANCELLED.value,
                frame_type=None,
                reason_phrase="peer closed connection",
            )
        if stream:
            playbook << commands.CloseConnection(server)
        playbook << (error_hook := http.HttpErrorHook(flow))
        if "RST" in how:
            playbook << cff.send_reset(ErrorCode.H3_REQUEST_CANCELLED)
        playbook >> tutils.reply(to=error_hook)
        if how == "RST+disconnect":
            playbook >> quic.QuicConnectionClosed(
                tctx.client,
                error_code=ErrorCode.H3_NO_ERROR.value,
                frame_type=None,
                reason_phrase="peer closed connection",
            )
        assert playbook
        assert (
            "stream closed by client" in flow().error.msg
            or "peer closed connection" in flow().error.msg
        )
        return

    # when == "response": complete the request first
    assert (
        playbook
        >> cff.receive_data(b"", end_stream=True)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        << commands.OpenConnection(server)
        >> tutils.reply(None)
        << commands.SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> events.DataReceived(
            server, b"HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\n123"
        )
        << http.HttpResponseHeadersHook(flow)
    )
    if stream:
        assert (
            playbook
            >> tutils.reply(side_effect=enable_response_streaming)
            << cff.send_headers(
                [
                    (b":status", b"200"),
                    (b"content-length", b"6"),
                ]
            )
            << cff.send_data(b"123")
        )
    else:
        assert playbook >> tutils.reply()
    # abort mid-response
    if "RST" in how:
        playbook >> cff.receive_reset(ErrorCode.H3_REQUEST_CANCELLED)
    else:
        playbook >> quic.QuicConnectionClosed(
            tctx.client,
            error_code=ErrorCode.H3_REQUEST_CANCELLED.value,
            frame_type=None,
            reason_phrase="peer closed connection",
        )
    playbook << commands.CloseConnection(server)
    playbook << (error_hook := http.HttpErrorHook(flow))
    if "RST" in how:
        playbook << cff.send_reset(ErrorCode.H3_REQUEST_CANCELLED)
    playbook >> tutils.reply(to=error_hook)
    assert playbook
    if how == "RST+disconnect":
        playbook >> quic.QuicConnectionClosed(
            tctx.client,
            error_code=ErrorCode.H3_REQUEST_CANCELLED.value,
            frame_type=None,
            reason_phrase="peer closed connection",
        )
        assert playbook

    if "RST" in how:
        assert "stream closed by client" in flow().error.msg
    else:
        assert "peer closed connection" in flow().error.msg
def test_rst_then_close(tctx):
    """
    Test that we properly handle the case of a client that first causes protocol errors and then disconnects.

    This is slightly different to H2, as QUIC will close the connection immediately.
    """
    playbook, cff = start_h3_proxy(tctx)
    flow = tutils.Placeholder(HTTPFlow)
    server = tutils.Placeholder(connection.Server)
    err = tutils.Placeholder(str)
    assert (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers, end_stream=True)
        << (request := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        # request server
        << (open := commands.OpenConnection(server))
        # a DATA frame on the control stream is a protocol violation
        >> cff.receive_data(b"unexpected data frame")
        << quic.CloseQuicConnection(
            tctx.client,
            error_code=QuicErrorCode.PROTOCOL_VIOLATION.value,
            frame_type=None,
            reason_phrase=err,
        )
        >> quic.QuicConnectionClosed(
            tctx.client,
            error_code=QuicErrorCode.PROTOCOL_VIOLATION.value,
            frame_type=None,
            reason_phrase=err,
        )
        >> tutils.reply("connection cancelled", to=open)
        << http.HttpErrorHook(flow)
        >> tutils.reply()
    )
    assert flow().error.msg == "connection cancelled"
def test_cancel_then_server_disconnect(tctx: context.Context):
    """
    Test that we properly handle the case of the following event sequence:
        - client cancels a stream
        - we start an error hook
        - server disconnects
        - error hook completes.
    """
    playbook, cff = start_h3_proxy(tctx)
    flow = tutils.Placeholder(HTTPFlow)
    server = tutils.Placeholder(connection.Server)
    assert (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers, end_stream=True)
        << (request := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        # request server
        << commands.OpenConnection(server)
        >> tutils.reply(None)
        << commands.SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        # cancel
        >> cff.receive_reset(error_code=ErrorCode.H3_REQUEST_CANCELLED)
        << commands.CloseConnection(server)
        << (err_hook := http.HttpErrorHook(flow))
        << cff.send_reset(ErrorCode.H3_REQUEST_CANCELLED)
        >> tutils.reply(to=err_hook)
        # the server disconnect after the cancel must be a no-op
        >> events.ConnectionClosed(server)
        << None
    )
    assert cff.is_done
def test_cancel_during_response_hook(tctx: context.Context):
    """
    Test that we properly handle the case of the following event sequence:
        - we receive a server response
        - we trigger the response hook
        - the client cancels the stream
        - the response hook completes

    Given that we have already triggered the response hook, we don't want to trigger the error hook.
    """
    playbook, cff = start_h3_proxy(tctx)
    flow = tutils.Placeholder(HTTPFlow)
    server = tutils.Placeholder(connection.Server)
    assert (
        playbook
        # request client
        >> cff.receive_headers(example_request_headers, end_stream=True)
        << (request := http.HttpRequestHeadersHook(flow))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request)
        << http.HttpRequestHook(flow)
        >> tutils.reply()
        # request server
        << commands.OpenConnection(server)
        >> tutils.reply(None)
        << commands.SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        # response server
        >> events.DataReceived(server, b"HTTP/1.1 204 No Content\r\n\r\n")
        # fixed local name: was misspelled `reponse_headers`
        << (response_headers := http.HttpResponseHeadersHook(flow))
        << commands.CloseConnection(server)
        >> tutils.reply(to=response_headers)
        << (response := http.HttpResponseHook(flow))
        # the client cancels while the response hook is still pending
        >> cff.receive_reset(error_code=ErrorCode.H3_REQUEST_CANCELLED)
        << cff.send_reset(ErrorCode.H3_REQUEST_CANCELLED)
        >> tutils.reply(to=response)
    )
    assert cff.is_done
def test_stream_concurrency(tctx: context.Context):
    """Test that we can send an intercepted request with a lower stream id than one that has already been sent."""
    playbook, cff = start_h3_proxy(tctx)
    flow1 = tutils.Placeholder(HTTPFlow)
    flow2 = tutils.Placeholder(HTTPFlow)
    server = tutils.Placeholder(connection.Server)
    sff = FrameFactory(server, is_client=False)
    headers1 = [*example_request_headers, (b"x-order", b"1")]
    headers2 = [*example_request_headers, (b"x-order", b"2")]
    assert (
        playbook
        # request client: two concurrent requests on streams 0 and 4
        >> cff.receive_headers(headers1, stream_id=0, end_stream=True)
        << (request_header1 := http.HttpRequestHeadersHook(flow1))
        << cff.send_decoder()  # for receive_headers
        >> cff.receive_headers(headers2, stream_id=4, end_stream=True)
        << (request_header2 := http.HttpRequestHeadersHook(flow2))
        << cff.send_decoder()  # for receive_headers
        >> tutils.reply(to=request_header1)
        << (request1 := http.HttpRequestHook(flow1))
        >> tutils.reply(to=request_header2)
        << (request2 := http.HttpRequestHook(flow2))
        # req 2 overtakes 1 and we already have a reply:
        >> tutils.reply(to=request2)
        # request server: request 2 is sent first, on server stream 0
        << commands.OpenConnection(server)
        >> tutils.reply(None, side_effect=make_h3)
        << sff.send_init()
        << sff.send_headers(headers2, stream_id=0, end_stream=True)
        >> sff.receive_init()
        << sff.send_encoder()
        >> sff.receive_encoder()
        >> sff.receive_decoder()  # for send_headers
        >> tutils.reply(to=request1)
        # request 1 follows on the next server stream
        << sff.send_headers(headers1, stream_id=4, end_stream=True)
        >> sff.receive_decoder()  # for send_headers
    )
    assert cff.is_done and sff.is_done
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | true |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/test_http.py | test/mitmproxy/proxy/layers/http/test_http.py | import gc
from logging import WARNING
import pytest
from mitmproxy.connection import ConnectionState
from mitmproxy.connection import Server
from mitmproxy.http import HTTPFlow
from mitmproxy.http import Response
from mitmproxy.proxy import layer
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import Log
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers import TCPLayer
from mitmproxy.proxy.layers import tls
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.layers.tcp import TcpMessageInjected
from mitmproxy.proxy.layers.tcp import TcpStartHook
from mitmproxy.proxy.layers.websocket import WebsocketStartHook
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.tcp import TCPFlow
from mitmproxy.tcp import TCPMessage
from test.mitmproxy.proxy.tutils import BytesMatching
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
from test.mitmproxy.proxy.tutils import reply
from test.mitmproxy.proxy.tutils import reply_next_layer
def test_http_proxy(tctx):
    """Test a simple HTTP GET / request"""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular))
        # absolute-form request as sent to a regular proxy
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/foo?hello=1 HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        # upstream gets the origin-form request
        << SendData(server, b"GET /foo?hello=1 HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(
            server, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World"
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        # final body byte arrives separately
        >> DataReceived(server, b"!")
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(
            tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World!"
        )
    )
    assert server().address == ("example.com", 80)
@pytest.mark.parametrize("strategy", ["lazy", "eager"])
@pytest.mark.parametrize("http_connect_send_host_header", [True, False])
def test_https_proxy(strategy, http_connect_send_host_header, tctx):
    """Test a CONNECT request, followed by a HTTP GET /"""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    tctx.options.connection_strategy = strategy
    tctx.options.http_connect_send_host_header = http_connect_send_host_header

    (
        playbook
        >> DataReceived(
            tctx.client,
            b"CONNECT example.proxy:80 HTTP/1.1"
            + (b"\r\nHost: example.com:80" if http_connect_send_host_header else b"")
            + b"\r\n\r\n",
        )
        << http.HttpConnectHook(Placeholder())
        >> reply()
    )
    if strategy == "eager":
        # eager: the upstream connection is opened before replying to CONNECT
        playbook << OpenConnection(server)
        playbook >> reply(None)
    (
        playbook
        << http.HttpConnectedHook(Placeholder())
        >> reply(None)
        << SendData(tctx.client, b"HTTP/1.1 200 Connection established\r\n\r\n")
        >> DataReceived(
            tctx.client, b"GET /foo?hello=1 HTTP/1.1\r\nHost: example.com\r\n\r\n"
        )
        << layer.NextLayerHook(Placeholder())
        >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
    )
    if strategy == "lazy":
        # lazy: the upstream connection is only opened for the first request
        playbook << OpenConnection(server)
        playbook >> reply(None)
    (
        playbook
        << SendData(server, b"GET /foo?hello=1 HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(
            server, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World!"
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(
            tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World!"
        )
    )
    assert playbook
@pytest.mark.parametrize("https_client", [False, True])
@pytest.mark.parametrize("https_server", [False, True])
@pytest.mark.parametrize("strategy", ["lazy", "eager"])
def test_redirect(strategy, https_server, https_client, tctx, monkeypatch):
    """Test redirects between http:// and https:// in regular proxy mode."""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    tctx.options.connection_strategy = strategy
    p = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
    if https_server:
        # stub out TLS so the https:// redirect target needs no handshake
        monkeypatch.setattr(tls, "ServerTLSLayer", tls.MockTLSLayer)

    def redirect(flow: HTTPFlow):
        if https_server:
            flow.request.url = "https://redirected.site/"
        else:
            flow.request.url = "http://redirected.site/"

    if https_client:
        # tunnel via CONNECT first, then speak plain HTTP inside the tunnel
        p >> DataReceived(
            tctx.client,
            b"CONNECT example.com:80 HTTP/1.1\r\nHost: example.com:80\r\n\r\n",
        )
        if strategy == "eager":
            p << OpenConnection(Placeholder())
            p >> reply(None)
        p << SendData(tctx.client, b"HTTP/1.1 200 Connection established\r\n\r\n")
        p >> DataReceived(tctx.client, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        p << layer.NextLayerHook(Placeholder())
        p >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))
    else:
        p >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
    p << http.HttpRequestHook(flow)
    p >> reply(side_effect=redirect)
    p << OpenConnection(server)
    p >> reply(None)
    p << SendData(server, b"GET / HTTP/1.1\r\nHost: redirected.site\r\n\r\n")
    p >> DataReceived(
        server, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World!"
    )
    p << SendData(
        tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World!"
    )
    assert p
    if https_server:
        assert server().address == ("redirected.site", 443)
    else:
        assert server().address == ("redirected.site", 80)
def test_multiple_server_connections(tctx):
    """Test multiple requests being rewritten to different targets."""
    server1 = Placeholder(Server)
    server2 = Placeholder(Server)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)

    def redirect(to: str):
        # factory for a request-hook side effect that rewrites the target URL
        def side_effect(flow: HTTPFlow):
            flow.request.url = to

        return side_effect

    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << http.HttpRequestHook(Placeholder())
        >> reply(side_effect=redirect("http://one.redirect/"))
        << OpenConnection(server1)
        >> reply(None)
        << SendData(server1, b"GET / HTTP/1.1\r\nHost: one.redirect\r\n\r\n")
        >> DataReceived(server1, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
        << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
    )
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << http.HttpRequestHook(Placeholder())
        >> reply(side_effect=redirect("http://two.redirect/"))
        << OpenConnection(server2)
        >> reply(None)
        << SendData(server2, b"GET / HTTP/1.1\r\nHost: two.redirect\r\n\r\n")
        >> DataReceived(server2, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
        << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
    )
    assert server1().address == ("one.redirect", 80)
    assert server2().address == ("two.redirect", 80)
@pytest.mark.parametrize("transfer_encoding", ["identity", "chunked"])
def test_pipelining(tctx, transfer_encoding):
    """Test that multiple requests can be processed over the same connection"""
    tctx.server.address = ("example.com", 80)
    tctx.server.state = ConnectionState.OPEN
    req = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
    if transfer_encoding == "identity":
        resp = b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World!"
    else:
        resp = (
            b"HTTP/1.1 200 OK\r\n"
            b"Transfer-Encoding: chunked\r\n"
            b"\r\n"
            b"c\r\n"
            b"Hello World!\r\n"
            b"0\r\n"
            b"\r\n"
        )
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.transparent), hooks=False)
        # Roundtrip 1
        >> DataReceived(tctx.client, req)
        << SendData(tctx.server, req)
        >> DataReceived(tctx.server, resp)
        << SendData(tctx.client, resp)
        # Roundtrip 2
        >> DataReceived(tctx.client, req)
        << SendData(tctx.server, req)
        >> DataReceived(tctx.server, resp)
        << SendData(tctx.client, resp)
    )
def test_http_reply_from_proxy(tctx):
    """Test a response served by mitmproxy itself."""

    def reply_from_proxy(flow: HTTPFlow):
        # setting flow.response in the request hook short-circuits upstream
        flow.response = Response.make(418)

    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << http.HttpRequestHook(Placeholder())
        >> reply(side_effect=reply_from_proxy)
        # no OpenConnection: the response comes straight from the proxy
        << SendData(
            tctx.client, b"HTTP/1.1 418 I'm a teapot\r\ncontent-length: 0\r\n\r\n"
        )
    )
def test_response_until_eof(tctx):
    """Test scenario where the server response body is terminated by EOF."""
    server = Placeholder(Server)
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        # no Content-Length: the body ends when the server closes
        >> DataReceived(server, b"HTTP/1.1 200 OK\r\n\r\nfoo")
        >> ConnectionClosed(server)
        << CloseConnection(server)
        << SendData(tctx.client, b"HTTP/1.1 200 OK\r\n\r\nfoo")
        # the client connection is closed as well to signal end-of-body
        << CloseConnection(tctx.client)
    )
def test_disconnect_while_intercept(tctx):
    """Test a server disconnect while a request is intercepted."""
    # Eager: the server connection is established at CONNECT time already.
    tctx.options.connection_strategy = "eager"
    server1 = Placeholder(Server)
    server2 = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
        >> DataReceived(
            tctx.client,
            b"CONNECT example.com:80 HTTP/1.1\r\nHost: example.com:80\r\n\r\n",
        )
        << http.HttpConnectHook(Placeholder(HTTPFlow))
        >> reply()
        << OpenConnection(server1)
        >> reply(None)
        << http.HttpConnectedHook(Placeholder(HTTPFlow))
        >> reply(None)
        << SendData(tctx.client, b"HTTP/1.1 200 Connection established\r\n\r\n")
        >> DataReceived(tctx.client, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        << layer.NextLayerHook(Placeholder())
        >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))
        << http.HttpRequestHook(flow)
        # The server disconnects while the request hook is still pending.
        >> ConnectionClosed(server1)
        << CloseConnection(server1)
        # Resume the intercepted HttpRequestHook (three commands back);
        # the layer must then open a fresh connection for the request.
        >> reply(to=-3)
        << OpenConnection(server2)
        >> reply(None)
        << SendData(server2, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server2, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
        << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
    )
    # A new connection object was used and recorded on the flow.
    assert server1() != server2()
    assert flow().server_conn == server2()
    assert not flow().live
@pytest.mark.parametrize("store_streamed_bodies", [False, True])
def test_store_streamed_bodies(tctx, store_streamed_bodies):
    """Test that streamed bodies are recorded (or discarded) per the store_streamed_bodies option."""
    tctx.options.store_streamed_bodies = store_streamed_bodies
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    def enable_streaming(flow: HTTPFlow):
        # Stream both directions, wrapping every chunk in brackets so the
        # recorded content is visibly the *modified* data.
        if flow.response is None:
            flow.request.stream = lambda x: b"[" + x + b"]"
        else:
            flow.response.stream = lambda x: b"[" + x + b"]"
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular))
        >> DataReceived(
            tctx.client,
            b"POST http://example.com/ HTTP/1.1\r\n"
            b"Host: example.com\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"3\r\nabc\r\n"
            b"0\r\n\r\n",
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
        << OpenConnection(server)
        >> reply(None)
        << SendData(
            server,
            b"POST / HTTP/1.1\r\n"
            b"Host: example.com\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"5\r\n[abc]\r\n"
            b"2\r\n[]\r\n",
        )
        << http.HttpRequestHook(flow)
        >> reply()
        << SendData(server, b"0\r\n\r\n")
        >> DataReceived(
            server,
            b"HTTP/1.1 200 OK\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"3\r\ndef\r\n"
            b"0\r\n\r\n",
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
        << SendData(
            tctx.client,
            b"HTTP/1.1 200 OK\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"5\r\n[def]\r\n"
            b"2\r\n[]\r\n",
        )
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(tctx.client, b"0\r\n\r\n")
    )
    if store_streamed_bodies:
        # The modified chunks are stored, including the b"[]" produced by the
        # stream callback for the end-of-stream chunk.
        assert flow().request.data.content == b"[abc][]"
        assert flow().response.data.content == b"[def][]"
    else:
        assert flow().request.data.content is None
        assert flow().response.data.content is None
@pytest.mark.parametrize("why", ["body_size=0", "body_size=3", "addon"])
@pytest.mark.parametrize("transfer_encoding", ["identity", "chunked"])
def test_response_streaming(tctx, why, transfer_encoding):
    """Test HTTP response streaming"""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    # Streaming is triggered either via the stream_large_bodies option
    # or via an addon setting flow.response.stream in the headers hook.
    if why.startswith("body_size"):
        tctx.options.stream_large_bodies = why.replace("body_size=", "")
    def enable_streaming(flow: HTTPFlow):
        if why == "addon":
            flow.response.stream = True
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/largefile HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET /largefile HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server, b"HTTP/1.1 200 OK\r\n")
    )
    assert flow().live
    if transfer_encoding == "identity":
        playbook >> DataReceived(server, b"Content-Length: 6\r\n\r\nabc")
    else:
        playbook >> DataReceived(
            server, b"Transfer-Encoding: chunked\r\n\r\n3\r\nabc\r\n"
        )
    playbook << http.HttpResponseHeadersHook(flow)
    playbook >> reply(side_effect=enable_streaming)
    if transfer_encoding == "identity":
        # Streamed identity bodies are forwarded chunk-by-chunk as received.
        playbook << SendData(
            tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\nabc"
        )
        playbook >> DataReceived(server, b"def")
        playbook << SendData(tctx.client, b"def")
    else:
        if why == "body_size=3":
            # With a 3-byte threshold the first chunk is buffered until the
            # threshold is exceeded, so both chunks go out merged as one.
            playbook >> DataReceived(server, b"3\r\ndef\r\n")
            playbook << SendData(
                tctx.client,
                b"HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n6\r\nabcdef\r\n",
            )
        else:
            playbook << SendData(
                tctx.client,
                b"HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n3\r\nabc\r\n",
            )
            playbook >> DataReceived(server, b"3\r\ndef\r\n")
            playbook << SendData(tctx.client, b"3\r\ndef\r\n")
    playbook >> DataReceived(server, b"0\r\n\r\n")
    playbook << http.HttpResponseHook(flow)
    playbook >> reply()
    if transfer_encoding == "chunked":
        playbook << SendData(tctx.client, b"0\r\n\r\n")
    assert playbook
    assert not flow().live
def test_stream_modify(tctx):
    """Test HTTP stream modification"""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    def enable_streaming(flow: HTTPFlow):
        # A callable stream transforms each chunk; wrap everything in brackets.
        if flow.response is None:
            flow.request.stream = lambda x: b"[" + x + b"]"
        else:
            flow.response.stream = lambda x: b"[" + x + b"]"
    assert (
        Playbook(http.HttpLayer(tctx, HTTPMode.regular))
        >> DataReceived(
            tctx.client,
            b"POST http://example.com/ HTTP/1.1\r\n"
            b"Host: example.com\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"3\r\nabc\r\n"
            b"0\r\n\r\n",
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
        << OpenConnection(server)
        >> reply(None)
        # The end-of-stream chunk is also passed through the callback,
        # producing the extra b"[]" chunk on the wire.
        << SendData(
            server,
            b"POST / HTTP/1.1\r\n"
            b"Host: example.com\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"5\r\n[abc]\r\n"
            b"2\r\n[]\r\n",
        )
        << http.HttpRequestHook(flow)
        >> reply()
        << SendData(server, b"0\r\n\r\n")
        >> DataReceived(
            server,
            b"HTTP/1.1 200 OK\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"3\r\ndef\r\n"
            b"0\r\n\r\n",
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
        << SendData(
            tctx.client,
            b"HTTP/1.1 200 OK\r\n"
            b"Transfer-Encoding: chunked\r\n\r\n"
            b"5\r\n[def]\r\n"
            b"2\r\n[]\r\n",
        )
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(tctx.client, b"0\r\n\r\n")
    )
@pytest.mark.parametrize("why", ["body_size=0", "body_size=3", "addon"])
@pytest.mark.parametrize("transfer_encoding", ["identity", "chunked"])
@pytest.mark.parametrize(
    "response", ["normal response", "early response", "early close", "early kill"]
)
def test_request_streaming(tctx, why, transfer_encoding, response):
    """
    Test HTTP request streaming
    This is a bit more contrived as we may receive server data while we are still sending the request.
    """
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    # Streaming is triggered via the stream_large_bodies option or by an
    # addon setting flow.request.stream in the headers hook.
    if why.startswith("body_size"):
        tctx.options.stream_large_bodies = why.replace("body_size=", "")
    def enable_streaming(flow: HTTPFlow):
        if why == "addon":
            flow.request.stream = True
    playbook >> DataReceived(
        tctx.client, b"POST http://example.com/ HTTP/1.1\r\nHost: example.com\r\n"
    )
    if transfer_encoding == "identity":
        playbook >> DataReceived(tctx.client, b"Content-Length: 9\r\n\r\nabc")
    else:
        playbook >> DataReceived(
            tctx.client, b"Transfer-Encoding: chunked\r\n\r\n3\r\nabc\r\n"
        )
    playbook << http.HttpRequestHeadersHook(flow)
    playbook >> reply(side_effect=enable_streaming)
    # With a 3-byte threshold and chunked encoding, the connection is only
    # opened once the buffered body exceeds the threshold.
    needs_more_data_before_open = (
        why == "body_size=3" and transfer_encoding == "chunked"
    )
    if needs_more_data_before_open:
        playbook >> DataReceived(tctx.client, b"3\r\ndef\r\n")
    playbook << OpenConnection(server)
    playbook >> reply(None)
    playbook << SendData(server, b"POST / HTTP/1.1\r\nHost: example.com\r\n")
    if transfer_encoding == "identity":
        playbook << SendData(server, b"Content-Length: 9\r\n\r\nabc")
        playbook >> DataReceived(tctx.client, b"def")
        playbook << SendData(server, b"def")
    else:
        if needs_more_data_before_open:
            # The buffered chunks are coalesced into a single chunk.
            playbook << SendData(
                server, b"Transfer-Encoding: chunked\r\n\r\n6\r\nabcdef\r\n"
            )
        else:
            playbook << SendData(
                server, b"Transfer-Encoding: chunked\r\n\r\n3\r\nabc\r\n"
            )
            playbook >> DataReceived(tctx.client, b"3\r\ndef\r\n")
            playbook << SendData(server, b"3\r\ndef\r\n")
    if response == "normal response":
        if transfer_encoding == "identity":
            playbook >> DataReceived(tctx.client, b"ghi")
            playbook << SendData(server, b"ghi")
        else:
            playbook >> DataReceived(tctx.client, b"3\r\nghi\r\n0\r\n\r\n")
            playbook << SendData(server, b"3\r\nghi\r\n")
        playbook << http.HttpRequestHook(flow)
        playbook >> reply()
        if transfer_encoding == "chunked":
            playbook << SendData(server, b"0\r\n\r\n")
        assert (
            playbook
            >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
            << http.HttpResponseHeadersHook(flow)
            >> reply()
            << http.HttpResponseHook(flow)
            >> reply()
            << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
        )
    elif response == "early response":
        # We may receive a response before we have finished sending our request.
        # We continue sending unless the server closes the connection.
        # https://tools.ietf.org/html/rfc7231#section-6.5.11
        assert (
            playbook
            >> DataReceived(
                server,
                b"HTTP/1.1 413 Request Entity Too Large\r\nContent-Length: 0\r\n\r\n",
            )
            << http.HttpResponseHeadersHook(flow)
            >> reply()
            << http.HttpResponseHook(flow)
            >> reply()
            << SendData(
                tctx.client,
                b"HTTP/1.1 413 Request Entity Too Large\r\nContent-Length: 0\r\n\r\n",
            )
        )
        if transfer_encoding == "identity":
            playbook >> DataReceived(tctx.client, b"ghi")
            playbook << SendData(server, b"ghi")
        else:
            playbook >> DataReceived(tctx.client, b"3\r\nghi\r\n0\r\n\r\n")
            playbook << SendData(server, b"3\r\nghi\r\n")
        playbook << http.HttpRequestHook(flow)
        playbook >> reply()
        if transfer_encoding == "chunked":
            playbook << SendData(server, b"0\r\n\r\n")
        assert playbook
    elif response == "early close":
        # Early response followed by a server disconnect: both sides close.
        assert (
            playbook
            >> DataReceived(
                server,
                b"HTTP/1.1 413 Request Entity Too Large\r\nContent-Length: 0\r\n\r\n",
            )
            << http.HttpResponseHeadersHook(flow)
            >> reply()
            << http.HttpResponseHook(flow)
            >> reply()
            << SendData(
                tctx.client,
                b"HTTP/1.1 413 Request Entity Too Large\r\nContent-Length: 0\r\n\r\n",
            )
            >> ConnectionClosed(server)
            << CloseConnection(server)
            << CloseConnection(tctx.client)
        )
    elif response == "early kill":
        # Server disconnects without any response at all -> 502 to the client.
        assert (
            playbook
            >> ConnectionClosed(server)
            << CloseConnection(server)
            << http.HttpErrorHook(flow)
            >> reply()
            << SendData(tctx.client, BytesMatching(b"502 Bad Gateway"))
            << CloseConnection(tctx.client)
        )
    else:  # pragma: no cover
        assert False
@pytest.mark.parametrize("where", ["request", "response"])
@pytest.mark.parametrize("transfer_encoding", ["identity", "chunked"])
def test_body_size_limit(tctx, where, transfer_encoding):
    """Test HTTP request body_size_limit"""
    tctx.options.body_size_limit = "3"
    flow = Placeholder(HTTPFlow)
    # A 6-byte body, exceeding the 3-byte limit in either encoding.
    if transfer_encoding == "identity":
        body = b"Content-Length: 6\r\n\r\nabcdef"
    else:
        body = b"Transfer-Encoding: chunked\r\n\r\n6\r\nabcdef"
    if where == "request":
        # An oversized request is rejected with 413 before reaching the server.
        assert (
            Playbook(http.HttpLayer(tctx, HTTPMode.regular))
            >> DataReceived(
                tctx.client,
                b"POST http://example.com/ HTTP/1.1\r\nHost: example.com\r\n" + body,
            )
            << http.HttpRequestHeadersHook(flow)
            >> reply()
            << http.HttpErrorHook(flow)
            >> reply()
            << SendData(
                tctx.client, BytesMatching(b"413 Payload Too Large.+body_size_limit")
            )
            << CloseConnection(tctx.client)
        )
        assert not flow().live
    else:
        # An oversized response is reported as a 502 gateway error.
        server = Placeholder(Server)
        assert (
            Playbook(http.HttpLayer(tctx, HTTPMode.regular))
            >> DataReceived(
                tctx.client,
                b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
            )
            << http.HttpRequestHeadersHook(flow)
            >> reply()
            << http.HttpRequestHook(flow)
            >> reply()
            << OpenConnection(server)
            >> reply(None)
            << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
            >> DataReceived(server, b"HTTP/1.1 200 OK\r\n" + body)
            << http.HttpResponseHeadersHook(flow)
            >> reply()
            << http.HttpErrorHook(flow)
            >> reply()
            << SendData(tctx.client, BytesMatching(b"502 Bad Gateway.+body_size_limit"))
            << CloseConnection(tctx.client)
            << CloseConnection(server)
        )
        assert not flow().live
@pytest.mark.parametrize("connect", [True, False])
def test_server_unreachable(tctx, connect):
    """Test the scenario where the target server is unreachable."""
    tctx.options.connection_strategy = "eager"
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
    if connect:
        playbook >> DataReceived(
            tctx.client,
            b"CONNECT example.com:443 HTTP/1.1\r\nHost: example.com:443\r\n\r\n",
        )
    else:
        playbook >> DataReceived(
            tctx.client, b"GET http://example.com/ HTTP/1.1\r\n\r\n"
        )
    playbook << OpenConnection(server)
    playbook >> reply("Connection failed")
    # CONNECT failures trigger the connect error hook instead of the
    # generic error hook, and leave the client connection open.
    if not connect:
        playbook << http.HttpErrorHook(flow)
    else:
        playbook << http.HttpConnectErrorHook(flow)
    playbook >> reply()
    playbook << SendData(
        tctx.client, BytesMatching(b"502 Bad Gateway.+Connection failed")
    )
    if not connect:
        playbook << CloseConnection(tctx.client)
    assert playbook
    assert not flow().live
    if not connect:
        assert flow().error
@pytest.mark.parametrize(
    "data",
    [
        None,
        b"I don't speak HTTP.",
        b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nweee",
    ],
)
def test_server_aborts(tctx, data):
    """Test the scenario where the server doesn't serve a response"""
    # data covers: no bytes at all, garbage, and an incomplete response body.
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
    )
    if data:
        playbook >> DataReceived(server, data)
    assert (
        playbook
        >> ConnectionClosed(server)
        << CloseConnection(server)
        << http.HttpErrorHook(flow)
        >> reply()
        << SendData(tctx.client, BytesMatching(b"502 Bad Gateway"))
        << CloseConnection(tctx.client)
    )
    assert flow().error
    assert not flow().live
@pytest.mark.parametrize("redirect", ["", "change-destination", "change-proxy"])
@pytest.mark.parametrize("domain", [b"example.com", b"xn--eckwd4c7c.xn--zckzah"])
@pytest.mark.parametrize("scheme", ["http", "https"])
def test_upstream_proxy(tctx, redirect, domain, scheme):
    """Test that an upstream HTTP proxy is used."""
    # domain also exercises an IDNA (punycode) hostname.
    server = Placeholder(Server)
    server2 = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    tctx.client.proxy_mode = ProxyMode.parse("upstream:http://proxy:8080")
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.upstream), hooks=False)
    if scheme == "http":
        # Plain HTTP: the absolute-form request is forwarded to the proxy as-is.
        assert (
            playbook
            >> DataReceived(
                tctx.client,
                b"GET http://%s/ HTTP/1.1\r\nHost: %s\r\n\r\n" % (domain, domain),
            )
            << OpenConnection(server)
            >> reply(None)
            << SendData(
                server,
                b"GET http://%s/ HTTP/1.1\r\nHost: %s\r\n\r\n" % (domain, domain),
            )
        )
    else:
        # HTTPS: mitmproxy answers the client's CONNECT itself and issues
        # its own CONNECT towards the upstream proxy.
        assert (
            playbook
            >> DataReceived(
                tctx.client,
                b"CONNECT %s:443 HTTP/1.1\r\nHost: %s:443\r\n\r\n" % (domain, domain),
            )
            << SendData(tctx.client, b"HTTP/1.1 200 Connection established\r\n\r\n")
            >> DataReceived(tctx.client, b"GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % domain)
            << layer.NextLayerHook(Placeholder())
            >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent))
            << OpenConnection(server)
            >> reply(None)
            << SendData(
                server,
                b"CONNECT %s:443 HTTP/1.1\r\nHost: %s:443\r\n\r\n" % (domain, domain),
            )
            >> DataReceived(server, b"HTTP/1.1 200 Connection established\r\n\r\n")
            << SendData(server, b"GET / HTTP/1.1\r\nHost: %s\r\n\r\n" % domain)
        )
    playbook >> DataReceived(server, b"HTTP/1.1 418 OK\r\nContent-Length: 0\r\n\r\n")
    playbook << SendData(tctx.client, b"HTTP/1.1 418 OK\r\nContent-Length: 0\r\n\r\n")
    assert playbook
    assert server().address == ("proxy", 8080)
    # Second request on the same client connection, possibly redirected.
    if scheme == "http":
        playbook >> DataReceived(
            tctx.client,
            b"GET http://%s/two HTTP/1.1\r\nHost: %s\r\n\r\n" % (domain, domain),
        )
    else:
        playbook >> DataReceived(
            tctx.client, b"GET /two HTTP/1.1\r\nHost: %s\r\n\r\n" % domain
        )
    assert playbook << http.HttpRequestHook(flow)
    if redirect == "change-destination":
        flow().request.host = domain + b".test"
        flow().request.host_header = domain
    elif redirect == "change-proxy":
        flow().server_conn.via = ("http", ("other-proxy", 1234))
    playbook >> reply()
    if redirect:
        # Protocol-wise we wouldn't need to open a new connection for plain http host redirects,
        # but we disregard this edge case to simplify implementation.
        playbook << OpenConnection(server2)
        playbook >> reply(None)
    else:
        server2 = server
    if scheme == "http":
        if redirect == "change-destination":
            playbook << SendData(
                server2,
                b"GET http://%s.test/two HTTP/1.1\r\nHost: %s\r\n\r\n"
                % (domain, domain),
            )
        else:
            playbook << SendData(
                server2,
                b"GET http://%s/two HTTP/1.1\r\nHost: %s\r\n\r\n" % (domain, domain),
            )
    else:
        if redirect == "change-destination":
            playbook << SendData(
                server2,
                b"CONNECT %s.test:443 HTTP/1.1\r\nHost: %s.test:443\r\n\r\n"
                % (domain, domain),
            )
            playbook >> DataReceived(
                server2, b"HTTP/1.1 200 Connection established\r\n\r\n"
            )
        elif redirect == "change-proxy":
            playbook << SendData(
                server2,
                b"CONNECT %s:443 HTTP/1.1\r\nHost: %s:443\r\n\r\n" % (domain, domain),
            )
            playbook >> DataReceived(
                server2, b"HTTP/1.1 200 Connection established\r\n\r\n"
            )
        playbook << SendData(server2, b"GET /two HTTP/1.1\r\nHost: %s\r\n\r\n" % domain)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | true |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/test_http_version_interop.py | test/mitmproxy/proxy/layers/http/test_http_version_interop.py | import h2.config
import h2.connection
import h2.events
from mitmproxy.connection import Server
from mitmproxy.http import HTTPFlow
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers.http import HTTPMode
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.layers.http.test_http2 import example_response_headers
from test.mitmproxy.proxy.layers.http.test_http2 import make_h2
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
from test.mitmproxy.proxy.tutils import reply
# Example HTTP/2 request headers used by the interop tests below.
# All names and values are bytes for consistency with the other header
# tuples in these tests (hpack accepts str too, but mixing types is
# error-prone). The duplicate cookie headers exercise HTTP/2 -> HTTP/1
# cookie folding into a single "cookie: a=1; b=2" header.
example_request_headers = (
    (b":method", b"GET"),
    (b":scheme", b"http"),
    (b":path", b"/"),
    (b":authority", b"example.com"),
    (b"cookie", b"a=1"),
    (b"cookie", b"b=2"),
)
# Shared client-side frame factory for building raw HTTP/2 frames in tests.
h2f = FrameFactory()
def event_types(events):
    """Return the classes of the given events, preserving order."""
    return list(map(type, events))
def h2_client(tctx: Context) -> tuple[h2.connection.H2Connection, Playbook]:
    """Set up an HTTP/2 client against a fresh HttpLayer playbook.

    Performs the initial settings exchange so that the returned h2
    connection and playbook are ready for request/response traffic.
    """
    tctx.client.alpn = b"h2"
    # Disable keepalive pings so no RequestWakeup commands show up in playbooks.
    tctx.options.http2_ping_keepalive = 0
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    conn = h2.connection.H2Connection()
    conn.initiate_connection()
    # mitmproxy sends its settings/window-update preamble first.
    server_preamble = Placeholder(bytes)
    assert playbook << SendData(tctx.client, server_preamble)
    assert event_types(conn.receive_data(server_preamble())) == [
        h2.events.RemoteSettingsChanged,
        h2.events.WindowUpdated,
    ]
    # Send the client preface + settings; mitmproxy must acknowledge them.
    settings_ack = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(tctx.client, conn.data_to_send())
        << SendData(tctx.client, settings_ack)
    )
    assert event_types(conn.receive_data(settings_ack())) == [
        h2.events.SettingsAcknowledged
    ]
    return conn, playbook
def test_h2_to_h1(tctx):
    """Test HTTP/2 -> HTTP/1 request translation"""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    conn, playbook = h2_client(tctx)
    conn.send_headers(1, example_request_headers, end_stream=True)
    response = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(tctx.client, conn.data_to_send())
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        # The two HTTP/2 cookie headers are folded into one HTTP/1 header.
        << SendData(
            server, b"GET / HTTP/1.1\r\nHost: example.com\r\ncookie: a=1; b=2\r\n\r\n"
        )
        >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\n")
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        >> DataReceived(server, b"Hello World!")
        << http.HttpResponseHook(flow)
        << CloseConnection(server)
        >> reply(to=-2)
        << SendData(tctx.client, response)
    )
    events = conn.receive_data(response())
    assert event_types(events) == [
        h2.events.ResponseReceived,
        h2.events.DataReceived,
        h2.events.DataReceived,
        h2.events.StreamEnded,
    ]
    resp: h2.events.ResponseReceived = events[0]
    body: h2.events.DataReceived = events[1]
    assert resp.headers == [(b":status", b"200"), (b"content-length", b"12")]
    assert body.data == b"Hello World!"
def test_h1_to_h2(tctx):
    """Test HTTP/1 -> HTTP/2 request translation"""
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    tctx.options.http2_ping_keepalive = 0
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    # Act as the upstream HTTP/2 server.
    conf = h2.config.H2Configuration(client_side=False)
    conn = h2.connection.H2Connection(conf)
    conn.initiate_connection()
    request = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        # make_h2 marks the server connection as having negotiated h2.
        >> reply(None, side_effect=make_h2)
        << SendData(server, request)
    )
    events = conn.receive_data(request())
    assert event_types(events) == [
        h2.events.RemoteSettingsChanged,
        h2.events.WindowUpdated,
        h2.events.RequestReceived,
        h2.events.StreamEnded,
    ]
    conn.send_headers(1, example_response_headers)
    conn.send_data(1, b"Hello World!", end_stream=True)
    settings_ack = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(server, conn.data_to_send())
        << http.HttpResponseHeadersHook(flow)
        << SendData(server, settings_ack)
        >> reply(to=-2)
        << http.HttpResponseHook(flow)
        >> reply()
        # No content-length in the h2 response -> HTTP/1 body until EOF.
        << SendData(tctx.client, b"HTTP/1.1 200 OK\r\n\r\nHello World!")
        << CloseConnection(tctx.client)
    )
    # A bare SETTINGS ACK frame.
    assert settings_ack() == b"\x00\x00\x00\x04\x01\x00\x00\x00\x00"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/test_http2.py | test/mitmproxy/proxy/layers/http/test_http2.py | import time
from logging import DEBUG
import h2.settings
import hpack
import hyperframe.frame
import pytest
from h2.errors import ErrorCodes
from mitmproxy.connection import ConnectionState
from mitmproxy.connection import Server
from mitmproxy.flow import Error
from mitmproxy.http import Headers
from mitmproxy.http import HTTPFlow
from mitmproxy.http import Request
from mitmproxy.proxy.commands import CloseConnection
from mitmproxy.proxy.commands import Log
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import RequestWakeup
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers.http import ErrorCode
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.layers.http._http2 import Http2Client
from mitmproxy.proxy.layers.http._http2 import split_pseudo_headers
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
from test.mitmproxy.proxy.tutils import reply
# Canonical HTTP/2 header and trailer tuples shared by the tests below.
example_request_headers = (
    (b":method", b"GET"),
    (b":scheme", b"http"),
    (b":path", b"/"),
    (b":authority", b"example.com"),
)
example_response_headers = ((b":status", b"200"),)
example_request_trailers = ((b"req-trailer-a", b"a"), (b"req-trailer-b", b"b"))
example_response_trailers = ((b"resp-trailer-a", b"a"), (b"resp-trailer-b", b"b"))
@pytest.fixture
def open_h2_server_conn():
    """An already-open server connection that negotiated HTTP/2 via ALPN."""
    # this is a bit fake here (port 80, with alpn, but no tls - c'mon),
    # but we don't want to pollute our tests with TLS handshakes.
    s = Server(address=("example.com", 80))
    s.state = ConnectionState.OPEN
    s.alpn = b"h2"
    return s
def decode_frames(data: bytes) -> list[hyperframe.frame.Frame]:
    """Parse raw wire bytes into a list of HTTP/2 frames.

    A leading client connection preface, if present, is skipped.
    """
    if data.startswith(b"PRI * HTTP/2.0"):
        data = data[24:]  # the client preface is exactly 24 bytes long
    parsed: list[hyperframe.frame.Frame] = []
    offset = 0
    while offset < len(data):
        # Each frame is a 9-byte header followed by `body_len` payload bytes.
        frame, body_len = hyperframe.frame.Frame.parse_frame_header(
            data[offset : offset + 9]
        )
        frame.parse_body(memoryview(data[offset + 9 : offset + 9 + body_len]))
        parsed.append(frame)
        offset += 9 + body_len
    return parsed
def start_h2_client(tctx: Context, keepalive: int = 0) -> tuple[Playbook, FrameFactory]:
    """Start an HttpLayer playbook with an HTTP/2 client and complete the handshake.

    Returns the playbook plus a FrameFactory for building client-side frames.
    """
    tctx.client.alpn = b"h2"
    tctx.options.http2_ping_keepalive = keepalive
    frame_factory = FrameFactory()
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
    assert (
        playbook
        << SendData(tctx.client, Placeholder())  # initial settings frame
        >> DataReceived(tctx.client, frame_factory.preamble())
        >> DataReceived(
            tctx.client, frame_factory.build_settings_frame({}, ack=True).serialize()
        )
    )
    return playbook, frame_factory
def make_h2(open_connection: OpenConnection) -> None:
    """Playbook side effect: mark a freshly opened server connection as HTTP/2."""
    assert isinstance(open_connection, OpenConnection), (
        f"Expected OpenConnection event, not {open_connection}"
    )
    open_connection.connection.alpn = b"h2"
def test_simple(tctx):
    """A basic HTTP/2 request/response roundtrip through the proxy."""
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    initial = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, initial)
    )
    # mitmproxy's first bytes to the server: settings, window update, headers.
    frames = decode_frames(initial())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
    ]
    sff = FrameFactory()
    assert (
        playbook
        # a conforming h2 server would send settings first, we disregard this for now.
        >> DataReceived(
            server, sff.build_headers_frame(example_response_headers).serialize()
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        >> DataReceived(
            server,
            sff.build_data_frame(b"Hello, World!", flags=["END_STREAM"]).serialize(),
        )
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(
            tctx.client,
            cff.build_headers_frame(example_response_headers).serialize()
            + cff.build_data_frame(b"Hello, World!").serialize()
            + cff.build_data_frame(b"", flags=["END_STREAM"]).serialize(),
        )
    )
    assert flow().request.url == "http://example.com/"
    assert flow().response.text == "Hello, World!"
@pytest.mark.parametrize("stream", ["stream", ""])
def test_response_trailers(tctx: Context, open_h2_server_conn: Server, stream):
    """HTTP/2 response trailers are forwarded (and editable) with and without streaming."""
    playbook, cff = start_h2_client(tctx)
    tctx.server = open_h2_server_conn
    sff = FrameFactory()
    def enable_streaming(flow: HTTPFlow):
        flow.response.stream = bool(stream)
    flow = Placeholder(HTTPFlow)
    (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << SendData(tctx.server, Placeholder(bytes))
        # a conforming h2 server would send settings first, we disregard this for now.
        >> DataReceived(
            tctx.server,
            sff.build_headers_frame(example_response_headers).serialize()
            + sff.build_data_frame(b"Hello, World!").serialize(),
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
    )
    if stream:
        # When streaming, headers and body are forwarded before trailers arrive.
        playbook << SendData(
            tctx.client,
            cff.build_headers_frame(example_response_headers).serialize()
            + cff.build_data_frame(b"Hello, World!").serialize(),
        )
    assert (
        playbook
        >> DataReceived(
            tctx.server,
            sff.build_headers_frame(
                example_response_trailers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpResponseHook(flow)
    )
    # The response hook may still modify trailers; drop one of them.
    assert flow().response.trailers
    del flow().response.trailers["resp-trailer-a"]
    if stream:
        assert (
            playbook
            >> reply()
            << SendData(
                tctx.client,
                cff.build_headers_frame(
                    example_response_trailers[1:], flags=["END_STREAM"]
                ).serialize(),
            )
        )
    else:
        # Non-streaming: everything is sent in one go after the response hook.
        assert (
            playbook
            >> reply()
            << SendData(
                tctx.client,
                cff.build_headers_frame(example_response_headers).serialize()
                + cff.build_data_frame(b"Hello, World!").serialize()
                + cff.build_headers_frame(
                    example_response_trailers[1:], flags=["END_STREAM"]
                ).serialize(),
            )
        )
@pytest.mark.parametrize("stream", ["stream", ""])
def test_request_trailers(tctx: Context, open_h2_server_conn: Server, stream):
    """HTTP/2 request trailers are forwarded with and without request streaming."""
    playbook, cff = start_h2_client(tctx)
    tctx.server = open_h2_server_conn
    def enable_streaming(flow: HTTPFlow):
        flow.request.stream = bool(stream)
    flow = Placeholder(HTTPFlow)
    server_data1 = Placeholder(bytes)
    server_data2 = Placeholder(bytes)
    (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(example_request_headers).serialize()
            + cff.build_data_frame(b"Hello, World!").serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
    )
    if stream:
        # Streaming: headers + body go out before the trailers arrive.
        playbook << SendData(tctx.server, server_data1)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_trailers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHook(flow)
        >> reply()
        << SendData(tctx.server, server_data2)
    )
    # In the non-streaming case server_data1 was never set; setdefault makes
    # the concatenation below work for both parametrizations.
    frames = decode_frames(server_data1.setdefault(b"") + server_data2())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
        hyperframe.frame.DataFrame,
        hyperframe.frame.HeadersFrame,
    ]
def test_upstream_error(tctx):
    """A failed upstream connection is reported to the HTTP/2 client as a 502."""
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    err = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply("oops server <> error")
        << http.HttpErrorHook(flow)
        >> reply()
        << SendData(tctx.client, err)
    )
    # The error response is delivered as headers + data frames whose body
    # contains the status line and the connection error message.
    frames = decode_frames(err())
    assert [type(x) for x in frames] == [
        hyperframe.frame.HeadersFrame,
        hyperframe.frame.DataFrame,
    ]
    d = frames[1]
    assert isinstance(d, hyperframe.frame.DataFrame)
    assert b"502 Bad Gateway" in d.data
    assert b"server <> error" in d.data
@pytest.mark.parametrize("trailers", ["trailers", ""])
def test_long_response(tctx: Context, trailers):
    """A 70 kB response exercises HTTP/2 flow control (initial window is 65535 bytes)."""
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    initial = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, initial)
    )
    frames = decode_frames(initial())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
    ]
    sff = FrameFactory()
    # Feed seven 10000-byte data frames (70000 bytes total) from the server.
    assert (
        playbook
        # a conforming h2 server would send settings first, we disregard this for now.
        >> DataReceived(
            server, sff.build_headers_frame(example_response_headers).serialize()
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        >> DataReceived(
            server, sff.build_data_frame(b"a" * 10000, flags=[]).serialize()
        )
        >> DataReceived(
            server,
            sff.build_data_frame(b"a" * 10000, flags=[]).serialize(),
        )
        >> DataReceived(
            server,
            sff.build_data_frame(b"a" * 10000, flags=[]).serialize(),
        )
        >> DataReceived(
            server,
            sff.build_data_frame(b"a" * 10000, flags=[]).serialize(),
        )
        >> DataReceived(
            server,
            sff.build_data_frame(b"a" * 10000, flags=[]).serialize(),
        )
        >> DataReceived(
            server,
            sff.build_data_frame(b"a" * 10000, flags=[]).serialize(),
        )
        >> DataReceived(
            server,
            sff.build_data_frame(b"a" * 10000, flags=[]).serialize(),
        )
    )
    # The stream is ended either with trailers or with an empty END_STREAM frame.
    if trailers:
        (
            playbook
            >> DataReceived(
                server,
                sff.build_headers_frame(
                    example_response_trailers, flags=["END_STREAM"]
                ).serialize(),
            )
        )
    else:
        (
            playbook
            >> DataReceived(
                server,
                sff.build_data_frame(b"", flags=["END_STREAM"]).serialize(),
            )
        )
    # The first 65535 bytes fit into the client's initial flow-control window;
    # frames are capped at 16384 bytes (the default max frame size).
    (
        playbook
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(
            tctx.client,
            cff.build_headers_frame(example_response_headers).serialize()
            + cff.build_data_frame(b"a" * 16384).serialize(),
        )
        << SendData(
            tctx.client,
            cff.build_data_frame(b"a" * 16384).serialize(),
        )
        << SendData(
            tctx.client,
            cff.build_data_frame(b"a" * 16384).serialize(),
        )
        << SendData(
            tctx.client,
            cff.build_data_frame(b"a" * 16383).serialize(),
        )
        # The client enlarges both the connection and the stream window,
        # allowing the remaining 4465 bytes to be sent.
        >> DataReceived(
            tctx.client,
            cff.build_window_update_frame(0, 65535).serialize()
            + cff.build_window_update_frame(1, 65535).serialize(),
        )
    )
    if trailers:
        assert (
            playbook
            << SendData(
                tctx.client,
                cff.build_data_frame(b"a" * 1).serialize(),
            )
            << SendData(tctx.client, cff.build_data_frame(b"a" * 4464).serialize())
            << SendData(
                tctx.client,
                cff.build_headers_frame(
                    example_response_trailers, flags=["END_STREAM"]
                ).serialize(),
            )
        )
    else:
        assert (
            playbook
            << SendData(
                tctx.client,
                cff.build_data_frame(b"a" * 1).serialize(),
            )
            << SendData(tctx.client, cff.build_data_frame(b"a" * 4464).serialize())
            << SendData(
                tctx.client,
                cff.build_data_frame(b"", flags=["END_STREAM"]).serialize(),
            )
        )
    assert flow().request.url == "http://example.com/"
    assert flow().response.text == "a" * 70000
@pytest.mark.parametrize("stream", ["stream", ""])
@pytest.mark.parametrize("when", ["request", "response"])
@pytest.mark.parametrize("how", ["RST", "disconnect", "RST+disconnect"])
def test_http2_client_aborts(tctx, stream, when, how):
    """
    Test handling of the case where a client aborts during request or response transmission.
    If the client aborts the request transmission, we must trigger an error hook,
    if the client disconnects during response transmission, no error hook is triggered.

    NOTE(review): the code below pushes `http.HttpErrorHook(flow)` in the
    response branch as well, so the claim above that no error hook fires on a
    response-time disconnect looks outdated — confirm against the HTTP layer.
    """
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook, cff = start_h2_client(tctx)
    resp = Placeholder(bytes)
    def enable_request_streaming(flow: HTTPFlow):
        flow.request.stream = True
    def enable_response_streaming(flow: HTTPFlow):
        flow.response.stream = True
    assert (
        playbook
        >> DataReceived(
            tctx.client, cff.build_headers_frame(example_request_headers).serialize()
        )
        << http.HttpRequestHeadersHook(flow)
    )
    # With request streaming the server connection is opened as soon as the
    # request headers are in; otherwise we wait for the complete request.
    if stream and when == "request":
        assert (
            playbook
            >> reply(side_effect=enable_request_streaming)
            << OpenConnection(server)
            >> reply(None)
            << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        )
    else:
        assert playbook >> reply()
    # Abort during request transmission: the error hook must fire.
    if when == "request":
        if "RST" in how:
            playbook >> DataReceived(
                tctx.client,
                cff.build_rst_stream_frame(1, ErrorCodes.CANCEL).serialize(),
            )
        else:
            playbook >> ConnectionClosed(tctx.client)
            playbook << CloseConnection(tctx.client)
        if stream:
            playbook << CloseConnection(server)
        playbook << http.HttpErrorHook(flow)
        playbook >> reply()
        if how == "RST+disconnect":
            playbook >> ConnectionClosed(tctx.client)
            playbook << CloseConnection(tctx.client)
        assert playbook
        assert (
            "stream reset" in flow().error.msg
            or "peer closed connection" in flow().error.msg
        )
        return
    # Request went through; the client aborts while the response is in flight.
    assert (
        playbook
        >> DataReceived(
            tctx.client, cff.build_data_frame(b"", flags=["END_STREAM"]).serialize()
        )
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 6\r\n\r\n123")
        << http.HttpResponseHeadersHook(flow)
    )
    if stream:
        assert (
            playbook
            >> reply(side_effect=enable_response_streaming)
            << SendData(tctx.client, resp)
        )
    else:
        assert playbook >> reply()
    if "RST" in how:
        playbook >> DataReceived(
            tctx.client, cff.build_rst_stream_frame(1, ErrorCodes.CANCEL).serialize()
        )
    else:
        playbook >> ConnectionClosed(tctx.client)
        playbook << CloseConnection(tctx.client)
    playbook << CloseConnection(server)
    playbook << http.HttpErrorHook(flow)
    playbook >> reply()
    assert playbook
    if how == "RST+disconnect":
        playbook >> ConnectionClosed(tctx.client)
        playbook << CloseConnection(tctx.client)
        assert playbook
    if "RST" in how:
        assert "stream reset" in flow().error.msg
    else:
        assert "peer closed connection" in flow().error.msg
@pytest.mark.parametrize("normalize", [True, False])
def test_no_normalization(tctx, normalize):
    """Test that we don't normalize headers when we just pass them through."""
    tctx.options.normalize_outbound_headers = normalize
    tctx.options.validate_inbound_headers = False
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    playbook, cff = start_h2_client(tctx)
    request_headers = list(example_request_headers) + [
        (b"Should-Not-Be-Capitalized! ", b" :) ")
    ]
    request_headers_lower = [(k.lower(), v) for (k, v) in request_headers]
    response_headers = list(example_response_headers) + [(b"Same", b"Here")]
    response_headers_lower = [(k.lower(), v) for (k, v) in response_headers]
    initial = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(request_headers, flags=["END_STREAM"]).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, initial)
    )
    frames = decode_frames(initial())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
    ]
    # Fixed operator precedence: the previous form
    #     assert (decode(...) == request_headers_lower if normalize else request_headers)
    # parsed as a conditional expression whose `else` branch was the (always
    # truthy, non-empty) `request_headers` list, so the non-normalizing case
    # asserted nothing. Both branches must compare against the decoded headers.
    assert hpack.hpack.Decoder().decode(frames[2].data, True) == (
        request_headers_lower if normalize else request_headers
    )
    sff = FrameFactory()
    (
        playbook
        >> DataReceived(
            server,
            sff.build_headers_frame(response_headers, flags=["END_STREAM"]).serialize(),
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        << http.HttpResponseHook(flow)
        >> reply()
    )
    if normalize:
        playbook << Log(
            "Lowercased 'Same' header as uppercase is not allowed with HTTP/2."
        )
    hdrs = response_headers_lower if normalize else response_headers
    assert playbook << SendData(
        tctx.client, cff.build_headers_frame(hdrs, flags=["END_STREAM"]).serialize()
    )
    # The flow itself must retain the unmodified (mixed-case) header fields.
    assert flow().request.headers.fields == ((b"Should-Not-Be-Capitalized! ", b" :) "),)
    assert flow().response.headers.fields == ((b"Same", b"Here"),)
@pytest.mark.parametrize("stream", ["stream", ""])
def test_end_stream_via_headers(tctx, stream):
    """
    Test that a body-less exchange is forwarded as a single HEADERS frame
    carrying END_STREAM (no separate empty DATA frame), both with and without
    request streaming enabled.
    """
    playbook, cff = start_h2_client(tctx)
    server = Placeholder(Server)
    flow = Placeholder(HTTPFlow)
    sff = FrameFactory()
    forwarded_request_frames = Placeholder(bytes)
    forwarded_response_frames = Placeholder(bytes)
    def enable_streaming(flow: HTTPFlow):
        flow.request.stream = bool(stream)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply(side_effect=enable_streaming)
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, forwarded_request_frames)
        >> DataReceived(
            server,
            sff.build_headers_frame(
                example_response_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpResponseHeadersHook(flow)
        >> reply()
        << http.HttpResponseHook(flow)
        >> reply()
        << SendData(tctx.client, forwarded_response_frames)
    )
    # Request side: connection preamble plus exactly one HEADERS frame.
    frames = decode_frames(forwarded_request_frames())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
    ]
    assert "END_STREAM" in frames[2].flags
    # Response side: exactly one HEADERS frame, also ending the stream.
    frames = decode_frames(forwarded_response_frames())
    assert [type(x) for x in frames] == [
        hyperframe.frame.HeadersFrame,
    ]
    assert "END_STREAM" in frames[0].flags
@pytest.mark.parametrize(
    "input,pseudo,headers",
    [
        ([(b"foo", b"bar")], {}, {"foo": "bar"}),
        ([(b":status", b"418")], {b":status": b"418"}, {}),
        (
            [(b":status", b"418"), (b"foo", b"bar")],
            {b":status": b"418"},
            {"foo": "bar"},
        ),
    ],
)
def test_split_pseudo_headers(input, pseudo, headers):
    """split_pseudo_headers separates HTTP/2 pseudo headers from regular headers."""
    actual_pseudo, actual_headers = split_pseudo_headers(input)
    assert pseudo == actual_pseudo
    assert Headers(**headers) == actual_headers
def test_split_pseudo_headers_err():
    """A duplicated pseudo header must be rejected with ValueError."""
    with pytest.raises(ValueError, match="Duplicate HTTP/2 pseudo header"):
        split_pseudo_headers([(b":status", b"418"), (b":status", b"418")])
def test_rst_then_close(tctx):
    """
    Test that we properly handle the case of a client that first causes protocol errors and then disconnects.
    Adapted from h2spec http2/5.1/5.
    """
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        # DATA on an already-closed stream is a protocol error -> RST_STREAM.
        >> DataReceived(
            tctx.client, cff.build_data_frame(b"unexpected data frame").serialize()
        )
        << SendData(
            tctx.client,
            cff.build_rst_stream_frame(1, ErrorCodes.STREAM_CLOSED).serialize(),
        )
        >> ConnectionClosed(tctx.client)
        << CloseConnection(tctx.client)
        # reply to the OpenConnection command issued five playbook entries back
        >> reply("connection cancelled", to=-5)
        << http.HttpErrorHook(flow)
        >> reply()
    )
    assert flow().error.msg == "connection cancelled"
def test_cancel_then_server_disconnect(tctx):
    """
    Test that we properly handle the case of the following event sequence:
      - client cancels a stream
      - we start an error hook
      - server disconnects
      - error hook completes.
    """
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(
            tctx.client, cff.build_rst_stream_frame(1, ErrorCodes.CANCEL).serialize()
        )
        << CloseConnection(server)
        << http.HttpErrorHook(flow)
        >> reply()
        >> ConnectionClosed(server)
        # the late server disconnect must not produce any further commands
        << None
    )
def test_cancel_during_response_hook(tctx):
    """
    Test that we properly handle the case of the following event sequence:
      - we receive a server response
      - we trigger the response hook
      - the client cancels the stream
      - the response hook completes

    Given that we have already triggered the response hook, we don't want to trigger the error hook.
    """
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
        >> DataReceived(server, b"HTTP/1.1 204 No Content\r\n\r\n")
        << http.HttpResponseHeadersHook(flow)
        << CloseConnection(server)
        # reply to the response-headers hook (two playbook entries back)
        >> reply(to=-2)
        << http.HttpResponseHook(flow)
        >> DataReceived(
            tctx.client, cff.build_rst_stream_frame(1, ErrorCodes.CANCEL).serialize()
        )
        # complete the response hook after the client's RST arrived
        >> reply(to=-2)
    )
def test_http_1_1_required(tctx):
    """
    Test that we properly forward an HTTP_1_1_REQUIRED stream error.
    """
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)
    sff = FrameFactory()
    forwarded_request_frames = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"]
            ).serialize(),
        )
        << http.HttpRequestHeadersHook(flow)
        >> reply()
        << http.HttpRequestHook(flow)
        >> reply()
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, forwarded_request_frames)
        # the server's HTTP_1_1_REQUIRED reset must be relayed to the client
        >> DataReceived(
            server,
            sff.build_rst_stream_frame(1, ErrorCodes.HTTP_1_1_REQUIRED).serialize(),
        )
        << http.HttpErrorHook(flow)
        >> reply()
        << SendData(
            tctx.client,
            cff.build_rst_stream_frame(1, ErrorCodes.HTTP_1_1_REQUIRED).serialize(),
        )
    )
def test_stream_concurrency(tctx):
    """Test that we can send an intercepted request with a lower stream id than one that has already been sent."""
    playbook, cff = start_h2_client(tctx)
    flow1 = Placeholder(HTTPFlow)
    flow2 = Placeholder(HTTPFlow)
    reqheadershook1 = http.HttpRequestHeadersHook(flow1)
    reqheadershook2 = http.HttpRequestHeadersHook(flow2)
    reqhook1 = http.HttpRequestHook(flow1)
    reqhook2 = http.HttpRequestHook(flow2)
    server = Placeholder(Server)
    data_req1 = Placeholder(bytes)
    data_req2 = Placeholder(bytes)
    assert (
        playbook
        # two requests arrive back-to-back on streams 1 and 3
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"], stream_id=1
            ).serialize()
            + cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"], stream_id=3
            ).serialize(),
        )
        << reqheadershook1
        << reqheadershook2
        >> reply(to=reqheadershook1)
        << reqhook1
        >> reply(to=reqheadershook2)
        << reqhook2
        # req 2 overtakes 1 and we already have a reply:
        >> reply(to=reqhook2)
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, data_req2)
        >> reply(to=reqhook1)
        << SendData(server, data_req1)
    )
    # req 2 carries the connection preamble because it went out first
    frames = decode_frames(data_req2())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
    ]
    frames = decode_frames(data_req1())
    assert [type(x) for x in frames] == [
        hyperframe.frame.HeadersFrame,
    ]
def test_max_concurrency(tctx):
    """
    Test that a second request is queued while the server's
    SETTINGS_MAX_CONCURRENT_STREAMS limit (here: 1) is exhausted,
    and is only sent upstream once the first stream completes.
    """
    playbook, cff = start_h2_client(tctx)
    server = Placeholder(Server)
    req1_bytes = Placeholder(bytes)
    settings_ack_bytes = Placeholder(bytes)
    req2_bytes = Placeholder(bytes)
    playbook.hooks = False
    sff = FrameFactory()
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"], stream_id=1
            ).serialize(),
        )
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, req1_bytes)
        >> DataReceived(
            server,
            sff.build_settings_frame(
                {h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 1}
            ).serialize(),
        )
        << SendData(server, settings_ack_bytes)
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"], stream_id=3
            ).serialize(),
        )
        # Can't send it upstream yet, all streams in use!
        >> DataReceived(
            server,
            sff.build_headers_frame(
                example_response_headers, flags=["END_STREAM"], stream_id=1
            ).serialize(),
        )
        # But now we can!
        << SendData(server, req2_bytes)
        << SendData(tctx.client, Placeholder(bytes))
        >> DataReceived(
            server,
            sff.build_headers_frame(
                example_response_headers, flags=["END_STREAM"], stream_id=3
            ).serialize(),
        )
        << SendData(tctx.client, Placeholder(bytes))
    )
    settings, _, req1 = decode_frames(req1_bytes())
    (settings_ack,) = decode_frames(settings_ack_bytes())
    (req2,) = decode_frames(req2_bytes())
    assert type(settings) is hyperframe.frame.SettingsFrame
    assert type(req1) is hyperframe.frame.HeadersFrame
    assert type(settings_ack) is hyperframe.frame.SettingsFrame
    assert type(req2) is hyperframe.frame.HeadersFrame
    assert req1.stream_id == 1
    assert req2.stream_id == 3
def test_stream_concurrent_get_connection(tctx):
    """Test that an immediate second request for the same domain does not trigger a second connection attempt."""
    playbook, cff = start_h2_client(tctx)
    playbook.hooks = False
    server = Placeholder(Server)
    data = Placeholder(bytes)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"], stream_id=1
            ).serialize(),
        )
        << (o := OpenConnection(server))
        # second request arrives while the connection is still being opened
        >> DataReceived(
            tctx.client,
            cff.build_headers_frame(
                example_request_headers, flags=["END_STREAM"], stream_id=3
            ).serialize(),
        )
        >> reply(None, to=o, side_effect=make_h2)
        << SendData(server, data)
    )
    # both requests are sent over the single connection in one write
    frames = decode_frames(data())
    assert [type(x) for x in frames] == [
        hyperframe.frame.SettingsFrame,
        hyperframe.frame.WindowUpdateFrame,
        hyperframe.frame.HeadersFrame,
        hyperframe.frame.HeadersFrame,
    ]
def test_kill_stream(tctx):
"""Test that we can kill individual streams."""
playbook, cff = start_h2_client(tctx)
flow1 = Placeholder(HTTPFlow)
flow2 = Placeholder(HTTPFlow)
req_headers_hook_1 = http.HttpRequestHeadersHook(flow1)
def kill(flow: HTTPFlow):
# Can't use flow.kill() here because that currently still depends on a reply object.
flow.error = Error(Error.KILLED_MESSAGE)
server = Placeholder(Server)
data_req1 = Placeholder(bytes)
assert (
playbook
>> DataReceived(
tctx.client,
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | true |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/hyper_h2_test_helpers.py | test/mitmproxy/proxy/layers/http/hyper_h2_test_helpers.py | # This file has been copied from https://github.com/python-hyper/hyper-h2/blob/master/test/helpers.py,
# MIT License
# -*- coding: utf-8 -*-
"""
helpers
~~~~~~~
This module contains helpers for the h2 tests.
"""
from hpack.hpack import Encoder
from hyperframe.frame import AltSvcFrame
from hyperframe.frame import ContinuationFrame
from hyperframe.frame import DataFrame
from hyperframe.frame import GoAwayFrame
from hyperframe.frame import HeadersFrame
from hyperframe.frame import PingFrame
from hyperframe.frame import PriorityFrame
from hyperframe.frame import PushPromiseFrame
from hyperframe.frame import RstStreamFrame
from hyperframe.frame import SettingsFrame
from hyperframe.frame import WindowUpdateFrame
# A plausible SETTINGS payload for tests that need one.
SAMPLE_SETTINGS = {
    SettingsFrame.HEADER_TABLE_SIZE: 4096,
    SettingsFrame.ENABLE_PUSH: 1,
    SettingsFrame.MAX_CONCURRENT_STREAMS: 2,
}
class FrameFactory:
    """
    A class containing lots of helper methods and state to build frames. This
    allows test cases to easily build correct HTTP/2 frames to feed to
    hyper-h2.

    Note: ``flags`` parameters default to an immutable empty tuple rather
    than the mutable ``[]`` default used by the upstream copy. Behavior is
    identical (the defaults are only iterated), but an immutable default
    cannot be accidentally shared and mutated across calls.
    """

    def __init__(self):
        self.encoder = Encoder()

    def refresh_encoder(self):
        """Replace the HPACK encoder, discarding its dynamic table state."""
        self.encoder = Encoder()

    def preamble(self):
        """Return the HTTP/2 client connection preface."""
        return b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

    def build_headers_frame(self, headers, flags=(), stream_id=1, **priority_kwargs):
        """
        Builds a single valid headers frame out of the contained headers.

        END_HEADERS is always set in addition to any `flags` given; priority
        attributes (e.g. depends_on, stream_weight, exclusive) may be passed
        as keyword arguments and are set directly on the frame.
        """
        f = HeadersFrame(stream_id)
        f.data = self.encoder.encode(headers)
        f.flags.add("END_HEADERS")
        for flag in flags:
            f.flags.add(flag)
        for k, v in priority_kwargs.items():
            setattr(f, k, v)
        return f

    def build_continuation_frame(self, header_block, flags=(), stream_id=1):
        """
        Builds a single continuation frame out of the binary header block.
        """
        f = ContinuationFrame(stream_id)
        f.data = header_block
        f.flags = set(flags)
        return f

    def build_data_frame(self, data, flags=None, stream_id=1, padding_len=0):
        """
        Builds a single data frame out of a chunk of data.
        """
        flags = set(flags) if flags is not None else set()
        f = DataFrame(stream_id)
        f.data = data
        f.flags = flags
        if padding_len:
            # `flags` is the same set object as f.flags, so this marks the frame.
            flags.add("PADDED")
            f.pad_length = padding_len
        return f

    def build_settings_frame(self, settings, ack=False):
        """
        Builds a single settings frame.
        """
        f = SettingsFrame(0)
        if ack:
            f.flags.add("ACK")
        f.settings = settings
        return f

    def build_window_update_frame(self, stream_id, increment):
        """
        Builds a single WindowUpdate frame.
        """
        f = WindowUpdateFrame(stream_id)
        f.window_increment = increment
        return f

    def build_ping_frame(self, ping_data, flags=None):
        """
        Builds a single Ping frame.
        """
        f = PingFrame(0)
        f.opaque_data = ping_data
        if flags:
            f.flags = set(flags)
        return f

    def build_goaway_frame(self, last_stream_id, error_code=0, additional_data=b""):
        """
        Builds a single GOAWAY frame.
        """
        f = GoAwayFrame(0)
        f.error_code = error_code
        f.last_stream_id = last_stream_id
        f.additional_data = additional_data
        return f

    def build_rst_stream_frame(self, stream_id, error_code=0):
        """
        Builds a single RST_STREAM frame.
        """
        f = RstStreamFrame(stream_id)
        f.error_code = error_code
        return f

    def build_push_promise_frame(
        self, stream_id, promised_stream_id, headers, flags=()
    ):
        """
        Builds a single PUSH_PROMISE frame.

        END_HEADERS is always set in addition to any `flags` given.
        """
        f = PushPromiseFrame(stream_id)
        f.promised_stream_id = promised_stream_id
        f.data = self.encoder.encode(headers)
        f.flags = set(flags)
        f.flags.add("END_HEADERS")
        return f

    def build_priority_frame(self, stream_id, weight, depends_on=0, exclusive=False):
        """
        Builds a single priority frame.
        """
        f = PriorityFrame(stream_id)
        f.depends_on = depends_on
        f.stream_weight = weight
        f.exclusive = exclusive
        return f

    def build_alt_svc_frame(self, stream_id, origin, field):
        """
        Builds a single ALTSVC frame.
        """
        f = AltSvcFrame(stream_id)
        f.origin = origin
        f.field = field
        return f

    def change_table_size(self, new_size):
        """
        Causes the encoder to send a dynamic size update in the next header
        block it sends.
        """
        self.encoder.header_table_size = new_size
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/test_http1.py | test/mitmproxy/proxy/layers/http/test_http1.py | import pytest
from mitmproxy import http
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.layers.http import Http1Client
from mitmproxy.proxy.layers.http import Http1Server
from mitmproxy.proxy.layers.http import ReceiveHttp
from mitmproxy.proxy.layers.http import RequestData
from mitmproxy.proxy.layers.http import RequestEndOfMessage
from mitmproxy.proxy.layers.http import RequestHeaders
from mitmproxy.proxy.layers.http import ResponseData
from mitmproxy.proxy.layers.http import ResponseEndOfMessage
from mitmproxy.proxy.layers.http import ResponseHeaders
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
class TestServer:
    @pytest.mark.parametrize("pipeline", ["pipeline", None])
    def test_simple(self, tctx, pipeline):
        """A POST with a body, optionally pipelined with a second GET request."""
        hdrs1 = Placeholder(RequestHeaders)
        hdrs2 = Placeholder(RequestHeaders)
        req2 = b"GET http://example.com/two HTTP/1.1\r\nHost: example.com\r\n\r\n"
        playbook = Playbook(Http1Server(tctx))
        (
            playbook
            >> DataReceived(
                tctx.client,
                b"POST http://example.com/one HTTP/1.1\r\n"
                b"Content-Length: 3\r\n"
                b"\r\n"
                b"abc" + (req2 if pipeline else b""),
            )
            << ReceiveHttp(hdrs1)
            << ReceiveHttp(RequestData(1, b"abc"))
            << ReceiveHttp(RequestEndOfMessage(1))
            >> ResponseHeaders(1, http.Response.make(200))
            << SendData(tctx.client, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
            >> ResponseEndOfMessage(1)
        )
        # the pipelined request is only processed after the first response
        if not pipeline:
            playbook >> DataReceived(tctx.client, req2)
        playbook << ReceiveHttp(hdrs2)
        playbook << ReceiveHttp(RequestEndOfMessage(3))
        assert playbook
    @pytest.mark.parametrize("pipeline", ["pipeline", None])
    def test_connect(self, tctx, pipeline):
        """After a 200 reply to CONNECT, subsequent bytes are opaque TCP data."""
        playbook = Playbook(Http1Server(tctx))
        (
            playbook
            >> DataReceived(
                tctx.client,
                b"CONNECT example.com:443 HTTP/1.1\r\n"
                b"content-length: 0\r\n"
                b"\r\n" + (b"some plain tcp" if pipeline else b""),
            )
            << ReceiveHttp(Placeholder(RequestHeaders))
            # << ReceiveHttp(RequestEndOfMessage(1))
            >> ResponseHeaders(1, http.Response.make(200))
            << SendData(tctx.client, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
            >> ResponseEndOfMessage(1)
        )
        if not pipeline:
            playbook >> DataReceived(tctx.client, b"some plain tcp")
        assert playbook << ReceiveHttp(RequestData(1, b"some plain tcp"))
    @pytest.mark.parametrize("pipeline", ["pipeline", None])
    def test_upgrade(self, tctx, pipeline):
        """After a 101 reply, the connection switches to raw upgraded data."""
        playbook = Playbook(Http1Server(tctx))
        (
            playbook
            >> DataReceived(
                tctx.client,
                b"POST http://example.com/one HTTP/1.1\r\n"
                b"Connection: Upgrade\r\n"
                b"Upgrade: websocket\r\n"
                b"\r\n" + (b"some websockets" if pipeline else b""),
            )
            << ReceiveHttp(Placeholder(RequestHeaders))
            << ReceiveHttp(RequestEndOfMessage(1))
            >> ResponseHeaders(1, http.Response.make(101))
            << SendData(
                tctx.client,
                b"HTTP/1.1 101 Switching Protocols\r\ncontent-length: 0\r\n\r\n",
            )
            >> ResponseEndOfMessage(1)
        )
        if not pipeline:
            playbook >> DataReceived(tctx.client, b"some websockets")
        assert playbook << ReceiveHttp(RequestData(1, b"some websockets"))
    def test_upgrade_denied(self, tctx):
        """A non-101 reply to an upgrade request keeps the connection in HTTP mode."""
        assert (
            Playbook(Http1Server(tctx))
            >> DataReceived(
                tctx.client,
                b"GET http://example.com/ HTTP/1.1\r\n"
                b"Connection: Upgrade\r\n"
                b"Upgrade: websocket\r\n"
                b"\r\n",
            )
            << ReceiveHttp(Placeholder(RequestHeaders))
            << ReceiveHttp(RequestEndOfMessage(1))
            >> ResponseHeaders(1, http.Response.make(200))
            << SendData(tctx.client, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
            >> ResponseEndOfMessage(1)
            >> DataReceived(tctx.client, b"GET / HTTP/1.1\r\n\r\n")
            << ReceiveHttp(Placeholder(RequestHeaders))
            << ReceiveHttp(RequestEndOfMessage(3))
        )
class TestClient:
    @pytest.mark.parametrize("pipeline", ["pipeline", None])
    def test_simple(self, tctx, pipeline):
        """Requests must be sent strictly one at a time; pipelining is rejected."""
        req = http.Request.make("GET", "http://example.com/")
        resp = Placeholder(ResponseHeaders)
        playbook = Playbook(Http1Client(tctx))
        (
            playbook
            >> RequestHeaders(1, req, True)
            << SendData(tctx.server, b"GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
            >> RequestEndOfMessage(1)
        )
        if pipeline:
            with pytest.raises(
                AssertionError, match="assert self.stream_id == event.stream_id"
            ):
                assert playbook >> RequestHeaders(3, req, True)
            return
        assert (
            playbook
            >> DataReceived(
                tctx.server, b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n"
            )
            << ReceiveHttp(resp)
            << ReceiveHttp(ResponseEndOfMessage(1))
            # now we can send the next request
            >> RequestHeaders(3, req, True)
            << SendData(tctx.server, b"GET / HTTP/1.1\r\ncontent-length: 0\r\n\r\n")
        )
        assert resp().response.status_code == 200
    def test_connect(self, tctx):
        """After a successful CONNECT, the connection carries opaque TCP data."""
        req = http.Request.make("CONNECT", "http://example.com:443")
        req.authority = "example.com:443"
        resp = Placeholder(ResponseHeaders)
        playbook = Playbook(Http1Client(tctx))
        assert (
            playbook
            >> RequestHeaders(1, req, True)
            << SendData(
                tctx.server,
                b"CONNECT example.com:443 HTTP/1.1\r\ncontent-length: 0\r\n\r\n",
            )
            >> RequestEndOfMessage(1)
            >> DataReceived(
                tctx.server,
                b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\nsome plain tcp",
            )
            << ReceiveHttp(resp)
            # << ReceiveHttp(ResponseEndOfMessage(1))
            << ReceiveHttp(ResponseData(1, b"some plain tcp"))
            # now we can send plain data
            >> RequestData(1, b"some more tcp")
            << SendData(tctx.server, b"some more tcp")
        )
    def test_upgrade(self, tctx):
        """After a 101 response, the connection switches to raw upgraded data."""
        req = http.Request.make(
            "GET",
            "http://example.com/ws",
            headers={
                "Connection": "Upgrade",
                "Upgrade": "websocket",
            },
        )
        resp = Placeholder(ResponseHeaders)
        playbook = Playbook(Http1Client(tctx))
        assert (
            playbook
            >> RequestHeaders(1, req, True)
            << SendData(
                tctx.server,
                b"GET /ws HTTP/1.1\r\nConnection: Upgrade\r\nUpgrade: websocket\r\ncontent-length: 0\r\n\r\n",
            )
            >> RequestEndOfMessage(1)
            >> DataReceived(
                tctx.server,
                b"HTTP/1.1 101 Switching Protocols\r\ncontent-length: 0\r\n\r\nhello",
            )
            << ReceiveHttp(resp)
            << ReceiveHttp(ResponseEndOfMessage(1))
            << ReceiveHttp(ResponseData(1, b"hello"))
            # now we can send plain data
            >> RequestData(1, b"some more websockets")
            << SendData(tctx.server, b"some more websockets")
        )
    def test_upgrade_denied(self, tctx):
        """A non-101 response to an upgrade request keeps the connection in HTTP mode."""
        req = http.Request.make(
            "GET",
            "http://example.com/ws",
            headers={
                "Connection": "Upgrade",
                "Upgrade": "websocket",
            },
        )
        resp = Placeholder(ResponseHeaders)
        playbook = Playbook(Http1Client(tctx))
        assert (
            playbook
            >> RequestHeaders(1, req, True)
            << SendData(
                tctx.server,
                b"GET /ws HTTP/1.1\r\nConnection: Upgrade\r\nUpgrade: websocket\r\ncontent-length: 0\r\n\r\n",
            )
            >> RequestEndOfMessage(1)
            >> DataReceived(
                tctx.server, b"HTTP/1.1 200 Ok\r\ncontent-length: 0\r\n\r\n"
            )
            << ReceiveHttp(resp)
            << ReceiveHttp(ResponseEndOfMessage(1))
            >> RequestHeaders(3, req, True)
            << SendData(tctx.server, Placeholder(bytes))
        )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/proxy/layers/http/test_http_fuzz.py | test/mitmproxy/proxy/layers/http/test_http_fuzz.py | from typing import Any
import pytest
from h2.settings import SettingCodes
from hypothesis import example
from hypothesis import given
from hypothesis.strategies import binary
from hypothesis.strategies import booleans
from hypothesis.strategies import composite
from hypothesis.strategies import data
from hypothesis.strategies import dictionaries
from hypothesis.strategies import integers
from hypothesis.strategies import lists
from hypothesis.strategies import sampled_from
from hypothesis.strategies import sets
from hypothesis.strategies import text
from mitmproxy import connection
from mitmproxy import options
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.connection import Server
from mitmproxy.http import HTTPFlow
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy.commands import OpenConnection
from mitmproxy.proxy.commands import SendData
from mitmproxy.proxy.events import ConnectionClosed
from mitmproxy.proxy.events import DataReceived
from mitmproxy.proxy.events import Start
from mitmproxy.proxy.layers import http
from mitmproxy.proxy.layers.http import _http2
from mitmproxy.proxy.layers.http import HTTPMode
from test.mitmproxy.proxy.layers.http.hyper_h2_test_helpers import FrameFactory
from test.mitmproxy.proxy.layers.http.test_http2 import example_request_headers
from test.mitmproxy.proxy.layers.http.test_http2 import example_response_headers
from test.mitmproxy.proxy.layers.http.test_http2 import make_h2
from test.mitmproxy.proxy.layers.http.test_http2 import start_h2_client
from test.mitmproxy.proxy.tutils import _eq
from test.mitmproxy.proxy.tutils import _TracebackInPlaybook
from test.mitmproxy.proxy.tutils import Placeholder
from test.mitmproxy.proxy.tutils import Playbook
from test.mitmproxy.proxy.tutils import reply
# Module-level options with the proxyserver addon loaded, shared by all fuzz runs.
opts = options.Options()
Proxyserver().load(opts)
@pytest.fixture(scope="module", autouse=True)
def disable_h2_error_catching():
    """
    Empty _http2.CATCH_HYPER_H2_ERRORS for the duration of this module
    (presumably so hyper-h2 errors propagate to the fuzzer instead of being
    caught by the proxy layer), restoring the original value afterwards.
    """
    errs = _http2.CATCH_HYPER_H2_ERRORS
    _http2.CATCH_HYPER_H2_ERRORS = ()
    try:
        yield None
    finally:
        _http2.CATCH_HYPER_H2_ERRORS = errs
# Building blocks for plausible (pre-mutation) HTTP/1 messages.
request_lines = sampled_from(
    [
        b"GET / HTTP/1.1",
        b"GET http://example.com/ HTTP/1.1",
        b"CONNECT example.com:443 HTTP/1.1",
        b"HEAD /foo HTTP/0.9",
    ]
)
response_lines = sampled_from(
    [
        b"HTTP/1.1 200 OK",
        b"HTTP/1.1 100 Continue",
        b"HTTP/0.9 204 No Content",
        b"HEAD /foo HTTP/0.9",
    ]
)
headers = lists(
    sampled_from(
        [
            b"Host: example.com",
            b"Content-Length: 5",
            b"Expect: 100-continue",
            b"Transfer-Encoding: chunked",
            b"Connection: close",
            b"",
        ]
    )
)
# plain, content-length-framed, and chunked bodies
bodies = sampled_from([b"", b"12345", b"5\r\n12345\r\n0\r\n\r\n"])
@composite
def mutations(draw, elements):
    """Mutate a drawn value: cut out one random span, then overwrite another random span with arbitrary bytes."""
    data = draw(elements)
    cut_start = draw(integers(0, len(data)))
    cut_end = draw(integers(cut_start, len(data)))
    data = data[:cut_start] + data[cut_end:]
    replace_start = draw(integers(0, len(data)))
    replace_end = draw(integers(replace_start, len(data)))
    return data[:replace_start] + draw(binary()) + data[replace_end:]
@composite
def chunks(draw, elements):
    """Split a drawn value into up to three contiguous, non-empty chunks at two random cut points."""
    data = draw(elements)
    chunks = []
    a, b = sorted([draw(integers(0, len(data))), draw(integers(0, len(data)))])
    if a > 0:
        chunks.append(data[:a])
    if a != b:
        chunks.append(data[a:b])
    if b < len(data):
        chunks.append(data[b:])
    return chunks
@composite
def h1_requests(draw):
    """Assemble a raw HTTP/1 request from a request line, header lines, and a body."""
    request = draw(request_lines) + b"\r\n"
    request += b"\r\n".join(draw(headers))
    request += b"\r\n\r\n" + draw(bodies)
    return request
@composite
def h2_responses(draw):
    """
    Assemble a raw HTTP/1.x response. NOTE(review): despite the name, this
    draws from `response_lines` (HTTP/0.9/1.1 status lines) and is consumed
    by test_fuzz_h1_response below.
    """
    response = draw(response_lines) + b"\r\n"
    response += b"\r\n".join(draw(headers))
    response += b"\r\n\r\n" + draw(bodies)
    return response
@given(chunks(mutations(h1_requests())))
def test_fuzz_h1_request(data):
    """Feed mutated, arbitrarily chunked HTTP/1 request bytes into a fresh layer; it must not raise."""
    tctx = _tctx()
    layer = http.HttpLayer(tctx, HTTPMode.regular)
    for _ in layer.handle_event(Start()):
        pass
    for chunk in data:
        for _ in layer.handle_event(DataReceived(tctx.client, chunk)):
            pass
@given(chunks(mutations(h2_responses())))
@example([b"0 OK\r\n\r\n", b"\r\n", b"5\r\n12345\r\n0\r\n\r\n"])
def test_fuzz_h1_response(data):
    """Set up a proxied request, then feed mutated, chunked response bytes from the server side; the layer must not raise."""
    tctx = _tctx()
    server = Placeholder(connection.Server)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << OpenConnection(server)
        >> reply(None)
        << SendData(server, b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
    )
    for chunk in data:
        for _ in playbook.layer.handle_event(events.DataReceived(server(), chunk)):
            pass
# Frame flags and stream ids drawn for fuzzed HTTP/2 frames.
h2_flags = sets(
    sampled_from(
        [
            "END_STREAM",
            "END_HEADERS",
        ]
    )
)
h2_stream_ids = integers(0, 3)
h2_stream_ids_nonzero = integers(1, 3)
@composite
def h2_headers(draw):
    """
    Draw a plausible HTTP/2 header list and (for i > 0) corrupt one cell:
    `p` is 0 or 1, so `headers[i - 1][p - 1]` overwrites either the value
    (index -1) or the name (index 0) of one header with arbitrary text.
    """
    required_headers = [
        [":path", "/"],
        [":scheme", draw(sampled_from(["http", "https"]))],
        [":method", draw(sampled_from(["GET", "POST", "CONNECT"]))],
    ]
    optional_headers = [
        [":authority", draw(sampled_from(["example.com:443", "example.com"]))],
        ["cookie", "foobaz"],
        ["host", "example.com"],
        ["content-length", "42"],
    ]
    headers = required_headers + draw(lists(sampled_from(optional_headers), max_size=3))
    i = draw(integers(0, len(headers)))
    p = int(draw(booleans()))
    r = draw(text())
    if i > 0:
        headers[i - 1][p - 1] = r
    return headers
@composite
def h2_frames(draw):
    """Serialize a random sequence of 1-11 HTTP/2 frames of assorted types into one byte string."""
    ff = FrameFactory()
    headers1 = ff.build_headers_frame(headers=draw(h2_headers()))
    headers1.flags.clear()
    headers1.flags |= draw(h2_flags)
    headers2 = ff.build_headers_frame(
        headers=draw(h2_headers()),
        depends_on=draw(h2_stream_ids),
        stream_weight=draw(integers(0, 255)),
        exclusive=draw(booleans()),
    )
    headers2.flags.clear()
    headers2.flags |= draw(h2_flags)
    settings = ff.build_settings_frame(
        settings=draw(
            dictionaries(
                keys=sampled_from(SettingCodes),
                values=integers(0, 2**32 - 1),
                max_size=5,
            )
        ),
        ack=draw(booleans()),
    )
    continuation = ff.build_continuation_frame(
        header_block=ff.encoder.encode(draw(h2_headers())), flags=draw(h2_flags)
    )
    goaway = ff.build_goaway_frame(draw(h2_stream_ids))
    push_promise = ff.build_push_promise_frame(
        stream_id=draw(h2_stream_ids_nonzero),
        promised_stream_id=draw(h2_stream_ids),
        headers=draw(h2_headers()),
    )
    rst = ff.build_rst_stream_frame(draw(h2_stream_ids_nonzero))
    prio = ff.build_priority_frame(
        stream_id=draw(h2_stream_ids_nonzero),
        weight=draw(integers(0, 255)),
        depends_on=draw(h2_stream_ids),
        exclusive=draw(booleans()),
    )
    data1 = ff.build_data_frame(draw(binary()), draw(h2_flags))
    data2 = ff.build_data_frame(
        draw(binary()), draw(h2_flags), stream_id=draw(h2_stream_ids_nonzero)
    )
    window_update = ff.build_window_update_frame(
        draw(h2_stream_ids), draw(integers(0, 2**32 - 1))
    )
    frames = draw(
        lists(
            sampled_from(
                [
                    headers1,
                    headers2,
                    settings,
                    continuation,
                    goaway,
                    push_promise,
                    rst,
                    prio,
                    data1,
                    data2,
                    window_update,
                ]
            ),
            min_size=1,
            max_size=11,
        )
    )
    return b"".join(x.serialize() for x in frames)
def h2_layer(opts):
    """Create an HttpLayer that has negotiated HTTP/2 and already consumed the
    client connection preface.

    ``opts`` is accepted for call-site compatibility but unused in the body;
    the context is built from the module-level options via ``_tctx()``.
    Returns the ``(context, layer)`` pair.
    """
    tctx = _tctx()
    tctx.client.alpn = b"h2"
    layer = http.HttpLayer(tctx, HTTPMode.regular)
    startup_events = [
        Start(),
        DataReceived(tctx.client, b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"),
    ]
    for event in startup_events:
        # Drain all commands the layer emits during startup.
        for _ in layer.handle_event(event):
            pass
    return tctx, layer
def _h2_request(chunks):
    """Replay the given byte chunks as client data against a fresh HTTP/2 layer;
    the layer must never raise."""
    tctx, layer = h2_layer(opts)
    for piece in chunks:
        # Exhaust the command generator — only freedom from exceptions matters.
        for _ in layer.handle_event(DataReceived(tctx.client, piece)):
            pass
# fmt: off
@given(chunks(h2_frames()))
@example([b'\x00\x00\x00\x01\x05\x00\x00\x00\x01\x00\x00\x00\x01\x05\x00\x00\x00\x01'])
@example([b'\x00\x00\x00\x01\x07\x00\x00\x00\x01A\x88/\x91\xd3]\x05\\\x87\xa7\x84\x86\x82`\x80f\x80\\\x80'])
@example([b'\x00\x00\x05\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00'])
@example([b'\x00\x00\x13\x01\x04\x00\x00\x00\x01A\x88/\x91\xd3]\x05\\\x87\xa7\x84\x86\x82`\x80f\x80\\\x80'])
@example([b'\x00\x00\x12\x01\x04\x00\x00\x00\x01\x84\x86\x82`\x80A\x88/\x91\xd3]\x05\\\x87\xa7\\\x81\x07'])
@example([b'\x00\x00\x12\x01\x04\x00\x00\x00\x01\x84\x86\x82`\x80A\x88/\x91\xd3]\x05\\\x87\xa7\\\x81\x07'])
@example([b'\x00\x00\x14\x01\x04\x00\x00\x00\x01A\x88/\x91\xd3]\x05\\\x87\xa7\x84\x86`\x80\x82f\x80'])
@example([
    b'\x00\x00%\x01\x04\x00\x00\x00\x01A\x8b/\x91\xd3]\x05\\\x87\xa6\xe3M3\x84\x86\x82`\x85\x94\xe7\x8c~\xfff\x88/\x91'
    b'\xd3]\x05\\\x87\xa7\\\x82h_\x00\x00\x07\x01\x05\x00\x00\x00\x01\xc1\x84\x86\x82\xc0\xbf\xbe'])
@example([b'\x00\x00\x03\x01\x04\x00\x00\x00\x01\x84\x86\x82\x00\x00\x08\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'])
def test_fuzz_h2_request_chunks(chunks):
    # Fuzz the HTTP/2 request path with unmutated frames split at arbitrary
    # chunk boundaries; the @example payloads are past regression inputs.
    _h2_request(chunks)
# fmt: on
@given(chunks(mutations(h2_frames())))
def test_fuzz_h2_request_mutations(chunks):
    # Same as test_fuzz_h2_request_chunks, but with additional byte-level
    # mutations applied to the serialized frames before chunking.
    _h2_request(chunks)
def _tctx() -> context.Context:
    """Build a fresh proxy context with a canned client connection and
    HTTP/2 keepalive pings disabled."""
    client = connection.Client(
        peername=("client", 1234),
        sockname=("127.0.0.1", 8080),
        timestamp_start=1605699329,
    )
    tctx = context.Context(client, opts)
    # Keepalive pings would inject wakeup commands that only add noise here.
    tctx.options.http2_ping_keepalive = 0
    return tctx
def _h2_response(chunks):
    """Drive a GET through the proxy with an HTTP/2 upstream, then replay the
    given byte chunks as server data; the layer must never raise."""
    tctx = _tctx()
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
    server = Placeholder(connection.Server)
    # Establish the server connection (upgraded to h2 via make_h2) first...
    assert (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET http://example.com/ HTTP/1.1\r\nHost: example.com\r\n\r\n",
        )
        << OpenConnection(server)
        >> reply(None, side_effect=make_h2)
        << SendData(server, Placeholder())
    )
    # ...then feed the fuzzed response chunks straight into the layer.
    for chunk in chunks:
        for _ in playbook.layer.handle_event(events.DataReceived(server(), chunk)):
            pass
# fmt: off
@given(chunks(h2_frames()))
@example([b'\x00\x00\x03\x01\x04\x00\x00\x00\x01\x84\x86\x82',
          b'\x00\x00\x07\x05\x04\x00\x00\x00\x01\x00\x00\x00\x00\x84\x86\x82'])
@example([b'\x00\x00\x00\x00\x00\x00\x00\x00\x01'])
@example([b'\x00\x00\x00\x01\x04\x00\x00\x00\x01'])
@example([b'\x00\x00\x07\x05\x04\x00\x00\x00\x01\x00\x00\x00\x02\x84\x86\x82'])
@example([b'\x00\x00\x06\x014\x00\x01\x00\x00\x00\x00\x01@\x80\x81c\x86\x82'])
@example([b'\x00\x00\x06\x01\x04\x00\x00\x00\x01@\x80\x81c\x86\x82'])
def test_fuzz_h2_response_chunks(chunks):
    # Fuzz the HTTP/2 response path with unmutated frames at arbitrary chunk
    # boundaries; the @example payloads are past regression inputs.
    _h2_response(chunks)
# fmt: on
@given(chunks(mutations(h2_frames())))
def test_fuzz_h2_response_mutations(chunks):
    # Same as test_fuzz_h2_response_chunks, but with additional byte-level
    # mutations applied to the serialized frames before chunking.
    _h2_response(chunks)
@pytest.mark.parametrize(
    "example",
    [
        # (stream request body?, stream response body?, scripted event order)
        (
            True,
            False,
            [
                "data_req",
                "reply_hook_req_headers",
                "reply_openconn",
                "data_resp",
                "data_reqbody",
                "data_respbody",
                "err_server_rst",
                "reply_hook_resp_headers",
            ],
        ),
        (
            True,
            False,
            [
                "data_req",
                "reply_hook_req_headers",
                "reply_openconn",
                "err_server_rst",
                "data_reqbody",
                "reply_hook_error",
            ],
        ),
    ],
)
def test_cancel_examples(example):
    """
    We can't specify examples in test_fuzz_cancel (because we use data, see
    https://hypothesis.readthedocs.io/en/latest/data.html#interactive-draw),
    so we have this here for explicit examples.
    """
    stream_req, stream_resp, draws = example

    def draw(lst):
        # Deterministic stand-in for hypothesis' draw: pick the next scripted
        # event name, failing loudly if it is not currently available.
        if draws:
            this_draw = draws.pop(0)
            for name, evt in lst:
                if name == this_draw:
                    return name, evt
            raise AssertionError(
                f"{this_draw} not in list: {[name for name, _ in lst]}"
            )
        else:
            # Script exhausted: just take the first available event.
            return lst[0]

    _test_cancel(stream_req, stream_resp, draw)
@given(stream_request=booleans(), stream_response=booleans(), data=data())
def test_fuzz_cancel(stream_request, stream_response, data):
    # Randomized version of test_cancel_examples: hypothesis interactively
    # picks the order in which connection events fire.
    _test_cancel(
        stream_request, stream_response, lambda lst: data.draw(sampled_from(lst))
    )
def _test_cancel(stream_req, stream_resp, draw):
    """
    Test that we don't raise an exception if someone disconnects.

    Builds a pool of possible events (data, hook replies, disconnects, RSTs)
    with per-event ordering constraints, then repeatedly lets ``draw`` pick
    the next eligible event and feeds it to the playbook.
    """
    tctx = _tctx()
    playbook, cff = start_h2_client(tctx)
    flow = Placeholder(HTTPFlow)
    server = Placeholder(Server)

    def maybe_stream(flow: HTTPFlow):
        # Optionally switch request/response bodies to streaming mode,
        # exercising the streamed-body cancellation paths as well.
        if stream_req:
            flow.request.stream = True
        if stream_resp and flow.response:
            flow.response.stream = True

    hook_req_headers = http.HttpRequestHeadersHook(flow)
    hook_req = http.HttpRequestHook(flow)
    hook_resp_headers = http.HttpResponseHeadersHook(flow)
    hook_resp = http.HttpResponseHook(flow)
    hook_error = http.HttpErrorHook(flow)
    openconn = OpenConnection(server)
    send_upstream = SendData(server, Placeholder(bytes))
    data_req = DataReceived(
        tctx.client, cff.build_headers_frame(example_request_headers).serialize()
    )
    data_reqbody = DataReceived(
        tctx.client, cff.build_data_frame(b"foo", flags=["END_STREAM"]).serialize()
    )
    data_resp = DataReceived(
        server, cff.build_headers_frame(example_response_headers).serialize()
    )
    data_respbody = DataReceived(
        server, cff.build_data_frame(b"bar", flags=["END_STREAM"]).serialize()
    )
    client_disc = ConnectionClosed(tctx.client)
    client_rst = DataReceived(tctx.client, cff.build_rst_stream_frame(1).serialize())
    server_disc = ConnectionClosed(server)
    server_rst = DataReceived(server, cff.build_rst_stream_frame(1).serialize())
    evts: dict[str, tuple[Any, Any, Any]] = {}
    # precondition, but-not-after-this
    evts["data_req"] = data_req, None, client_disc
    evts["data_reqbody"] = data_reqbody, data_req, client_disc
    evts["reply_hook_req_headers"] = (
        reply(to=hook_req_headers, side_effect=maybe_stream),
        hook_req_headers,
        None,
    )
    evts["reply_hook_req"] = reply(to=hook_req), hook_req, None
    evts["reply_openconn"] = (
        reply(None, to=openconn, side_effect=make_h2),
        openconn,
        None,
    )
    evts["data_resp"] = data_resp, send_upstream, server_disc
    evts["data_respbody"] = data_respbody, data_resp, server_disc
    evts["reply_hook_resp_headers"] = (
        reply(to=hook_resp_headers, side_effect=maybe_stream),
        hook_resp_headers,
        None,
    )
    evts["reply_hook_resp"] = reply(to=hook_resp), hook_resp, None
    evts["reply_hook_error"] = reply(to=hook_error), hook_error, None
    evts["err_client_disc"] = client_disc, None, None
    evts["err_client_rst"] = client_rst, None, client_disc
    evts["err_server_disc"] = server_disc, send_upstream, None
    evts["err_server_rst"] = server_rst, send_upstream, server_disc

    def eq_maybe(a, b):
        # _eq helpfully raises a TypeError when placeholder types don't match
        # that is useful in (test) development, but may happen legitimately when fuzzing here.
        try:
            return _eq(a, b)
        except TypeError:
            return False

    while evts:
        candidates = []
        for name, (evt, precon, negprecon) in evts.items():
            # An event is eligible if its precondition already happened and
            # its "but-not-after-this" event has not.
            precondition_ok = precon is None or any(
                eq_maybe(x, precon) for x in playbook.actual
            )
            neg_precondition_ok = negprecon is None or not any(
                eq_maybe(x, negprecon) for x in playbook.actual
            )
            if precondition_ok and neg_precondition_ok:
                # crude hack to increase fuzzing efficiency: make it more likely that we progress.
                for i in range(1 if name.startswith("err_") else 3):
                    candidates.append((name, evt))
        if not candidates:
            break
        name, evt = draw(candidates)
        del evts[name]
        try:
            assert playbook >> evt
        except AssertionError:
            if any(isinstance(x, _TracebackInPlaybook) for x in playbook.actual):
                # A traceback in the layer is a real failure — propagate it.
                raise
            else:
                # add commands that the server issued.
                playbook.expected.extend(playbook.actual[len(playbook.expected) :])
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_onboarding.py | test/mitmproxy/addons/test_onboarding.py | import pytest
from mitmproxy.addons import onboarding
from mitmproxy.test import taddons
@pytest.fixture
def client():
    # HTTP test client for the onboarding web app; the context manager
    # ensures the client is cleaned up after each test.
    with onboarding.app.test_client() as client:
        yield client
class TestApp:
    """Tests for the onboarding web application served by the Onboarding addon."""

    def addons(self):
        # Addons to load when this test class runs under the full proxy harness.
        return [onboarding.Onboarding()]

    def test_basic(self, client):
        # The index page must render once the addon is configured.
        ob = onboarding.Onboarding()
        with taddons.context(ob) as tctx:
            tctx.configure(ob)
            assert client.get("/").status_code == 200

    @pytest.mark.parametrize("ext", ["pem", "p12", "cer", "magisk"])
    def test_cert(self, client, ext, tdata):
        # Every supported certificate format must be downloadable with a body.
        ob = onboarding.Onboarding()
        with taddons.context(ob) as tctx:
            tctx.configure(ob, confdir=tdata.path("mitmproxy/data/confdir"))
            resp = client.get(f"/cert/{ext}")
            assert resp.status_code == 200
            assert resp.data

    @pytest.mark.parametrize("ext", ["pem", "p12", "cer", "magisk"])
    def test_head(self, client, ext, tdata):
        # HEAD requests must carry the download headers but no response body.
        ob = onboarding.Onboarding()
        with taddons.context(ob) as tctx:
            tctx.configure(ob, confdir=tdata.path("mitmproxy/data/confdir"))
            resp = client.head(f"http://{tctx.options.onboarding_host}/cert/{ext}")
            assert resp.status_code == 200
            assert "Content-Length" in resp.headers
            assert "Content-Type" in resp.headers
            assert "Content-Disposition" in resp.headers
            assert "attachment" in resp.headers["Content-Disposition"]
            assert not resp.data
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_strip_dns_https_records.py | test/mitmproxy/addons/test_strip_dns_https_records.py | from mitmproxy import dns
from mitmproxy.addons import strip_dns_https_records
from mitmproxy.net.dns import https_records
from mitmproxy.net.dns import types
from mitmproxy.net.dns.https_records import SVCParamKeys
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
class TestStripECH:
    """Tests for the StripDnsHttpsRecords addon, which removes ECH (and,
    with http3 disabled, h3 ALPN) entries from DNS HTTPS answers."""

    def test_strip_ech(self):
        # With strip_ech=True, the ECH SvcParam must be removed from every
        # HTTPS record while other records/params are left intact.
        se = strip_dns_https_records.StripDnsHttpsRecords()
        with taddons.context(se) as tctx:
            params1 = {
                SVCParamKeys.PORT.value: b"\x01\xbb",
                SVCParamKeys.ECH.value: b"testbytes",
            }
            params2 = {SVCParamKeys.PORT.value: b"\x01\xbb"}
            record1 = https_records.HTTPSRecord(1, "example.com", params1)
            record2 = https_records.HTTPSRecord(1, "example.com", params2)
            answers = [
                dns.ResourceRecord(
                    "dns.google",
                    dns.types.A,
                    dns.classes.IN,
                    32,
                    b"\x08\x08\x08\x08",
                ),
                dns.ResourceRecord(
                    "dns.google",
                    dns.types.HTTPS,
                    dns.classes.IN,
                    32,
                    https_records.pack(record1),
                ),
                dns.ResourceRecord(
                    "dns.google",
                    dns.types.HTTPS,
                    dns.classes.IN,
                    32,
                    https_records.pack(record2),
                ),
            ]
            resp = tutils.tdnsresp(answers=answers)
            f = tflow.tdnsflow(resp=resp)
            tctx.configure(se, strip_ech=True)
            se.dns_response(f)
            assert all(
                answer.https_ech is None
                for answer in f.response.answers
                if answer.type == types.HTTPS
            )

    def test_strip_alpn(self):
        # With http3=False, "h3" must be removed from the ALPN SvcParam;
        # if nothing remains, the whole ALPN param is dropped.
        se = strip_dns_https_records.StripDnsHttpsRecords()
        with taddons.context(se) as tctx:
            record2 = https_records.HTTPSRecord(
                1,
                "example.com",
                {
                    SVCParamKeys.ALPN.value: b"\x02h2\x02h3",
                },
            )
            answers = [
                dns.ResourceRecord(
                    "dns.google",
                    dns.types.HTTPS,
                    dns.classes.IN,
                    32,
                    https_records.pack(record2),
                )
            ]
            f = tflow.tdnsflow(resp=tutils.tdnsresp(answers=answers))
            se.dns_response(f)
            # Default options: ALPN list untouched.
            assert f.response.answers[0].https_alpn == (b"h2", b"h3")
            tctx.configure(se, http3=False)
            se.dns_response(f)
            assert f.response.answers[0].https_alpn == (b"h2",)
            f.response.answers[0].https_alpn = [b"h3"]
            se.dns_response(f)
            assert f.response.answers[0].https_alpn is None
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_modifybody.py | test/mitmproxy/addons/test_modifybody.py | import pytest
from mitmproxy.addons import modifybody
from mitmproxy.addons import proxyserver
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test.tutils import tresp
class TestModifyBody:
    """Tests for the ModifyBody addon's option parsing and body rewriting."""

    def test_configure(self):
        # Valid specs are accepted; an empty/unparseable spec raises.
        mb = modifybody.ModifyBody()
        with taddons.context(mb) as tctx:
            tctx.configure(mb, modify_body=["one/two/three"])
            with pytest.raises(Exception, match="Cannot parse modify_body"):
                tctx.configure(mb, modify_body=["/"])

    def test_warn_conflict(self, caplog):
        # modify_body cannot touch bodies that are being streamed — the
        # addon must warn when both options are active.
        caplog.set_level("DEBUG")
        mb = modifybody.ModifyBody()
        with taddons.context(mb, proxyserver.Proxyserver()) as tctx:
            tctx.configure(mb, stream_large_bodies="3m", modify_body=["one/two/three"])
            assert "Streamed bodies will not be modified" in caplog.text

    def test_simple(self):
        # ~q / ~s filters restrict a rule to requests / responses respectively.
        mb = modifybody.ModifyBody()
        with taddons.context(mb) as tctx:
            tctx.configure(
                mb,
                modify_body=[
                    "/~q/foo/bar",
                    "/~s/foo/bar",
                ],
            )
            f = tflow.tflow()
            f.request.content = b"foo"
            mb.request(f)
            assert f.request.content == b"bar"
            f = tflow.tflow(resp=True)
            f.response.content = b"foo"
            mb.response(f)
            assert f.response.content == b"bar"

    @pytest.mark.parametrize("take", [True, False])
    def test_taken(self, take):
        # A flow that has already been "taken" (response set before the
        # request hook, or killed before the response hook) is left alone.
        mb = modifybody.ModifyBody()
        with taddons.context(mb) as tctx:
            tctx.configure(mb, modify_body=["/foo/bar"])
            f = tflow.tflow()
            f.request.content = b"foo"
            if take:
                f.response = tresp()
            mb.request(f)
            assert (f.request.content == b"bar") ^ take
            f = tflow.tflow(resp=True)
            f.response.content = b"foo"
            if take:
                f.kill()
            mb.response(f)
            assert (f.response.content == b"bar") ^ take

    def test_order(self):
        # Rules apply in order, each seeing the previous rule's output, and
        # earlier matches are not re-applied ("foo" -> "bar" -> "baz").
        mb = modifybody.ModifyBody()
        with taddons.context(mb) as tctx:
            tctx.configure(
                mb,
                modify_body=[
                    "/foo/bar",
                    "/bar/baz",
                    "/foo/oh noes!",
                    "/bar/oh noes!",
                ],
            )
            f = tflow.tflow()
            f.request.content = b"foo"
            mb.request(f)
            assert f.request.content == b"baz"
class TestModifyBodyFile:
    """Tests for the "@file" replacement syntax of the ModifyBody addon."""

    def test_simple(self, tmpdir):
        # "@<path>" loads the replacement text from a file.
        mb = modifybody.ModifyBody()
        with taddons.context(mb) as tctx:
            tmpfile = tmpdir.join("replacement")
            tmpfile.write("bar")
            tctx.configure(mb, modify_body=["/~q/foo/@" + str(tmpfile)])
            f = tflow.tflow()
            f.request.content = b"foo"
            mb.request(f)
            assert f.request.content == b"bar"

    async def test_nonexistent(self, tmpdir, caplog):
        # A missing file fails at configure time; a file deleted after
        # configuration is reported at request time instead of crashing.
        mb = modifybody.ModifyBody()
        with taddons.context(mb) as tctx:
            with pytest.raises(Exception, match="Invalid file path"):
                tctx.configure(mb, modify_body=["/~q/foo/@nonexistent"])
            tmpfile = tmpdir.join("replacement")
            tmpfile.write("bar")
            tctx.configure(mb, modify_body=["/~q/foo/@" + str(tmpfile)])
            tmpfile.remove()
            f = tflow.tflow()
            f.request.content = b"foo"
            mb.request(f)
            assert "Could not read" in caplog.text
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_tlsconfig.py | test/mitmproxy/addons/test_tlsconfig.py | import ipaddress
import logging
import ssl
import time
from pathlib import Path
import pytest
from cryptography import x509
from OpenSSL import SSL
from mitmproxy import certs
from mitmproxy import connection
from mitmproxy import options
from mitmproxy import tls
from mitmproxy.addons import tlsconfig
from mitmproxy.net import tls as net_tls
from mitmproxy.proxy import context
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.layers import quic
from mitmproxy.proxy.layers import tls as proxy_tls
from mitmproxy.test import taddons
from test.mitmproxy.proxy.layers import test_tls
from test.mitmproxy.proxy.layers.quic import test__stream_layers as test_quic
from test.mitmproxy.test_flow import tflow
def test_alpn_select_callback():
    """Exercise the server-side ALPN selection callback for all AppData
    combinations: addon-forced ALPN, server mirroring, and client preference."""
    ctx = SSL.Context(SSL.SSLv23_METHOD)
    conn = SSL.Connection(ctx)
    # Test that we respect addons setting `client.alpn`.
    conn.set_app_data(
        tlsconfig.AppData(server_alpn=b"h2", http2=True, client_alpn=b"qux")
    )
    assert tlsconfig.alpn_select_callback(conn, [b"http/1.1", b"qux", b"h2"]) == b"qux"
    # client_alpn=b"" means: an addon explicitly opted out of ALPN.
    conn.set_app_data(tlsconfig.AppData(server_alpn=b"h2", http2=True, client_alpn=b""))
    assert (
        tlsconfig.alpn_select_callback(conn, [b"http/1.1", b"qux", b"h2"])
        == SSL.NO_OVERLAPPING_PROTOCOLS
    )
    # Test that we try to mirror the server connection's ALPN
    conn.set_app_data(
        tlsconfig.AppData(server_alpn=b"h2", http2=True, client_alpn=None)
    )
    assert tlsconfig.alpn_select_callback(conn, [b"http/1.1", b"qux", b"h2"]) == b"h2"
    # Test that we respect the client's preferred HTTP ALPN.
    conn.set_app_data(tlsconfig.AppData(server_alpn=None, http2=True, client_alpn=None))
    assert (
        tlsconfig.alpn_select_callback(conn, [b"qux", b"http/1.1", b"h2"])
        == b"http/1.1"
    )
    assert tlsconfig.alpn_select_callback(conn, [b"qux", b"h2", b"http/1.1"]) == b"h2"
    # Test no overlap
    assert (
        tlsconfig.alpn_select_callback(conn, [b"qux", b"quux"])
        == SSL.NO_OVERLAPPING_PROTOCOLS
    )
    # Test that we don't select an ALPN if the server refused to select one.
    conn.set_app_data(tlsconfig.AppData(server_alpn=b"", http2=True, client_alpn=None))
    assert (
        tlsconfig.alpn_select_callback(conn, [b"http/1.1"])
        == SSL.NO_OVERLAPPING_PROTOCOLS
    )
# Directory of this test module — presumably used to locate test data
# files; unused in the visible portion of this file (verify before removing).
here = Path(__file__).parent
def _ctx(opts: options.Options) -> context.Context:
    """Create a proxy context for the given options with a canned client connection."""
    client = connection.Client(
        peername=("client", 1234),
        sockname=("127.0.0.1", 8080),
        timestamp_start=1605699329,
    )
    return context.Context(client, opts)
class TestTlsConfig:
    """Tests for the TlsConfig addon: option validation, certificate
    generation, client/server TLS and QUIC handshakes, ALPN negotiation,
    client certificates, and CRL serving."""

    def test_configure(self, tdata):
        # Invalid cert paths, ECDH curves, and cert formats must be rejected;
        # a valid PEM populates the cert store.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            with pytest.raises(Exception, match="file does not exist"):
                tctx.configure(ta, certs=["*=nonexistent"])
            with pytest.raises(Exception, match="Invalid ECDH curve"):
                tctx.configure(ta, tls_ecdh_curve_client="invalid")
            with pytest.raises(Exception, match="Invalid certificate format"):
                tctx.configure(
                    ta,
                    certs=[
                        tdata.path(
                            "mitmproxy/net/data/verificationcerts/trusted-leaf.key"
                        )
                    ],
                )
            assert not ta.certstore.certs
            tctx.configure(
                ta,
                certs=[
                    tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.pem")
                ],
            )
            assert ta.certstore.certs

    def test_configure_tls_version(self, caplog):
        # Requesting a TLS version the OpenSSL build doesn't support must log
        # a warning rather than fail.
        caplog.set_level(logging.INFO)
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            for attr in [
                "tls_version_client_min",
                "tls_version_client_max",
                "tls_version_server_min",
                "tls_version_server_max",
            ]:
                caplog.clear()
                tctx.configure(ta, **{attr: "SSL3"})
                assert (
                    f"{attr} has been set to SSL3, "
                    "which is not supported by the current OpenSSL build."
                ) in caplog.text
            caplog.clear()
            tctx.configure(ta, tls_version_client_min="UNBOUNDED")
            assert (
                "tls_version_client_min has been set to UNBOUNDED. "
                "Note that your OpenSSL build only supports the following TLS versions"
            ) in caplog.text

    def test_configure_ciphers(self, caplog):
        # Legacy TLS versions require @SECLEVEL=0 in the cipher string —
        # the addon must warn when it is missing.
        caplog.set_level(logging.INFO)
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            tctx.configure(
                ta,
                tls_version_client_min="TLS1",
                ciphers_client="ALL",
            )
            assert (
                "With tls_version_client_min set to TLS1, "
                'ciphers_client must include "@SECLEVEL=0" for insecure TLS versions to work.'
            ) in caplog.text
            caplog.clear()
            tctx.configure(
                ta,
                ciphers_server="ALL",
            )
            assert not caplog.text
            tctx.configure(
                ta,
                tls_version_server_min="SSL3",
            )
            assert (
                "With tls_version_server_min set to SSL3, "
                'ciphers_server must include "@SECLEVEL=0" for insecure TLS versions to work.'
            ) in caplog.text

    def test_get_cert(self, tdata):
        """Test that we generate a certificate matching the connection's context."""
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ta.configure(["confdir"])
            ctx = _ctx(tctx.options)
            # Edge case first: We don't have _any_ idea about the server nor is there a SNI,
            # so we just return our local IP as subject.
            entry = ta.get_cert(ctx)
            assert entry.cert.cn == "127.0.0.1"
            # Here we have an existing server connection...
            ctx.server.address = ("server-address.example", 443)
            with open(
                tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.crt"),
                "rb",
            ) as f:
                ctx.server.certificate_list = [certs.Cert.from_pem(f.read())]
            entry = ta.get_cert(ctx)
            assert entry.cert.cn == "example.mitmproxy.org"
            assert entry.cert.altnames == x509.GeneralNames(
                [
                    x509.DNSName("example.mitmproxy.org"),
                    x509.IPAddress(ipaddress.ip_address("127.0.0.1")),
                    x509.DNSName("server-address.example"),
                ]
            )
            # And now we also incorporate SNI.
            ctx.client.sni = "🌈.sni.example"
            entry = ta.get_cert(ctx)
            assert entry.cert.altnames == x509.GeneralNames(
                [
                    x509.DNSName("example.mitmproxy.org"),
                    x509.DNSName("xn--og8h.sni.example"),
                    x509.DNSName("server-address.example"),
                ]
            )
            with open(tdata.path("mitmproxy/data/invalid-subject.pem"), "rb") as f:
                ctx.server.certificate_list = [certs.Cert.from_pem(f.read())]
            with pytest.warns(UserWarning):
                assert ta.get_cert(ctx)  # does not raise

    def test_tls_clienthello(self):
        # only really testing for coverage here, there's no point in mirroring the individual conditions
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ch = tls.ClientHelloData(ctx, None)  # type: ignore
            ta.tls_clienthello(ch)
            assert not ch.establish_server_tls_first

    def do_handshake(
        self,
        tssl_client: test_tls.SSLTest | SSL.Connection,
        tssl_server: test_tls.SSLTest | SSL.Connection,
    ) -> bool:
        """Pump a full TLS handshake between the two in-memory connections,
        shuttling BIO bytes by hand. Returns True on completion."""
        # ClientHello
        with pytest.raises((ssl.SSLWantReadError, SSL.WantReadError)):
            tssl_client.do_handshake()
        tssl_server.bio_write(tssl_client.bio_read(65536))
        # ServerHello
        with pytest.raises((ssl.SSLWantReadError, SSL.WantReadError)):
            tssl_server.do_handshake()
        tssl_client.bio_write(tssl_server.bio_read(65536))
        # done
        tssl_client.do_handshake()
        tssl_server.bio_write(tssl_client.bio_read(65536))
        tssl_server.do_handshake()
        return True

    def quic_do_handshake(
        self,
        tssl_client: test_quic.SSLTest,
        tssl_server: test_quic.SSLTest,
    ) -> bool:
        """Pump a QUIC handshake between the two in-memory endpoints and
        report whether both sides completed it."""
        tssl_server.write(tssl_client.read())
        tssl_client.write(tssl_server.read())
        tssl_server.write(tssl_client.read())
        return tssl_client.handshake_completed() and tssl_server.handshake_completed()

    def test_tls_start_client(self, tdata):
        # The addon must set up a server-side SSL connection presenting the
        # configured leaf certificate, and not clobber a preexisting ssl_conn.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ta.configure(["confdir"])
            tctx.configure(
                ta,
                certs=[
                    tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.pem")
                ],
                ciphers_client="ECDHE-ECDSA-AES128-GCM-SHA256",
            )
            ctx = _ctx(tctx.options)
            tls_start = tls.TlsData(ctx.client, context=ctx)
            ta.tls_start_client(tls_start)
            tssl_server = tls_start.ssl_conn
            # assert that a preexisting ssl_conn is not overwritten
            ta.tls_start_client(tls_start)
            assert tssl_server is tls_start.ssl_conn
            tssl_client = test_tls.SSLTest()
            assert self.do_handshake(tssl_client, tssl_server)
            assert tssl_client.obj.getpeercert()["subjectAltName"] == (
                ("DNS", "example.mitmproxy.org"),
            )

    def test_quic_start_client(self, tdata):
        # QUIC analogue of test_tls_start_client: settings are populated once
        # and the configured leaf cert is presented to the client.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ta.configure(["confdir"])
            tctx.configure(
                ta,
                certs=[
                    tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.pem")
                ],
                ciphers_client="CHACHA20_POLY1305_SHA256",
            )
            ctx = _ctx(tctx.options)
            tls_start = quic.QuicTlsData(ctx.client, context=ctx)
            ta.quic_start_client(tls_start)
            settings_server = tls_start.settings
            settings_server.alpn_protocols = ["h3"]
            tssl_server = test_quic.SSLTest(server_side=True, settings=settings_server)
            # assert that a preexisting settings is not overwritten
            ta.quic_start_client(tls_start)
            assert settings_server is tls_start.settings
            tssl_client = test_quic.SSLTest(alpn=["h3"])
            assert self.quic_do_handshake(tssl_client, tssl_server)
            san = tssl_client.quic.tls._peer_certificate.extensions.get_extension_for_class(
                x509.SubjectAlternativeName
            )
            assert san.value.get_values_for_type(x509.DNSName) == [
                "example.mitmproxy.org"
            ]

    def test_tls_start_server_cannot_verify(self):
        # Without SNI (and with the address opt-out) hostname verification
        # is impossible and must raise.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = ("example.mitmproxy.org", 443)
            ctx.server.sni = ""  # explicitly opt out of using the address.
            tls_start = tls.TlsData(ctx.server, context=ctx)
            with pytest.raises(
                ValueError, match="Cannot validate certificate hostname without SNI"
            ):
                ta.tls_start_server(tls_start)

    def test_tls_start_server_verify_failed(self):
        # An untrusted upstream certificate must fail the handshake.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.client.alpn_offers = [b"h2"]
            ctx.client.cipher_list = ["TLS_AES_256_GCM_SHA384", "ECDHE-RSA-AES128-SHA"]
            ctx.server.address = ("example.mitmproxy.org", 443)
            tls_start = tls.TlsData(ctx.server, context=ctx)
            ta.tls_start_server(tls_start)
            tssl_client = tls_start.ssl_conn
            tssl_server = test_tls.SSLTest(server_side=True)
            with pytest.raises(SSL.Error, match="certificate verify failed"):
                assert self.do_handshake(tssl_client, tssl_server)

    @pytest.mark.parametrize("hostname", ["example.mitmproxy.org", "192.0.2.42"])
    def test_tls_start_server_verify_ok(self, hostname, tdata):
        # With the test CA trusted, verification succeeds for both DNS names
        # and IP addresses; a preexisting ssl_conn is not overwritten.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = (hostname, 443)
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-root.crt"
                ),
            )
            tls_start = tls.TlsData(ctx.server, context=ctx)
            ta.tls_start_server(tls_start)
            tssl_client = tls_start.ssl_conn
            # assert that a preexisting ssl_conn is not overwritten
            ta.tls_start_server(tls_start)
            assert tssl_client is tls_start.ssl_conn
            tssl_server = test_tls.SSLTest(server_side=True, sni=hostname.encode())
            assert self.do_handshake(tssl_client, tssl_server)

    @pytest.mark.parametrize("hostname", ["example.mitmproxy.org", "192.0.2.42"])
    def test_quic_start_server_verify_ok(self, hostname, tdata):
        # QUIC analogue of test_tls_start_server_verify_ok.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = (hostname, 443)
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-root.crt"
                ),
            )
            tls_start = quic.QuicTlsData(ctx.server, context=ctx)
            ta.quic_start_server(tls_start)
            settings_client = tls_start.settings
            settings_client.alpn_protocols = ["h3"]
            tssl_client = test_quic.SSLTest(settings=settings_client)
            # assert that a preexisting ssl_conn is not overwritten
            ta.quic_start_server(tls_start)
            assert settings_client is tls_start.settings
            tssl_server = test_quic.SSLTest(
                server_side=True, sni=hostname.encode(), alpn=["h3"]
            )
            assert self.quic_do_handshake(tssl_client, tssl_server)

    def test_tls_start_server_insecure(self):
        # ssl_insecure=True disables upstream verification entirely.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = ("example.mitmproxy.org", 443)
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=None,
                ssl_insecure=True,
                http2=False,
                ciphers_server="ALL",
            )
            tls_start = tls.TlsData(ctx.server, context=ctx)
            ta.tls_start_server(tls_start)
            tssl_client = tls_start.ssl_conn
            tssl_server = test_tls.SSLTest(server_side=True)
            assert self.do_handshake(tssl_client, tssl_server)

    def test_quic_start_server_insecure(self):
        # QUIC analogue of test_tls_start_server_insecure.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = ("example.mitmproxy.org", 443)
            ctx.client.alpn_offers = [b"h3"]
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=None,
                ssl_insecure=True,
                ciphers_server="CHACHA20_POLY1305_SHA256",
            )
            tls_start = quic.QuicTlsData(ctx.server, context=ctx)
            ta.quic_start_server(tls_start)
            tssl_client = test_quic.SSLTest(settings=tls_start.settings)
            tssl_server = test_quic.SSLTest(server_side=True, alpn=["h3"])
            assert self.quic_do_handshake(tssl_client, tssl_server)

    def test_alpn_selection(self):
        # The ALPN offers forwarded upstream depend on the http2 option and
        # on whether the client handshake already finished.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = ("example.mitmproxy.org", 443)
            tls_start = tls.TlsData(ctx.server, context=ctx)

            def assert_alpn(http2, client_offers, expected):
                tctx.configure(ta, http2=http2)
                ctx.client.alpn_offers = client_offers
                ctx.server.alpn_offers = None
                tls_start.ssl_conn = None
                ta.tls_start_server(tls_start)
                assert ctx.server.alpn_offers == expected

            assert_alpn(
                True,
                (proxy_tls.HTTP2_ALPN, *proxy_tls.HTTP1_ALPNS, b"foo"),
                (proxy_tls.HTTP2_ALPN, *proxy_tls.HTTP1_ALPNS, b"foo"),
            )
            assert_alpn(
                False,
                (proxy_tls.HTTP2_ALPN, *proxy_tls.HTTP1_ALPNS, b"foo"),
                (*proxy_tls.HTTP1_ALPNS, b"foo"),
            )
            assert_alpn(True, [], [])
            assert_alpn(False, [], [])
            ctx.client.timestamp_tls_setup = time.time()
            # make sure that we don't upgrade h1 to h2,
            # see comment in tlsconfig.py
            assert_alpn(True, [], [])

    def test_no_h2_proxy(self, tdata):
        """Do not negotiate h2 on the client<->proxy connection in secure web proxy mode,
        https://github.com/mitmproxy/mitmproxy/issues/4689"""
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            tctx.configure(
                ta,
                certs=[
                    tdata.path("mitmproxy/net/data/verificationcerts/trusted-leaf.pem")
                ],
            )
            ctx = _ctx(tctx.options)
            # mock up something that looks like a secure web proxy.
            ctx.layers = [modes.HttpProxy(ctx), 123]
            tls_start = tls.TlsData(ctx.client, context=ctx)
            ta.tls_start_client(tls_start)
            assert tls_start.ssl_conn.get_app_data()["client_alpn"] == b"http/1.1"

    @pytest.mark.parametrize(
        "client_certs",
        [
            "mitmproxy/net/data/verificationcerts/trusted-leaf.pem",
            "mitmproxy/net/data/verificationcerts/",
        ],
    )
    def test_client_cert_file(self, tdata, client_certs):
        # client_certs may be a file or a directory; either way the upstream
        # server must receive a client certificate.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ctx = _ctx(tctx.options)
            ctx.server.address = ("example.mitmproxy.org", 443)
            tctx.configure(
                ta,
                client_certs=tdata.path(client_certs),
                ssl_verify_upstream_trusted_ca=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-root.crt"
                ),
            )
            tls_start = tls.TlsData(ctx.server, context=ctx)
            ta.tls_start_server(tls_start)
            tssl_client = tls_start.ssl_conn
            tssl_server = test_tls.SSLTest(server_side=True)
            assert self.do_handshake(tssl_client, tssl_server)
            assert tssl_server.obj.getpeercert()

    async def test_ca_expired(self, monkeypatch, caplog):
        # An expired CA must be reported at configure time.
        monkeypatch.setattr(certs.Cert, "has_expired", lambda self: True)
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta):
            ta.configure(["confdir"])
            assert "The mitmproxy certificate authority has expired" in caplog.text

    @pytest.mark.parametrize(
        "cert,expect_crl",
        [
            pytest.param(
                "mitmproxy/net/data/verificationcerts/trusted-leaf.crt",
                True,
                id="with-crl",
            ),
            pytest.param(
                "mitmproxy/net/data/verificationcerts/trusted-root.crt",
                False,
                id="without-crl",
            ),
            pytest.param(
                "mitmproxy/net/data/verificationcerts/invalid-crl.crt",
                False,
                id="invalid-crl",
            ),
        ],
    )
    def test_crl_substitution(self, tdata, cert, expect_crl) -> None:
        # Generated certs mirror the upstream cert's CRL distribution points,
        # pointing them at the proxy's own CRL endpoint (invalid/missing
        # CRLs are dropped).
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta) as tctx:
            ta.configure(["confdir"])
            ctx = _ctx(tctx.options)
            with open(tdata.path(cert), "rb") as f:
                ctx.server.certificate_list = [certs.Cert.from_pem(f.read())]
            crt = ta.get_cert(ctx)
            if expect_crl:
                assert crt.cert.crl_distribution_points[0].endswith(ta.crl_path())
            else:
                assert not crt.cert.crl_distribution_points

    def test_crl_request(self):
        # Requests for the proxy's CRL path are answered directly (but only
        # on live flows); other paths are passed through untouched.
        ta = tlsconfig.TlsConfig()
        with taddons.context(ta):
            ta.configure(["confdir"])
            f = tflow.tflow(req=tflow.treq(path="/other.crl"))
            ta.request(f)
            assert not f.response
            f = tflow.tflow(req=tflow.treq(path=ta.crl_path()))
            ta.request(f)
            assert f.response
            f = tflow.tflow(req=tflow.treq(path=ta.crl_path()), live=False)
            ta.request(f)
            assert not f.response
def test_default_ciphers():
    """TLS 1.3 uses the standard cipher list; legacy/unbounded versions use @SECLEVEL=0."""
    expectations = {
        net_tls.Version.TLS1_3: tlsconfig._DEFAULT_CIPHERS,
        net_tls.Version.SSL3: tlsconfig._DEFAULT_CIPHERS_WITH_SECLEVEL_0,
        net_tls.Version.UNBOUNDED: tlsconfig._DEFAULT_CIPHERS_WITH_SECLEVEL_0,
    }
    for version, expected in expectations.items():
        assert tlsconfig._default_ciphers(version) == expected
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_mapremote.py | test/mitmproxy/addons/test_mapremote.py | import pytest
from mitmproxy.addons import mapremote
from mitmproxy.test import taddons
from mitmproxy.test import tflow
class TestMapRemote:
    """Tests for the map_remote addon, which rewrites outgoing request URLs via regex specs."""

    def test_configure(self):
        """Valid specs are accepted; an invalid regex is rejected at configure time."""
        mr = mapremote.MapRemote()
        with taddons.context(mr) as tctx:
            tctx.configure(mr, map_remote=["one/two/three"])
            with pytest.raises(Exception, match="Invalid regular expression"):
                tctx.configure(mr, map_remote=["/foo/+/three"])

    def test_simple(self):
        """A matching URL substring is replaced according to the spec."""
        mr = mapremote.MapRemote()
        with taddons.context(mr) as tctx:
            tctx.configure(
                mr,
                map_remote=[
                    ":example.org/images/:mitmproxy.org/img/",
                ],
            )
            f = tflow.tflow()
            f.request.url = b"https://example.org/images/test.jpg"
            mr.request(f)
            assert f.request.url == "https://mitmproxy.org/img/test.jpg"

    def test_host_header(self):
        """Rewriting the URL also updates an existing Host header."""
        mr = mapremote.MapRemote()
        with taddons.context(mr) as tctx:
            tctx.configure(mr, map_remote=["|http://[^/]+|http://example.com:4444"])
            f = tflow.tflow()
            f.request.url = b"http://example.org/example"
            f.request.headers["Host"] = "example.org"
            mr.request(f)
            assert f.request.headers.get("Host", "") == "example.com:4444"

    def test_is_killed(self):
        """Killed flows are left untouched."""
        mr = mapremote.MapRemote()
        with taddons.context(mr) as tctx:
            tctx.configure(mr, map_remote=[":example.org:mitmproxy.org"])
            f = tflow.tflow()
            f.request.url = b"https://example.org/images/test.jpg"
            f.kill()
            mr.request(f)
            assert f.request.url == "https://example.org/images/test.jpg"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_errorcheck.py | test/mitmproxy/addons/test_errorcheck.py | import logging
import pytest
from mitmproxy.addons.errorcheck import ErrorCheck
from mitmproxy.tools import main
@pytest.mark.parametrize("run_main", [main.mitmdump, main.mitmproxy])
def test_errorcheck(tdata, capsys, run_main):
    """Integration test: Make sure that we catch errors on startup and exit."""
    with pytest.raises(SystemExit):
        run_main(
            [
                "-n",
                "-s",
                tdata.path("mitmproxy/data/addonscripts/load_error.py"),
            ]
        )
    assert "Error logged during startup" in capsys.readouterr().err
async def test_no_error():
    """With nothing logged at error level, shutdown_if_errored and finish are no-ops."""
    checker = ErrorCheck()
    await checker.shutdown_if_errored()
    checker.finish()
async def test_error_message(capsys):
    """Errors logged before startup cause a SystemExit with a summary on stderr."""
    checker = ErrorCheck()
    for _ in range(2):
        logging.error("wat")
    with pytest.raises(SystemExit):
        await checker.shutdown_if_errored()
    captured = capsys.readouterr()
    assert "Errors logged during startup, exiting..." in captured.err
async def test_repeat_error_on_stderr(capsys):
    """With repeat_errors_on_stderr, the original error text is echoed before exiting."""
    checker = ErrorCheck(repeat_errors_on_stderr=True)
    logging.error("wat")
    with pytest.raises(SystemExit):
        await checker.shutdown_if_errored()
    captured = capsys.readouterr()
    assert "Error logged during startup:\nwat" in captured.err
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_anticomp.py | test/mitmproxy/addons/test_anticomp.py | from mitmproxy.addons import anticomp
from mitmproxy.test import taddons
from mitmproxy.test import tflow
class TestAntiComp:
    """Tests for the anticomp addon, which forces `Accept-Encoding: identity`."""

    def test_simple(self):
        """Once enabled, the request's Accept-Encoding header is replaced with identity."""
        addon = anticomp.AntiComp()
        with taddons.context(addon) as tctx:
            # Disabled by default: requests pass through untouched.
            addon.request(tflow.tflow(resp=True))

            tctx.configure(addon, anticomp=True)
            flow = tflow.tflow(resp=True)
            flow.request.headers["Accept-Encoding"] = "foobar"
            addon.request(flow)
            assert flow.request.headers["Accept-Encoding"] == "identity"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_core.py | test/mitmproxy/addons/test_core.py | import pytest
from mitmproxy import exceptions
from mitmproxy.addons import core
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_set():
    """The `set` command updates an option and rejects unknown option names."""
    sa = core.Core()
    with taddons.context(loadcore=False) as tctx:
        assert tctx.master.options.upstream_cert
        tctx.command(sa.set, "upstream_cert", "false")
        assert not tctx.master.options.upstream_cert
        with pytest.raises(exceptions.CommandError):
            tctx.command(sa.set, "nonexistent")
def test_resume():
    """resume() is a no-op for non-intercepted flows and releases intercepted ones."""
    core_addon = core.Core()
    with taddons.context(loadcore=False):
        flow = tflow.tflow()
        assert not core_addon.resume([flow])
        flow.intercept()
        core_addon.resume([flow])
        assert not flow.intercepted
def test_mark():
    """Flows can be marked, toggled, and invalid marker names are rejected."""
    sa = core.Core()
    with taddons.context(loadcore=False):
        f = tflow.tflow()
        assert not f.marked
        sa.mark([f], ":default:")
        assert f.marked
        with pytest.raises(exceptions.CommandError):
            sa.mark([f], "invalid")
        sa.mark_toggle([f])
        assert not f.marked
        sa.mark_toggle([f])
        assert f.marked
def test_kill():
    """kill() aborts an intercepted (killable) flow; afterwards it is no longer killable."""
    core_addon = core.Core()
    with taddons.context(loadcore=False):
        flow = tflow.tflow()
        flow.intercept()
        assert flow.killable
        core_addon.kill([flow])
        assert not flow.killable
def test_revert():
    """revert() restores a backed-up flow, clearing its modified state."""
    core_addon = core.Core()
    with taddons.context(loadcore=False):
        flow = tflow.tflow()
        flow.backup()
        flow.request.content = b"bar"
        assert flow.modified()
        core_addon.revert([flow])
        assert not flow.modified()
def test_flow_set():
    """flow_set updates request/response attributes and validates its inputs."""
    sa = core.Core()
    with taddons.context(loadcore=False):
        f = tflow.tflow(resp=True)
        assert sa.flow_set_options()
        assert f.request.method != "post"
        # Methods are normalized to uppercase.
        sa.flow_set([f], "method", "post")
        assert f.request.method == "POST"
        assert f.request.host != "testhost"
        sa.flow_set([f], "host", "testhost")
        assert f.request.host == "testhost"
        assert f.request.path != "/test/path"
        sa.flow_set([f], "path", "/test/path")
        assert f.request.path == "/test/path"
        assert f.request.url != "http://foo.com/bar"
        sa.flow_set([f], "url", "http://foo.com/bar")
        assert f.request.url == "http://foo.com/bar"
        with pytest.raises(exceptions.CommandError):
            sa.flow_set([f], "url", "oink")
        assert f.response.status_code != 404
        # Setting a known status code also sets the canonical reason phrase.
        sa.flow_set([f], "status_code", "404")
        assert f.response.status_code == 404
        assert f.response.reason == "Not Found"
        with pytest.raises(exceptions.CommandError):
            sa.flow_set([f], "status_code", "oink")
        assert f.response.reason != "foo"
        sa.flow_set([f], "reason", "foo")
        assert f.response.reason == "foo"
def test_encoding():
    """encode/decode/encode_toggle manipulate the message's content-encoding."""
    sa = core.Core()
    with taddons.context(loadcore=False):
        f = tflow.tflow()
        assert sa.encode_options()
        sa.encode([f], "request", "deflate")
        assert f.request.headers["content-encoding"] == "deflate"
        # Encoding an already-encoded message is a no-op.
        sa.encode([f], "request", "br")
        assert f.request.headers["content-encoding"] == "deflate"
        sa.decode([f], "request")
        assert "content-encoding" not in f.request.headers
        sa.encode([f], "request", "br")
        assert f.request.headers["content-encoding"] == "br"
        # Toggling flips between unencoded and deflate.
        sa.encode_toggle([f], "request")
        assert "content-encoding" not in f.request.headers
        sa.encode_toggle([f], "request")
        assert f.request.headers["content-encoding"] == "deflate"
        sa.encode_toggle([f], "request")
        assert "content-encoding" not in f.request.headers
def test_options(tmpdir):
    """Options can be reset (singly or wholesale), saved to and loaded from a file."""
    p = str(tmpdir.join("path"))
    sa = core.Core()
    with taddons.context() as tctx:
        tctx.options.listen_host = "foo"
        assert tctx.options.listen_host == "foo"
        sa.options_reset_one("listen_host")
        assert tctx.options.listen_host != "foo"
        with pytest.raises(exceptions.CommandError):
            sa.options_reset_one("unknown")
        tctx.options.listen_host = "foo"
        sa.options_save(p)
        with pytest.raises(exceptions.CommandError):
            sa.options_save("/")
        sa.options_reset()
        assert tctx.options.listen_host == ""
        sa.options_load(p)
        assert tctx.options.listen_host == "foo"
        # Loading a nonexistent file must not raise.
        sa.options_load("/nonexistent")
        # Corrupt the saved file: loading it is a CommandError.
        with open(p, "a") as f:
            f.write("'''")
        with pytest.raises(exceptions.CommandError):
            sa.options_load(p)
def test_validation_simple():
    """add_upstream_certs_to_client_chain requires upstream_cert to be enabled."""
    sa = core.Core()
    with taddons.context() as tctx:
        with pytest.raises(
            exceptions.OptionsError,
            match="requires the upstream_cert option to be enabled",
        ):
            tctx.configure(
                sa, add_upstream_certs_to_client_chain=True, upstream_cert=False
            )
def test_client_certs(tdata):
    """client_certs accepts a directory or a single PEM file; bad paths raise."""
    sa = core.Core()
    with taddons.context() as tctx:
        # Folders should work.
        tctx.configure(sa, client_certs=tdata.path("mitmproxy/data/clientcert"))
        # Files, too.
        tctx.configure(
            sa, client_certs=tdata.path("mitmproxy/data/clientcert/client.pem")
        )
        with pytest.raises(
            exceptions.OptionsError, match="certificate path does not exist"
        ):
            tctx.configure(sa, client_certs="invalid")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_termlog.py | test/mitmproxy/addons/test_termlog.py | import builtins
import io
import logging
import pytest
from mitmproxy.addons import termlog
from mitmproxy.test import taddons
from mitmproxy.utils import vt_codes
@pytest.fixture(autouse=True)
def ensure_cleanup():
    """After every test, no TermLogHandler may remain on the root logger."""
    yield
    assert not any(isinstance(x, termlog.TermLogHandler) for x in logging.root.handlers)
def test_output(capsys):
    """With termlog_verbosity=info, debug messages are suppressed and the rest printed."""
    logging.getLogger().setLevel(logging.DEBUG)
    t = termlog.TermLog()
    with taddons.context(t) as tctx:
        tctx.options.termlog_verbosity = "info"
        tctx.configure(t)
        logging.info("one")
        logging.debug("two")
        logging.warning("three")
        logging.error("four")
    out, err = capsys.readouterr()
    assert "one" in out
    assert "two" not in out
    assert "three" in out
    assert "four" in out
    t.uninstall()
async def test_styling(monkeypatch) -> None:
    """When the output stream supports VT codes, log lines are ANSI-colored."""
    monkeypatch.setattr(vt_codes, "ensure_supported", lambda _: True)
    f = io.StringIO()
    t = termlog.TermLog(out=f)
    with taddons.context(t) as tctx:
        tctx.configure(t)
        logging.warning("hello")
    # Warning text is wrapped in the yellow (SGR 33) escape sequence.
    assert "\x1b[33mhello\x1b[0m" in f.getvalue()
    t.uninstall()
async def test_cannot_print(monkeypatch) -> None:
    """If printing fails with OSError, logging exits the process with status 1."""
    def _raise(*args, **kwargs):
        raise OSError

    monkeypatch.setattr(builtins, "print", _raise)
    t = termlog.TermLog()
    with taddons.context(t) as tctx:
        tctx.configure(t)
        with pytest.raises(SystemExit) as exc_info:
            logging.info("Should not log this, but raise instead")
        assert exc_info.value.args[0] == 1
    t.uninstall()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_update_alt_svc.py | test/mitmproxy/addons/test_update_alt_svc.py | from mitmproxy import http
from mitmproxy.addons import update_alt_svc
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_simple():
    """Both the h3 and h2 authority ports in an Alt-Svc header are rewritten."""
    original = 'h3="example.com:443"; ma=3600, h2=":443"; ma=3600'
    rewritten = update_alt_svc.update_alt_svc_header(original, 1234)
    assert rewritten == 'h3=":1234"; ma=3600, h2=":1234"; ma=3600'
def test_updates_alt_svc_header():
    """Alt-Svc is rewritten only in reverse mode and only if keep_alt_svc_header is off."""
    upd = update_alt_svc.UpdateAltSvc()
    with taddons.context(upd) as ctx:
        headers = http.Headers(
            host="example.com",
            content_type="application/xml",
            alt_svc='h3="example.com:443"; ma=3600, h2=":443"; ma=3600',
        )
        resp = tflow.tresp(headers=headers)
        f = tflow.tflow(resp=resp)
        f.client_conn.sockname = ("", 1234)
        # Default proxy mode: header left untouched.
        upd.responseheaders(f)
        assert (
            f.response.headers["alt-svc"]
            == 'h3="example.com:443"; ma=3600, h2=":443"; ma=3600'
        )
        # Reverse mode but keep_alt_svc_header=True: still untouched.
        ctx.options.keep_alt_svc_header = True
        f.client_conn.proxy_mode = ProxyMode.parse("reverse:https://example.com")
        upd.responseheaders(f)
        assert (
            f.response.headers["alt-svc"]
            == 'h3="example.com:443"; ma=3600, h2=":443"; ma=3600'
        )
        # Reverse mode with rewriting enabled: ports replaced by the local port.
        ctx.options.keep_alt_svc_header = False
        upd.responseheaders(f)
        assert (
            f.response.headers["alt-svc"] == 'h3=":1234"; ma=3600, h2=":1234"; ma=3600'
        )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_export.py | test/mitmproxy/addons/test_export.py | import os
import shlex
from unittest import mock
import pyperclip
import pytest
from mitmproxy import exceptions
from mitmproxy.addons import export # heh
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
@pytest.fixture
def get_request():
    """A GET flow with a query string, one custom header, and an empty body."""
    return tflow.tflow(
        req=tutils.treq(
            method=b"GET",
            content=b"",
            path=b"/path?a=foo&a=bar&b=baz",
            headers=((b"header", b"qvalue"), (b"content-length", b"0")),
        )
    )
@pytest.fixture
def get_response():
    """A flow with a 404 response carrying a small text body."""
    return tflow.tflow(
        resp=tutils.tresp(status_code=404, content=b"Test Response Body")
    )
@pytest.fixture
def get_flow():
    """A GET flow with a query string and a 404 response."""
    return tflow.tflow(
        req=tutils.treq(method=b"GET", content=b"", path=b"/path?a=foo&a=bar&b=baz"),
        resp=tutils.tresp(status_code=404, content=b"Test Response Body"),
    )
@pytest.fixture
def post_request():
    """A POST flow whose body contains all 256 byte values (binary data)."""
    return tflow.tflow(
        req=tutils.treq(method=b"POST", headers=(), content=bytes(range(256)))
    )
@pytest.fixture
def patch_request():
    """A PATCH flow with a query parameter and a short text body."""
    return tflow.tflow(
        req=tutils.treq(method=b"PATCH", content=b"content", path=b"/path?query=param")
    )
@pytest.fixture
def tcp_flow():
    """A raw TCP test flow (no HTTP request/response)."""
    return tflow.ttcpflow()
@pytest.fixture
def udp_flow():
    """A raw UDP test flow (no HTTP request/response)."""
    return tflow.tudpflow()
@pytest.fixture
def websocket_flow():
    """An HTTP flow upgraded to a websocket connection with a few messages."""
    return tflow.twebsocketflow()
@pytest.fixture(scope="module")
def export_curl():
    """The curl_command export function, with an Export addon configured once per module."""
    e = export.Export()
    with taddons.context() as tctx:
        tctx.configure(e)
        yield export.curl_command
class TestExportCurlCommand:
    """Export of flows as curl command lines."""

    def test_get(self, export_curl, get_request):
        result = (
            """curl -H 'header: qvalue' 'http://address:22/path?a=foo&a=bar&b=baz'"""
        )
        assert export_curl(get_request) == result

    def test_post(self, export_curl, post_request):
        post_request.request.content = b"nobinarysupport"
        result = "curl -X POST http://address:22/path -d nobinarysupport"
        assert export_curl(post_request) == result

    def test_post_with_no_content_has_explicit_content_length_header(
        self, export_curl, post_request
    ):
        post_request.request.content = None
        result = "curl -H 'content-length: 0' -X POST http://address:22/path"
        assert export_curl(post_request) == result

    def test_fails_with_binary_data(self, export_curl, post_request):
        # shlex.quote doesn't support a bytes object
        # see https://github.com/python/cpython/pull/10871
        post_request.request.headers["Content-Type"] = "application/json; charset=utf-8"
        with pytest.raises(exceptions.CommandError):
            export_curl(post_request)

    def test_patch(self, export_curl, patch_request):
        result = """curl -H 'header: qvalue' -X PATCH 'http://address:22/path?query=param' -d content"""
        assert export_curl(patch_request) == result

    def test_tcp(self, export_curl, tcp_flow):
        # Raw TCP flows cannot be exported as curl commands.
        with pytest.raises(exceptions.CommandError):
            export_curl(tcp_flow)

    def test_udp(self, export_curl, udp_flow):
        with pytest.raises(exceptions.CommandError):
            export_curl(udp_flow)

    def test_escape_single_quotes_in_body(self, export_curl):
        request = tflow.tflow(
            req=tutils.treq(method=b"POST", headers=(), content=b"'&#")
        )
        command = export_curl(request)
        assert shlex.split(command)[-2] == "-d"
        assert shlex.split(command)[-1] == "'&#"

    def test_expand_escaped(self, export_curl, post_request):
        # Non-printable bytes are emitted via a printf command substitution.
        post_request.request.content = b"foo\nbar"
        result = "curl -X POST http://address:22/path -d \"$(printf 'foo\\x0abar')\""
        assert export_curl(post_request) == result

    def test_no_expand_when_no_escaped(self, export_curl, post_request):
        post_request.request.content = b"foobar"
        result = "curl -X POST http://address:22/path -d foobar"
        assert export_curl(post_request) == result

    def test_strip_unnecessary(self, export_curl, get_request):
        # Redundant host/:authority headers are stripped; accept-encoding
        # becomes curl's --compressed flag.
        get_request.request.headers.clear()
        get_request.request.headers["host"] = "address"
        get_request.request.headers[":authority"] = "address"
        get_request.request.headers["accept-encoding"] = "br"
        result = """curl --compressed 'http://address:22/path?a=foo&a=bar&b=baz'"""
        assert export_curl(get_request) == result

    # This tests that we always specify the original host in the URL, which is
    # important for SNI. If option `export_preserve_original_ip` is true, we
    # ensure that we still connect to the same IP by using curl's `--resolve`
    # option.
    def test_correct_host_used(self, get_request):
        e = export.Export()
        with taddons.context() as tctx:
            tctx.configure(e)
            get_request.request.headers["host"] = "domain:22"
            result = """curl -H 'header: qvalue' -H 'host: domain:22' 'http://domain:22/path?a=foo&a=bar&b=baz'"""
            assert export.curl_command(get_request) == result
            tctx.options.export_preserve_original_ip = True
            result = (
                """curl --resolve 'domain:22:[192.168.0.1]' -H 'header: qvalue' -H 'host: domain:22' """
                """'http://domain:22/path?a=foo&a=bar&b=baz'"""
            )
            assert export.curl_command(get_request) == result
class TestExportHttpieCommand:
    """Export of flows as httpie command lines."""

    def test_get(self, get_request):
        result = (
            """http GET 'http://address:22/path?a=foo&a=bar&b=baz' 'header: qvalue'"""
        )
        assert export.httpie_command(get_request) == result

    def test_post(self, post_request):
        post_request.request.content = b"nobinarysupport"
        result = "http POST http://address:22/path <<< nobinarysupport"
        assert export.httpie_command(post_request) == result

    def test_fails_with_binary_data(self, post_request):
        # shlex.quote doesn't support a bytes object
        # see https://github.com/python/cpython/pull/10871
        post_request.request.headers["Content-Type"] = "application/json; charset=utf-8"
        with pytest.raises(exceptions.CommandError):
            export.httpie_command(post_request)

    def test_patch(self, patch_request):
        result = """http PATCH 'http://address:22/path?query=param' 'header: qvalue' <<< content"""
        assert export.httpie_command(patch_request) == result

    def test_tcp(self, tcp_flow):
        # Raw TCP flows cannot be exported as httpie commands.
        with pytest.raises(exceptions.CommandError):
            export.httpie_command(tcp_flow)

    def test_udp(self, udp_flow):
        with pytest.raises(exceptions.CommandError):
            export.httpie_command(udp_flow)

    def test_escape_single_quotes_in_body(self):
        request = tflow.tflow(
            req=tutils.treq(method=b"POST", headers=(), content=b"'&#")
        )
        command = export.httpie_command(request)
        assert shlex.split(command)[-2] == "<<<"
        assert shlex.split(command)[-1] == "'&#"

    # See comment in `TestExportCurlCommand.test_correct_host_used`. httpie
    # currently doesn't have a way of forcing connection to a particular IP, so
    # the command-line may not always reproduce the original request, in case
    # the host is resolved to a different IP address.
    #
    # httpie tracking issue: https://github.com/httpie/httpie/issues/414
    def test_correct_host_used(self, get_request):
        get_request.request.headers["host"] = "domain:22"
        result = (
            """http GET 'http://domain:22/path?a=foo&a=bar&b=baz' """
            """'header: qvalue' 'host: domain:22'"""
        )
        assert export.httpie_command(get_request) == result
class TestRaw:
    """Raw export of whole flows (request and/or response, or websocket messages)."""

    def test_req_and_resp_present(self, get_flow):
        assert b"header: qvalue" in export.raw(get_flow)
        assert b"header-response: svalue" in export.raw(get_flow)

    def test_get_request_present(self, get_request):
        assert b"header: qvalue" in export.raw(get_request)
        assert b"content-length: 0" in export.raw_request(get_request)

    def test_get_response_present(self, get_response):
        get_response.request.content = None
        assert b"header-response: svalue" in export.raw(get_response)

    def test_tcp(self, tcp_flow):
        with pytest.raises(
            exceptions.CommandError,
            match="Can't export flow with no request or response",
        ):
            export.raw(tcp_flow)

    def test_udp(self, udp_flow):
        with pytest.raises(
            exceptions.CommandError,
            match="Can't export flow with no request or response",
        ):
            export.raw(udp_flow)

    def test_websocket(self, websocket_flow):
        # Both binary and text websocket messages appear in the export.
        assert b"hello binary" in export.raw(websocket_flow)
        assert b"hello text" in export.raw(websocket_flow)
        assert b"it's me" in export.raw(websocket_flow)
class TestRawRequest:
    """Raw export of the request part only."""

    def test_get(self, get_request):
        assert b"header: qvalue" in export.raw_request(get_request)
        assert b"content-length: 0" in export.raw_request(get_request)

    def test_no_content(self, get_request):
        get_request.request.content = None
        with pytest.raises(exceptions.CommandError):
            export.raw_request(get_request)

    def test_tcp(self, tcp_flow):
        with pytest.raises(exceptions.CommandError):
            export.raw_request(tcp_flow)

    def test_udp(self, udp_flow):
        with pytest.raises(exceptions.CommandError):
            export.raw_request(udp_flow)
class TestRawResponse:
    """Raw export of the response part only."""

    def test_get(self, get_response):
        assert b"header-response: svalue" in export.raw_response(get_response)

    def test_no_content(self, get_response):
        get_response.response.content = None
        with pytest.raises(exceptions.CommandError):
            export.raw_response(get_response)

    def test_tcp(self, tcp_flow):
        with pytest.raises(exceptions.CommandError):
            export.raw_response(tcp_flow)

    def test_udp(self, udp_flow):
        with pytest.raises(exceptions.CommandError):
            export.raw_response(udp_flow)

    def test_head_non_zero_content_length(self):
        # A HEAD response may declare a content-length while having an empty
        # body; the declared header must survive the export unchanged.
        request = tflow.tflow(
            req=tutils.treq(method=b"HEAD"),
            resp=tutils.tresp(headers=((b"content-length", b"7"),), content=b""),
        )
        assert b"content-length: 7" in export.raw_response(request)
def qr(f):
    """Return the raw byte content of the file at path *f*."""
    with open(f, "rb") as handle:
        data = handle.read()
    return data
def test_export(tmp_path) -> None:
    """Every registered export format writes non-empty output to a file.

    Unknown formats raise a CommandError. The repeated export/assert/unlink
    sequence from the original is folded into a loop over the HTTP formats.
    """
    f = tmp_path / "outfile"
    e = export.Export()
    with taddons.context() as tctx:
        tctx.configure(e)
        assert e.formats() == ["curl", "httpie", "raw", "raw_request", "raw_response"]
        with pytest.raises(exceptions.CommandError):
            e.file("nonexistent", tflow.tflow(resp=True), f)
        for fmt in ("raw_request", "raw_response", "curl", "httpie"):
            e.file(fmt, tflow.tflow(resp=True), f)
            assert qr(f)
            os.unlink(f)
        # "raw" also supports non-HTTP flows such as websocket flows.
        e.file("raw", tflow.twebsocketflow(), f)
        assert qr(f)
        os.unlink(f)
@pytest.mark.parametrize(
    "exception, log_message",
    [
        (PermissionError, "Permission denied"),
        (IsADirectoryError, "Is a directory"),
        (FileNotFoundError, "No such file or directory"),
    ],
)
def test_export_open(exception, log_message, tmpdir, caplog):
    """OS errors while opening the output file are logged rather than raised."""
    f = str(tmpdir.join("path"))
    e = export.Export()
    with mock.patch("mitmproxy.addons.export.open") as m:
        m.side_effect = exception(log_message)
        e.file("raw_request", tflow.tflow(resp=True), f)
        assert log_message in caplog.text
def test_export_str(tmpdir, caplog):
    """Test that string exports return a str without any UTF-8 surrogates."""
    e = export.Export()
    with taddons.context(e):
        f = tflow.tflow()
        # Mixed encodings in header values: the latin1 bytes are not valid UTF-8.
        f.request.headers.fields = (
            (b"utf8-header", "é".encode("utf-8")),
            (b"latin1-header", "é".encode("latin1")),
        )
        # ensure that we have no surrogates in the return value
        assert e.export_str("curl", f).encode("utf8", errors="strict")
        assert e.export_str("raw", f).encode("utf8", errors="strict")
def test_clip(tmpdir, caplog):
    """clip() copies the export to the clipboard; clipboard failures are logged, not raised.

    The four identical mock.patch stanzas from the original are folded into a
    loop over the format names.
    """
    e = export.Export()
    with taddons.context() as tctx:
        tctx.configure(e)
        with pytest.raises(exceptions.CommandError):
            e.clip("nonexistent", tflow.tflow(resp=True))
        for fmt in ("raw_request", "raw_response", "curl", "httpie"):
            with mock.patch("pyperclip.copy") as pc:
                e.clip(fmt, tflow.tflow(resp=True))
                assert pc.called
        with mock.patch("pyperclip.copy") as pc:
            log_message = (
                "Pyperclip could not find a copy/paste mechanism for your system."
            )
            # A clipboard failure must be reported via logging, not raised.
            pc.side_effect = pyperclip.PyperclipException(log_message)
            e.clip("raw_request", tflow.tflow(resp=True))
            assert log_message in caplog.text
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_blocklist.py | test/mitmproxy/addons/test_blocklist.py | import pytest
from mitmproxy.addons import blocklist
from mitmproxy.exceptions import OptionsError
from mitmproxy.test import taddons
from mitmproxy.test import tflow
@pytest.mark.parametrize(
    "filter,err",
    [
        ("/~u index.html/TOOMANY/300", "Invalid number of parameters"),
        (":~d ~d ~d:200", "Invalid filter"),
        ("/~u index.html/abc", "Invalid HTTP status code"),
    ],
)
def test_parse_spec_err(filter, err):
    """Malformed block_list specs raise ValueError with a descriptive message."""
    with pytest.raises(ValueError, match=err):
        blocklist.parse_spec(filter)
class TestBlockList:
    """Tests for the blocklist addon, which answers matching requests with a fixed status."""

    @pytest.mark.parametrize(
        "filter,request_url,status_code",
        [
            (":~u example.org:404", b"https://example.org/images/test.jpg", 404),
            (":~u example.com:404", b"https://example.org/images/test.jpg", None),
            (":~u test:404", b"https://example.org/images/TEST.jpg", 404),
            ("/!jpg/418", b"https://example.org/images/test.jpg", None),
            ("/!png/418", b"https://example.org/images/test.jpg", 418),
            ("|~u /DATA|500", b"https://example.org/DATA", 500),
            ("|~u /ASSETS|501", b"https://example.org/assets", 501),
            ("|~u /ping|201", b"https://example.org/PING", 201),
        ],
    )
    def test_block(self, filter, request_url, status_code):
        """Matching flows get the configured status code; non-matching pass through."""
        bl = blocklist.BlockList()
        with taddons.context(bl) as tctx:
            tctx.configure(bl, block_list=[filter])
            f = tflow.tflow()
            f.request.url = request_url
            bl.request(f)
            if status_code is not None:
                assert f.response.status_code == status_code
                assert f.metadata["blocklisted"]
            else:
                assert not f.response

    def test_uppercase_header_values(self):
        """A filter matching an uppercase header value blocks the request."""
        bl = blocklist.BlockList()
        with taddons.context(bl) as tctx:
            tctx.configure(bl, block_list=["|~hq Cookie:\\sfoo=BAR|403"])
            f = tflow.tflow()
            f.request.url = "https://example.org/robots.txt"
            f.request.headers["Cookie"] = "foo=BAR; key1=value1"
            bl.request(f)
            assert f.response.status_code == 403
            assert f.metadata["blocklisted"]

    def test_mixedcase_header_names(self):
        """A filter with a mixed-case header name still matches a lowercase header."""
        bl = blocklist.BlockList()
        with taddons.context(bl) as tctx:
            tctx.configure(bl, block_list=["|~hq User-Agent:\\scurl|401"])
            f = tflow.tflow()
            f.request.url = "https://example.org/products/123"
            f.request.headers["user-agent"] = "curl/8.11.1"
            bl.request(f)
            assert f.response

    def test_special_kill_status_closes_connection(self):
        """Status code 444 kills the flow instead of sending a response."""
        bl = blocklist.BlockList()
        with taddons.context(bl) as tctx:
            tctx.configure(bl, block_list=[":.*:444"])
            f = tflow.tflow()
            bl.request(f)
            assert f.error.msg == f.error.KILLED_MESSAGE
            assert f.response is None
            assert f.metadata["blocklisted"] is True

    def test_already_handled(self):
        """Test that we don't interfere if another addon already killed this request."""
        bl = blocklist.BlockList()
        with taddons.context(bl) as tctx:
            tctx.configure(bl, block_list=["/.*/404"])
            f = tflow.tflow()
            f.kill()  # done by another addon.
            bl.request(f)
            assert not f.response

    def test_configure_err(self):
        """Invalid specs raise OptionsError at configure time."""
        bl = blocklist.BlockList()
        with taddons.context(bl) as tctx:
            with pytest.raises(OptionsError):
                tctx.configure(bl, block_list=["lalelu"])
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_view.py | test/mitmproxy/addons/test_view.py | import pytest
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import io
from mitmproxy.addons import view
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.tools.console import consoleaddons
from mitmproxy.tools.console.common import render_marker
from mitmproxy.tools.console.common import SYMBOL_MARK
def tft(*, method="get", start=0):
    """Create a test HTTP flow with the given request method and creation timestamp."""
    flow = tflow.tflow()
    flow.request.method = method
    flow.timestamp_created = start
    return flow
def test_order_refresh():
    """Updating a flow's sort key fires the view's refresh signal."""
    v = view.View()
    sargs = []

    def save(*args, **kwargs):
        sargs.extend([args, kwargs])

    v.sig_view_refresh.connect(save)
    tf = tflow.tflow(resp=True)
    with taddons.context() as tctx:
        tctx.configure(v, view_order="time")
        v.add([tf])
        tf.timestamp_created = 10
        assert not sargs
        v.update([tf])
        assert sargs
def test_order_generators_http():
    """Order key generators yield start time, method, URL and total size for HTTP flows."""
    v = view.View()
    tf = tflow.tflow(resp=True)
    rs = view.OrderRequestStart(v)
    assert rs.generate(tf) == 946681200
    rm = view.OrderRequestMethod(v)
    assert rm.generate(tf) == tf.request.method
    ru = view.OrderRequestURL(v)
    assert ru.generate(tf) == tf.request.url
    # Size key is the combined request and response body size.
    sz = view.OrderKeySize(v)
    assert sz.generate(tf) == len(tf.request.raw_content) + len(tf.response.raw_content)
def test_order_generators_dns():
    """Order key generators also work for DNS flows (method "QUERY", URL is the qname)."""
    v = view.View()
    tf = tflow.tdnsflow(resp=True)
    rs = view.OrderRequestStart(v)
    assert rs.generate(tf) == 946681200
    rm = view.OrderRequestMethod(v)
    assert rm.generate(tf) == "QUERY"
    ru = view.OrderRequestURL(v)
    assert ru.generate(tf) == "dns.google"
    sz = view.OrderKeySize(v)
    assert sz.generate(tf) == tf.response.size
    # Without a response, the size key falls back to 0.
    tf = tflow.tdnsflow(resp=False)
    assert sz.generate(tf) == 0
def order_generators_proto(tf, name):
    """Shared assertions for order key generators on raw TCP/UDP flows."""
    v = view.View()
    rs = view.OrderRequestStart(v)
    assert rs.generate(tf) == 946681200
    rm = view.OrderRequestMethod(v)
    assert rm.generate(tf) == name
    ru = view.OrderRequestURL(v)
    assert ru.generate(tf) == "address:22"
    # Size key is the total payload of all messages on the flow.
    sz = view.OrderKeySize(v)
    assert sz.generate(tf) == sum(len(m.content) for m in tf.messages)
def test_order_generators_tcp():
    """Order key generators work for TCP flows."""
    order_generators_proto(tflow.ttcpflow(), "TCP")
def test_order_generators_udp():
    """Order key generators work for UDP flows."""
    order_generators_proto(tflow.tudpflow(), "UDP")
def test_simple():
    """Core view behaviour: ordered insertion, id lookup, dedupe, marking, clearing."""
    v = view.View()
    f = tft(start=1)
    assert v.store_count() == 0
    v.requestheaders(f)
    assert list(v) == [f]
    assert v.get_by_id(f.id)
    assert not v.get_by_id("nonexistent")

    # These all just call update
    v.error(f)
    v.response(f)
    v.intercept(f)
    v.resume(f)
    v.kill(f)
    assert list(v) == [f]

    # Re-adding an existing flow does not duplicate it.
    v.requestheaders(f)
    assert list(v) == [f]
    assert len(v._store) == 1
    assert v.store_count() == 1

    f2 = tft(start=3)
    v.requestheaders(f2)
    assert list(v) == [f, f2]
    v.requestheaders(f2)
    assert list(v) == [f, f2]
    assert len(v._store) == 2
    assert v.inbounds(0)
    assert not v.inbounds(-1)
    assert not v.inbounds(100)

    # f3 starts between f and f2 and is sorted into the middle.
    f3 = tft(start=2)
    v.requestheaders(f3)
    assert list(v) == [f, f3, f2]
    v.requestheaders(f3)
    assert list(v) == [f, f3, f2]
    assert len(v._store) == 3

    f.marked = not f.marked
    f2.marked = not f2.marked
    v.clear_not_marked()
    assert list(v) == [f, f2]
    assert len(v) == 2
    assert len(v._store) == 2

    v.clear()
    assert len(v) == 0
    assert len(v._store) == 0
def test_simple_tcp():
    """TCP lifecycle hooks add the flow once; subsequent hooks only update it."""
    v = view.View()
    f = tflow.ttcpflow()
    assert v.store_count() == 0
    v.tcp_start(f)
    assert list(v) == [f]

    # These all just call update
    v.tcp_start(f)
    v.tcp_message(f)
    v.tcp_error(f)
    v.tcp_end(f)
    assert list(v) == [f]
def test_simple_udp():
    """UDP lifecycle hooks add the flow once; subsequent hooks only update it."""
    v = view.View()
    f = tflow.tudpflow()
    assert v.store_count() == 0
    v.udp_start(f)
    assert list(v) == [f]

    # These all just call update
    v.udp_start(f)
    v.udp_message(f)
    v.udp_error(f)
    v.udp_end(f)
    assert list(v) == [f]
def test_simple_dns():
    """DNS lifecycle hooks add the flow once; subsequent hooks only update it."""
    v = view.View()
    f = tflow.tdnsflow(resp=True, err=True)
    assert v.store_count() == 0
    v.dns_request(f)
    assert list(v) == [f]

    # These all just call update
    v.dns_request(f)
    v.dns_response(f)
    v.dns_error(f)
    assert list(v) == [f]
def test_filter():
    """set_filter limits the visible flows while the store keeps all of them."""
    v = view.View()
    v.requestheaders(tft(method="get"))
    v.requestheaders(tft(method="put"))
    v.requestheaders(tft(method="get"))
    v.requestheaders(tft(method="put"))
    assert (len(v)) == 4
    v.set_filter_cmd("~m get")
    assert [i.request.method for i in v] == ["GET", "GET"]
    assert len(v._store) == 4
    v.set_filter(None)
    assert len(v) == 4
    # toggle_marked shows only marked flows (none yet), toggling back restores all.
    v.toggle_marked()
    assert len(v) == 0
    v.toggle_marked()
    assert len(v) == 4
    with pytest.raises(exceptions.CommandError):
        v.set_filter_cmd("~notafilter regex")
    v[1].marked = True
    v.toggle_marked()
    assert len(v) == 1
    assert v[0].marked
    v.toggle_marked()
    assert len(v) == 4
def tdump(path, flows):
    """Serialize *flows* to the file at *path* using mitmproxy's flow writer."""
    with open(path, "wb") as fp:
        writer = io.FlowWriter(fp)
        for flow in flows:
            writer.add(flow)
def test_create():
    """view.create builds GET flows from a URL and rejects invalid URLs."""
    v = view.View()
    with taddons.context():
        v.create("get", "http://foo.com")
        assert len(v) == 1
        # The URL is normalized with a trailing slash.
        assert v[0].request.url == "http://foo.com/"
        v.create("get", "http://foo.com")
        assert len(v) == 2
        with pytest.raises(exceptions.CommandError, match="Invalid URL"):
            v.create("get", "http://foo.com\\")
        with pytest.raises(exceptions.CommandError, match="Invalid URL"):
            v.create("get", "http://")
def test_orders():
    """The view exposes a non-empty set of ordering options."""
    v = view.View()
    with taddons.context(v):
        assert v.order_options()
async def test_load(tmpdir, caplog):
    """load_file appends flows, tolerates missing files, and logs bad data."""
    path = str(tmpdir.join("path"))
    v = view.View()
    tdump(path, [tflow.tflow(resp=True), tflow.tflow(resp=True)])
    v.load_file(path)
    assert len(v) == 2
    # Loading the same file again appends rather than replaces.
    v.load_file(path)
    assert len(v) == 4
    # A missing file must be handled gracefully, not propagate OSError.
    try:
        v.load_file("nonexistent_file_path")
    except OSError:
        # pytest.fail instead of `assert False`: not stripped under -O and
        # gives a meaningful failure message (flake8-bugbear B011).
        pytest.fail("load_file should swallow OSError for missing files")
    with open(path, "wb") as f:
        f.write(b"invalidflows")
    v.load_file(path)
    assert "Invalid data format." in caplog.text
def test_resolve():
    """v.resolve maps flow-spec strings (@all, @focus, filters, ids) to flow lists."""
    v = view.View()
    with taddons.context() as tctx:
        f = tft(method="get")
        # On an empty view every specifier resolves to nothing.
        assert tctx.command(v.resolve, "@all") == []
        assert tctx.command(v.resolve, "@focus") == []
        assert tctx.command(v.resolve, "@shown") == []
        assert tctx.command(v.resolve, "@hidden") == []
        assert tctx.command(v.resolve, "@marked") == []
        assert tctx.command(v.resolve, "@unmarked") == []
        assert tctx.command(v.resolve, f"@{f.id}") == []
        assert tctx.command(v.resolve, "~m get") == []
        v.requestheaders(f)
        assert len(tctx.command(v.resolve, "~m get")) == 1
        assert len(tctx.command(v.resolve, "@focus")) == 1
        assert len(tctx.command(v.resolve, "@all")) == 1
        assert len(tctx.command(v.resolve, "@shown")) == 1
        assert len(tctx.command(v.resolve, "@unmarked")) == 1
        assert len(tctx.command(v.resolve, f"@{f.id}")) == 1
        assert tctx.command(v.resolve, "@hidden") == []
        assert tctx.command(v.resolve, "@marked") == []
        v.requestheaders(tft(method="put"))
        # Resolving by flow id still returns exactly that one flow.
        assert len(tctx.command(v.resolve, f"@{f.id}")) == 1
        assert len(tctx.command(v.resolve, "@focus")) == 1
        assert len(tctx.command(v.resolve, "@shown")) == 2
        assert len(tctx.command(v.resolve, "@all")) == 2
        assert tctx.command(v.resolve, "@hidden") == []
        assert tctx.command(v.resolve, "@marked") == []
        v.requestheaders(tft(method="get"))
        v.requestheaders(tft(method="put"))
        f = flowfilter.parse("~m get")
        v.set_filter(f)
        v[0].marked = True

        def methods(flows):
            # Shorthand: list of request methods for readable assertions.
            return [i.request.method for i in flows]

        # With the "~m get" filter active: GETs are shown, PUTs are hidden.
        assert methods(tctx.command(v.resolve, "~m get")) == ["GET", "GET"]
        assert methods(tctx.command(v.resolve, "~m put")) == ["PUT", "PUT"]
        assert methods(tctx.command(v.resolve, "@shown")) == ["GET", "GET"]
        assert methods(tctx.command(v.resolve, "@hidden")) == ["PUT", "PUT"]
        assert methods(tctx.command(v.resolve, "@marked")) == ["GET"]
        assert methods(tctx.command(v.resolve, "@unmarked")) == ["PUT", "GET", "PUT"]
        assert methods(tctx.command(v.resolve, "@all")) == ["GET", "PUT", "GET", "PUT"]
        with pytest.raises(exceptions.CommandError, match="Invalid filter expression"):
            tctx.command(v.resolve, "~")
def test_movement():
    """Focus navigation clamps to the view bounds and survives clearing."""
    flows = view.View()
    with taddons.context():
        flows.go(0)  # no-op on an empty view
        flows.add([tflow.tflow() for _ in range(5)])
        assert flows.focus.index == 0
        flows.go(-1)
        assert flows.focus.index == 4
        flows.go(0)
        assert flows.focus.index == 0
        flows.go(1)
        assert flows.focus.index == 1
        flows.go(999)  # clamped to the last entry
        assert flows.focus.index == 4
        flows.go(-999)  # clamped to the first entry
        assert flows.focus.index == 0
        flows.focus_next()
        assert flows.focus.index == 1
        flows.focus_prev()
        assert flows.focus.index == 0
        # After clearing, navigation has nothing to focus.
        flows.clear()
        flows.focus_next()
        assert flows.focus.index is None
        flows.focus_prev()
        assert flows.focus.index is None
def test_duplicate():
    """Duplicating flows doubles the view and focuses the first copy."""
    dup_view = view.View()
    with taddons.context():
        originals = [tflow.tflow(), tflow.tflow()]
        dup_view.add(originals)
        assert len(dup_view) == 2
        dup_view.duplicate(originals)
        assert len(dup_view) == 4
        assert dup_view.focus.index == 2
def test_remove():
    """Removing every added flow leaves the view empty."""
    rm_view = view.View()
    with taddons.context():
        flows = [tflow.tflow(), tflow.tflow()]
        rm_view.add(flows)
        assert len(rm_view) == 2
        rm_view.remove(flows)
        assert len(rm_view) == 0
def test_setgetval():
    """Per-flow annotations can be set, read back, and toggled."""
    ann_view = view.View()
    with taddons.context():
        flow = tflow.tflow()
        ann_view.add([flow])
        ann_view.setvalue([flow], "key", "value")
        assert ann_view.getvalue(flow, "key", "default") == "value"
        # Unknown keys fall back to the supplied default.
        assert ann_view.getvalue(flow, "unknow", "default") == "default"
        # Toggling flips between the strings "true" and "false".
        ann_view.setvalue_toggle([flow], "key")
        assert ann_view.getvalue(flow, "key", "default") == "true"
        ann_view.setvalue_toggle([flow], "key")
        assert ann_view.getvalue(flow, "key", "default") == "false"
def test_order():
    """Ordering keys and the reversed flag together control iteration order."""
    v = view.View()
    v.requestheaders(tft(method="get", start=1))
    v.requestheaders(tft(method="put", start=2))
    v.requestheaders(tft(method="get", start=3))
    v.requestheaders(tft(method="put", start=4))
    # Default order is creation time.
    assert [i.timestamp_created for i in v] == [1, 2, 3, 4]
    v.set_order("method")
    assert v.get_order() == "method"
    assert [i.request.method for i in v] == ["GET", "GET", "PUT", "PUT"]
    v.set_reversed(True)
    assert [i.request.method for i in v] == ["PUT", "PUT", "GET", "GET"]
    v.set_order("time")
    assert v.get_order() == "time"
    # Still reversed, so newest first.
    assert [i.timestamp_created for i in v] == [4, 3, 2, 1]
    v.set_reversed(False)
    assert [i.timestamp_created for i in v] == [1, 2, 3, 4]
    with pytest.raises(exceptions.CommandError):
        v.set_order("not_an_order")
def test_reversed():
    """Reversed views index from the newest flow and support negative indices."""
    v = view.View()
    v.requestheaders(tft(start=1))
    v.requestheaders(tft(start=2))
    v.requestheaders(tft(start=3))
    v.set_reversed(True)
    assert v[0].timestamp_created == 3
    assert v[-1].timestamp_created == 1
    assert v[2].timestamp_created == 1
    # Out-of-range indices raise in both directions.
    with pytest.raises(IndexError):
        v[5]
    with pytest.raises(IndexError):
        v[-5]
    # _bisect returns the insertion point just past the given flow.
    assert v._bisect(v[0]) == 1
    assert v._bisect(v[2]) == 3
def test_update():
    """Updating a flow re-evaluates it against the active filter."""
    filtered = view.View()
    filtered.set_filter(flowfilter.parse("~m get"))
    flow = tft(method="get")
    filtered.requestheaders(flow)
    assert flow in filtered
    # No longer matches the filter -> dropped from the view.
    flow.request.method = "put"
    filtered.update([flow])
    assert flow not in filtered
    # Matches again -> re-admitted; a second update is a no-op.
    flow.request.method = "get"
    filtered.update([flow])
    assert flow in filtered
    filtered.update([flow])
    assert flow in filtered
class Record:
    """Callable stub that records every invocation as an (args, kwargs) pair."""

    def __init__(self):
        # One (args, kwargs) tuple per call, in call order.
        self.calls = []

    def __bool__(self):
        # Truthy once it has been called at least once.
        return len(self.calls) > 0

    def __repr__(self):
        return repr(self.calls)

    def __call__(self, *args, **kwargs):
        self.calls.append((args, kwargs))
def test_signals():
    """Each view mutation fires exactly the matching signal and no others."""
    v = view.View()
    rec_add = Record()
    rec_update = Record()
    rec_remove = Record()
    rec_refresh = Record()

    def clearrec():
        # Reset all four recorders between scenarios.
        rec_add.calls = []
        rec_update.calls = []
        rec_remove.calls = []
        rec_refresh.calls = []

    v.sig_view_add.connect(rec_add)
    v.sig_view_update.connect(rec_update)
    v.sig_view_remove.connect(rec_remove)
    v.sig_view_refresh.connect(rec_refresh)
    assert not any([rec_add, rec_update, rec_remove, rec_refresh])
    # Simple add
    v.add([tft()])
    assert rec_add
    assert not any([rec_update, rec_remove, rec_refresh])
    # Filter change triggers refresh
    clearrec()
    v.set_filter(flowfilter.parse("~m put"))
    assert rec_refresh
    assert not any([rec_update, rec_add, rec_remove])
    v.set_filter(flowfilter.parse("~m get"))
    # An update that makes a flow stop matching the filter removes it from the view
    clearrec()
    v[0].request.method = "PUT"
    v.update([v[0]])
    assert rec_remove
    assert not any([rec_update, rec_refresh, rec_add])
    # An update that does not affect the view just sends update
    v.set_filter(flowfilter.parse("~m put"))
    clearrec()
    v.update([v[0]])
    assert rec_update
    assert not any([rec_remove, rec_refresh, rec_add])
    # An update for a flow in state but not view does not do anything
    f = v[0]
    v.set_filter(flowfilter.parse("~m get"))
    assert not len(v)
    clearrec()
    v.update([f])
    assert not any([rec_add, rec_update, rec_remove, rec_refresh])
def test_focus_follow():
    """With console_focus_follow, focus tracks the newest flow that matches the filter."""
    v = view.View()
    with taddons.context(v) as tctx:
        console_addon = consoleaddons.ConsoleAddon(tctx.master)
        tctx.configure(console_addon)
        tctx.configure(v, console_focus_follow=True, view_filter="~m get")
        v.add([tft(start=5)])
        assert v.focus.index == 0
        # An older flow is inserted before the current one; focus follows it.
        v.add([tft(start=4)])
        assert v.focus.index == 0
        assert v.focus.flow.timestamp_created == 4
        v.add([tft(start=7)])
        assert v.focus.index == 2
        assert v.focus.flow.timestamp_created == 7
        # A PUT does not match the "~m get" filter, so focus stays put.
        mod = tft(method="put", start=6)
        v.add([mod])
        assert v.focus.index == 2
        assert v.focus.flow.timestamp_created == 7
        # Once it becomes a GET it enters the view and takes focus.
        mod.request.method = "GET"
        v.update([mod])
        assert v.focus.index == 2
        assert v.focus.flow.timestamp_created == 6
def test_focus():
    """Focus stays on the same flow across adds/removes and filter changes."""
    # Special case - initialising with a view that already contains data
    v = view.View()
    v.add([tft()])
    f = view.Focus(v)
    assert f.index == 0
    assert f.flow is v[0]

    # Start empty
    v = view.View()
    f = view.Focus(v)
    assert f.index is None
    assert f.flow is None
    v.add([tft(start=1)])
    assert f.index == 0
    assert f.flow is v[0]

    # Try to set to something not in view
    with pytest.raises(ValueError):
        f.__setattr__("flow", tft())
    with pytest.raises(ValueError):
        f.__setattr__("index", 99)

    # Inserting an older flow shifts the focused flow's index, not its identity.
    v.add([tft(start=0)])
    assert f.index == 1
    assert f.flow is v[1]
    v.add([tft(start=2)])
    assert f.index == 1
    assert f.flow is v[1]
    f.index = 0
    assert f.index == 0
    f.index = 1
    # Removing the focused flow moves focus to a neighbour.
    v.remove([v[1]])
    v[1].intercept()
    assert f.index == 1
    assert f.flow is v[1]
    v.remove([v[1]])
    assert f.index == 0
    assert f.flow is v[0]
    v.remove([v[0]])
    assert f.index is None
    assert f.flow is None
    v.add(
        [
            tft(method="get", start=0),
            tft(method="get", start=1),
            tft(method="put", start=2),
            tft(method="get", start=3),
        ]
    )
    f.flow = v[2]
    assert f.flow.request.method == "PUT"
    # Filtering out the focused flow snaps focus to a remaining flow ...
    filt = flowfilter.parse("~m get")
    v.set_filter(filt)
    assert f.index == 2
    # ... and an all-excluding filter clears it entirely.
    filt = flowfilter.parse("~m oink")
    v.set_filter(filt)
    assert f.index is None
def test_settings():
    """Settings are keyed by flow and discarded when the flow leaves the view."""
    sview = view.View()
    flow = tft()
    # Unknown flows have no settings entry.
    with pytest.raises(KeyError):
        sview.settings[flow]
    sview.add([flow])
    sview.settings[flow]["foo"] = "bar"
    assert sview.settings[flow]["foo"] == "bar"
    assert len(list(sview.settings)) == 1
    # Removing the flow drops its settings too.
    sview.remove([flow])
    with pytest.raises(KeyError):
        sview.settings[flow]
    assert not sview.settings.keys()
    sview.add([flow])
    sview.settings[flow]["foo"] = "bar"
    assert sview.settings.keys()
    # Clearing the view clears all settings.
    sview.clear()
    assert not sview.settings.keys()
def test_properties():
    """get_length reflects the visible view; get_marked tracks the toggle."""
    pview = view.View()
    flow = tft()
    pview.requestheaders(flow)
    assert pview.get_length() == 1
    assert not pview.get_marked()
    pview.toggle_marked()
    # Nothing is marked, so the marked-only view is empty.
    assert pview.get_length() == 0
    assert pview.get_marked()
def test_configure():
    """Option validation: bad filters and orders raise, good values apply."""
    cview = view.View()
    with taddons.context(cview) as tctx:
        tctx.configure(cview, view_filter="~q")
        with pytest.raises(Exception, match="Invalid filter expression"):
            tctx.configure(cview, view_filter="~~")
        tctx.configure(cview, view_order="method")
        with pytest.raises(Exception, match="Unknown flow order"):
            tctx.configure(cview, view_order="no")
        tctx.configure(cview, view_order_reversed=True)
        tctx.configure(cview, console_focus_follow=True)
        assert cview.focus_follow
@pytest.mark.parametrize(
    "marker, expected",
    [
        [":default:", SYMBOL_MARK],
        ["X", "X"],
        [":grapes:", "\N{GRAPES}"],
        [":not valid:", SYMBOL_MARK],
        [":weird", SYMBOL_MARK],
    ],
)
def test_marker(marker, expected):
    """Known :emoji: shortcodes render; anything unknown falls back to SYMBOL_MARK."""
    assert render_marker(marker) == expected
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_server_side_events.py | test/mitmproxy/addons/test_server_side_events.py | from mitmproxy.addons.server_side_events import ServerSideEvents
from mitmproxy.test.tflow import tflow
async def test_simple(caplog):
    """Responses declaring text/event-stream trigger the SSE warning log."""
    addon = ServerSideEvents()
    flow = tflow(resp=True)
    flow.response.headers["content-type"] = "text/event-stream"
    addon.response(flow)
    assert "mitmproxy currently does not support server side events" in caplog.text
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_save.py | test/mitmproxy/addons/test_save.py | import pytest
from mitmproxy import exceptions
from mitmproxy import io
from mitmproxy.addons import save
from mitmproxy.addons import view
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_configure(tmp_path):
    """save_stream_file must not be a directory; save_stream_filter must parse."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        # A directory is not a valid stream file target.
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(sa, save_stream_file=str(tmp_path))
        with pytest.raises(Exception, match="Invalid filter"):
            tctx.configure(
                sa, save_stream_file=str(tmp_path / "foo"), save_stream_filter="~~"
            )
        tctx.configure(sa, save_stream_filter="foo")
        assert sa.filt
        # Unsetting the filter option clears the parsed filter.
        tctx.configure(sa, save_stream_filter=None)
        assert not sa.filt
def rd(p):
    """Read all flows back from the capture file at *p*."""
    with open(p, "rb") as fp:
        return list(io.FlowReader(fp).stream())
def test_tcp(tmp_path):
    """Both completed and errored TCP flows end up in the stream file."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        target = str(tmp_path / "foo")
        tctx.configure(sa, save_stream_file=target)
        finished = tflow.ttcpflow()
        sa.tcp_start(finished)
        sa.tcp_end(finished)
        errored = tflow.ttcpflow()
        sa.tcp_start(errored)
        sa.tcp_error(errored)
        # Unsetting the option closes the file, flushing pending flows.
        tctx.configure(sa, save_stream_file=None)
        assert len(rd(target)) == 2
def test_udp(tmp_path):
    """Both completed and errored UDP flows end up in the stream file."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        target = str(tmp_path / "foo")
        tctx.configure(sa, save_stream_file=target)
        finished = tflow.tudpflow()
        sa.udp_start(finished)
        sa.udp_end(finished)
        errored = tflow.tudpflow()
        sa.udp_start(errored)
        sa.udp_error(errored)
        # Unsetting the option closes the file, flushing pending flows.
        tctx.configure(sa, save_stream_file=None)
        assert len(rd(target)) == 2
def test_dns(tmp_path):
    """DNS flows are streamed with response/error state; '+' appends to the file."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        p = str(tmp_path / "foo")
        tctx.configure(sa, save_stream_file=p)
        f = tflow.tdnsflow(resp=True)
        sa.dns_request(f)
        sa.dns_response(f)
        tctx.configure(sa, save_stream_file=None)
        assert rd(p)[0].response
        # "+"-prefix appends instead of truncating.
        tctx.configure(sa, save_stream_file="+" + p)
        f = tflow.tdnsflow(err=True)
        sa.dns_request(f)
        sa.dns_error(f)
        tctx.configure(sa, save_stream_file=None)
        assert rd(p)[1].error
        tctx.configure(sa, save_stream_file="+" + p)
        f = tflow.tdnsflow()
        sa.dns_request(f)
        tctx.configure(sa, save_stream_file=None)
        # A request-only flow is written without a response.
        assert not rd(p)[2].response
        # A response for a never-streamed flow writes nothing.
        f = tflow.tdnsflow()
        sa.dns_response(f)
        assert len(rd(p)) == 3
def test_websocket(tmp_path):
    """WebSocket flows are written once their connection ends."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        target = str(tmp_path / "foo")
        tctx.configure(sa, save_stream_file=target)
        for _ in range(2):
            flow = tflow.twebsocketflow()
            sa.request(flow)
            sa.websocket_end(flow)
        tctx.configure(sa, save_stream_file=None)
        assert len(rd(target)) == 2
def test_save_command(tmp_path):
    """save.file overwrites by default, appends with a '+' prefix, rejects dirs."""
    sa = save.Save()
    with taddons.context() as tctx:
        p = str(tmp_path / "foo")
        sa.save([tflow.tflow(resp=True)], p)
        assert len(rd(p)) == 1
        # A plain path truncates the previous contents.
        sa.save([tflow.tflow(resp=True)], p)
        assert len(rd(p)) == 1
        # A "+"-prefixed path appends instead.
        sa.save([tflow.tflow(resp=True)], "+" + p)
        assert len(rd(p)) == 2
        with pytest.raises(exceptions.CommandError):
            sa.save([tflow.tflow(resp=True)], str(tmp_path))
        v = view.View()
        tctx.master.addons.add(v)
        tctx.master.addons.add(sa)
        # f-string instead of old-style "%" formatting, matching file style.
        tctx.master.commands.execute(f"save.file @shown {p}")
def test_simple(tmp_path):
    """HTTP flows are streamed with response/error state; '+' appends to the file."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        p = str(tmp_path / "foo")
        tctx.configure(sa, save_stream_file=p)
        f = tflow.tflow(resp=True)
        sa.request(f)
        sa.response(f)
        tctx.configure(sa, save_stream_file=None)
        assert rd(p)[0].response
        # "+"-prefix appends instead of truncating.
        tctx.configure(sa, save_stream_file="+" + p)
        f = tflow.tflow(err=True)
        sa.request(f)
        sa.error(f)
        tctx.configure(sa, save_stream_file=None)
        assert rd(p)[1].error
        tctx.configure(sa, save_stream_file="+" + p)
        f = tflow.tflow()
        sa.request(f)
        tctx.configure(sa, save_stream_file=None)
        # A request-only flow is written without a response.
        assert not rd(p)[2].response
        # A response for a never-streamed flow writes nothing.
        f = tflow.tflow()
        sa.response(f)
        assert len(rd(p)) == 3
def test_rotate_stream(tmp_path):
    """Rotating the stream file mid-flow routes each flow to its own file."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        tctx.configure(sa, save_stream_file=str(tmp_path / "a.txt"))
        first = tflow.tflow(resp=True)
        second = tflow.tflow(resp=True)
        sa.request(first)
        sa.response(first)
        # The second request starts before the rotation happens.
        sa.request(second)
        tctx.configure(sa, save_stream_file=str(tmp_path / "b.txt"))
        sa.response(second)
        sa.done()
        assert len(rd(tmp_path / "a.txt")) == 1
        assert len(rd(tmp_path / "b.txt")) == 1
def test_disk_full(tmp_path, monkeypatch, capsys):
    """A write failure while streaming must abort mitmproxy with an error."""
    sa = save.Save()
    with taddons.context(sa) as tctx:
        tctx.configure(sa, save_stream_file=str(tmp_path / "foo.txt"))

        def explode(*_):
            raise OSError("wat")

        # Simulate a full disk on the next write attempt.
        monkeypatch.setattr(sa, "maybe_rotate_to_new_file", explode)
        flow = tflow.tflow(resp=True)
        sa.request(flow)
        with pytest.raises(SystemExit):
            sa.response(flow)
        assert "Error while writing" in capsys.readouterr().err
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_next_layer.py | test/mitmproxy/addons/test_next_layer.py | from __future__ import annotations
import dataclasses
import logging
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from unittest.mock import MagicMock
import pytest
from mitmproxy.addons.next_layer import _starts_like_quic
from mitmproxy.addons.next_layer import NeedsMoreData
from mitmproxy.addons.next_layer import NextLayer
from mitmproxy.addons.next_layer import stack_match
from mitmproxy.connection import Address
from mitmproxy.connection import Client
from mitmproxy.connection import TlsVersion
from mitmproxy.connection import TransportProtocol
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layer import Layer
from mitmproxy.proxy.layers import ClientQuicLayer
from mitmproxy.proxy.layers import ClientTLSLayer
from mitmproxy.proxy.layers import DNSLayer
from mitmproxy.proxy.layers import HttpLayer
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.layers import RawQuicLayer
from mitmproxy.proxy.layers import ServerQuicLayer
from mitmproxy.proxy.layers import ServerTLSLayer
from mitmproxy.proxy.layers import TCPLayer
from mitmproxy.proxy.layers import UDPLayer
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.layers.http import HttpStream
from mitmproxy.proxy.layers.tls import HTTP1_ALPNS
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.test import taddons
client_hello_no_extensions = bytes.fromhex(
"1603030065" # record header
"01000061" # handshake header
"03015658a756ab2c2bff55f636814deac086b7ca56b65058c7893ffc6074f5245f70205658a75475103a152637"
"78e1bb6d22e8bbd5b6b0a3a59760ad354e91ba20d353001a0035002f000a000500040009000300060008006000"
"61006200640100"
)
client_hello_with_extensions = bytes.fromhex(
"16030300bb" # record layer
"010000b7" # handshake layer
"03033b70638d2523e1cba15f8364868295305e9c52aceabda4b5147210abc783e6e1000022c02bc02fc02cc030"
"cca9cca8cc14cc13c009c013c00ac014009c009d002f0035000a0100006cff0100010000000010000e00000b65"
"78616d706c652e636f6d0017000000230000000d00120010060106030501050304010403020102030005000501"
"00000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a00080006001d00"
"170018"
)
dtls_client_hello_with_extensions = bytes.fromhex(
"16fefd00000000000000000085" # record layer
"010000790000000000000079" # handshake layer
"fefd62bf0e0bf809df43e7669197be831919878b1a72c07a584d3c0a8ca6665878010000000cc02bc02fc00ac014c02cc0"
"3001000043000d0010000e0403050306030401050106010807ff01000100000a00080006001d00170018000b00020100001"
"7000000000010000e00000b6578616d706c652e636f6d"
)
quic_client_hello = bytes.fromhex(
"ca0000000108c0618c84b54541320823fcce946c38d8210044e6a93bbb283593f75ffb6f2696b16cfdcb5b1255"
"577b2af5fc5894188c9568bc65eef253faf7f0520e41341cfa81d6aae573586665ce4e1e41676364820402feec"
"a81f3d22dbb476893422069066104a43e121c951a08c53b83f960becf99cf5304d5bc5346f52f472bd1a04d192"
"0bae025064990d27e5e4c325ac46121d3acadebe7babdb96192fb699693d65e2b2e21c53beeb4f40b50673a2f6"
"c22091cb7c76a845384fedee58df862464d1da505a280bfef91ca83a10bebbcb07855219dbc14aecf8a48da049"
"d03c77459b39d5355c95306cd03d6bdb471694fa998ca3b1f875ce87915b88ead15c5d6313a443f39aad808922"
"57ddfa6b4a898d773bb6fb520ede47ebd59d022431b1054a69e0bbbdf9f0fb32fc8bcc4b6879dd8cd5389474b1"
"99e18333e14d0347740a11916429a818bb8d93295d36e99840a373bb0e14c8b3adcf5e2165e70803f15316fd5e"
"5eeec04ae68d98f1adb22c54611c80fcd8ece619dbdf97b1510032ec374b7a71f94d9492b8b8cb56f56556dd97"
"edf1e50fa90e868ff93636a365678bdf3ee3f8e632588cd506b6f44fbfd4d99988238fbd5884c98f6a124108c1"
"878970780e42b111e3be6215776ef5be5a0205915e6d720d22c6a81a475c9e41ba94e4983b964cb5c8e1f40607"
"76d1d8d1adcef7587ea084231016bd6ee2643d11a3a35eb7fe4cca2b3f1a4b21e040b0d426412cca6c4271ea63"
"fb54ed7f57b41cd1af1be5507f87ea4f4a0c997367e883291de2f1b8a49bdaa52bae30064351b1139703400730"
"18a4104344ec6b4454b50a42e804bc70e78b9b3c82497273859c82ed241b643642d76df6ceab8f916392113a62"
"b231f228c7300624d74a846bec2f479ab8a8c3461f91c7bf806236e3bd2f54ba1ef8e2a1e0bfdde0c5ad227f7d"
"364c52510b1ade862ce0c8d7bd24b6d7d21c99b34de6d177eb3d575787b2af55060d76d6c2060befbb7953a816"
"6f66ad88ecf929dbb0ad3a16cf7dfd39d925e0b4b649c6d0c07ad46ed0229c17fb6a1395f16e1b138aab3af760"
"2b0ac762c4f611f7f3468997224ffbe500a7c53f92f65e41a3765a9f1d7e3f78208f5b4e147962d8c97d6c1a80"
"91ffc36090b2043d71853616f34c2185dc883c54ab6d66e10a6c18e0b9a4742597361f8554a42da3373241d0c8"
"54119bfadccffaf2335b2d97ffee627cb891bda8140a39399f853da4859f7e19682e152243efbaffb662edd19b"
"3819a74107c7dbe05ecb32e79dcdb1260f153b1ef133e978ccca3d9e400a7ed6c458d77e2956d2cb897b7a298b"
"fe144b5defdc23dfd2adf69f1fb0917840703402d524987ae3b1dcb85229843c9a419ef46e1ba0ba7783f2a2ec"
"d057a57518836aef2a7839ebd3688da98b54c942941f642e434727108d59ea25875b3050ca53d4637c76cbcbb9"
"e972c2b0b781131ee0a1403138b55486fe86bbd644920ee6aa578e3bab32d7d784b5c140295286d90c99b14823"
"1487f7ea64157001b745aa358c9ea6bec5a8d8b67a7534ec1f7648ff3b435911dfc3dff798d32fbf2efe2c1fcc"
"278865157590572387b76b78e727d3e7682cb501cdcdf9a0f17676f99d9aa67f10edccc9a92080294e88bf28c2"
"a9f32ae535fdb27fff7706540472abb9eab90af12b2bea005da189874b0ca69e6ae1690a6f2adf75be3853c94e"
"fd8098ed579c20cb37be6885d8d713af4ba52958cee383089b98ed9cb26e11127cf88d1b7d254f15f7903dd7ed"
"297c0013924e88248684fe8f2098326ce51aa6e5"
)
quic_short_header_packet = bytes.fromhex(
"52e23539dde270bb19f7a8b63b7bcf3cdacf7d3dc68a7e00318bfa2dac3bad12cb7d78112efb5bcb1ee8e0b347"
"641cccd2736577d0178b4c4c4e97a8e9e2af1d28502e58c4882223e70c4d5124c4b016855340e982c5c453d61d"
"7d0720be075fce3126de3f0d54dc059150e0f80f1a8db5e542eb03240b0a1db44a322fb4fd3c6f2e054b369e14"
"5a5ff925db617d187ec65a7f00d77651968e74c1a9ddc3c7fab57e8df821b07e103264244a3a03d17984e29933"
)
# Wire-format DNS query for example.com (the hex ends in type 0x0001 / class 0x0001).
dns_query = bytes.fromhex("002a01000001000000000000076578616d706c6503636f6d0000010001")

# Custom protocol with just base64-encoded messages
# https://github.com/mitmproxy/mitmproxy/pull/7087
custom_base64_proto = b"AAAAAAAAAAAAAAAAAAAAAA==\n"

# Canned HTTP/1.1 request payloads used as client-side data in the tests below.
http_get = b"GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
http_get_absolute = b"GET http://example.com/ HTTP/1.1\r\n\r\n"
http_connect = b"CONNECT example.com:443 HTTP/1.1\r\nHost: example.com:443\r\n\r\n"
class TestNextLayer:
@pytest.mark.parametrize(
"ignore, allow, transport_protocol, server_address, data_client, result",
[
# ignore
pytest.param(
[], [], "tcp", "example.com", b"", False, id="nothing ignored"
),
pytest.param(
["example.com"], [], "tcp", "example.com", b"", True, id="address"
),
pytest.param(
["192.0.2.1"], [], "tcp", "example.com", b"", True, id="ip address"
),
pytest.param(
["2001:db8::1"],
[],
"tcp",
"ipv6.example.com",
b"",
True,
id="ipv6 address",
),
pytest.param(
["example.com:443"],
[],
"tcp",
"example.com",
b"",
True,
id="port matches",
),
pytest.param(
["example.com:123"],
[],
"tcp",
"example.com",
b"",
False,
id="port does not match",
),
pytest.param(
["example.com"],
[],
"tcp",
"192.0.2.1",
http_get,
True,
id="http host header",
),
pytest.param(
["example.com"],
[],
"tcp",
"192.0.2.1",
http_get.replace(b"Host", b"X-Host"),
False,
id="http host header missing",
),
pytest.param(
["example.com"],
[],
"tcp",
"192.0.2.1",
http_get.split(b"\r\n", 1)[0],
NeedsMoreData,
id="incomplete http host header",
),
pytest.param(
["example.com"],
[],
"tcp",
"com",
b"",
False,
id="partial address match",
),
pytest.param(
["example.com"],
[],
"tcp",
None,
b"",
False,
id="no destination info",
),
pytest.param(
["example.com"],
[],
"tcp",
None,
client_hello_no_extensions,
False,
id="no sni",
),
pytest.param(
["example.com"],
[],
"tcp",
"192.0.2.1",
client_hello_with_extensions,
True,
id="sni",
),
pytest.param(
["example.com"],
[],
"tcp",
"192.0.2.1",
client_hello_with_extensions[:-5],
NeedsMoreData,
id="incomplete client hello",
),
pytest.param(
["example.com"],
[],
"tcp",
"192.0.2.1",
client_hello_no_extensions[:9] + b"\x00" * 200,
False,
id="invalid client hello",
),
pytest.param(
["example.com"],
[],
"tcp",
"decoy",
client_hello_with_extensions,
True,
id="sni mismatch",
),
pytest.param(
["example.com"],
[],
"udp",
"192.0.2.1",
dtls_client_hello_with_extensions,
True,
id="dtls sni",
),
pytest.param(
["example.com"],
[],
"udp",
"192.0.2.1",
dtls_client_hello_with_extensions[:-5],
NeedsMoreData,
id="incomplete dtls client hello",
),
pytest.param(
["example.com"],
[],
"udp",
"192.0.2.1",
dtls_client_hello_with_extensions[:9] + b"\x00" * 200,
False,
id="invalid dtls client hello",
),
pytest.param(
["example.com"],
[],
"udp",
"192.0.2.1",
quic_client_hello,
True,
id="quic sni",
),
# allow
pytest.param(
[],
["example.com"],
"tcp",
"example.com",
b"",
False,
id="allow: allow",
),
pytest.param(
[],
["example.com"],
"tcp",
"example.org",
b"",
True,
id="allow: ignore",
),
pytest.param(
[],
["example.com"],
"tcp",
"192.0.2.1",
client_hello_with_extensions,
False,
id="allow: sni",
),
pytest.param(
[],
["existing-sni.example"],
"tcp",
"192.0.2.1",
b"",
False,
id="allow: sni from parent layer",
),
pytest.param(
[],
["example.com"],
"tcp",
"decoy",
client_hello_with_extensions,
False,
id="allow: sni mismatch",
),
# allow with ignore
pytest.param(
["binary.example.com"],
["example.com"],
"tcp",
"example.com",
b"",
False,
id="allow+ignore: allowed and not ignored",
),
pytest.param(
["binary.example.com"],
["example.com"],
"tcp",
"binary.example.org",
b"",
True,
id="allow+ignore: allowed but ignored",
),
],
)
    def test_ignore_connection(
        self,
        ignore: list[str],
        allow: list[str],
        transport_protocol: TransportProtocol,
        server_address: str,
        data_client: bytes,
        result: bool | type[NeedsMoreData],
    ):
        """Check NextLayer._ignore_connection for one parametrized scenario.

        *result* is either the expected boolean or the NeedsMoreData type,
        in which case the call is expected to raise.
        """
        nl = NextLayer()
        with taddons.context(nl) as tctx:
            if ignore:
                tctx.configure(nl, ignore_hosts=ignore)
            if allow:
                tctx.configure(nl, allow_hosts=allow)
            ctx = Context(
                Client(
                    peername=("192.168.0.42", 51234),
                    sockname=("0.0.0.0", 8080),
                    sni="existing-sni.example",
                ),
                tctx.options,
            )
            ctx.client.transport_protocol = transport_protocol
            if server_address:
                ctx.server.address = (server_address, 443)
                # "ipv6*" test hosts get an IPv6 4-tuple peername.
                ctx.server.peername = (
                    ("2001:db8::1", 443, 0, 0)
                    if server_address.startswith("ipv6")
                    else ("192.0.2.1", 443)
                )
            if result is NeedsMoreData:
                with pytest.raises(NeedsMoreData):
                    nl._ignore_connection(ctx, data_client, b"")
            else:
                assert nl._ignore_connection(ctx, data_client, b"") is result
    def test_show_ignored_hosts(self, monkeypatch):
        """Ignored connections only get a flow (for UI display) once show_ignored_hosts is set."""
        # NOTE(review): the monkeypatch fixture appears unused here.
        nl = NextLayer()
        with taddons.context(nl) as tctx:
            m = MagicMock()
            m.context = Context(
                Client(peername=("192.168.0.42", 51234), sockname=("0.0.0.0", 8080)),
                tctx.options,
            )
            m.context.layers = [modes.TransparentProxy(m.context)]
            m.context.server.address = ("example.com", 42)
            tctx.configure(nl, ignore_hosts=["example.com"])
            # Connection is ignored (not-MITM'ed)
            assert nl._ignore_connection(m.context, http_get, b"") is True
            # No flow is being set (i.e. nothing shown in UI)
            assert nl._next_layer(m.context, http_get, b"").flow is None
            # ... until `--show-ignored-hosts` is set:
            tctx.configure(nl, show_ignored_hosts=True)
            assert nl._next_layer(m.context, http_get, b"").flow is not None
    def test_next_layer(self, monkeypatch, caplog):
        """next_layer keeps preexisting layers, assigns one when decidable, defers otherwise."""
        caplog.set_level(logging.DEBUG)
        nl = NextLayer()
        with taddons.context(nl) as tctx:
            m = MagicMock()
            m.context = Context(
                Client(peername=("192.168.0.42", 51234), sockname=("0.0.0.0", 8080)),
                tctx.options,
            )
            m.context.layers = [modes.TransparentProxy(m.context)]
            m.context.server.address = ("example.com", 42)
            tctx.configure(nl, ignore_hosts=["example.com"])
            # An already-assigned layer must not be overwritten.
            m.layer = preexisting = object()
            nl.next_layer(m)
            assert m.layer is preexisting
            m.layer = None
            monkeypatch.setattr(m, "data_client", lambda: http_get)
            nl.next_layer(m)
            assert m.layer
            m.layer = None
            # A truncated ClientHello is not enough data to decide yet.
            monkeypatch.setattr(
                m, "data_client", lambda: client_hello_with_extensions[:-5]
            )
            nl.next_layer(m)
            assert not m.layer
            assert "Deferring layer decision" in caplog.text
@dataclass
class TConf:
    """One _next_layer scenario: starting stack, inputs, and the expected stack."""

    # Layer stack present before _next_layer runs.
    before: list[type[Layer]]
    # Expected layer stack afterwards (compared via stack_match).
    after: list[type[Layer]]
    proxy_mode: str = "regular"
    transport_protocol: TransportProtocol = "tcp"
    tls_version: TlsVersion = None
    # Bytes already received from client / server when the decision is made.
    data_client: bytes = b""
    data_server: bytes = b""
    ignore_hosts: Sequence[str] = ()
    tcp_hosts: Sequence[str] = ()
    udp_hosts: Sequence[str] = ()
    # Whether the connection is expected to be passed through un-MITM'ed.
    ignore_conn: bool = False
    server_address: Address | None = None
    alpn: bytes | None = None
explicit_proxy_configs = [
pytest.param(
TConf(
before=[modes.HttpProxy],
after=[modes.HttpProxy, HttpLayer],
data_client=http_connect,
),
id=f"explicit proxy: regular http connect",
),
pytest.param(
TConf(
before=[modes.HttpProxy],
after=[modes.HttpProxy, HttpLayer],
ignore_hosts=[".+"],
data_client=http_connect,
),
id=f"explicit proxy: regular http connect disregards ignore_hosts",
),
pytest.param(
TConf(
before=[modes.HttpProxy],
after=[modes.HttpProxy, HttpLayer],
ignore_hosts=[".+"],
data_client=http_get_absolute,
),
id=f"explicit proxy: HTTP over regular proxy disregards ignore_hosts",
),
pytest.param(
TConf(
before=[modes.HttpProxy],
after=[modes.HttpProxy, ClientTLSLayer, HttpLayer],
data_client=client_hello_no_extensions,
),
id=f"explicit proxy: secure web proxy",
),
pytest.param(
TConf(
before=[
modes.HttpProxy,
ClientTLSLayer,
partial(HttpLayer, mode=HTTPMode.regular),
partial(HttpStream, stream_id=1),
],
after=[modes.HttpProxy, ClientTLSLayer, HttpLayer, HttpStream, TCPLayer],
server_address=("192.0.2.1", 443),
ignore_hosts=[".+"],
ignore_conn=True,
data_client=client_hello_with_extensions,
alpn=b"http/1.1",
),
id=f"explicit proxy: ignore_hosts over established secure web proxy",
),
pytest.param(
TConf(
before=[modes.HttpUpstreamProxy],
after=[modes.HttpUpstreamProxy, HttpLayer],
),
id=f"explicit proxy: upstream proxy",
),
pytest.param(
TConf(
before=[modes.HttpUpstreamProxy],
after=[modes.HttpUpstreamProxy, ClientQuicLayer, HttpLayer],
transport_protocol="udp",
),
id=f"explicit proxy: experimental http3",
),
pytest.param(
TConf(
before=[
modes.HttpProxy,
partial(HttpLayer, mode=HTTPMode.regular),
partial(HttpStream, stream_id=1),
],
after=[modes.HttpProxy, HttpLayer, HttpStream, HttpLayer],
data_client=b"GET / HTTP/1.1\r\n",
),
id=f"explicit proxy: HTTP over regular proxy",
),
pytest.param(
TConf(
before=[
modes.HttpProxy,
partial(HttpLayer, mode=HTTPMode.regular),
partial(HttpStream, stream_id=1),
],
after=[
modes.HttpProxy,
HttpLayer,
HttpStream,
ServerTLSLayer,
ClientTLSLayer,
],
data_client=client_hello_with_extensions,
),
id=f"explicit proxy: TLS over regular proxy",
),
pytest.param(
TConf(
before=[
modes.HttpProxy,
partial(HttpLayer, mode=HTTPMode.regular),
partial(HttpStream, stream_id=1),
ServerTLSLayer,
ClientTLSLayer,
],
after=[
modes.HttpProxy,
HttpLayer,
HttpStream,
ServerTLSLayer,
ClientTLSLayer,
HttpLayer,
],
data_client=b"GET / HTTP/1.1\r\n",
),
id=f"explicit proxy: HTTPS over regular proxy",
),
pytest.param(
TConf(
before=[
modes.HttpProxy,
partial(HttpLayer, mode=HTTPMode.regular),
partial(HttpStream, stream_id=1),
],
after=[modes.HttpProxy, HttpLayer, HttpStream, TCPLayer],
data_client=b"\xff",
),
id=f"explicit proxy: TCP over regular proxy",
),
]
reverse_proxy_configs = []
for proto_plain, proto_enc, app_layer in [
("udp", "dtls", UDPLayer),
("tcp", "tls", TCPLayer),
("http", "https", HttpLayer),
]:
if proto_plain == "udp":
data_client = dtls_client_hello_with_extensions
else:
data_client = client_hello_with_extensions
reverse_proxy_configs.extend(
[
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[modes.ReverseProxy, app_layer],
proxy_mode=f"reverse:{proto_plain}://example.com:42",
),
id=f"reverse proxy: {proto_plain} -> {proto_plain}",
),
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[
modes.ReverseProxy,
ServerTLSLayer,
ClientTLSLayer,
app_layer,
],
proxy_mode=f"reverse:{proto_enc}://example.com:42",
data_client=data_client,
),
id=f"reverse proxy: {proto_enc} -> {proto_enc}",
),
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[modes.ReverseProxy, ClientTLSLayer, app_layer],
proxy_mode=f"reverse:{proto_plain}://example.com:42",
data_client=data_client,
),
id=f"reverse proxy: {proto_enc} -> {proto_plain}",
),
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[modes.ReverseProxy, ServerTLSLayer, app_layer],
proxy_mode=f"reverse:{proto_enc}://example.com:42",
),
id=f"reverse proxy: {proto_plain} -> {proto_enc}",
),
]
)
reverse_proxy_configs.extend(
[
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[modes.ReverseProxy, DNSLayer],
proxy_mode="reverse:dns://example.com:53",
),
id="reverse proxy: dns",
),
pytest.param(
http3 := TConf(
before=[modes.ReverseProxy],
after=[modes.ReverseProxy, ServerQuicLayer, ClientQuicLayer, HttpLayer],
proxy_mode="reverse:http3://example.com",
),
id="reverse proxy: http3",
),
pytest.param(
dataclasses.replace(
http3,
proxy_mode="reverse:https://example.com",
transport_protocol="udp",
),
id="reverse proxy: http3 in https mode",
),
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[
modes.ReverseProxy,
ServerQuicLayer,
ClientQuicLayer,
RawQuicLayer,
],
proxy_mode="reverse:quic://example.com",
),
id="reverse proxy: quic",
),
pytest.param(
TConf(
before=[modes.ReverseProxy],
after=[modes.ReverseProxy, TCPLayer],
proxy_mode=f"reverse:http://example.com",
ignore_hosts=["example.com"],
server_address=("example.com", 80),
data_client=http_get,
ignore_conn=True,
),
id="reverse proxy: ignore_hosts",
),
]
)
transparent_proxy_configs = [
pytest.param(
TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, ServerTLSLayer, ClientTLSLayer],
data_client=client_hello_no_extensions,
),
id=f"transparent proxy: tls",
),
pytest.param(
TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, ServerTLSLayer, ClientTLSLayer],
data_client=dtls_client_hello_with_extensions,
transport_protocol="udp",
),
id=f"transparent proxy: dtls",
),
pytest.param(
quic := TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, ServerQuicLayer, ClientQuicLayer],
data_client=quic_client_hello,
transport_protocol="udp",
server_address=("192.0.2.1", 443),
),
id="transparent proxy: quic",
),
pytest.param(
dataclasses.replace(
quic,
data_client=quic_short_header_packet,
),
id="transparent proxy: existing quic session",
),
pytest.param(
TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, TCPLayer],
data_server=b"220 service ready",
),
id="transparent proxy: raw tcp",
),
pytest.param(
http := TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, HttpLayer],
server_address=("192.0.2.1", 80),
data_client=http_get,
),
id="transparent proxy: http",
),
pytest.param(
TConf(
before=[modes.TransparentProxy, ServerTLSLayer, ClientTLSLayer],
after=[modes.TransparentProxy, ServerTLSLayer, ClientTLSLayer, HttpLayer],
data_client=b"GO /method-too-short-for-heuristic HTTP/1.1\r\n",
alpn=HTTP1_ALPNS[0],
),
id=f"transparent proxy: http via ALPN",
),
pytest.param(
TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, TCPLayer],
server_address=("192.0.2.1", 23),
data_client=b"SSH-2.0-OpenSSH_9.7",
),
id="transparent proxy: ssh",
),
pytest.param(
dataclasses.replace(
http,
tcp_hosts=["192.0.2.1"],
after=[modes.TransparentProxy, TCPLayer],
),
id="transparent proxy: tcp_hosts",
),
pytest.param(
dataclasses.replace(
http,
ignore_hosts=["192.0.2.1"],
after=[modes.TransparentProxy, TCPLayer],
ignore_conn=True,
),
id="transparent proxy: ignore_hosts",
),
pytest.param(
TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, TCPLayer],
data_client=custom_base64_proto,
),
id="transparent proxy: full alpha tcp",
),
pytest.param(
udp := TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, UDPLayer],
server_address=("192.0.2.1", 553),
transport_protocol="udp",
data_client=b"\xff",
),
id="transparent proxy: raw udp",
),
pytest.param(
dns := dataclasses.replace(
udp,
after=[modes.TransparentProxy, DNSLayer],
data_client=dns_query,
server_address=("192.0.2.1", 53),
),
id="transparent proxy: dns over udp",
),
pytest.param(
dataclasses.replace(
dns,
transport_protocol="tcp",
),
id="transparent proxy: dns over tcp",
),
pytest.param(
dataclasses.replace(
udp,
udp_hosts=["192.0.2.1"],
after=[modes.TransparentProxy, UDPLayer],
),
id="transparent proxy: udp_hosts",
),
pytest.param(
TConf(
before=[modes.TransparentProxy],
after=[modes.TransparentProxy, DNSLayer],
proxy_mode="wireguard",
server_address=("10.0.0.53", 53),
ignore_hosts=[".+"],
transport_protocol="udp",
data_client=dns_query,
),
id="wireguard proxy: dns should not be ignored",
),
pytest.param(
TConf(
before=[modes.TransparentProxy, ServerQuicLayer, ClientQuicLayer],
after=[
modes.TransparentProxy,
ServerQuicLayer,
ClientQuicLayer,
RawQuicLayer,
],
data_client=b"<insert valid quic here>",
alpn=b"doq",
tls_version="QUICv1",
),
id=f"transparent proxy: non-http quic",
),
]
@pytest.mark.parametrize(
    "test_conf",
    [
        *explicit_proxy_configs,
        *reverse_proxy_configs,
        *transparent_proxy_configs,
    ],
)
def test_next_layer(
    test_conf: TConf,
):
    """Run NextLayer._next_layer for one scenario and check the resulting layer stack."""
    addon = NextLayer()
    with taddons.context(addon) as tctx:
        tctx.configure(
            addon,
            ignore_hosts=test_conf.ignore_hosts,
            tcp_hosts=test_conf.tcp_hosts,
            udp_hosts=test_conf.udp_hosts,
        )
        client = Client(
            peername=("192.168.0.42", 51234),
            sockname=("0.0.0.0", 8080),
            alpn=test_conf.alpn,
        )
        ctx = Context(client, tctx.options)
        # Apply the per-scenario connection attributes.
        ctx.server.address = test_conf.server_address
        ctx.client.transport_protocol = test_conf.transport_protocol
        ctx.client.tls_version = test_conf.tls_version
        ctx.client.proxy_mode = ProxyMode.parse(test_conf.proxy_mode)
        ctx.layers = [layer_cls(ctx) for layer_cls in test_conf.before]
        addon._next_layer(
            ctx,
            data_client=test_conf.data_client,
            data_server=test_conf.data_server,
        )
        assert stack_match(ctx, test_conf.after), f"Unexpected stack: {ctx.layers}"
        last_layer = ctx.layers[-1]
        # For raw TCP/UDP layers, flow is None exactly when the connection is ignored.
        if isinstance(last_layer, (UDPLayer, TCPLayer)):
            assert bool(last_layer.flow) ^ test_conf.ignore_conn
def test_starts_like_quic():
    """Heuristic QUIC detection: long headers by version number, short headers by port."""
    assert not _starts_like_quic(b"", ("192.0.2.1", 443))
    assert not _starts_like_quic(dtls_client_hello_with_extensions, ("192.0.2.1", 443))
    # Long-header packets carry a version number, so no port hint is required.
    assert _starts_like_quic(quic_client_hello, None)
    greased_version_negotiation = bytes.fromhex(
        "ca0a0a0a0a08c0618c84b54541320823fcce946c38d8210044e6a93bbb283593f75ffb6f2696b16cfdcb5b1255"
    )
    assert _starts_like_quic(greased_version_negotiation, None)
    # Short-header packets carry no version; the destination port is the only hint.
    assert _starts_like_quic(quic_short_header_packet, ("192.0.2.1", 443))
    assert not _starts_like_quic(quic_short_header_packet, ("192.0.2.1", 444))
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_asgiapp.py | test/mitmproxy/addons/test_asgiapp.py | import asyncio
import json
import flask
from flask import request
from mitmproxy.addons import asgiapp
from mitmproxy.addons import next_layer
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.test import taddons
# Flask app exposed through the WSGIApp addon in test_asgi_full below.
tapp = flask.Flask(__name__)
@tapp.route("/")
def hello():
    """Index route; returns a fixed marker body checked by test_asgi_full."""
    return "testapp"
@tapp.route("/parameters")
def request_check():
    """Return the request's query parameters as a JSON object."""
    # Dict comprehension instead of a manual accumulation loop; takes the first
    # value per key, exactly like the original `request.args[k]` access.
    args = {k: request.args[k] for k in request.args.keys()}
    return json.dumps(args)
@tapp.route("/requestbody", methods=["POST"])
def request_body():
    """Echo the raw request body back, wrapped in a JSON object."""
    decoded = request.data.decode()
    return json.dumps({"body": decoded})
@tapp.route("/error")
def error():
    """Route that always fails, to exercise the WSGI error path."""
    raise ValueError("An exception...")
async def errapp(scope, receive, send):
    """ASGI app that fails immediately, to exercise the ASGI error path."""
    raise ValueError("errapp")
async def noresponseapp(scope, receive, send):
    """ASGI app that completes without ever sending a response."""
    return None
async def test_asgi_full(caplog):
    """End-to-end test of the WSGI/ASGI host apps through a live proxy instance."""
    ps = Proxyserver()
    addons = [
        asgiapp.WSGIApp(tapp, "testapp", 80),
        asgiapp.ASGIApp(errapp, "errapp", 80),
        asgiapp.ASGIApp(noresponseapp, "noresponseapp", 80),
    ]
    with taddons.context(ps, *addons) as tctx:
        tctx.master.addons.add(next_layer.NextLayer())
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        assert await ps.setup_servers()
        proxy_addr = ("127.0.0.1", ps.listen_addrs()[0][1])
        # We parallelize connection establishment/closure because those operations tend to be slow.
        [
            (r1, w1),
            (r2, w2),
            (r3, w3),
            (r4, w4),
            (r5, w5),
        ] = await asyncio.gather(
            asyncio.open_connection(*proxy_addr),
            asyncio.open_connection(*proxy_addr),
            asyncio.open_connection(*proxy_addr),
            asyncio.open_connection(*proxy_addr),
            asyncio.open_connection(*proxy_addr),
        )
        # Plain WSGI response. (f-prefixes removed: none of these strings interpolate.)
        req = "GET http://testapp:80/ HTTP/1.1\r\n\r\n"
        w1.write(req.encode())
        header = await r1.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 200 OK")
        body = await r1.readuntil(b"testapp")
        assert body == b"testapp"
        # Query parameters reach the app. (Fixed mojibake: "&para" had been
        # rendered as "¶", breaking the two-parameter assertion below.)
        req = "GET http://testapp:80/parameters?param1=1&param2=2 HTTP/1.1\r\n\r\n"
        w2.write(req.encode())
        header = await r2.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 200 OK")
        body = await r2.readuntil(b"}")
        assert body == b'{"param1": "1", "param2": "2"}'
        # Request bodies reach the app.
        req = "POST http://testapp:80/requestbody HTTP/1.1\r\nContent-Length: 6\r\n\r\nHello!"
        w3.write(req.encode())
        header = await r3.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 200 OK")
        body = await r3.readuntil(b"}")
        assert body == b'{"body": "Hello!"}'
        # An app that raises yields a 500 and logs the exception.
        req = "GET http://errapp:80/?foo=bar HTTP/1.1\r\n\r\n"
        w4.write(req.encode())
        header = await r4.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 500")
        body = await r4.readuntil(b"ASGI Error")
        assert body == b"ASGI Error"
        assert "ValueError" in caplog.text
        # An app that never responds also yields a 500.
        req = "GET http://noresponseapp:80/ HTTP/1.1\r\n\r\n"
        w5.write(req.encode())
        header = await r5.readuntil(b"\r\n\r\n")
        assert header.startswith(b"HTTP/1.1 500")
        body = await r5.readuntil(b"ASGI Error")
        assert body == b"ASGI Error"
        assert "no response sent" in caplog.text
        w1.close()
        w2.close()
        w3.close()
        w4.close()
        w5.close()
        await asyncio.gather(
            w1.wait_closed(),
            w2.wait_closed(),
            w3.wait_closed(),
            w4.wait_closed(),
            w5.wait_closed(),
        )
        tctx.configure(ps, server=False)
        assert await ps.setup_servers()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_block.py | test/mitmproxy/addons/test_block.py | import pytest
from mitmproxy import connection
from mitmproxy.addons import block
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.test import taddons
@pytest.mark.parametrize(
    "block_global, block_private, should_be_killed, address",
    [
        # block_global: loopback
        (True, False, False, ("127.0.0.1",)),
        (True, False, False, ("::1",)),
        # block_global: private
        (True, False, False, ("10.0.0.1",)),
        (True, False, False, ("172.20.0.1",)),
        (True, False, False, ("192.168.1.1",)),
        (True, False, False, ("::ffff:10.0.0.1",)),
        (True, False, False, ("::ffff:172.20.0.1",)),
        (True, False, False, ("::ffff:192.168.1.1",)),
        (True, False, False, ("fe80::",)),
        (True, False, False, (r"::ffff:192.168.1.1%scope",)),
        # block_global: global
        (True, False, True, ("1.1.1.1",)),
        (True, False, True, ("8.8.8.8",)),
        (True, False, True, ("216.58.207.174",)),
        (True, False, True, ("::ffff:1.1.1.1",)),
        (True, False, True, ("::ffff:8.8.8.8",)),
        (True, False, True, ("::ffff:216.58.207.174",)),
        (True, False, True, ("2001:4860:4860::8888",)),
        (True, False, True, (r"2001:4860:4860::8888%scope",)),
        # block_private: loopback
        (False, True, False, ("127.0.0.1",)),
        (False, True, False, ("::1",)),
        # block_private: private
        (False, True, True, ("10.0.0.1",)),
        (False, True, True, ("172.20.0.1",)),
        (False, True, True, ("192.168.1.1",)),
        (False, True, True, ("::ffff:10.0.0.1",)),
        (False, True, True, ("::ffff:172.20.0.1",)),
        (False, True, True, ("::ffff:192.168.1.1",)),
        (False, True, True, (r"::ffff:192.168.1.1%scope",)),
        (False, True, True, ("fe80::",)),
        # block_private: global
        (False, True, False, ("1.1.1.1",)),
        (False, True, False, ("8.8.8.8",)),
        (False, True, False, ("216.58.207.174",)),
        (False, True, False, ("::ffff:1.1.1.1",)),
        (False, True, False, ("::ffff:8.8.8.8",)),
        (False, True, False, ("::ffff:216.58.207.174",)),
        (False, True, False, (r"::ffff:216.58.207.174%scope",)),
        (False, True, False, ("2001:4860:4860::8888",)),
    ],
)
async def test_block_global(block_global, block_private, should_be_killed, address):
    """A client peername is blocked (client.error set) iff the matching block_* option applies.

    Covers IPv4, IPv6, IPv4-mapped IPv6, and scoped addresses; loopback is never blocked.
    """
    ar = block.Block()
    with taddons.context(ar) as tctx:
        tctx.configure(ar, block_global=block_global, block_private=block_private)
        client = connection.Client(peername=address, sockname=("127.0.0.1", 8080))
        ar.client_connected(client)
        # client_connected sets client.error when the connection is refused.
        assert bool(client.error) == should_be_killed
async def test_ignore_local_mode():
    """At least on macOS, local mode peername may be the client's public IP."""
    addon = block.Block()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, block_private=True)
        local_client = connection.Client(
            peername=("192.168.1.1", 0),
            sockname=("127.0.0.1", 8080),
            proxy_mode=ProxyMode.parse("local"),
        )
        addon.client_connected(local_client)
        # Despite the private peername, local-mode clients must not be blocked.
        assert not local_client.error
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_cut.py | test/mitmproxy/addons/test_cut.py | from unittest import mock
import pyperclip
import pytest
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy.addons import cut
from mitmproxy.addons import view
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_extract(tdata):
    """cut.extract renders each supported spec of a flow to its string/bytes value."""
    tf = tflow.tflow(resp=True)
    # [spec, expected value] pairs; expected values come from the tflow fixture.
    tests = [
        ["request.method", "GET"],
        ["request.scheme", "http"],
        ["request.host", "address"],
        ["request.http_version", "HTTP/1.1"],
        ["request.port", "22"],
        ["request.path", "/path"],
        ["request.url", "http://address:22/path"],
        ["request.text", "content"],
        ["request.content", b"content"],
        ["request.raw_content", b"content"],
        ["request.timestamp_start", "946681200"],
        ["request.timestamp_end", "946681201"],
        ["request.header[header]", "qvalue"],
        ["response.status_code", "200"],
        ["response.reason", "OK"],
        ["response.text", "message"],
        ["response.content", b"message"],
        ["response.raw_content", b"message"],
        ["response.header[header-response]", "svalue"],
        ["response.timestamp_start", "946681202"],
        ["response.timestamp_end", "946681203"],
        ["client_conn.peername.port", "22"],
        ["client_conn.peername.host", "127.0.0.1"],
        ["client_conn.tls_version", "TLSv1.2"],
        ["client_conn.sni", "address"],
        ["client_conn.tls_established", "true"],
        ["server_conn.address.port", "22"],
        ["server_conn.address.host", "address"],
        ["server_conn.peername.host", "192.168.0.1"],
        ["server_conn.tls_version", "TLSv1.2"],
        ["server_conn.sni", "address"],
        ["server_conn.tls_established", "true"],
    ]
    for spec, expected in tests:
        ret = cut.extract(spec, tf)
        assert spec and ret == expected
    # Certificates are rendered in PEM form.
    with open(tdata.path("mitmproxy/net/data/text_cert"), "rb") as f:
        d = f.read()
    c1 = certs.Cert.from_pem(d)
    tf.server_conn.certificate_list = [c1]
    assert "CERTIFICATE" in cut.extract("server_conn.certificate_list", tf)
def test_extract_websocket():
    """WebSocket message payloads from both directions appear in extracted content."""
    tf = tflow.twebsocketflow(messages=True)
    for spec in ("request.content", "response.content"):
        extracted = cut.extract(spec, tf)
        for payload in (b"hello binary", b"hello text", b"it's me"):
            assert payload in extracted
def test_extract_str():
    """Non-decodable bytes are rendered via their repr when extracted as text."""
    flow = tflow.tflow()
    flow.request.raw_content = b"\xff"
    assert cut.extract_str("request.raw_content", flow) == r"b'\xff'"
def test_headername():
    """A malformed header[...] spec raises CommandError."""
    with pytest.raises(exceptions.CommandError):
        cut.headername("header[foo.")
def qr(f):
    """Read and return the raw bytes of file *f*."""
    with open(f, "rb") as fh:
        return fh.read()
async def test_cut_clip(caplog):
    """cut.clip copies extracted values to the clipboard; clipboard failures are logged, not raised."""
    v = view.View()
    c = cut.Cut()
    with taddons.context() as tctx:
        tctx.master.addons.add(v, c)
        v.add([tflow.tflow(resp=True)])
        with mock.patch("pyperclip.copy") as pc:
            tctx.command(c.clip, "@all", "request.method")
            assert pc.called
        with mock.patch("pyperclip.copy") as pc:
            tctx.command(c.clip, "@all", "request.content")
            assert pc.called
        with mock.patch("pyperclip.copy") as pc:
            tctx.command(c.clip, "@all", "request.method,request.content")
            assert pc.called
        # When pyperclip has no backend, the error must surface as a log message.
        with mock.patch("pyperclip.copy") as pc:
            log_message = (
                "Pyperclip could not find a copy/paste mechanism for your system."
            )
            pc.side_effect = pyperclip.PyperclipException(log_message)
            tctx.command(c.clip, "@all", "request.method")
            assert log_message in caplog.text
def test_cut_save(tmpdir):
    """cut.save writes extracted values to a file; a leading '+' appends instead of overwriting."""
    f = str(tmpdir.join("path"))
    v = view.View()
    c = cut.Cut()
    with taddons.context() as tctx:
        tctx.master.addons.add(v, c)
        v.add([tflow.tflow(resp=True)])
        tctx.command(c.save, "@all", "request.method", f)
        assert qr(f) == b"GET"
        tctx.command(c.save, "@all", "request.content", f)
        assert qr(f) == b"content"
        # "+path" appends to the existing file.
        tctx.command(c.save, "@all", "request.content", "+" + f)
        assert qr(f) == b"content\ncontent"
        v.add([tflow.tflow(resp=True)])
        # With multiple flows, one line is written per flow.
        tctx.command(c.save, "@all", "request.method", f)
        assert qr(f).splitlines() == [b"GET", b"GET"]
        # Multiple specs produce CSV-style rows.
        tctx.command(c.save, "@all", "request.method,request.content", f)
        assert qr(f).splitlines() == [b"GET,b'content'", b"GET,b'content'"]
@pytest.mark.parametrize(
    "exception, log_message",
    [
        (PermissionError, "Permission denied"),
        (IsADirectoryError, "Is a directory"),
        (FileNotFoundError, "No such file or directory"),
    ],
)
async def test_cut_save_open(exception, log_message, tmpdir, caplog):
    """OS errors while opening the output file are logged rather than raised."""
    f = str(tmpdir.join("path"))
    v = view.View()
    c = cut.Cut()
    with taddons.context() as tctx:
        tctx.master.addons.add(v, c)
        v.add([tflow.tflow(resp=True)])
        with mock.patch("mitmproxy.addons.cut.open") as m:
            m.side_effect = exception(log_message)
            tctx.command(c.save, "@all", "request.method", f)
            assert log_message in caplog.text
def test_cut():
    """Cut.cut returns one row per flow; unknown specs yield empty strings, dunders are rejected."""
    c = cut.Cut()
    with taddons.context():
        tflows = [tflow.tflow(resp=True)]
        assert c.cut(tflows, ["request.method"]) == [["GET"]]
        assert c.cut(tflows, ["request.scheme"]) == [["http"]]
        assert c.cut(tflows, ["request.host"]) == [["address"]]
        assert c.cut(tflows, ["request.port"]) == [["22"]]
        assert c.cut(tflows, ["request.path"]) == [["/path"]]
        assert c.cut(tflows, ["request.url"]) == [["http://address:22/path"]]
        assert c.cut(tflows, ["request.content"]) == [[b"content"]]
        assert c.cut(tflows, ["request.header[header]"]) == [["qvalue"]]
        assert c.cut(tflows, ["request.header[unknown]"]) == [[""]]
        assert c.cut(tflows, ["response.status_code"]) == [["200"]]
        assert c.cut(tflows, ["response.reason"]) == [["OK"]]
        assert c.cut(tflows, ["response.content"]) == [[b"message"]]
        assert c.cut(tflows, ["response.header[header-response]"]) == [["svalue"]]
        assert c.cut(tflows, ["moo"]) == [[""]]
        # Dunder access is rejected outright.
        with pytest.raises(exceptions.CommandError):
            assert c.cut(tflows, ["__dict__"]) == [[""]]
    with taddons.context():
        # Flows without a response yield empty strings for response specs.
        tflows = [tflow.tflow(resp=False)]
        assert c.cut(tflows, ["response.reason"]) == [[""]]
        assert c.cut(tflows, ["response.header[key]"]) == [[""]]
    # Non-HTTP flows have no request/response attributes at all.
    for f in (tflow.ttcpflow(), tflow.tudpflow()):
        c = cut.Cut()
        with taddons.context():
            tflows = [f]
            assert c.cut(tflows, ["request.method"]) == [[""]]
            assert c.cut(tflows, ["response.status"]) == [[""]]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_browser.py | test/mitmproxy/addons/test_browser.py | from unittest import mock
from mitmproxy.addons import browser
from mitmproxy.test import taddons
def test_browser(caplog):
    """Browser.start spawns processes, warns on extra instances, and rejects unknown names."""
    caplog.set_level("INFO")
    with (
        mock.patch("subprocess.Popen") as po,
        mock.patch("shutil.which") as which,
        taddons.context(),
    ):
        # Pretend chrome is installed so the addon picks it up.
        which.return_value = "chrome"
        b = browser.Browser()
        b.start()
        assert po.called
        # A second start spawns an additional browser and logs it.
        b.start()
        assert "Starting additional browser" in caplog.text
        assert len(b.browser) == 2
        # Unknown browser names are rejected without spawning anything.
        b.start("unsupported-browser")
        assert "Invalid browser name." in caplog.text
        assert len(b.browser) == 2
        # done() terminates and forgets all spawned browsers.
        b.done()
        assert not b.browser
async def test_no_browser(caplog):
    """An error is logged when no supported browser executable can be found."""
    caplog.set_level("INFO")
    with mock.patch("shutil.which") as which_mock:
        which_mock.return_value = False
        browser.Browser().start()
        assert "platform is not supported" in caplog.text
async def test_find_executable_cmd():
    """find_executable_cmd returns the command as a one-element list when it is on PATH."""
    with mock.patch("shutil.which") as which_mock:
        which_mock.side_effect = lambda cmd: cmd == "chrome"
        assert browser.find_executable_cmd("chrome") == ["chrome"]
async def test_find_executable_cmd_no_executable():
    """find_executable_cmd returns None when the command is not on PATH."""
    with mock.patch("shutil.which") as which_mock:
        which_mock.return_value = False
        assert browser.find_executable_cmd("chrome") is None
async def test_find_flatpak_cmd():
    """find_flatpak_cmd builds a `flatpak run` command when the app is installed."""

    def fake_run(cmd, **kwargs):
        # `flatpak info <app>` exits 0 only for the installed app id.
        installed = cmd == ["flatpak", "info", "com.google.Chrome"]
        return mock.Mock(returncode=0 if installed else 1)

    with (
        mock.patch("shutil.which") as which_mock,
        mock.patch("subprocess.run") as run_mock,
    ):
        which_mock.side_effect = lambda cmd: cmd == "flatpak"
        run_mock.side_effect = fake_run
        assert browser.find_flatpak_cmd("com.google.Chrome") == [
            "flatpak",
            "run",
            "-p",
            "com.google.Chrome",
        ]
async def test_find_flatpak_cmd_no_flatpak():
    """find_flatpak_cmd returns None when the flatpak app is not installed."""
    with (
        mock.patch("shutil.which") as which_mock,
        mock.patch("subprocess.run") as run_mock,
    ):
        which_mock.side_effect = lambda cmd: cmd == "flatpak"
        run_mock.return_value = mock.Mock(returncode=1)
        assert browser.find_flatpak_cmd("com.google.Chrome") is None
async def test_browser_start_firefox():
    """Browser.start("firefox") spawns a process when a firefox binary is available."""
    with (
        mock.patch("shutil.which") as which_mock,
        mock.patch("subprocess.Popen") as popen_mock,
        taddons.context(),
    ):
        which_mock.return_value = "firefox"
        browser.Browser().start("firefox")
        assert popen_mock.called
async def test_browser_start_firefox_not_found(caplog):
    """An error is logged when no firefox executable can be found."""
    caplog.set_level("INFO")
    with mock.patch("shutil.which") as which_mock:
        which_mock.return_value = False
        browser.Browser().start("firefox")
        assert "platform is not supported" in caplog.text
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_serverplayback.py | test/mitmproxy/addons/test_serverplayback.py | import urllib
import pytest
import mitmproxy.test.tutils
from mitmproxy import exceptions
from mitmproxy import io
from mitmproxy.addons import serverplayback
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def tdump(path, flows):
    """Serialize *flows* to *path* using mitmproxy's flow writer."""
    with open(path, "wb") as f:
        writer = io.FlowWriter(f)
        for flow in flows:
            writer.add(flow)
def test_load_file(tmpdir):
    """load_file populates the flowmap; a missing path raises CommandError."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp):
        flow_path = str(tmpdir.join("flows"))
        tdump(flow_path, [tflow.tflow(resp=True)])
        sp.load_file(flow_path)
        assert sp.flowmap
        with pytest.raises(exceptions.CommandError):
            sp.load_file("/nonexistent")
def test_config(tmpdir):
    """The server_replay option accepts flow files but rejects directories."""
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        fpath = str(tmpdir.join("flows"))
        tdump(fpath, [tflow.tflow(resp=True)])
        tctx.configure(s, server_replay=[fpath])
        # NOTE(review): presumably resets the addon so the next configure()
        # re-runs the load logic — confirm against ServerPlayback.configure.
        s.configured = False
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(s, server_replay=[str(tmpdir)])
def test_server_playback():
    """Flows can be loaded, consumed via next_flow, and cleared."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp)
        flow = tflow.tflow(resp=True)
        assert not sp.flowmap
        sp.load_flows([flow])
        assert sp.flowmap
        # Consuming the only matching flow empties the map again.
        assert sp.next_flow(flow)
        assert not sp.flowmap
        sp.load_flows([flow])
        assert sp.flowmap
        sp.clear()
        assert not sp.flowmap
def test_add_flows():
    """add_flows appends to an existing flowmap instead of replacing it."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp)
        f1 = tflow.tflow(resp=True)
        f2 = tflow.tflow(resp=True)
        sp.load_flows([f1])
        sp.add_flows([f2])
        # Both flows are queued; consuming one leaves the other.
        assert sp.next_flow(f1)
        assert sp.flowmap
        assert sp.next_flow(f2)
        assert not sp.flowmap
        sp.add_flows([f1])
        assert sp.flowmap
        assert sp.next_flow(f1)
        assert not sp.flowmap
def test_ignore_host():
    """With server_replay_ignore_host set, the host does not affect the flow hash."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp, server_replay_ignore_host=True)
        first = tflow.tflow(resp=True)
        second = tflow.tflow(resp=True)
        first.request.host = "address"
        second.request.host = "address"
        assert sp._hash(first) == sp._hash(second)
        # A different host still hashes identically.
        second.request.host = "wrong_address"
        assert sp._hash(first) == sp._hash(second)
def test_ignore_content():
    """server_replay_ignore_content removes the request body from the flow hash."""
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        # With the option off, differing bodies produce differing hashes.
        tctx.configure(s, server_replay_ignore_content=False)
        r = tflow.tflow(resp=True)
        r2 = tflow.tflow(resp=True)
        r.request.content = b"foo"
        r2.request.content = b"foo"
        assert s._hash(r) == s._hash(r2)
        r2.request.content = b"bar"
        assert not s._hash(r) == s._hash(r2)
        # With the option on, any body (including empty or None) hashes the same.
        tctx.configure(s, server_replay_ignore_content=True)
        r = tflow.tflow(resp=True)
        r2 = tflow.tflow(resp=True)
        r.request.content = b"foo"
        r2.request.content = b"foo"
        assert s._hash(r) == s._hash(r2)
        r2.request.content = b"bar"
        assert s._hash(r) == s._hash(r2)
        r2.request.content = b""
        assert s._hash(r) == s._hash(r2)
        r2.request.content = None
        assert s._hash(r) == s._hash(r2)
def test_ignore_content_wins_over_params():
    """server_replay_ignore_content takes precedence over payload-param filtering."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(
            sp,
            server_replay_ignore_content=True,
            server_replay_ignore_payload_params=["param1", "param2"],
        )
        # NOTE: parameters are mutually exclusive in options
        first = tflow.tflow(resp=True)
        first.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        first.request.content = b"paramx=y"
        second = tflow.tflow(resp=True)
        second.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        second.request.content = b"paramx=x"
        # Bodies differ in a non-ignored param, but content is ignored entirely.
        assert sp._hash(first) == sp._hash(second)
def test_ignore_payload_params_other_content_type():
    """Payload-param filtering only applies to x-www-form-urlencoded bodies, not e.g. JSON."""
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        tctx.configure(
            s,
            server_replay_ignore_content=False,
            server_replay_ignore_payload_params=["param1", "param2"],
        )
        r = tflow.tflow(resp=True)
        r.request.headers["Content-Type"] = "application/json"
        r.request.content = b'{"param1":"1"}'
        r2 = tflow.tflow(resp=True)
        r2.request.headers["Content-Type"] = "application/json"
        r2.request.content = b'{"param1":"1"}'
        # same content
        assert s._hash(r) == s._hash(r2)
        # distinct content (note: only x-www-form-urlencoded payloads are analysed,
        # so the JSON body is compared verbatim and param1 is NOT ignored)
        r2.request.content = b'{"param1":"2"}'
        assert not s._hash(r) == s._hash(r2)
def test_hash():
    """The default flow hash reflects path and query, but not arbitrary headers."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp)
        first = tflow.tflow()
        second = tflow.tflow()
        assert sp._hash(first)
        assert sp._hash(first) == sp._hash(second)
        # Headers are not part of the default hash.
        first.request.headers["foo"] = "bar"
        assert sp._hash(first) == sp._hash(second)
        first.request.path = "voing"
        assert sp._hash(first) != sp._hash(second)
        # A query parameter with a blank value differs from an empty query string.
        first.request.path = "path?blank_value"
        second.request.path = "path?"
        assert sp._hash(first) != sp._hash(second)
def test_headers():
    """With server_replay_use_headers, only the listed headers contribute to the hash."""
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        tctx.configure(s, server_replay_use_headers=["foo"])
        r = tflow.tflow(resp=True)
        r.request.headers["foo"] = "bar"
        r2 = tflow.tflow(resp=True)
        # Listed header present on one side only -> hashes differ.
        assert not s._hash(r) == s._hash(r2)
        r2.request.headers["foo"] = "bar"
        assert s._hash(r) == s._hash(r2)
        # Unlisted headers are ignored.
        r2.request.headers["oink"] = "bar"
        assert s._hash(r) == s._hash(r2)
        r = tflow.tflow(resp=True)
        r2 = tflow.tflow(resp=True)
        assert s._hash(r) == s._hash(r2)
def test_load():
    """Flows with identical hashes replay in load order and are consumed one by one."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp)
        first = tflow.tflow(resp=True)
        first.request.headers["key"] = "one"
        second = tflow.tflow(resp=True)
        second.request.headers["key"] = "two"
        sp.load_flows([first, second])
        assert sp.count() == 2
        # Both flows hash identically (headers don't affect the default hash),
        # so they are replayed FIFO.
        replayed = sp.next_flow(first)
        assert replayed.request.headers["key"] == "one"
        assert sp.count() == 1
        replayed = sp.next_flow(first)
        assert replayed.request.headers["key"] == "two"
        assert not sp.flowmap
        assert sp.count() == 0
        assert not sp.next_flow(first)
def test_load_with_server_replay_reuse():
    """With server_replay_reuse enabled, replayed flows are not consumed."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp, server_replay_reuse=True)
        first = tflow.tflow(resp=True)
        first.request.headers["key"] = "one"
        second = tflow.tflow(resp=True)
        second.request.headers["key"] = "two"
        sp.load_flows([first, second])
        assert sp.count() == 2
        sp.next_flow(first)
        # The flow count is unchanged after a replay.
        assert sp.count() == 2
def test_ignore_params():
    """Query parameters listed in server_replay_ignore_params do not affect the hash."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp, server_replay_ignore_params=["param1", "param2"])
        first = tflow.tflow(resp=True)
        first.request.path = "/test?param1=1"
        second = tflow.tflow(resp=True)
        # (path, hashes should match) — only non-ignored params break equality.
        for path, same in [
            ("/test", True),
            ("/test?param1=2", True),
            ("/test?param2=1", True),
            ("/test?param3=2", False),
        ]:
            second.request.path = path
            assert (sp._hash(first) == sp._hash(second)) == same
def thash(r, r2, setter):
    """Shared hash-equality matrix for payload-param filtering.

    *setter* writes a request body for the given keyword params; used with both
    urlencoded and multipart setters by test_ignore_payload_params.
    """
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        # NOTE(review): `s` is re-created here, shadowing the instance bound to
        # the taddons context above — looks unintentional, but configure() and
        # all assertions below consistently target the new instance.
        s = serverplayback.ServerPlayback()
        tctx.configure(s, server_replay_ignore_payload_params=["param1", "param2"])
        setter(r, paramx="x", param1="1")
        setter(r2, paramx="x", param1="1")
        # same parameters
        assert s._hash(r) == s._hash(r2)
        # ignored parameters !=
        setter(r2, paramx="x", param1="2")
        assert s._hash(r) == s._hash(r2)
        # missing parameter
        setter(r2, paramx="x")
        assert s._hash(r) == s._hash(r2)
        # ignorable parameter added
        setter(r2, paramx="x", param1="2")
        assert s._hash(r) == s._hash(r2)
        # not ignorable parameter changed
        setter(r2, paramx="y", param1="1")
        assert not s._hash(r) == s._hash(r2)
        # not ignorable parameter missing
        setter(r2, param1="1")
        r2.request.content = b"param1=1"
        assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params():
    """Payload-param filtering applies to urlencoded and multipart bodies alike."""

    def urlencode_setter(req_flow, **params):
        req_flow.request.content = urllib.parse.urlencode(params).encode()

    first = tflow.tflow(resp=True)
    first.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    second = tflow.tflow(resp=True)
    second.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    thash(first, second, urlencode_setter)

    boundary = "somefancyboundary"

    def multipart_setter(req_flow, **params):
        delim = f"--{boundary}\n"
        parts = [
            f'Content-Disposition: form-data; name="{name}"\n\n{value}\n'
            for name, value in params.items()
        ]
        req_flow.request.content = (delim + delim.join(parts) + delim).encode()
        req_flow.request.headers["content-type"] = (
            "multipart/form-data; boundary=" + boundary
        )

    first = tflow.tflow(resp=True)
    second = tflow.tflow(resp=True)
    thash(first, second, multipart_setter)
def test_runtime_modify_params():
    """Changing the ignore options at runtime re-hashes already-loaded flows."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        r = tflow.tflow(resp=True)
        r.request.path = "/test?param1=1"
        r2 = tflow.tflow(resp=True)
        r2.request.path = "/test"
        sp.load_flows([r])
        # Renamed from `hash`, which shadowed the builtin.
        hash_before = next(iter(sp.flowmap.keys()))
        tctx.configure(sp, server_replay_ignore_params=["param1"])
        hash_after = next(iter(sp.flowmap.keys()))
        assert hash_before != hash_after
        # With param1 ignored, the stored flow now matches the param-less request.
        assert hash_after == sp._hash(r2)
def test_server_playback_full():
    """The request hook attaches a recorded response to matching flows only."""
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        tctx.configure(
            s,
            server_replay_refresh=True,
        )
        f = tflow.tflow()
        f.response = mitmproxy.test.tutils.tresp(content=f.request.content)
        s.load_flows([f, f])
        # A matching live request gets the recorded response.
        tf = tflow.tflow()
        assert not tf.response
        s.request(tf)
        assert tf.response.data == f.response.data
        # A non-matching request (different body) is left untouched.
        tf = tflow.tflow()
        tf.request.content = b"gibble"
        assert not tf.response
        s.request(tf)
        assert not tf.response
async def test_server_playback_kill():
    """With server_replay_kill_extra (legacy option), unmatched requests are killed."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp, server_replay_refresh=True, server_replay_kill_extra=True)
        recorded = tflow.tflow()
        recorded.response = mitmproxy.test.tutils.tresp(
            content=recorded.request.content
        )
        sp.load_flows([recorded])
        live = tflow.tflow()
        live.request.host = "nonexistent"
        await tctx.cycle(sp, live)
        assert live.error
async def test_server_playback_kill_new_option():
    """With server_replay_extra="kill", unmatched requests are killed."""
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp, server_replay_refresh=True, server_replay_extra="kill")
        recorded = tflow.tflow()
        recorded.response = mitmproxy.test.tutils.tresp(
            content=recorded.request.content
        )
        sp.load_flows([recorded])
        live = tflow.tflow()
        live.request.host = "nonexistent"
        await tctx.cycle(sp, live)
        assert live.error
@pytest.mark.parametrize(
    "option,status",
    [
        ("204", 204),
        ("400", 400),
        ("404", 404),
        ("500", 500),
    ],
)
async def test_server_playback_404(option, status):
    """A numeric server_replay_extra value answers unmatched requests with that status code."""
    s = serverplayback.ServerPlayback()
    with taddons.context(s) as tctx:
        tctx.configure(s, server_replay_refresh=True, server_replay_extra=option)
        f = tflow.tflow()
        f.response = mitmproxy.test.tutils.tresp(content=f.request.content)
        s.load_flows([f])
        # A request that matches nothing in the flowmap.
        f = tflow.tflow()
        f.request.host = "nonexistent"
        s.request(f)
        assert f.response.status_code == status
def test_server_playback_response_deleted():
    """
    The server playback addon holds references to flows that can be modified by the user in the meantime.
    One thing that can happen is that users remove the response object. This happens for example when doing a client
    replay at the same time.
    """
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        tctx.configure(sp)
        f1 = tflow.tflow(resp=True)
        f2 = tflow.tflow(resp=True)
        assert not sp.flowmap
        sp.load_flows([f1, f2])
        assert sp.flowmap
        # Simulate the user deleting responses after load (e.g. concurrent client replay).
        f1.response = f2.response = None
        # next_flow must skip response-less flows instead of crashing, and drop them.
        assert not sp.next_flow(f1)
        assert not sp.flowmap
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_dns_resolver.py | test/mitmproxy/addons/test_dns_resolver.py | import asyncio
import socket
import sys
import typing
import pytest
import mitmproxy_rs
from mitmproxy import dns
from mitmproxy.addons import dns_resolver
from mitmproxy.addons import proxyserver
from mitmproxy.addons.dns_resolver import GetaddrinfoFallbackResolver
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
async def test_ignores_reverse_mode():
    """_should_resolve is true for dns/wireguard-resolver flows, false for reverse mode."""
    resolver = dns_resolver.DnsResolver()
    with taddons.context(resolver, proxyserver.Proxyserver()):
        flow = tflow.tdnsflow()
        flow.client_conn.proxy_mode = ProxyMode.parse("dns")
        assert resolver._should_resolve(flow)
        # WireGuard mode with the built-in resolver address is resolved as well.
        flow.client_conn.proxy_mode = ProxyMode.parse("wireguard")
        flow.server_conn.address = ("10.0.0.53", 53)
        assert resolver._should_resolve(flow)
        flow.client_conn.proxy_mode = ProxyMode.parse("reverse:dns://8.8.8.8")
        assert not resolver._should_resolve(flow)
def _err():
raise RuntimeError("failed to get name servers")
async def test_name_servers(caplog, monkeypatch):
    """name_servers() prefers the option value, falls back to the system list, and logs failures."""
    dr = dns_resolver.DnsResolver()
    with taddons.context(dr) as tctx:
        # Default: the system's configured resolvers.
        assert dr.name_servers() == mitmproxy_rs.dns.get_system_dns_servers()
        # Explicit option overrides the system list.
        tctx.options.dns_name_servers = ["1.1.1.1"]
        assert dr.name_servers() == ["1.1.1.1"]
        # A failing system lookup yields an empty list plus an error log.
        monkeypatch.setattr(mitmproxy_rs.dns, "get_system_dns_servers", _err)
        tctx.options.dns_name_servers = []
        assert dr.name_servers() == []
        assert "Failed to get system dns servers" in caplog.text
async def lookup(name: str):
    """Fake DNS lookup covering the host names used by these tests.

    Known names return their fixed addresses; the remaining names raise the
    gaierror variant their name describes.
    """
    if name == "ipv4.example.com":
        return ["1.2.3.4"]
    if name == "ipv6.example.com":
        return ["::1"]
    if name == "no-a-records.example.com":
        raise socket.gaierror(socket.EAI_NODATA)
    if name == "no-network.example.com":
        raise socket.gaierror(socket.EAI_AGAIN)
    raise socket.gaierror(socket.EAI_NONAME)
async def getaddrinfo(host: str, *_, **__):
    """getaddrinfo-shaped wrapper around lookup(); only the sockaddr host field is filled in."""
    ips = await lookup(host)
    return [[None, None, None, None, [ip]] for ip in ips]
# Host names with distinct lookup outcomes; see lookup() above for each behavior.
Domain = typing.Literal[
    "nxdomain.example.com",
    "no-a-records.example.com",
    "no-network.example.com",
    "txt.example.com",
    "ipv4.example.com",
    "ipv6.example.com",
]
# We use literals here instead of bools because that makes the test easier to parse.
HostsFile = typing.Literal["hosts", "no-hosts"]
NameServers = typing.Literal["nameservers", "no-nameservers"]
@pytest.mark.parametrize("hosts_file", typing.get_args(HostsFile))
@pytest.mark.parametrize("name_servers", typing.get_args(NameServers))
@pytest.mark.parametrize("domain", typing.get_args(Domain))
async def test_lookup(
    domain: Domain, hosts_file: HostsFile, name_servers: NameServers, monkeypatch
):
    """Full matrix of DNS resolution outcomes across domain kind, name-server
    availability, and hosts-file usage, with all actual lookups faked."""
    if name_servers == "nameservers":
        # Fake a system resolver at 8.8.8.8 and route its lookups through lookup().
        monkeypatch.setattr(
            mitmproxy_rs.dns, "get_system_dns_servers", lambda: ["8.8.8.8"]
        )
        monkeypatch.setattr(
            mitmproxy_rs.dns.DnsResolver, "lookup_ipv4", lambda _, name: lookup(name)
        )
        monkeypatch.setattr(
            mitmproxy_rs.dns.DnsResolver, "lookup_ipv6", lambda _, name: lookup(name)
        )
    else:
        monkeypatch.setattr(mitmproxy_rs.dns, "get_system_dns_servers", lambda: [])
    # The hosts-file fallback path goes through the event loop's getaddrinfo.
    monkeypatch.setattr(asyncio.get_running_loop(), "getaddrinfo", getaddrinfo)
    dr = dns_resolver.DnsResolver()
    match domain:
        case "txt.example.com":
            typ = dns.types.TXT
        case "ipv6.example.com":
            typ = dns.types.AAAA
        case _:
            typ = dns.types.A
    with taddons.context(dr) as tctx:
        tctx.options.dns_use_hosts_file = hosts_file == "hosts"
        req = tutils.tdnsreq(
            questions=[
                dns.Question(domain, typ, dns.classes.IN),
            ]
        )
        flow = tflow.tdnsflow(req=req)
        await dr.dns_request(flow)
        match (domain, name_servers, hosts_file):
            case [_, "no-nameservers", "no-hosts"]:
                assert flow.error
            case ["nxdomain.example.com", _, _]:
                assert flow.response.response_code == dns.response_codes.NXDOMAIN
            case ["no-network.example.com", _, _]:
                assert flow.response.response_code == dns.response_codes.SERVFAIL
            case ["no-a-records.example.com", _, _]:
                if sys.platform == "win32":
                    # On Windows, EAI_NONAME and EAI_NODATA are the same constant (11001)...
                    assert flow.response.response_code == dns.response_codes.NXDOMAIN
                else:
                    assert flow.response.response_code == dns.response_codes.NOERROR
                    assert not flow.response.answers
            case ["txt.example.com", "nameservers", _]:
                # Non-A/AAAA queries are forwarded to the upstream resolver.
                assert flow.server_conn.address == ("8.8.8.8", 53)
            case ["txt.example.com", "no-nameservers", _]:
                assert flow.error
            case ["ipv4.example.com", "nameservers", _]:
                assert flow.response.answers[0].data == b"\x01\x02\x03\x04"
            case ["ipv4.example.com", "no-nameservers", "hosts"]:
                assert flow.response.answers[0].data == b"\x01\x02\x03\x04"
            case ["ipv6.example.com", "nameservers", _]:
                assert (
                    flow.response.answers[0].data
                    == b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
                )
            case ["ipv6.example.com", "no-nameservers", "hosts"]:
                assert (
                    flow.response.answers[0].data
                    == b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
                )
            case other:
                typing.assert_never(other)
async def test_unspec_lookup(monkeypatch):
    """The getaddrinfo fallback resolver returns the mocked AAAA record for an IPv6-only host."""
    loop = asyncio.get_running_loop()
    monkeypatch.setattr(loop, "getaddrinfo", getaddrinfo)
    resolver = GetaddrinfoFallbackResolver()
    result = await resolver.lookup_ip("ipv6.example.com")
    assert result == ["::1"]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_command_history.py | test/mitmproxy/addons/test_command_history.py | import os
from pathlib import Path
from unittest.mock import patch
from mitmproxy.addons import command_history
from mitmproxy.test import taddons
class TestCommandHistory:
    """Tests for the command_history addon: on-disk persistence, vacuuming,
    prev/next navigation, filtering, and sharing one history file between
    several addon instances."""

    def test_load_and_save(self, tmpdir):
        """An existing history file is loaded on configure; done() vacuums it."""
        history_file = tmpdir.join("command_history")
        commands = ["cmd1", "cmd2", "cmd3"]
        with open(history_file, "w") as f:
            f.write("\n".join(commands))

        ch = command_history.CommandHistory()
        # Tiny vacuum threshold so done() rewrites the file with only the tail.
        ch.VACUUM_SIZE = 4
        with taddons.context(ch) as tctx:
            tctx.options.confdir = str(tmpdir)
            assert ch.history == commands
            ch.add_command("cmd4")
            ch.done()

        with open(history_file) as f:
            assert f.read() == "cmd3\ncmd4\n"

    async def test_done_writing_failed(self, caplog):
        """A write failure on shutdown is logged, not raised."""
        ch = command_history.CommandHistory()
        ch.VACUUM_SIZE = 1
        with taddons.context(ch) as tctx:
            ch.history.append("cmd1")
            ch.history.append("cmd2")
            ch.history.append("cmd3")
            # Unwritable confdir forces the failure path in done().
            tctx.options.confdir = "/non/existent/path/foobar1234/"
            ch.done()
        assert "Failed writing to" in caplog.text

    def test_add_command(self):
        """Empty commands are not recorded."""
        ch = command_history.CommandHistory()
        with taddons.context(ch):
            ch.add_command("cmd1")
            ch.add_command("cmd2")
            assert ch.history == ["cmd1", "cmd2"]
            ch.add_command("")
            assert ch.history == ["cmd1", "cmd2"]

    async def test_add_command_failed(self, caplog):
        """A write failure while appending a command is logged, not raised."""
        ch = command_history.CommandHistory()
        with taddons.context(ch) as tctx:
            tctx.options.confdir = "/non/existent/path/foobar1234/"
            ch.add_command("cmd1")
        assert "Failed writing to" in caplog.text

    def test_get_next_and_prev(self, tmpdir):
        """prev/next walk the history; the cursor clamps at both ends and
        resets to the blank entry after the newest command."""
        ch = command_history.CommandHistory()

        with taddons.context(ch) as tctx:
            tctx.options.confdir = str(tmpdir)

            ch.add_command("cmd1")

            assert ch.get_next() == ""
            assert ch.get_next() == ""
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == ""
            assert ch.get_next() == ""

            ch.add_command("cmd2")

            assert ch.get_next() == ""
            assert ch.get_next() == ""
            assert ch.get_prev() == "cmd2"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == "cmd2"
            assert ch.get_next() == ""
            assert ch.get_next() == ""

            ch.add_command("cmd3")

            assert ch.get_next() == ""
            assert ch.get_next() == ""
            assert ch.get_prev() == "cmd3"
            assert ch.get_prev() == "cmd2"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == "cmd2"
            assert ch.get_next() == "cmd3"
            assert ch.get_next() == ""
            assert ch.get_next() == ""
            assert ch.get_prev() == "cmd3"
            assert ch.get_prev() == "cmd2"

            # Adding a command while mid-navigation resets the cursor to the end.
            ch.add_command("cmd4")

            assert ch.get_prev() == "cmd4"
            assert ch.get_prev() == "cmd3"
            assert ch.get_prev() == "cmd2"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == "cmd2"
            assert ch.get_next() == "cmd3"
            assert ch.get_next() == "cmd4"
            assert ch.get_next() == ""
            assert ch.get_next() == ""

            ch.add_command("cmd5")
            ch.add_command("cmd6")

            assert ch.get_next() == ""
            assert ch.get_prev() == "cmd6"
            assert ch.get_prev() == "cmd5"
            assert ch.get_prev() == "cmd4"
            assert ch.get_next() == "cmd5"
            assert ch.get_prev() == "cmd4"
            assert ch.get_prev() == "cmd3"
            assert ch.get_prev() == "cmd2"
            assert ch.get_next() == "cmd3"
            assert ch.get_prev() == "cmd2"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == "cmd2"
            assert ch.get_next() == "cmd3"
            assert ch.get_next() == "cmd4"
            assert ch.get_next() == "cmd5"
            assert ch.get_next() == "cmd6"
            assert ch.get_next() == ""
            assert ch.get_next() == ""

            ch.clear_history()

    def test_clear(self, tmpdir):
        """clear_history wipes both the in-memory list and the saved file."""
        ch = command_history.CommandHistory()

        with taddons.context(ch) as tctx:
            tctx.options.confdir = str(tmpdir)
            ch.add_command("cmd1")
            ch.add_command("cmd2")
            ch.clear_history()

            saved_commands = ch.get_history()
            assert saved_commands == []

            assert ch.get_next() == ""
            assert ch.get_next() == ""
            assert ch.get_prev() == ""
            assert ch.get_prev() == ""

            ch.clear_history()

    async def test_clear_failed(self, monkeypatch, caplog):
        """A failed unlink of the history file is logged, not raised."""
        ch = command_history.CommandHistory()

        with taddons.context(ch) as tctx:
            tctx.options.confdir = "/non/existent/path/foobar1234/"
            # Pretend the file exists, then make deletion blow up.
            with patch.object(Path, "exists") as mock_exists:
                mock_exists.return_value = True
                with patch.object(Path, "unlink") as mock_unlink:
                    mock_unlink.side_effect = IOError()
                    ch.clear_history()

        assert "Failed deleting" in caplog.text

    def test_filter(self, tmpdir):
        """set_filter restricts navigation to commands matching the prefix."""
        ch = command_history.CommandHistory()

        with taddons.context(ch) as tctx:
            tctx.options.confdir = str(tmpdir)
            ch.add_command("cmd1")
            ch.add_command("cmd2")
            ch.add_command("abc")
            ch.set_filter("c")

            assert ch.get_next() == "c"
            assert ch.get_next() == "c"
            assert ch.get_prev() == "cmd2"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == "cmd2"
            assert ch.get_next() == "c"
            assert ch.get_next() == "c"

            # An empty filter restores the full history.
            ch.set_filter("")

            assert ch.get_next() == ""
            assert ch.get_next() == ""
            assert ch.get_prev() == "abc"
            assert ch.get_prev() == "cmd2"
            assert ch.get_prev() == "cmd1"
            assert ch.get_prev() == "cmd1"
            assert ch.get_next() == "cmd2"
            assert ch.get_next() == "abc"
            assert ch.get_next() == ""
            assert ch.get_next() == ""

            ch.clear_history()

    def test_multiple_instances(self, tmpdir):
        """Several instances share one history file; each only re-reads it
        when it writes a new command itself."""
        ch = command_history.CommandHistory()
        with taddons.context(ch) as tctx:
            tctx.options.confdir = str(tmpdir)

            instances = [
                command_history.CommandHistory(),
                command_history.CommandHistory(),
                command_history.CommandHistory(),
            ]

            for i in instances:
                i.configure("command_history")
                saved_commands = i.get_history()
                assert saved_commands == []

            instances[0].add_command("cmd1")
            saved_commands = instances[0].get_history()
            assert saved_commands == ["cmd1"]

            # These instances haven't yet added a new command, so they haven't
            # yet reloaded their commands from the command file.
            # This is expected, because if the user is filtering a command on
            # another window, we don't want to interfere with that
            saved_commands = instances[1].get_history()
            assert saved_commands == []
            saved_commands = instances[2].get_history()
            assert saved_commands == []

            # Since the second instance added a new command, its list of
            # saved commands has been updated to have the commands from the
            # first instance + its own commands
            instances[1].add_command("cmd2")
            saved_commands = instances[1].get_history()
            assert saved_commands == ["cmd2"]

            saved_commands = instances[0].get_history()
            assert saved_commands == ["cmd1"]

            # Third instance is still empty as it has not yet ran any command
            saved_commands = instances[2].get_history()
            assert saved_commands == []

            instances[2].add_command("cmd3")
            saved_commands = instances[2].get_history()
            assert saved_commands == ["cmd3"]

            instances[0].add_command("cmd4")
            saved_commands = instances[0].get_history()
            assert saved_commands == ["cmd1", "cmd4"]

            # A freshly configured instance sees everything written so far.
            instances.append(command_history.CommandHistory())
            instances[3].configure("command_history")
            saved_commands = instances[3].get_history()
            assert saved_commands == ["cmd1", "cmd2", "cmd3", "cmd4"]

            instances[0].add_command("cmd_before_close")
            instances.pop(0).done()

            saved_commands = instances[0].get_history()
            assert saved_commands == ["cmd2"]

            instances[0].add_command("new_cmd")
            saved_commands = instances[0].get_history()
            assert saved_commands == ["cmd2", "new_cmd"]

            instances.pop(0).done()
            instances.pop(0).done()
            instances.pop(0).done()

            # The file on disk contains the union of all instances' commands.
            _path = os.path.join(tctx.options.confdir, "command_history")
            lines = open(_path).readlines()
            saved_commands = [cmd.strip() for cmd in lines]
            assert saved_commands == [
                "cmd1",
                "cmd2",
                "cmd3",
                "cmd4",
                "cmd_before_close",
                "new_cmd",
            ]

            instances = [command_history.CommandHistory(), command_history.CommandHistory()]

            for i in instances:
                i.configure("command_history")
                i.clear_history()
                saved_commands = i.get_history()
                assert saved_commands == []

            instances[0].add_command("cmd1")
            instances[0].add_command("cmd2")
            instances[1].add_command("cmd3")
            instances[1].add_command("cmd4")
            instances[1].add_command("cmd5")

            saved_commands = instances[1].get_history()
            assert saved_commands == ["cmd3", "cmd4", "cmd5"]

            instances.pop().done()
            instances.pop().done()

            _path = os.path.join(tctx.options.confdir, "command_history")
            lines = open(_path).readlines()
            saved_commands = [cmd.strip() for cmd in lines]
            assert saved_commands == ["cmd1", "cmd2", "cmd3", "cmd4", "cmd5"]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_script.py | test/mitmproxy/addons/test_script.py | import asyncio
import os
import re
import sys
import traceback
import pytest
from mitmproxy import addonmanager
from mitmproxy import exceptions
from mitmproxy.addons import script
from mitmproxy.proxy.layers.http import HttpRequestHook
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.tools import main
# We want this to be speedy for testing: shrink the script reload interval
# so the file-watching tests below don't have to wait for the default period.
script.ReloadInterval = 0.1
def test_load_script(tmp_path, tdata, caplog):
    """load_script: a valid script exposes addons; missing files and
    syntax errors are logged rather than raised."""
    recorder = tdata.path("mitmproxy/data/addonscripts/recorder/recorder.py")
    ns = script.load_script(recorder)
    assert ns.addons

    script.load_script("nonexistent")
    assert "FileNotFoundError" in caplog.text

    broken = tmp_path / "error.py"
    broken.write_text("this is invalid syntax")
    script.load_script(str(broken))
    assert "invalid syntax" in caplog.text
def test_load_fullname(tdata):
    """
    Loading two scripts that both live at .../foo.py must work.
    That only succeeds if each gets a distinct module basename assigned.
    """
    first = script.load_script(tdata.path("mitmproxy/data/addonscripts/addon.py"))
    assert first.addons

    second = script.load_script(
        tdata.path("mitmproxy/data/addonscripts/same_filename/addon.py")
    )
    assert first.name != second.name
    assert not hasattr(second, "addons")
class TestScript:
    """Tests for the single-script addon wrapper (script.Script)."""

    def test_notfound(self):
        """A nonexistent script path raises OptionsError at construction."""
        with taddons.context():
            with pytest.raises(exceptions.OptionsError):
                script.Script("nonexistent", False)

    def test_quotes_around_filename(self, tdata):
        """
        Test that a script specified as '"foo.py"' works to support the calling convention of
        mitmproxy 2.0, as e.g. used by Cuckoo Sandbox.
        """
        path = tdata.path("mitmproxy/data/addonscripts/recorder/recorder.py")
        s = script.Script(f'"{path}"', False)
        assert '"' not in s.fullpath

    async def test_simple(self, tdata, caplog_async):
        """Loading the recorder script fires its hooks in order."""
        caplog_async.set_level("DEBUG")
        sc = script.Script(
            tdata.path("mitmproxy/data/addonscripts/recorder/recorder.py"),
            True,
        )
        with taddons.context(sc) as tctx:
            tctx.configure(sc)
            await caplog_async.await_log("recorder configure")
            rec = tctx.master.addons.get("recorder")

            assert rec.call_log[0][0:2] == ("recorder", "load")

            rec.call_log = []
            f = tflow.tflow(resp=True)
            tctx.master.addons.trigger(HttpRequestHook(f))

            assert rec.call_log[0][1] == "request"
            sc.done()

    async def test_reload(self, tmp_path, caplog_async):
        """Touching the script file triggers a reload."""
        caplog_async.set_level("INFO")
        with taddons.context() as tctx:
            f = tmp_path / "foo.py"
            f.write_text("\n")
            sc = script.Script(str(f), True)
            tctx.configure(sc)
            await caplog_async.await_log("Loading")
            caplog_async.clear()

            for i in range(20):
                # Some filesystems only have second-level granularity,
                # so just writing once again is not good enough.
                f.write_text("\n")
                if "Loading" in caplog_async.caplog.text:
                    break
                await asyncio.sleep(0.1)
            else:
                raise AssertionError("No reload seen")
            sc.done()

    async def test_exception(self, tdata, caplog_async):
        """Exceptions raised inside a script hook are logged with the script name."""
        caplog_async.set_level("INFO")
        with taddons.context() as tctx:
            sc = script.Script(
                tdata.path("mitmproxy/data/addonscripts/error.py"),
                True,
            )
            tctx.master.addons.add(sc)
            await caplog_async.await_log("error load")
            tctx.configure(sc)

            f = tflow.tflow(resp=True)
            tctx.master.addons.trigger(HttpRequestHook(f))

            await caplog_async.await_log("ValueError: Error!")
            await caplog_async.await_log("error.py")
            sc.done()

    def test_import_error(self, monkeypatch, tdata, caplog):
        """In frozen (binary) builds, an ImportError gets an explanatory hint."""
        monkeypatch.setattr(sys, "frozen", True, raising=False)
        script.Script(
            tdata.path("mitmproxy/data/addonscripts/import_error.py"),
            reload=False,
        )
        assert (
            "Note that mitmproxy's binaries include their own Python environment"
            in caplog.text
        )

    def test_configure_error(self, tdata, caplog):
        """An OptionsError raised in a script's configure hook is logged."""
        with taddons.context():
            script.Script(
                tdata.path("mitmproxy/data/addonscripts/configure.py"),
                False,
            )
            assert "Options Error" in caplog.text

    async def test_addon(self, tdata, caplog_async):
        """Script-level and addon-level hooks fire in the documented order."""
        caplog_async.set_level("INFO")
        with taddons.context() as tctx:
            sc = script.Script(tdata.path("mitmproxy/data/addonscripts/addon.py"), True)
            tctx.master.addons.add(sc)
            await caplog_async.await_log("addon running")
            assert sc.ns.event_log == [
                "scriptload",
                "addonload",
                "scriptconfigure",
                "addonconfigure",
            ]
            sc.done()
class TestCutTraceback:
    def raise_(self, i):
        # Recurse i times before raising so the traceback contains a known
        # number of raise_ frames (i + 1) below the calling test frame.
        if i > 0:
            self.raise_(i - 1)
        raise RuntimeError()

    def test_simple(self):
        """cut_traceback removes frames up to (and including) the named
        function; an unknown name leaves the traceback unchanged."""
        try:
            self.raise_(4)
        except RuntimeError:
            tb = sys.exc_info()[2]
            tb_cut = addonmanager.cut_traceback(tb, "test_simple")
            # 5 frames remain: raise_(4) down to raise_(0).
            assert len(traceback.extract_tb(tb_cut)) == 5
            tb_cut2 = addonmanager.cut_traceback(tb, "nonexistent")
            assert len(traceback.extract_tb(tb_cut2)) == len(traceback.extract_tb(tb))
class TestScriptLoader:
    """Tests for the ScriptLoader addon that manages the `scripts` option."""

    async def test_script_run(self, tdata, caplog_async):
        """script_run replays the full hook lifecycle on the given flows."""
        caplog_async.set_level("DEBUG")
        rp = tdata.path("mitmproxy/data/addonscripts/recorder/recorder.py")
        sc = script.ScriptLoader()
        with taddons.context(sc):
            sc.script_run([tflow.tflow(resp=True)], rp)
            await caplog_async.await_log("recorder response")
            debug = [
                i.msg for i in caplog_async.caplog.records if i.levelname == "DEBUG"
            ]
            assert debug == [
                "recorder configure",
                "recorder running",
                "recorder requestheaders",
                "recorder request",
                "recorder responseheaders",
                "recorder response",
            ]

    async def test_script_run_nonexistent(self, caplog):
        """script_run with a bad path logs an error instead of raising."""
        sc = script.ScriptLoader()
        sc.script_run([tflow.tflow(resp=True)], "/")
        assert "No such script" in caplog.text

    async def test_simple(self, tdata):
        """Setting/clearing the scripts option adds/removes child addons
        without changing the top-level addon count."""
        sc = script.ScriptLoader()
        with taddons.context(loadcore=False) as tctx:
            tctx.master.addons.add(sc)
            sc.running()
            assert len(tctx.master.addons) == 1
            tctx.master.options.update(
                scripts=[tdata.path("mitmproxy/data/addonscripts/recorder/recorder.py")]
            )
            assert len(tctx.master.addons) == 1
            assert len(sc.addons) == 1
            tctx.master.options.update(scripts=[])
            assert len(tctx.master.addons) == 1
            assert len(sc.addons) == 0

    def test_dupes(self):
        """Duplicate script paths are rejected with an OptionsError."""
        sc = script.ScriptLoader()
        with taddons.context(sc) as tctx:
            with pytest.raises(exceptions.OptionsError):
                tctx.configure(sc, scripts=["one", "one"])

    async def test_script_deletion(self, tdata, caplog_async):
        """Deleting a loaded script file unloads it and clears the option."""
        caplog_async.set_level("INFO")
        tdir = tdata.path("mitmproxy/data/addonscripts/")
        with open(tdir + "/dummy.py", "w") as f:
            f.write("\n")

        with taddons.context() as tctx:
            sl = script.ScriptLoader()
            tctx.master.addons.add(sl)
            tctx.configure(
                sl, scripts=[tdata.path("mitmproxy/data/addonscripts/dummy.py")]
            )
            await caplog_async.await_log("Loading")

            os.remove(tdata.path("mitmproxy/data/addonscripts/dummy.py"))
            await caplog_async.await_log("Removing")
            await asyncio.sleep(0.1)
            assert not tctx.options.scripts
            assert not sl.addons

    async def test_order(self, tdata, caplog_async):
        """Scripts get their hooks in option order; reordering only
        reconfigures, and removed scripts are torn down in reverse order."""
        caplog_async.set_level("DEBUG")
        rec = tdata.path("mitmproxy/data/addonscripts/recorder")
        sc = script.ScriptLoader()
        sc.is_running = True
        with taddons.context(sc) as tctx:
            tctx.configure(
                sc,
                scripts=[
                    "%s/a.py" % rec,
                    "%s/b.py" % rec,
                    "%s/c.py" % rec,
                ],
            )
            await caplog_async.await_log("configure")
            debug = [
                i.msg for i in caplog_async.caplog.records if i.levelname == "DEBUG"
            ]
            assert debug == [
                "a load",
                "a configure",
                "a running",
                "b load",
                "b configure",
                "b running",
                "c load",
                "c configure",
                "c running",
            ]
            caplog_async.clear()
            tctx.configure(
                sc,
                scripts=[
                    "%s/c.py" % rec,
                    "%s/a.py" % rec,
                    "%s/b.py" % rec,
                ],
            )
            await caplog_async.await_log("b configure")
            debug = [
                i.msg for i in caplog_async.caplog.records if i.levelname == "DEBUG"
            ]
            assert debug == [
                "c configure",
                "a configure",
                "b configure",
            ]
            caplog_async.clear()
            tctx.configure(
                sc,
                scripts=[
                    "%s/e.py" % rec,
                    "%s/a.py" % rec,
                ],
            )
            await caplog_async.await_log("e configure")
            debug = [
                i.msg for i in caplog_async.caplog.records if i.levelname == "DEBUG"
            ]
            assert debug == [
                "c done",
                "b done",
                "a configure",
                "e load",
                "e configure",
                "e running",
            ]

            # stop reload tasks
            tctx.configure(sc, scripts=[])
def test_order(tdata, capsys):
    """Integration test: Make sure that the runtime hooks are triggered on startup in the correct order."""
    args = [
        "-n",
        "-s",
        tdata.path("mitmproxy/data/addonscripts/recorder/recorder.py"),
        "-s",
        tdata.path("mitmproxy/data/addonscripts/shutdown.py"),
    ]
    main.mitmdump(args)

    # Every log line starts with a [HH:MM:SS.mmm] timestamp.
    time = r"\[[\d:.]+\] "
    expected = (
        rf"{time}Loading script.+recorder.py\n"
        rf"{time}\('recorder', 'load', .+\n"
        rf"{time}\('recorder', 'configure', .+\n"
        rf"{time}Loading script.+shutdown.py\n"
        rf"{time}\('recorder', 'running', .+\n"
        rf"{time}\('recorder', 'done', .+\n$"
    )
    out = capsys.readouterr().out
    assert re.match(expected, out)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_clientplayback.py | test/mitmproxy/addons/test_clientplayback.py | import asyncio
import ssl
from contextlib import asynccontextmanager
import pytest
from mitmproxy.addons.clientplayback import ClientPlayback
from mitmproxy.addons.clientplayback import ReplayHandler
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.addons.tlsconfig import TlsConfig
from mitmproxy.connection import Address
from mitmproxy.exceptions import CommandError
from mitmproxy.exceptions import OptionsError
from mitmproxy.test import taddons
from mitmproxy.test import tflow
@asynccontextmanager
async def tcp_server(handle_conn, **server_args) -> Address:
    """TCP server context manager that...

    1. Exits only after all handlers have returned.
    2. Ensures that all handlers are closed properly. If we don't do that,
       we get ghost errors in others tests from StreamWriter.__del__.

    Spawning a TCP server is relatively slow. Consider using in-memory networking for faster tests.
    """
    if not hasattr(asyncio, "TaskGroup"):
        pytest.skip("Skipped because asyncio.TaskGroup is unavailable.")

    # One task per connection; exiting the TaskGroup waits for all of them.
    tasks = asyncio.TaskGroup()

    async def handle_conn_wrapper(
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
    ) -> None:
        try:
            await handle_conn(reader, writer)
        except Exception as e:
            # Surface handler failures in test output; re-raise so the
            # TaskGroup propagates them to the test body.
            print(f"!!! TCP handler failed: {e}")
            raise
        finally:
            if not writer.is_closing():
                writer.close()
            await writer.wait_closed()

    async def _handle(r, w):
        tasks.create_task(handle_conn_wrapper(r, w))

    server = await asyncio.start_server(_handle, "127.0.0.1", 0, **server_args)
    await server.start_serving()
    async with server:
        async with tasks:
            # Yield the actual (host, port) the OS assigned.
            yield server.sockets[0].getsockname()
@pytest.mark.parametrize("mode", ["http", "https", "upstream", "err"])
@pytest.mark.parametrize("concurrency", [-1, 1])
async def test_playback(tdata, mode, concurrency):
    """Replay a recorded flow against a local server in several proxy modes
    and check the request wire format and the recorded response."""

    async def handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        if mode == "err":
            # Close immediately without answering to exercise the error path.
            return
        req = await reader.readline()
        if mode == "upstream":
            # Upstream mode sends an absolute-form request line.
            assert req == b"GET http://address:22/path HTTP/1.1\r\n"
        else:
            assert req == b"GET /path HTTP/1.1\r\n"
        req = await reader.readuntil(b"data")
        assert req == (
            b"header: qvalue\r\n"
            b"content-length: 4\r\nHost: example.mitmproxy.org\r\n\r\n"
            b"data"
        )
        writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
        await writer.drain()
        assert not await reader.read()

    cp = ClientPlayback()
    ps = Proxyserver()
    tls = TlsConfig()
    with taddons.context(cp, ps, tls) as tctx:
        tctx.configure(cp, client_replay_concurrency=concurrency)
        server_args = {}
        if mode == "https":
            # Serve TLS with the bundled test leaf cert and trust its root.
            server_args["ssl"] = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            server_args["ssl"].load_cert_chain(
                certfile=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-leaf.crt"
                ),
                keyfile=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-leaf.key"
                ),
            )
            tctx.configure(
                tls,
                ssl_verify_upstream_trusted_ca=tdata.path(
                    "mitmproxy/net/data/verificationcerts/trusted-root.crt"
                ),
            )
        async with tcp_server(handler, **server_args) as addr:
            cp.running()
            flow = tflow.tflow(live=False)
            flow.request.content = b"data"
            if mode == "upstream":
                tctx.options.mode = [f"upstream:http://{addr[0]}:{addr[1]}"]
                flow.request.authority = f"{addr[0]}:{addr[1]}"
                flow.request.host, flow.request.port = "address", 22
            else:
                flow.request.host, flow.request.port = addr
            if mode == "https":
                flow.request.scheme = "https"
                # Used for SNI
                flow.request.host_header = "example.mitmproxy.org"
            cp.start_replay([flow])
            assert cp.count() == 1
            await asyncio.wait_for(cp.queue.join(), 5)
            # Wait until the replay task has fully finished, not just dequeued.
            while cp.replay_tasks:
                await asyncio.sleep(0.001)
            if mode != "err":
                assert flow.response.status_code == 204
        await cp.done()
async def test_playback_https_upstream():
    """A refused CONNECT from the upstream proxy leaves no response and
    records a descriptive error on the replayed flow."""

    async def handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        conn_req = await reader.readuntil(b"\r\n\r\n")
        assert conn_req == b"CONNECT address:22 HTTP/1.1\r\nHost: address:22\r\n\r\n"
        # Refuse the tunnel.
        writer.write(b"HTTP/1.1 502 Bad Gateway\r\n\r\n")
        await writer.drain()
        assert not await reader.read()

    cp = ClientPlayback()
    ps = Proxyserver()
    with taddons.context(cp, ps) as tctx:
        tctx.configure(cp)
        async with tcp_server(handler) as addr:
            cp.running()
            flow = tflow.tflow(live=False)
            flow.request.scheme = b"https"
            flow.request.content = b"data"
            tctx.options.mode = [f"upstream:http://{addr[0]}:{addr[1]}"]
            cp.start_replay([flow])
            assert cp.count() == 1
            await asyncio.wait_for(cp.queue.join(), 5)
            assert flow.response is None
            assert (
                str(flow.error)
                == f"Upstream proxy {addr[0]}:{addr[1]} refused HTTP CONNECT request: 502 Bad Gateway"
            )
        await cp.done()
async def test_playback_crash(monkeypatch, caplog_async):
    """An exception inside ReplayHandler.replay is logged and the flow is
    removed from the queue instead of hanging the addon."""

    async def explode(*_, **__):
        raise ValueError("oops")

    monkeypatch.setattr(ReplayHandler, "replay", explode)

    cp = ClientPlayback()
    with taddons.context(cp):
        cp.running()
        cp.start_replay([tflow.tflow(live=False)])
        await caplog_async.await_log("Client replay has crashed!")
        assert "oops" in caplog_async.caplog.text
        assert cp.count() == 0
        await cp.done()
def test_check():
    """check() names the reason a flow cannot be replayed."""
    cp = ClientPlayback()

    live = tflow.tflow(resp=True)
    live.live = True
    assert "live flow" in cp.check(live)

    intercepted = tflow.tflow(resp=True, live=False)
    intercepted.intercepted = True
    assert "intercepted flow" in cp.check(intercepted)

    no_request = tflow.tflow(resp=True, live=False)
    no_request.request = None
    assert "missing request" in cp.check(no_request)

    no_content = tflow.tflow(resp=True, live=False)
    no_content.request.raw_content = None
    assert "missing content" in cp.check(no_content)

    for non_http in (tflow.ttcpflow(), tflow.tudpflow()):
        non_http.live = False
        assert "Can only replay HTTP" in cp.check(non_http)
async def test_start_stop(caplog_async):
    """start_replay queues HTTP flows and rejects WebSocket flows with a log
    message; stop_replay empties the queue.

    Fix: dropped the unused `tdata` fixture parameter — it was requested but
    never referenced in the test body.
    """
    cp = ClientPlayback()
    with taddons.context(cp):
        cp.start_replay([tflow.tflow(live=False)])
        assert cp.count() == 1

        ws_flow = tflow.twebsocketflow()
        ws_flow.live = False
        cp.start_replay([ws_flow])
        await caplog_async.await_log("Can't replay WebSocket flows.")
        # The rejected flow must not have been queued.
        assert cp.count() == 1

        cp.stop_replay()
        assert cp.count() == 0
def test_load(tdata):
    """load_file adds flows from a dump file; missing paths raise CommandError."""
    playback = ClientPlayback()
    with taddons.context(playback):
        playback.load_file(tdata.path("mitmproxy/data/dumpfile-018.mitm"))
        assert playback.count() == 1

        with pytest.raises(CommandError):
            playback.load_file("/nonexistent")
        # A failed load must not change the queue.
        assert playback.count() == 1
def test_configure(tdata):
    """client_replay and client_replay_concurrency options are validated on configure."""
    playback = ClientPlayback()
    with taddons.context(playback) as tctx:
        assert playback.count() == 0

        dump = tdata.path("mitmproxy/data/dumpfile-018.mitm")
        tctx.configure(playback, client_replay=[dump])
        assert playback.count() == 1

        tctx.configure(playback, client_replay=[])
        with pytest.raises(OptionsError):
            tctx.configure(playback, client_replay=["nonexistent"])

        # -1 means unlimited concurrency; anything below is invalid.
        tctx.configure(playback, client_replay_concurrency=-1)
        with pytest.raises(OptionsError):
            tctx.configure(playback, client_replay_concurrency=-2)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_comment.py | test/mitmproxy/addons/test_comment.py | from mitmproxy.addons.comment import Comment
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_comment():
    """The comment command stores the given text on each flow."""
    addon = Comment()
    flow = tflow.tflow()
    with taddons.context():
        addon.comment([flow], "foo")
    assert flow.comment == "foo"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_proxyserver.py | test/mitmproxy/addons/test_proxyserver.py | from __future__ import annotations
import asyncio
import ssl
from collections.abc import AsyncGenerator
from collections.abc import Callable
from contextlib import asynccontextmanager
from dataclasses import dataclass
from typing import Any
from typing import ClassVar
from typing import TypeVar
from unittest.mock import Mock
import pytest
from aioquic.asyncio.protocol import QuicConnectionProtocol
from aioquic.asyncio.server import QuicServer
from aioquic.h3 import events as h3_events
from aioquic.h3.connection import FrameUnexpected
from aioquic.h3.connection import H3Connection
from aioquic.quic import events as quic_events
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import QuicConnection
from aioquic.quic.connection import QuicConnectionError
from .test_clientplayback import tcp_server
import mitmproxy.platform
import mitmproxy_rs
from mitmproxy import dns
from mitmproxy import exceptions
from mitmproxy.addons import dns_resolver
from mitmproxy.addons.next_layer import NextLayer
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.addons.tlsconfig import TlsConfig
from mitmproxy.connection import Address
from mitmproxy.proxy import layers
from mitmproxy.proxy import server_hooks
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test.tflow import tclient_conn
from mitmproxy.test.tflow import tserver_conn
from mitmproxy.test.tutils import tdnsreq
from mitmproxy.utils import data
tlsdata = data.Data(__name__)
class HelperAddon:
    """Minimal addon that records every flow passed to its request/tcp_start hooks."""

    def __init__(self):
        # Flows observed so far, in hook-invocation order.
        self.flows = []

    def request(self, flow):
        self.flows.append(flow)

    def tcp_start(self, flow):
        self.flows.append(flow)
async def test_start_stop(caplog_async):
    """Full lifecycle: start the regular proxy, proxy one request through it,
    then stop it and verify everything is torn down."""
    caplog_async.set_level("INFO")

    async def server_handler(
        reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ):
        assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n"
        writer.write(b"HTTP/1.1 204 No Content\r\n\r\n")
        await writer.drain()

    ps = Proxyserver()
    nl = NextLayer()
    state = HelperAddon()
    with taddons.context(ps, nl, state) as tctx:
        async with tcp_server(server_handler) as addr:
            tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
            assert not ps.servers
            assert await ps.setup_servers()
            ps.running()
            await caplog_async.await_log("HTTP(S) proxy listening at")
            assert ps.servers
            proxy_addr = ps.listen_addrs()[0]

            # Issue one absolute-form request through the proxy.
            reader, writer = await asyncio.open_connection(*proxy_addr)
            req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n"
            writer.write(req.encode())
            assert (
                await reader.readuntil(b"\r\n\r\n")
                == b"HTTP/1.1 204 No Content\r\n\r\n"
            )

            assert repr(ps) == "Proxyserver(1 active conns)"
            assert ps.active_connections() == 1

            await (
                ps.setup_servers()
            )  # assert this can always be called without side effects
            tctx.configure(ps, server=False)
            await caplog_async.await_log("stopped")
            if ps.servers.is_updating:
                async with ps.servers._lock:
                    pass  # wait until start/stop is finished.
            assert not ps.servers

            # The helper addon saw the proxied flow.
            assert state.flows
            assert state.flows[0].request.path == "/hello"
            assert state.flows[0].response.status_code == 204

            writer.close()
            await writer.wait_closed()
    await _wait_for_connection_closes(ps)
async def _wait_for_connection_closes(ps: Proxyserver):
    """Block until all proxy connection handlers have finished, then assert
    that the server tracks no remaining connections."""
    # Waiting here until everything is really torn down... takes some effort.
    client_handlers = [
        conn_handler.transports[conn_handler.client].handler
        for conn_handler in ps.connections.values()
        if conn_handler.client in conn_handler.transports
    ]
    for client_handler in client_handlers:
        try:
            await asyncio.wait_for(client_handler, 5)
        except asyncio.CancelledError:
            pass
    for _ in range(5):
        # Get all other scheduled coroutines to run.
        await asyncio.sleep(0)
    assert not ps.connections
async def test_inject() -> None:
    """inject_tcp sends extra bytes into a live TCP flow, either toward the
    server (echoed back upper-cased) or toward the client (as-is)."""

    async def server_handler(
        reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ):
        # Echo every byte back upper-cased, so we can tell which side
        # injected data actually reached.
        while s := await reader.read(1):
            writer.write(s.upper())

    ps = Proxyserver()
    nl = NextLayer()
    state = HelperAddon()
    with taddons.context(ps, nl, state) as tctx:
        async with tcp_server(server_handler) as addr:
            tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
            assert await ps.setup_servers()
            ps.running()
            proxy_addr = ps.servers["regular"].listen_addrs[0]
            reader, writer = await asyncio.open_connection(*proxy_addr)

            # Open a raw TCP tunnel through the proxy.
            req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n"
            writer.write(req.encode())
            assert (
                await reader.readuntil(b"\r\n\r\n")
                == b"HTTP/1.1 200 Connection established\r\n\r\n"
            )

            writer.write(b"a")
            assert await reader.read(1) == b"A"
            # to_client=False: goes to the server, comes back upper-cased.
            ps.inject_tcp(state.flows[0], False, b"b")
            assert await reader.read(1) == b"B"
            # to_client=True: delivered to the client unchanged.
            ps.inject_tcp(state.flows[0], True, b"c")
            assert await reader.read(1) == b"c"

            writer.close()
            await writer.wait_closed()
    await _wait_for_connection_closes(ps)
async def test_inject_fail(caplog) -> None:
    """Injecting into mismatched or non-live flows logs an error instead of raising."""
    proxy = Proxyserver()
    payload = b"test"

    # Wrong flow type for each injection command:
    proxy.inject_websocket(tflow.tflow(), True, payload)
    assert "Cannot inject WebSocket messages into non-WebSocket flows." in caplog.text
    proxy.inject_tcp(tflow.tflow(), True, payload)
    assert "Cannot inject TCP messages into non-TCP flows." in caplog.text
    proxy.inject_udp(tflow.tflow(), True, payload)
    assert "Cannot inject UDP messages into non-UDP flows." in caplog.text

    # Matching flow type, but the flow has no live connection:
    proxy.inject_udp(tflow.tudpflow(), True, payload)
    assert "Flow is not from a live connection." in caplog.text
    proxy.inject_websocket(tflow.twebsocketflow(), True, payload)
    assert "Flow is not from a live connection." in caplog.text
    proxy.inject_websocket(tflow.ttcpflow(), True, payload)
    assert "Cannot inject WebSocket messages into non-WebSocket flows" in caplog.text
async def test_warn_no_nextlayer(caplog):
    """
    Starting the proxy server without the NextLayer addon must log a warning —
    forgetting that addon is an easy trap when writing end-to-end tests.
    """
    proxy = Proxyserver()
    with taddons.context(proxy) as tctx:
        tctx.configure(proxy, listen_host="127.0.0.1", listen_port=0, server=False)
        assert await proxy.setup_servers()
        proxy.running()
        assert "Warning: Running proxyserver without nextlayer addon!" in caplog.text
async def test_self_connect():
    """Connecting the proxy to one of its own listen addresses must be refused."""
    server = tserver_conn()
    client = tclient_conn()
    server.address = ("localhost", 8080)
    proxy = Proxyserver()
    with taddons.context(proxy) as tctx:
        tctx.configure(proxy, listen_host="127.0.0.1", listen_port=0)
        assert await proxy.setup_servers()
        proxy.running()
        assert proxy.servers
        # Point the upstream address at our own listening port.
        own_port = proxy.servers["regular"].listen_addrs[0][1]
        server.address = ("localhost", own_port)
        proxy.server_connect(server_hooks.ServerConnectionHookData(server, client))
        assert "Request destination unknown" in server.error
        tctx.configure(proxy, server=False)
        assert await proxy.setup_servers()
    await _wait_for_connection_closes(proxy)
def test_options():
    """Invalid proxyserver options raise OptionsError; valid values are accepted."""
    proxy = Proxyserver()
    with taddons.context(proxy) as tctx:
        # Size options share the same "human-readable size" validation.
        for option in ("stream_large_bodies", "body_size_limit"):
            with pytest.raises(exceptions.OptionsError):
                tctx.configure(proxy, **{option: "invalid"})
            tctx.configure(proxy, **{option: "1m"})

        with pytest.raises(exceptions.OptionsError):
            tctx.configure(proxy, connect_addr="invalid")
        tctx.configure(proxy, connect_addr="1.2.3.4")
        assert proxy._connect_addr == ("1.2.3.4", 0)

        for bad_modes in (["invalid!"], ["regular", "reverse:example.com"]):
            with pytest.raises(exceptions.OptionsError):
                tctx.configure(proxy, mode=bad_modes)
        tctx.configure(proxy, mode=["regular", "local"], server=False)
async def test_startup_err(monkeypatch, caplog) -> None:
    """A failing bind during startup is logged and setup_servers reports failure."""

    async def fail_bind(*_):
        raise OSError("cannot bind")

    monkeypatch.setattr(asyncio, "start_server", fail_bind)

    proxy = Proxyserver()
    with taddons.context(proxy):
        assert not await proxy.setup_servers()
        assert "cannot bind" in caplog.text
async def test_shutdown_err(caplog_async) -> None:
    """Errors raised while stopping a server are logged, not propagated."""
    caplog_async.set_level("INFO")
    async def _raise(*_):
        raise OSError("cannot close")
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        assert await ps.setup_servers()
        ps.running()
        assert ps.servers
        # NOTE(review): iterating ps.servers appears to yield server instances
        # (not keys) — confirm against ServerManager's __iter__.
        for server in ps.servers:
            setattr(server, "stop", _raise)
        tctx.configure(ps, server=False)
        await caplog_async.await_log("cannot close")
        await _wait_for_connection_closes(ps)
async def lookup_ipv4():
    """Stub DNS resolver: yield to the event loop once, then answer 8.8.8.8."""
    await asyncio.sleep(0)
    return ["8.8.8.8"]
async def test_dns(caplog_async, monkeypatch) -> None:
    """End-to-end DNS mode: queries over one UDP connection share a DNS layer."""
    monkeypatch.setattr(
        mitmproxy_rs.dns.DnsResolver, "lookup_ipv4", lambda _, __: lookup_ipv4()
    )
    caplog_async.set_level("INFO")
    ps = Proxyserver()
    with taddons.context(ps, dns_resolver.DnsResolver()) as tctx:
        tctx.configure(
            ps,
            mode=["dns@127.0.0.1:0"],
        )
        assert await ps.setup_servers()
        ps.running()
        await caplog_async.await_log("DNS server listening at")
        assert ps.servers
        dns_addr = ps.servers["dns@127.0.0.1:0"].listen_addrs[0]
        s = await mitmproxy_rs.udp.open_udp_connection(*dns_addr)
        req = tdnsreq()
        s.write(req.packed)
        resp = dns.DNSMessage.unpack(await s.read(65535))
        assert req.id == resp.id and "8.8.8.8" in str(resp)
        assert len(ps.connections) == 1
        # Same query id again: still a single proxy connection.
        s.write(req.packed)
        resp = dns.DNSMessage.unpack(await s.read(65535))
        assert req.id == resp.id and "8.8.8.8" in str(resp)
        assert len(ps.connections) == 1
        # New query id on the same socket: still one connection, second flow.
        req.id = req.id + 1
        s.write(req.packed)
        resp = dns.DNSMessage.unpack(await s.read(65535))
        assert req.id == resp.id and "8.8.8.8" in str(resp)
        assert len(ps.connections) == 1
        (dns_conn,) = ps.connections.values()
        assert isinstance(dns_conn.layer, layers.DNSLayer)
        assert len(dns_conn.layer.flows) == 2
        # A single garbage byte is not a valid DNS message.
        s.write(b"\x00")
        await caplog_async.await_log("sent an invalid message")
        tctx.configure(ps, server=False)
        await caplog_async.await_log("stopped")
        s.close()
        await s.wait_closed()
        await _wait_for_connection_closes(ps)
def test_validation_no_transparent(monkeypatch):
    """Transparent mode is rejected on platforms without original_addr support."""
    monkeypatch.setattr(mitmproxy.platform, "original_addr", None)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        with pytest.raises(Exception, match="Transparent mode not supported"):
            tctx.configure(ps, mode=["transparent"])
def test_transparent_init(monkeypatch):
    """Configuring transparent mode triggers platform-specific initialization."""
    init = Mock()
    monkeypatch.setattr(mitmproxy.platform, "original_addr", lambda: 1)
    monkeypatch.setattr(mitmproxy.platform, "init_transparent_mode", init)
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, mode=["transparent"], server=False)
        assert init.called
@asynccontextmanager
async def udp_server(
    handle_datagram: Callable[
        [asyncio.DatagramTransport, bytes, tuple[str, int]], None
    ],
) -> AsyncGenerator[Address, None]:
    """Run a throwaway UDP echo-style server; yields its bound (host, port)."""
    class ServerProtocol(asyncio.DatagramProtocol):
        def connection_made(self, transport):
            self.transport = transport
        def datagram_received(self, data, addr):
            # Delegate each datagram to the caller-supplied handler.
            handle_datagram(self.transport, data, addr)
    loop = asyncio.get_running_loop()
    transport, _ = await loop.create_datagram_endpoint(
        lambda: ServerProtocol(),
        local_addr=("127.0.0.1", 0),
    )
    socket = transport.get_extra_info("socket")
    try:
        yield socket.getsockname()
    finally:
        transport.close()
async def test_udp(caplog_async) -> None:
    """Reverse UDP proxying: a datagram is forwarded upstream and echoed back."""
    caplog_async.set_level("INFO")
    def handle_datagram(
        transport: asyncio.DatagramTransport,
        data: bytes,
        remote_addr: Address,
    ):
        # Upstream stub: expect one probe byte, reply with a different one.
        assert data == b"\x16"
        transport.sendto(b"\x01", remote_addr)
    ps = Proxyserver()
    nl = NextLayer()
    with taddons.context(ps, nl) as tctx:
        async with udp_server(handle_datagram) as server_addr:
            mode = f"reverse:udp://{server_addr[0]}:{server_addr[1]}@127.0.0.1:0"
            tctx.configure(ps, mode=[mode])
            assert await ps.setup_servers()
            ps.running()
            await caplog_async.await_log(
                f"reverse proxy to udp://{server_addr[0]}:{server_addr[1]} listening"
            )
            assert ps.servers
            addr = ps.servers[mode].listen_addrs[0]
            stream = await mitmproxy_rs.udp.open_udp_connection(*addr)
            stream.write(b"\x16")
            assert b"\x01" == await stream.read(65535)
            assert repr(ps) == "Proxyserver(1 active conns)"
            assert len(ps.connections) == 1
            tctx.configure(ps, server=False)
            await caplog_async.await_log("stopped")
            stream.close()
            await stream.wait_closed()
            await _wait_for_connection_closes(ps)
class H3EchoServer(QuicConnectionProtocol):
    """Test HTTP/3 server that echoes request headers/body back in the response."""
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Stream ids that already received their (initial) headers.
        self._seen_headers: set[int] = set()
        self.http: H3Connection | None = None
    def http_headers_received(self, event: h3_events.HeadersReceived) -> None:
        """Answer initial headers with a 200 echoing x-request; echo trailers as-is."""
        assert event.push_id is None
        headers: dict[bytes, bytes] = {}
        for name, value in event.headers:
            headers[name] = value
        response = []
        if event.stream_id not in self._seen_headers:
            # First HEADERS frame on this stream: validate the request line
            # and build the response head. Subsequent HEADERS are trailers,
            # which get echoed back with an empty `response` prefix.
            self._seen_headers.add(event.stream_id)
            assert headers[b":authority"] == b"example.mitmproxy.org"
            assert headers[b":method"] == b"GET"
            assert headers[b":path"] == b"/test"
            response.append((b":status", b"200"))
        response.append((b"x-response", headers[b"x-request"]))
        self.http.send_headers(
            stream_id=event.stream_id, headers=response, end_stream=event.stream_ended
        )
        self.transmit()
    def http_data_received(self, event: h3_events.DataReceived) -> None:
        """Echo request body data; tolerate a bare end-of-stream after trailers."""
        assert event.push_id is None
        assert event.stream_id in self._seen_headers
        try:
            self.http.send_data(
                stream_id=event.stream_id,
                data=event.data,
                end_stream=event.stream_ended,
            )
        except FrameUnexpected:
            # DATA after trailers is illegal in H3; only a pure FIN is allowed,
            # which we forward at the QUIC level instead.
            if event.data or not event.stream_ended:
                raise
            self._quic.send_stream_data(
                stream_id=event.stream_id,
                data=b"",
                end_stream=True,
            )
        self.transmit()
    def http_event_received(self, event: h3_events.H3Event) -> None:
        """Dispatch H3 events; anything unexpected fails the test."""
        if isinstance(event, h3_events.HeadersReceived):
            self.http_headers_received(event)
        elif isinstance(event, h3_events.DataReceived):
            self.http_data_received(event)
        else:
            raise AssertionError(event)
    def quic_event_received(self, event: quic_events.QuicEvent) -> None:
        """Create the H3 connection once ALPN completes, then feed it QUIC events."""
        if isinstance(event, quic_events.ProtocolNegotiated):
            self.http = H3Connection(self._quic)
        if self.http is not None:
            for http_event in self.http.handle_event(event):
                self.http_event_received(http_event)
class QuicDatagramEchoServer(QuicConnectionProtocol):
    """Test QUIC server that echoes every received datagram frame back."""
    def quic_event_received(self, event: quic_events.QuicEvent) -> None:
        if isinstance(event, quic_events.DatagramFrameReceived):
            self._quic.send_datagram_frame(event.data)
            self.transmit()
@asynccontextmanager
async def quic_server(
    create_protocol, alpn: list[str]
) -> AsyncGenerator[Address, None]:
    """Run a throwaway QUIC server with the trusted test cert; yields its address."""
    configuration = QuicConfiguration(
        is_client=False,
        alpn_protocols=alpn,
        # Enable the QUIC datagram extension for datagram echo tests.
        max_datagram_frame_size=65536,
    )
    configuration.load_cert_chain(
        certfile=tlsdata.path("../net/data/verificationcerts/trusted-leaf.crt"),
        keyfile=tlsdata.path("../net/data/verificationcerts/trusted-leaf.key"),
    )
    loop = asyncio.get_running_loop()
    transport, server = await loop.create_datagram_endpoint(
        lambda: QuicServer(
            configuration=configuration,
            create_protocol=create_protocol,
        ),
        local_addr=("127.0.0.1", 0),
    )
    try:
        yield transport.get_extra_info("sockname")
    finally:
        server.close()
class QuicClient(QuicConnectionProtocol):
    """Minimal QUIC test client exposing an awaitable for handshake completion."""

    # Upper bound (seconds) for all awaits in the QUIC test helpers.
    TIMEOUT: ClassVar[int] = 10

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Resolved on HandshakeCompleted, failed on termination/connection loss.
        self._waiter = self._loop.create_future()

    def quic_event_received(self, event: quic_events.QuicEvent) -> None:
        if not self._waiter.done():
            if isinstance(event, quic_events.ConnectionTerminated):
                self._waiter.set_exception(
                    QuicConnectionError(
                        event.error_code, event.frame_type, event.reason_phrase
                    )
                )
            elif isinstance(event, quic_events.HandshakeCompleted):
                self._waiter.set_result(None)

    def connection_lost(self, exc: Exception | None) -> None:
        if not self._waiter.done():
            # On a clean close exc is None; Future.set_exception(None) raises
            # TypeError, so substitute a real exception for waiters instead.
            if exc is None:
                exc = ConnectionError("connection lost before handshake completed")
            self._waiter.set_exception(exc)
        return super().connection_lost(exc)

    async def wait_handshake(self) -> None:
        """Block until the TLS handshake finished or the connection failed."""
        return await asyncio.wait_for(self._waiter, timeout=QuicClient.TIMEOUT)
class QuicDatagramClient(QuicClient):
    """QUIC test client that can send one datagram and await one in return."""
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Resolved with the first datagram payload, or failed on termination.
        self._datagram: asyncio.Future[bytes] = self._loop.create_future()
    def quic_event_received(self, event: quic_events.QuicEvent) -> None:
        super().quic_event_received(event)
        if not self._datagram.done():
            if isinstance(event, quic_events.DatagramFrameReceived):
                self._datagram.set_result(event.data)
            elif isinstance(event, quic_events.ConnectionTerminated):
                self._datagram.set_exception(
                    QuicConnectionError(
                        event.error_code, event.frame_type, event.reason_phrase
                    )
                )
    def send_datagram(self, data: bytes) -> None:
        """Queue a datagram frame and flush it to the wire."""
        self._quic.send_datagram_frame(data)
        self.transmit()
    async def recv_datagram(self) -> bytes:
        """Return the first received datagram payload (bounded by TIMEOUT)."""
        return await asyncio.wait_for(self._datagram, timeout=QuicClient.TIMEOUT)
@dataclass
class H3Response:
    """Mutable record of one HTTP/3 response as its parts arrive."""
    waiter: asyncio.Future[H3Response]  # resolved when the response is complete
    stream_id: int
    headers: h3_events.H3Event | None = None
    data: bytes | None = None
    trailers: h3_events.H3Event | None = None
    callback: Callable[[str], None] | None = None  # notified on each attr set
    async def wait_result(self) -> H3Response:
        return await asyncio.wait_for(self.waiter, timeout=QuicClient.TIMEOUT)
    def __setattr__(self, name: str, value: Any) -> None:
        # Fires during __init__ too; relies on the class-level default
        # (callback = None) so the lookup succeeds before assignment.
        super().__setattr__(name, value)
        if self.callback:
            self.callback(name)
class H3Client(QuicClient):
    """HTTP/3 test client: issues requests and collects H3Response objects."""
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Pending responses keyed by their stream id.
        self._responses: dict[int, H3Response] = dict()
        self.http = H3Connection(self._quic)
    def http_headers_received(self, event: h3_events.HeadersReceived) -> None:
        """Record initial headers, then trailers; a third HEADERS is an error."""
        assert event.push_id is None
        response = self._responses[event.stream_id]
        if response.waiter.done():
            return
        if response.headers is None:
            response.headers = event.headers
            if event.stream_ended:
                response.waiter.set_result(response)
        elif response.trailers is None:
            response.trailers = event.headers
            if event.stream_ended:
                response.waiter.set_result(response)
        else:
            response.waiter.set_exception(Exception("Headers after trailers received."))
    def http_data_received(self, event: h3_events.DataReceived) -> None:
        """Accumulate body data; data before headers or after trailers is an error."""
        assert event.push_id is None
        response = self._responses[event.stream_id]
        if response.waiter.done():
            return
        if response.headers is None:
            response.waiter.set_exception(Exception("Data without headers received."))
        elif response.trailers is None:
            if response.data is None:
                response.data = event.data
            else:
                response.data = response.data + event.data
            if event.stream_ended:
                response.waiter.set_result(response)
        elif event.data or not event.stream_ended:
            response.waiter.set_exception(Exception("Data after trailers received."))
        else:
            # Bare end-of-stream after trailers: treat as completion.
            response.waiter.set_result(response)
    def http_event_received(self, event: h3_events.H3Event) -> None:
        """Dispatch H3 events; anything unexpected fails the test."""
        if isinstance(event, h3_events.HeadersReceived):
            self.http_headers_received(event)
        elif isinstance(event, h3_events.DataReceived):
            self.http_data_received(event)
        else:
            raise AssertionError(event)
    def quic_event_received(self, event: quic_events.QuicEvent) -> None:
        super().quic_event_received(event)
        for http_event in self.http.handle_event(event):
            self.http_event_received(http_event)
    def request(
        self,
        headers: h3_events.Headers,
        data: bytes | None = None,
        trailers: h3_events.Headers | None = None,
        end_stream: bool = True,
    ) -> H3Response:
        """Send one request on a fresh stream and return its pending H3Response.

        end_stream is set on the *last* part actually sent (headers, data,
        or trailers); passing end_stream=False leaves the stream open.
        """
        stream_id = self._quic.get_next_available_stream_id()
        self.http.send_headers(
            stream_id=stream_id,
            headers=headers,
            end_stream=data is None and trailers is None and end_stream,
        )
        if data is not None:
            self.http.send_data(
                stream_id=stream_id,
                data=data,
                end_stream=trailers is None and end_stream,
            )
        if trailers is not None:
            self.http.send_headers(
                stream_id=stream_id,
                headers=trailers,
                end_stream=end_stream,
            )
        waiter = self._loop.create_future()
        response = H3Response(waiter=waiter, stream_id=stream_id)
        self._responses[stream_id] = response
        self.transmit()
        return response
T = TypeVar("T", bound=QuicClient)
@asynccontextmanager
async def quic_connect(
    cls: type[T],
    alpn: list[str],
    address: Address,
) -> AsyncGenerator[T, None]:
    """Connect a QuicClient subclass to `address`; yields it after the handshake."""
    configuration = QuicConfiguration(
        is_client=True,
        alpn_protocols=alpn,
        server_name="example.mitmproxy.org",
        # Certificate verification is disabled: the test server uses a
        # self-signed test CA.
        verify_mode=ssl.CERT_NONE,
        max_datagram_frame_size=65536,
    )
    loop = asyncio.get_running_loop()
    transport, protocol = await loop.create_datagram_endpoint(
        lambda: cls(QuicConnection(configuration=configuration)),
        local_addr=("127.0.0.1", 0),
    )
    assert isinstance(protocol, cls)
    try:
        protocol.connect(address)
        await protocol.wait_handshake()
        yield protocol
    finally:
        protocol.close()
        await protocol.wait_closed()
        transport.close()
async def _test_echo(client: H3Client, strict: bool) -> None:
    """Exercise the echo server over every headers/data/trailers combination.

    strict=True (http3 mode) requires the proxy not to synthesize empty DATA
    frames; strict=False (raw quic mode) merely requires the body to be empty.
    """
    def assert_no_data(response: H3Response):
        if strict:
            assert response.data is None
        else:
            assert not response.data
    headers = [
        (b":scheme", b"https"),
        (b":authority", b"example.mitmproxy.org"),
        (b":method", b"GET"),
        (b":path", b"/test"),
    ]
    # 1) headers only
    r1 = await client.request(
        headers=headers + [(b"x-request", b"justheaders")],
        data=None,
        trailers=None,
    ).wait_result()
    assert r1.headers == [
        (b":status", b"200"),
        (b"x-response", b"justheaders"),
    ]
    assert_no_data(r1)
    assert r1.trailers is None
    # 2) headers + data
    r2 = await client.request(
        headers=headers + [(b"x-request", b"hasdata")],
        data=b"echo",
        trailers=None,
    ).wait_result()
    assert r2.headers == [
        (b":status", b"200"),
        (b"x-response", b"hasdata"),
    ]
    assert r2.data == b"echo"
    assert r2.trailers is None
    # 3) headers + trailers, no data
    r3 = await client.request(
        headers=headers + [(b"x-request", b"nodata")],
        data=None,
        trailers=[(b"x-request", b"buttrailers")],
    ).wait_result()
    assert r3.headers == [
        (b":status", b"200"),
        (b"x-response", b"nodata"),
    ]
    assert_no_data(r3)
    assert r3.trailers == [(b"x-response", b"buttrailers")]
    # 4) headers + data + trailers
    r4 = await client.request(
        headers=headers + [(b"x-request", b"this")],
        data=b"has",
        trailers=[(b"x-request", b"everything")],
    ).wait_result()
    assert r4.headers == [
        (b":status", b"200"),
        (b"x-response", b"this"),
    ]
    assert r4.data == b"has"
    assert r4.trailers == [(b"x-response", b"everything")]
    # the following test makes sure that we behave properly if end_stream is sent separately
    r5 = client.request(
        headers=headers + [(b"x-request", b"this")],
        data=b"has",
        trailers=[(b"x-request", b"everything but end_stream")],
        end_stream=False,
    )
    if not strict:
        # In raw-QUIC mode the trailers arrive before end-of-stream; wait for
        # them via the attribute-change callback, then confirm we're not done.
        trailer_waiter = asyncio.get_running_loop().create_future()
        r5.callback = lambda name: name != "trailers" or trailer_waiter.set_result(None)
        await asyncio.wait_for(trailer_waiter, timeout=QuicClient.TIMEOUT)
        assert r5.trailers is not None
        assert not r5.waiter.done()
    else:
        await asyncio.sleep(0)
    # Now send the bare FIN and expect the full response to complete.
    client._quic.send_stream_data(
        stream_id=r5.stream_id,
        data=b"",
        end_stream=True,
    )
    client.transmit()
    await r5.wait_result()
    assert r5.headers == [
        (b":status", b"200"),
        (b"x-response", b"this"),
    ]
    assert r5.data == b"has"
    assert r5.trailers == [(b"x-response", b"everything but end_stream")]
@pytest.mark.parametrize("scheme", ["http3", "quic"])
async def test_reverse_http3_and_quic_stream(caplog_async, scheme: str) -> None:
    """Reverse-proxy an HTTP/3 request through the proxy in http3 or raw quic mode."""
    caplog_async.set_level("INFO")
    ps = Proxyserver()
    nl = NextLayer()
    ta = TlsConfig()
    with taddons.context(ps, nl, ta) as tctx:
        tctx.options.keep_host_header = True
        ta.configure(["confdir"])
        async with quic_server(H3EchoServer, alpn=["h3"]) as server_addr:
            mode = f"reverse:{scheme}://{server_addr[0]}:{server_addr[1]}@127.0.0.1:0"
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=tlsdata.path(
                    "../net/data/verificationcerts/trusted-root.crt"
                ),
            )
            tctx.configure(ps, mode=[mode])
            assert await ps.setup_servers()
            ps.running()
            await caplog_async.await_log(
                f"reverse proxy to {scheme}://{server_addr[0]}:{server_addr[1]} listening"
            )
            assert ps.servers
            addr = ps.servers[mode].listen_addrs[0]
            async with quic_connect(H3Client, alpn=["h3"], address=addr) as client:
                await _test_echo(client, strict=scheme == "http3")
                assert len(ps.connections) == 1
            tctx.configure(ps, server=False)
            # was f"stopped": f-string without placeholders (ruff F541)
            await caplog_async.await_log("stopped")
            await _wait_for_connection_closes(ps)
async def test_reverse_quic_datagram(caplog_async) -> None:
    """Reverse-proxy QUIC datagram frames and verify they are echoed back."""
    caplog_async.set_level("INFO")
    ps = Proxyserver()
    nl = NextLayer()
    ta = TlsConfig()
    with taddons.context(ps, nl, ta) as tctx:
        tctx.options.keep_host_header = True
        ta.configure(["confdir"])
        async with quic_server(QuicDatagramEchoServer, alpn=["dgram"]) as server_addr:
            mode = f"reverse:quic://{server_addr[0]}:{server_addr[1]}@127.0.0.1:0"
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=tlsdata.path(
                    "../net/data/verificationcerts/trusted-root.crt"
                ),
            )
            tctx.configure(ps, mode=[mode])
            assert await ps.setup_servers()
            ps.running()
            await caplog_async.await_log(
                f"reverse proxy to quic://{server_addr[0]}:{server_addr[1]} listening"
            )
            assert ps.servers
            addr = ps.servers[mode].listen_addrs[0]
            async with quic_connect(
                QuicDatagramClient, alpn=["dgram"], address=addr
            ) as client:
                client.send_datagram(b"echo")
                assert await client.recv_datagram() == b"echo"
            tctx.configure(ps, server=False)
            await caplog_async.await_log("stopped")
            await _wait_for_connection_closes(ps)
@pytest.mark.skip("HTTP/3 for regular mode is not fully supported yet")
async def test_regular_http3(caplog_async, monkeypatch) -> None:
    """Regular-mode HTTP/3 proxying (currently skipped: not fully supported)."""
    caplog_async.set_level("INFO")
    ps = Proxyserver()
    nl = NextLayer()
    ta = TlsConfig()
    with taddons.context(ps, nl, ta) as tctx:
        ta.configure(["confdir"])
        async with quic_server(H3EchoServer, alpn=["h3"]) as server_addr:
            orig_open_connection = mitmproxy_rs.udp.open_udp_connection

            # Redirect connections to example.mitmproxy.org:443 at the local
            # echo server while passing everything else through unchanged.
            async def open_connection_path(
                host: str, port: int, *args, **kwargs
            ) -> mitmproxy_rs.Stream:
                if host == "example.mitmproxy.org" and port == 443:
                    host = server_addr[0]
                    port = server_addr[1]
                return orig_open_connection(host, port, *args, **kwargs)

            monkeypatch.setattr(
                mitmproxy_rs.udp, "open_udp_connection", open_connection_path
            )
            # was f"...": f-strings without placeholders (ruff F541)
            mode = "http3@127.0.0.1:0"
            tctx.configure(
                ta,
                ssl_verify_upstream_trusted_ca=tlsdata.path(
                    "../net/data/verificationcerts/trusted-root.crt"
                ),
            )
            tctx.configure(ps, mode=[mode])
            assert await ps.setup_servers()
            ps.running()
            await caplog_async.await_log("HTTP3 proxy listening")
            assert ps.servers
            addr = ps.servers[mode].listen_addrs[0]
            async with quic_connect(H3Client, alpn=["h3"], address=addr) as client:
                await _test_echo(client=client, strict=True)
                assert len(ps.connections) == 1
            tctx.configure(ps, server=False)
            await caplog_async.await_log("stopped")
            await _wait_for_connection_closes(ps)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_dumper.py | test/mitmproxy/addons/test_dumper.py | import io
import shutil
from unittest import mock
import pytest
import mitmproxy_rs.syntax_highlight
from mitmproxy import exceptions
from mitmproxy.addons import dumper
from mitmproxy.addons.dumper import CONTENTVIEW_STYLES
from mitmproxy.http import Headers
from mitmproxy.net.dns import response_codes
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
def test_configure():
    """dumper_filter is compiled into a matcher; invalid filters raise."""
    d = dumper.Dumper()
    with taddons.context(d) as ctx:
        ctx.configure(d, dumper_filter="~b foo")
        assert d.filter
        f = tflow.tflow(resp=True)
        assert not d.match(f)
        f.response.content = b"foo"
        assert d.match(f)
        ctx.configure(d, dumper_filter=None)
        assert not d.filter
        with pytest.raises(exceptions.OptionsError):
            # "~~" is not a valid filter expression.
            ctx.configure(d, dumper_filter="~~")
        assert not d.filter
def test_simple():
    """Output presence/shape at the various flow_detail levels."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        # flow_detail=0: nothing is printed.
        ctx.configure(d, flow_detail=0)
        d.response(tflow.tflow(resp=True))
        assert not sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=1)
        d.response(tflow.tflow(resp=True))
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=1)
        d.error(tflow.tflow(err=True))
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(resp=True))
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(resp=True))
        # "<<" marks the response body section at high detail.
        assert "<<" in sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(err=True))
        assert "<<" in sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow()
        flow.request = tutils.treq()
        # MagicMock lets us fake peername item assignment below.
        flow.client_conn = mock.MagicMock()
        flow.client_conn.peername[0] = "foo"
        flow.response = tutils.tresp(content=None)
        flow.is_replay = "response"
        flow.response.status_code = 300
        d.response(flow)
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        # Invalid JSON body with a JSON content-type must not crash rendering.
        flow = tflow.tflow(resp=tutils.tresp(content=b"{"))
        flow.response.headers["content-type"] = "application/json"
        flow.response.status_code = 400
        d.response(flow)
        assert sio.getvalue()
        sio.truncate(0)
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow()
        flow.request.content = None
        flow.response = tutils.tresp(content=None)
        d.response(flow)
        assert "content missing" in sio.getvalue()
        sio.truncate(0)
def test_echo_body():
    """Large bodies are truncated with a "cut off" marker at the default cutoff."""
    f = tflow.tflow(resp=True)
    f.response.headers["content-type"] = "text/html"
    f.response.content = b"foo bar voing\n" * 600
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3)
        d._echo_message(f.response, f)
        t = sio.getvalue()
        assert "cut off" in t
def test_echo_body_custom_cutoff():
    """content_view_lines_cutoff overrides the default body truncation limit."""
    f = tflow.tflow(resp=True)
    f.response.headers["content-type"] = "text/html"
    # 4 lines of body with a 3-line cutoff forces truncation.
    f.response.content = b"foo bar voing\n" * 4
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3)
        ctx.configure(d, content_view_lines_cutoff=3)
        d._echo_message(f.response, f)
        t = sio.getvalue()
        assert "cut off" in t
def test_echo_trailer():
    """Request and response trailers are printed alongside headers and bodies."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3)
        f = tflow.tflow(resp=True)
        f.request.headers["content-type"] = "text/html"
        f.request.headers["transfer-encoding"] = "chunked"
        f.request.headers["trailer"] = "my-little-request-trailer"
        f.request.content = b"some request content\n" * 600
        f.request.trailers = Headers(
            [(b"my-little-request-trailer", b"foobar-request-trailer")]
        )
        f.response.headers["transfer-encoding"] = "chunked"
        f.response.headers["trailer"] = "my-little-response-trailer"
        f.response.content = b"some response content\n" * 100
        f.response.trailers = Headers(
            [(b"my-little-response-trailer", b"foobar-response-trailer")]
        )
        d.echo_flow(f)
        t = sio.getvalue()
        assert "content-type" in t
        # Long request body still hits the truncation marker.
        assert "cut off" in t
        assert "some request content" in t
        assert "foobar-request-trailer" in t
        assert "some response content" in t
        assert "foobar-response-trailer" in t
def test_echo_request_line():
    """Request-line rendering: replay marker, HTTP version, URL truncation."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.tflow(resp=True)
        f.is_replay = "request"
        d._echo_request_line(f)
        assert "[replay]" in sio.getvalue()
        sio.truncate(0)
        f = tflow.tflow(resp=True)
        f.is_replay = None
        d._echo_request_line(f)
        assert "[replay]" not in sio.getvalue()
        sio.truncate(0)
        f = tflow.tflow(resp=True)
        f.request.http_version = "nonstandard"
        d._echo_request_line(f)
        assert "nonstandard" in sio.getvalue()
        sio.truncate(0)
        # At flow_detail=1 overly long URLs are truncated to terminal width.
        ctx.configure(d, flow_detail=1, showhost=True)
        f = tflow.tflow(resp=True)
        terminalWidth = max(shutil.get_terminal_size()[0] - 25, 50)
        f.request.url = (
            "http://address:22/" + ("x" * terminalWidth) + "textToBeTruncated"
        )
        d._echo_request_line(f)
        assert "textToBeTruncated" not in sio.getvalue()
        sio.truncate(0)
def test_tcp():
    """TCP messages and TCP errors are rendered."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.ttcpflow()
        d.tcp_message(f)
        assert "it's me" in sio.getvalue()
        sio.truncate(0)
        f = tflow.ttcpflow(client_conn=True, err=True)
        d.tcp_error(f)
        assert "Error in TCP" in sio.getvalue()
def test_udp():
    """UDP messages and UDP errors are rendered."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.tudpflow()
        d.udp_message(f)
        assert "it's me" in sio.getvalue()
        sio.truncate(0)
        f = tflow.tudpflow(client_conn=True, err=True)
        d.udp_error(f)
        assert "Error in UDP" in sio.getvalue()
def test_dns():
    """DNS answers, error responses (NOTIMP), and flow errors are rendered."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.tdnsflow(resp=True)
        d.dns_response(f)
        assert "8.8.8.8" in sio.getvalue()
        sio.truncate(0)
        f = tflow.tdnsflow()
        f.response = f.request.fail(response_codes.NOTIMP)
        d.dns_response(f)
        assert "NOTIMP" in sio.getvalue()
        sio.truncate(0)
        f = tflow.tdnsflow(err=True)
        d.dns_error(f)
        assert "error" in sio.getvalue()
def test_websocket():
    """WebSocket messages, close codes, and close reasons are rendered."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.twebsocketflow()
        d.websocket_message(f)
        assert "it's me" in sio.getvalue()
        sio.truncate(0)
        d.websocket_end(f)
        assert "WebSocket connection closed by" in sio.getvalue()
        sio.truncate(0)
        f = tflow.twebsocketflow(err=True)
        d.websocket_end(f)
        assert "Error in WebSocket" in sio.getvalue()
        assert "(reason:" not in sio.getvalue()
        sio.truncate(0)
        f = tflow.twebsocketflow(err=True, close_reason="Some lame excuse")
        d.websocket_end(f)
        assert "Error in WebSocket" in sio.getvalue()
        assert "(reason: Some lame excuse)" in sio.getvalue()
        sio.truncate(0)
        # Close codes outside the registered range print as UNKNOWN_ERROR.
        f = tflow.twebsocketflow(close_code=4000)
        d.websocket_end(f)
        assert "UNKNOWN_ERROR=4000" in sio.getvalue()
        assert "(reason:" not in sio.getvalue()
        sio.truncate(0)
        f = tflow.twebsocketflow(close_code=4000, close_reason="I swear I had a reason")
        d.websocket_end(f)
        assert "UNKNOWN_ERROR=4000" in sio.getvalue()
        assert "(reason: I swear I had a reason)" in sio.getvalue()
def test_http2():
    """HTTP/2 responses are printed with their protocol version in the status line."""
    buf = io.StringIO()
    addon = dumper.Dumper(buf)
    with taddons.context(addon):
        flow = tflow.tflow(resp=True)
        flow.response.http_version = b"HTTP/2.0"
        addon.response(flow)
    assert "HTTP/2.0 200 OK" in buf.getvalue()
def test_quic():
    """QUIC-backed TCP/UDP flows show their stream id in the output."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(d):
        f = tflow.ttcpflow()
        f.client_conn.tls_version = "QUICv1"
        # TODO: This should not be metadata, this should be typed attributes.
        f.metadata["quic_stream_id_client"] = 1
        f.metadata["quic_stream_id_server"] = 1
        d.tcp_message(f)
        assert "quic stream 1" in sio.getvalue()
        f2 = tflow.tudpflow()
        f2.client_conn.tls_version = "QUICv1"
        # TODO: This should not be metadata, this should be typed attributes.
        f2.metadata["quic_stream_id_client"] = 1
        f2.metadata["quic_stream_id_server"] = 1
        d.udp_message(f2)
        assert "quic stream 1" in sio.getvalue()
def test_styling():
    """When the output terminal supports VT codes, responses are colorized."""
    buf = io.StringIO()
    addon = dumper.Dumper(buf)
    addon.out_has_vt_codes = True
    with taddons.context(addon):
        addon.response(tflow.tflow(resp=True))
    # ANSI escape sequences start with ESC [.
    assert "\x1b[" in buf.getvalue()
def test_has_styles_for_tags():
    """Every syntax-highlighting tag must have an entry in CONTENTVIEW_STYLES."""
    all_tags = set(mitmproxy_rs.syntax_highlight.tags())
    missing = all_tags.difference(CONTENTVIEW_STYLES)
    assert not missing, f"Missing styles for tags: {missing}"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_proxyauth.py | test/mitmproxy/addons/test_proxyauth.py | import binascii
from unittest import mock
import ldap3
import pytest
from mitmproxy import exceptions
from mitmproxy.addons import proxyauth
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.test import taddons
from mitmproxy.test import tflow
@pytest.mark.parametrize(
    "scheme, expected",
    [
        # base64("username:password") == dXNlcm5hbWU6cGFzc3dvcmQ=
        ("", " dXNlcm5hbWU6cGFzc3dvcmQ=\n"),
        ("basic", "basic dXNlcm5hbWU6cGFzc3dvcmQ=\n"),
        ("foobar", "foobar dXNlcm5hbWU6cGFzc3dvcmQ=\n"),
    ],
)
def test_mkauth(scheme, expected):
    """mkauth prefixes the base64 credentials with the given scheme."""
    assert proxyauth.mkauth("username", "password", scheme) == expected
def test_parse_http_basic_auth():
    """Round-trip: a header built by mkauth parses back into its parts."""
    header = proxyauth.mkauth("test", "test")
    scheme, username, password = proxyauth.parse_http_basic_auth(header)
    assert (scheme, username, password) == ("basic", "test", "test")
@pytest.mark.parametrize(
    "input",
    [
        "",
        "foo bar",
        "basic abc",
        # valid base64 but not of the form user:password
        "basic " + binascii.b2a_base64(b"foo").decode("ascii"),
    ],
)
def test_parse_http_basic_auth_error(input):
    """Malformed Authorization header values raise ValueError."""
    with pytest.raises(ValueError):
        proxyauth.parse_http_basic_auth(input)
@pytest.mark.parametrize(
    "mode, expected",
    [
        ("regular", True),
        ("upstream:proxy", True),
        # Reverse proxies are transparent to the client: not an HTTP proxy.
        ("reverse:example.com", False),
    ],
)
def test_is_http_proxy(mode, expected):
    """is_http_proxy depends on the client connection's proxy mode."""
    f = tflow.tflow()
    f.client_conn.proxy_mode = ProxyMode.parse(mode)
    assert proxyauth.is_http_proxy(f) == expected
@pytest.mark.parametrize(
    "is_http_proxy, expected",
    [
        (True, "Proxy-Authorization"),
        (False, "Authorization"),
    ],
)
def test_http_auth_header(is_http_proxy, expected):
    """Proxies read Proxy-Authorization; reverse mode reads Authorization."""
    assert proxyauth.http_auth_header(is_http_proxy) == expected
@pytest.mark.parametrize(
    "is_http_proxy, expected_status_code, expected_header",
    [
        # 407/Proxy-Authenticate for proxies, 401/WWW-Authenticate otherwise.
        (True, 407, "Proxy-Authenticate"),
        (False, 401, "WWW-Authenticate"),
    ],
)
def test_make_auth_required_response(
    is_http_proxy, expected_status_code, expected_header
):
    """The auth challenge uses the status code and header matching the mode."""
    resp = proxyauth.make_auth_required_response(is_http_proxy)
    assert resp.status_code == expected_status_code
    assert expected_header in resp.headers.keys()
class TestProxyAuth:
def test_socks5(self):
pa = proxyauth.ProxyAuth()
with taddons.context(pa, loadcore=False) as ctx:
ctx.configure(pa, proxyauth="foo:bar")
data = modes.Socks5AuthData(tflow.tclient_conn(), "foo", "baz")
pa.socks5_auth(data)
assert not data.valid
data.password = "bar"
pa.socks5_auth(data)
assert data.valid
def test_authenticate(self):
up = proxyauth.ProxyAuth()
with taddons.context(up, loadcore=False) as ctx:
ctx.configure(up, proxyauth="any")
f = tflow.tflow()
f.client_conn.proxy_mode = ProxyMode.parse("regular")
assert not f.response
up.authenticate_http(f)
assert f.response.status_code == 407
f = tflow.tflow()
f.request.headers["Proxy-Authorization"] = proxyauth.mkauth("test", "test")
up.authenticate_http(f)
assert not f.response
assert not f.request.headers.get("Proxy-Authorization")
f = tflow.tflow()
f.client_conn.proxy_mode = ProxyMode.parse("reverse:https://example.com")
assert not f.response
up.authenticate_http(f)
assert f.response.status_code == 401
f = tflow.tflow()
f.client_conn.proxy_mode = ProxyMode.parse("reverse:https://example.com")
f.request.headers["Authorization"] = proxyauth.mkauth("test", "test")
up.authenticate_http(f)
assert not f.response
assert not f.request.headers.get("Authorization")
def test_configure(self, monkeypatch, tdata, tmp_path):
monkeypatch.setattr(ldap3, "Server", mock.MagicMock())
monkeypatch.setattr(ldap3, "Connection", mock.MagicMock())
pa = proxyauth.ProxyAuth()
with taddons.context(pa) as ctx:
with pytest.raises(
exceptions.OptionsError, match="Invalid proxyauth specification"
):
ctx.configure(pa, proxyauth="foo")
ctx.configure(pa, proxyauth="foo:bar")
assert isinstance(pa.validator, proxyauth.SingleUser)
assert pa.validator("foo", "bar")
assert not pa.validator("foo", "baz")
with pytest.raises(
exceptions.OptionsError, match="Invalid single-user auth specification."
):
ctx.configure(pa, proxyauth="foo:bar:baz")
ctx.configure(pa, proxyauth="any")
assert isinstance(pa.validator, proxyauth.AcceptAll)
assert pa.validator("foo", "bar")
ctx.configure(pa, proxyauth=None)
assert pa.validator is None
ctx.configure(
pa,
proxyauth="ldap:localhost:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com",
)
assert isinstance(pa.validator, proxyauth.Ldap)
ctx.configure(
pa,
proxyauth="ldap:localhost:1234:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com",
)
assert isinstance(pa.validator, proxyauth.Ldap)
ctx.configure(
pa,
proxyauth="ldap:localhost:1234:cn=default,dc=cdhdt,dc=com:password:dc=cdhdt,dc=com?search_filter_key=SamAccountName",
)
assert isinstance(pa.validator, proxyauth.Ldap)
with pytest.raises(
exceptions.OptionsError, match="Invalid LDAP specification"
):
ctx.configure(pa, proxyauth="ldap:test:test:test")
with pytest.raises(
exceptions.OptionsError, match="Invalid LDAP specification"
):
ctx.configure(
pa,
proxyauth="ldap:localhost:1234:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com?key=1",
)
with pytest.raises(
exceptions.OptionsError, match="Invalid LDAP specification"
):
ctx.configure(
pa, proxyauth="ldap:fake_serveruid=?dc=example,dc=com:person"
)
with pytest.raises(
exceptions.OptionsError, match="Invalid LDAP specification"
):
ctx.configure(pa, proxyauth="ldapssssssss:fake_server:dn:password:tree")
# test htpasswd
# Manually create a SHA1-hashed password for "test:test".
# The htpasswd file in the repo uses unsalted MD5, which we don't support.
p = tmp_path / "htpasswd"
p.write_text("test:{SHA}qUqP5cyxm6YcTAhz05Hph5gvu9M=\n")
ctx.configure(pa, proxyauth=f"@{p}")
assert isinstance(pa.validator, proxyauth.Htpasswd)
assert pa.validator("test", "test")
assert not pa.validator("test", "foo")
# nonexistent file
with pytest.raises(
exceptions.OptionsError, match="Could not open htpasswd file"
):
ctx.configure(pa, proxyauth="@nonexistent")
# malformed file
p.write_text("malformed\n")
with pytest.raises(
exceptions.OptionsError, match="Could not open htpasswd file"
):
ctx.configure(pa, proxyauth=f"@{p}")
def test_handlers(self):
    """End-to-end check of the request/CONNECT hooks: unauthenticated
    requests get a 407 challenge, valid credentials pass through, and
    replayed flows bypass authentication."""
    up = proxyauth.ProxyAuth()
    with taddons.context(up) as ctx:
        ctx.configure(up, proxyauth="any")

        # No credentials -> challenged with 407.
        f = tflow.tflow()
        assert not f.response
        up.requestheaders(f)
        assert f.response.status_code == 407

        # CONNECT without credentials is challenged as well.
        f = tflow.tflow()
        f.request.method = "CONNECT"
        assert not f.response
        up.http_connect(f)
        assert f.response.status_code == 407

        # Valid credentials on CONNECT: no challenge response is set.
        f = tflow.tflow()
        f.request.method = "CONNECT"
        f.request.headers["Proxy-Authorization"] = proxyauth.mkauth("test", "test")
        up.http_connect(f)
        assert not f.response

        # A later request on the same (already authenticated) client
        # connection passes and is tagged with the credentials.
        f2 = tflow.tflow(client_conn=f.client_conn)
        up.requestheaders(f2)
        assert not f2.response
        assert f2.metadata["proxyauth"] == ("test", "test")

        # Replayed flows are not challenged.
        f3 = tflow.tflow()
        f3.is_replay = True
        up.requestheaders(f3)
        assert not f2.response
@pytest.mark.parametrize(
    "spec",
    [
        "ldaps:localhost:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com",
        "ldap:localhost:1234:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com",
        "ldap:localhost:1234:cn=default,dc=cdhdt,dc=com:password:ou=application,dc=cdhdt,dc=com?search_filter_key=cn",
    ],
)
def test_ldap(monkeypatch, spec):
    """Exercise the LDAP validator against mocked ldap3 primitives."""
    # Stub out the real LDAP client so no network connection is made.
    for name in ("Server", "Connection"):
        monkeypatch.setattr(ldap3, name, mock.MagicMock())
    auth = proxyauth.Ldap(spec)
    # Empty credentials are always rejected.
    assert not auth("", "")
    # The mocked connection reports success by default.
    assert auth("foo", "bar")
    # A falsy search response means authentication failure.
    auth.conn.response = False
    assert not auth("foo", "bar")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/__init__.py | test/mitmproxy/addons/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_intercept.py | test/mitmproxy/addons/test_intercept.py | import pytest
from mitmproxy import exceptions
from mitmproxy.addons import intercept
from mitmproxy.test import taddons
from mitmproxy.test import tflow
async def test_simple():
    """Core intercept behaviour: filter parsing/validation, toggling
    intercept_active, and interception of matching flows only."""
    r = intercept.Intercept()
    with taddons.context(r) as tctx:
        assert not r.filt
        tctx.configure(r, intercept="~q")
        assert r.filt
        assert tctx.options.intercept_active
        # An invalid filter expression is rejected at configure time.
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(r, intercept="~~")
        tctx.configure(r, intercept=None)
        assert not r.filt
        assert not tctx.options.intercept_active
        tctx.configure(r, intercept="~s")
        # A flow with a response matches ~s and is intercepted ...
        f = tflow.tflow(resp=True)
        await tctx.cycle(r, f)
        assert f.intercepted
        # ... a request-only flow does not match.
        f = tflow.tflow(resp=False)
        await tctx.cycle(r, f)
        assert not f.intercepted
        f = tflow.tflow(resp=True)
        r.response(f)
        assert f.intercepted
        # Disabling intercept_active suspends interception ...
        tctx.configure(r, intercept_active=False)
        f = tflow.tflow(resp=True)
        await tctx.cycle(r, f)
        assert not f.intercepted
        # ... and re-enabling restores it.
        tctx.configure(r, intercept_active=True)
        f = tflow.tflow(resp=True)
        await tctx.cycle(r, f)
        assert f.intercepted
async def test_dns():
    """DNS flows are intercepted when they match the filter and
    interception is active."""
    addon = intercept.Intercept()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, intercept="~s ~dns")
        answered = tflow.tdnsflow(resp=True)
        await tctx.cycle(addon, answered)
        assert answered.intercepted
        # A flow without a response does not match "~s".
        unanswered = tflow.tdnsflow(resp=False)
        await tctx.cycle(addon, unanswered)
        assert not unanswered.intercepted
        # With interception switched off, nothing is held.
        tctx.configure(addon, intercept_active=False)
        answered = tflow.tdnsflow(resp=True)
        await tctx.cycle(addon, answered)
        assert not answered.intercepted
async def test_tcp():
    """TCP flows matching ~tcp are intercepted unless interception
    has been switched off."""
    addon = intercept.Intercept()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, intercept="~tcp")
        flow = tflow.ttcpflow()
        await tctx.cycle(addon, flow)
        assert flow.intercepted
        tctx.configure(addon, intercept_active=False)
        flow = tflow.ttcpflow()
        await tctx.cycle(addon, flow)
        assert not flow.intercepted
async def test_udp():
    """UDP flows matching ~udp are intercepted unless interception
    has been switched off."""
    addon = intercept.Intercept()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, intercept="~udp")
        flow = tflow.tudpflow()
        await tctx.cycle(addon, flow)
        assert flow.intercepted
        tctx.configure(addon, intercept_active=False)
        flow = tflow.tudpflow()
        await tctx.cycle(addon, flow)
        assert not flow.intercepted
async def test_websocket_message():
    """WebSocket flows are matched on their message bodies via ~b."""
    addon = intercept.Intercept()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, intercept='~b "hello binary"')
        flow = tflow.twebsocketflow()
        await tctx.cycle(addon, flow)
        assert flow.intercepted
        tctx.configure(addon, intercept_active=False)
        flow = tflow.twebsocketflow()
        await tctx.cycle(addon, flow)
        assert not flow.intercepted
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_eventstore.py | test/mitmproxy/addons/test_eventstore.py | import asyncio
import logging
from mitmproxy.addons import eventstore
async def test_simple():
    """EventStore emits sig_add for each new log entry and sig_refresh
    when the store is cleared."""
    store = eventstore.EventStore()
    assert not store.data

    sig_add_called = False
    sig_refresh_called = False

    def sig_add(entry):
        nonlocal sig_add_called
        sig_add_called = True

    def sig_refresh():
        nonlocal sig_refresh_called
        sig_refresh_called = True

    store.sig_add.connect(sig_add)
    store.sig_refresh.connect(sig_refresh)
    assert not sig_add_called
    assert not sig_refresh_called
    # test .log()
    logging.error("test")
    # Yield to the event loop so the async log handler can run.
    await asyncio.sleep(0)
    assert store.data
    assert sig_add_called
    assert not sig_refresh_called
    # test .clear(): only the refresh signal fires.
    sig_add_called = False
    store.clear()
    assert not store.data
    assert not sig_add_called
    assert sig_refresh_called
    store.done()
async def test_max_size():
    """The store is a bounded buffer: at most `size` entries are kept
    and the oldest entry is evicted on overflow."""
    store = eventstore.EventStore(3)
    assert store.size == 3
    for message in ("foo", "bar", "baz"):
        logging.warning(message)
    await asyncio.sleep(0)
    assert len(store.data) == 3
    assert "baz" in store.data[-1].msg
    # One more entry pushes out the oldest one.
    logging.warning("boo")
    await asyncio.sleep(0)
    assert len(store.data) == 3
    assert "boo" in store.data[-1].msg
    store.done()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_anticache.py | test/mitmproxy/addons/test_anticache.py | from mitmproxy.addons import anticache
from mitmproxy.test import taddons
from mitmproxy.test import tflow
class TestAntiCache:
    """The anticache addon strips cache-validation request headers."""

    def test_simple(self):
        addon = anticache.AntiCache()
        with taddons.context(addon) as tctx:
            flow = tflow.tflow(resp=True)
            flow.request.headers["if-modified-since"] = "test"
            flow.request.headers["if-none-match"] = "test"
            # Option off: the conditional headers survive.
            addon.request(flow)
            assert "if-modified-since" in flow.request.headers
            assert "if-none-match" in flow.request.headers
            # Option on: both conditional headers are removed.
            tctx.configure(addon, anticache=True)
            addon.request(flow)
            assert "if-modified-since" not in flow.request.headers
            assert "if-none-match" not in flow.request.headers
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_readfile.py | test/mitmproxy/addons/test_readfile.py | import asyncio
import io
from unittest import mock
import pytest
import mitmproxy.io
from mitmproxy import exceptions
from mitmproxy.addons import readfile
from mitmproxy.test import taddons
from mitmproxy.test import tflow
@pytest.fixture
def data():
    """A serialized flow stream (HTTP and TCP, with and without
    errors), rewound to the start."""
    buf = io.BytesIO()
    writer = mitmproxy.io.FlowWriter(buf)
    for flow in (
        tflow.tflow(resp=True),
        tflow.tflow(err=True),
        tflow.ttcpflow(),
        tflow.ttcpflow(err=True),
    ):
        writer.add(flow)
    buf.seek(0)
    return buf
@pytest.fixture
def corrupt_data(data):
    """The `data` stream with trailing garbage appended, so reading
    fails partway through."""
    buf = io.BytesIO(data.getvalue())
    buf.seek(0, io.SEEK_END)
    buf.write(b"qibble")
    buf.seek(0)
    return buf
class TestReadFile:
    """Tests for the ReadFile addon, which loads flows from a dump file."""

    def test_configure(self):
        rf = readfile.ReadFile()
        with taddons.context(rf) as tctx:
            tctx.configure(rf, readfile_filter="~q")
            # Invalid filter expressions are rejected at configure time.
            with pytest.raises(Exception, match="Invalid filter expression"):
                tctx.configure(rf, readfile_filter="~~")
            tctx.configure(rf, readfile_filter="")

    async def test_read(self, tmpdir, data, corrupt_data, caplog_async):
        rf = readfile.ReadFile()
        with taddons.context(rf) as tctx:
            assert not rf.reading()

            tf = tmpdir.join("tfile")

            # Replace the master's load_flow with a probe so we can
            # detect when the first flow arrives.
            load_called = asyncio.Event()

            async def load_flow(*_, **__):
                load_called.set()

            tctx.master.load_flow = load_flow

            tf.write(data.getvalue())
            tctx.configure(rf, rfile=str(tf), readfile_filter=".*")
            assert not load_called.is_set()
            rf.running()
            await load_called.wait()

            # Wait for the reader task to finish before reconfiguring.
            while rf.reading():
                await asyncio.sleep(0)

            # Reading a corrupted file is logged, not raised.
            tf.write(corrupt_data.getvalue())
            tctx.configure(rf, rfile=str(tf))
            rf.running()
            await caplog_async.await_log("corrupted")

    async def test_corrupt(self, corrupt_data, caplog_async):
        rf = readfile.ReadFile()
        with taddons.context(rf):
            # Pure garbage input raises immediately.
            with pytest.raises(exceptions.FlowReadException):
                await rf.load_flows(io.BytesIO(b"qibble"))

            # A stream that goes bad partway through raises and logs.
            caplog_async.clear()
            with pytest.raises(exceptions.FlowReadException):
                await rf.load_flows(corrupt_data)
            await caplog_async.await_log("file corrupted")

    async def test_nonexistent_file(self, caplog):
        rf = readfile.ReadFile()
        with pytest.raises(exceptions.FlowReadException):
            await rf.load_flows_from_path("nonexistent")
        assert "nonexistent" in caplog.text
class TestReadFileStdin:
    """ReadFileStdin additionally supports reading flow dumps from stdin."""

    @mock.patch("sys.stdin")
    async def test_stdin(self, stdin, data, corrupt_data):
        rf = readfile.ReadFileStdin()
        with taddons.context(rf):
            with mock.patch("mitmproxy.master.Master.load_flow") as mck:
                # Valid data: flows are forwarded to the master.
                stdin.buffer = data
                mck.assert_not_awaited()
                await rf.load_flows(stdin.buffer)
                mck.assert_awaited()

                # Corrupted data raises.
                stdin.buffer = corrupt_data
                with pytest.raises(exceptions.FlowReadException):
                    await rf.load_flows(stdin.buffer)

    async def test_normal(self, tmpdir, data):
        # Regular file input still works through the stdin variant.
        rf = readfile.ReadFileStdin()
        with taddons.context(rf) as tctx:
            tf = tmpdir.join("tfile")
            with mock.patch("mitmproxy.master.Master.load_flow") as mck:
                tf.write(data.getvalue())
                tctx.configure(rf, rfile=str(tf))
                mck.assert_not_awaited()
                rf.running()
                await asyncio.sleep(0)
                mck.assert_awaited()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_disable_h2c.py | test/mitmproxy/addons/test_disable_h2c.py | from mitmproxy import flow
from mitmproxy.addons import disable_h2c
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
class TestDisableH2CleartextUpgrade:
    """DisableH2C strips h2c upgrade headers and kills HTTP/2
    prior-knowledge (PRI) connections."""

    def test_upgrade(self):
        with taddons.context() as tctx:
            a = disable_h2c.DisableH2C()
            tctx.configure(a)

            f = tflow.tflow()
            f.request.headers["upgrade"] = "h2c"
            f.request.headers["connection"] = "foo"
            f.request.headers["http2-settings"] = "bar"

            # All upgrade-related headers are removed from the request.
            a.request(f)
            assert "upgrade" not in f.request.headers
            assert "connection" not in f.request.headers
            assert "http2-settings" not in f.request.headers

    def test_prior_knowledge(self):
        with taddons.context() as tctx:
            a = disable_h2c.DisableH2C()
            tctx.configure(a)

            # An HTTP/2 prior-knowledge preamble (PRI *) ...
            f = tflow.tflow()
            f.request = tutils.treq(
                method=b"PRI",
                path=b"*",
                http_version=b"HTTP/2.0",
            )
            f.intercept()

            # ... is killed outright.
            a.request(f)
            assert not f.killable
            assert f.error.msg == flow.Error.KILLED_MESSAGE

    def test_non_killable_flows(self):
        with taddons.context() as tctx:
            a = disable_h2c.DisableH2C()
            tctx.configure(a)

            f = tflow.tflow()
            f.request = tutils.treq(
                method=b"PRI",
                path=b"*",
                http_version=b"HTTP/2.0",
            )
            # An already-killed flow must not raise when handled again.
            f.kill()
            a.request(f)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_upstream_auth.py | test/mitmproxy/addons/test_upstream_auth.py | import base64
import pytest
from mitmproxy import exceptions
from mitmproxy.addons import upstream_auth
from mitmproxy.proxy.mode_specs import ProxyMode
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_configure():
    """upstream_auth accepts user:password values (empty password
    allowed), resets cleanly, and rejects malformed specs."""
    addon = upstream_auth.UpstreamAuth()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, upstream_auth="test:test")
        assert addon.auth == b"Basic" + b" " + base64.b64encode(b"test:test")
        # An empty password is permitted.
        tctx.configure(addon, upstream_auth="test:")
        assert addon.auth == b"Basic" + b" " + base64.b64encode(b"test:")
        # None clears the credential.
        tctx.configure(addon, upstream_auth=None)
        assert not addon.auth
        # Malformed values are rejected.
        for bad in ("", ":", ":test"):
            with pytest.raises(exceptions.OptionsError):
                tctx.configure(addon, upstream_auth=bad)
def test_simple():
    """The credential header is attached only for upstream/reverse
    proxy modes, using the appropriate header name per mode."""
    addon = upstream_auth.UpstreamAuth()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, upstream_auth="foo:bar")

        flow = tflow.tflow()
        addon.http_connect_upstream(flow)
        assert "proxy-authorization" in flow.request.headers

        # Regular mode: no auth headers are injected.
        flow = tflow.tflow()
        addon.requestheaders(flow)
        assert "proxy-authorization" not in flow.request.headers
        assert "authorization" not in flow.request.headers

        # Upstream mode uses Proxy-Authorization ...
        flow.client_conn.proxy_mode = ProxyMode.parse("upstream:127.0.0.1")
        addon.requestheaders(flow)
        assert "proxy-authorization" in flow.request.headers

        # ... reverse mode uses plain Authorization.
        flow.client_conn.proxy_mode = ProxyMode.parse("reverse:127.0.0.1")
        addon.requestheaders(flow)
        assert "authorization" in flow.request.headers
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_stickycookie.py | test/mitmproxy/addons/test_stickycookie.py | import pytest
from mitmproxy.addons import stickycookie
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils as ntutils
def test_domain_match():
    """domain_match accepts exact and subdomain matches of a
    leading-dot cookie domain, and rejects unrelated hosts.

    The original test only asserted positive matches, so an
    implementation that returned True unconditionally would pass.
    """
    assert stickycookie.domain_match("www.google.com", ".google.com")
    assert stickycookie.domain_match("google.com", ".google.com")
    # An unrelated domain must not match.
    assert not stickycookie.domain_match("www.google.com", ".example.com")
class TestStickyCookie:
    """Tests for the stickycookie addon: capturing Set-Cookie headers
    into a jar and replaying them on later requests."""

    def test_config(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            # Invalid filter expressions are rejected.
            with pytest.raises(Exception, match="Invalid filter expression"):
                tctx.configure(sc, stickycookie="~b")

            tctx.configure(sc, stickycookie="foo")
            assert sc.flt
            tctx.configure(sc, stickycookie=None)
            assert not sc.flt

    def test_simple(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")
            f = tflow.tflow(resp=True)
            f.response.headers["set-cookie"] = "foo=bar"
            sc.request(f)

            sc.response(f)
            assert sc.jar
            assert "cookie" not in f.request.headers

            # The stored cookie is replayed onto a later request.
            f = f.copy()
            sc.request(f)
            assert f.request.headers["cookie"] == "foo=bar"

    def _response(self, sc, cookie, host):
        # Helper: run a response carrying `cookie` for `host` through sc.
        f = tflow.tflow(req=ntutils.treq(host=host, port=80), resp=True)
        f.response.headers["Set-Cookie"] = cookie
        sc.response(f)
        return f

    def test_response(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")

            c = (
                "SSID=mooo; domain=.google.com, FOO=bar; Domain=.google.com; Path=/; "
                "Expires=Wed, 13-Jan-2021 22:23:01 GMT; Secure; "
            )

            # Cookie domain does not match the request host -> not stored.
            self._response(sc, c, "host")
            assert not sc.jar.keys()

            self._response(sc, c, "www.google.com")
            assert sc.jar.keys()

            sc.jar.clear()
            self._response(sc, "SSID=mooo", "www.google.com")
            assert list(sc.jar.keys())[0] == ("www.google.com", 80, "/")

    def test_response_multiple(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")

            # Test setting of multiple cookies
            c1 = "somecookie=test; Path=/"
            c2 = "othercookie=helloworld; Path=/"
            f = self._response(sc, c1, "www.google.com")
            f.response.headers["Set-Cookie"] = c2
            sc.response(f)
            googlekey = list(sc.jar.keys())[0]
            assert len(sc.jar[googlekey].keys()) == 2

    def test_response_weird(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")

            # Test setting of weird cookie keys
            f = tflow.tflow(req=ntutils.treq(host="www.google.com", port=80), resp=True)
            cs = [
                "foo/bar=hello",
                "foo:bar=world",
                "foo@bar=fizz",
            ]
            for c in cs:
                f.response.headers["Set-Cookie"] = c
                sc.response(f)
            googlekey = list(sc.jar.keys())[0]
            assert len(sc.jar[googlekey].keys()) == len(cs)

    def test_response_overwrite(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")

            # Test overwriting of a cookie value
            c1 = "somecookie=helloworld; Path=/"
            c2 = "somecookie=newvalue; Path=/"
            f = self._response(sc, c1, "www.google.com")
            f.response.headers["Set-Cookie"] = c2
            sc.response(f)
            googlekey = list(sc.jar.keys())[0]
            assert len(sc.jar[googlekey]) == 1
            assert sc.jar[googlekey]["somecookie"] == "newvalue"

    def test_response_delete(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")

            # Test that a cookie is deleted
            # by setting the expire time in the past
            f = self._response(sc, "duffer=zafar; Path=/", "www.google.com")
            f.response.headers["Set-Cookie"] = (
                "duffer=; Expires=Thu, 01-Jan-1970 00:00:00 GMT"
            )
            sc.response(f)
            assert not sc.jar.keys()

    def test_request(self):
        sc = stickycookie.StickyCookie()
        with taddons.context(sc) as tctx:
            tctx.configure(sc, stickycookie=".*")

            f = self._response(sc, "SSID=mooo", "www.google.com")
            assert "cookie" not in f.request.headers
            sc.request(f)
            assert "cookie" in f.request.headers
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_keepserving.py | test/mitmproxy/addons/test_keepserving.py | import asyncio
from mitmproxy import command
from mitmproxy.addons import keepserving
from mitmproxy.test import taddons
class Dummy:
    """Test addon exposing all the commands/options KeepServing
    inspects, backed by a single boolean toggle."""

    def __init__(self, val: bool):
        # While `val` is True every "work pending" indicator below
        # reports activity; flipping it to False makes them all idle.
        self.val = val

    def load(self, loader):
        loader.add_option("client_replay", bool, self.val, "test")
        loader.add_option("server_replay", bool, self.val, "test")
        loader.add_option("rfile", bool, self.val, "test")

    @command.command("readfile.reading")
    def readfile(self) -> bool:
        return self.val

    @command.command("replay.client.count")
    def creplay(self) -> int:
        return 1 if self.val else 0

    @command.command("replay.server.count")
    def sreplay(self) -> int:
        return 1 if self.val else 0

    @command.command("proxyserver.active_connections")
    def active_connections(self) -> int:
        return 1 if self.val else 0
class TKS(keepserving.KeepServing):
    """KeepServing test double that records shutdown instead of
    actually stopping anything."""

    # Default so the flag can be read even before shutdown() runs.
    # (Previously declared as `_is_shutdown`, which nothing ever read:
    # shutdown() sets `is_shutdown`, so reading the flag before a
    # shutdown raised AttributeError instead of yielding False.)
    is_shutdown = False

    def shutdown(self):
        self.is_shutdown = True
async def test_keepserving():
    """KeepServing shuts the master down once all pending work
    (replays, file reading, active connections) has drained."""
    ks = TKS()
    d = Dummy(True)
    with taddons.context(ks) as tctx:
        tctx.master.addons.add(d)
        ks.running()
        assert ks.keepgoing()
        d.val = False
        assert not ks.keepgoing()
        # Give the watchdog task time to notice and trigger shutdown.
        await asyncio.sleep(0.3)
        assert ks.is_shutdown
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_modifyheaders.py | test/mitmproxy/addons/test_modifyheaders.py | import pytest
from mitmproxy.addons.modifyheaders import ModifyHeaders
from mitmproxy.addons.modifyheaders import parse_modify_spec
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test.tutils import tresp
def test_parse_modify_spec():
    """parse_modify_spec splits /flow-filter/subject/replacement and
    validates the embedded regular expression."""
    parsed = parse_modify_spec("/foo/bar/voing", True)
    assert parsed.matches.pattern == "foo"
    assert parsed.subject == b"bar"
    assert parsed.read_replacement() == b"voing"

    # Only the first separators count; later slashes belong to the
    # replacement text.
    parsed = parse_modify_spec("/foo/bar/vo/ing/", False)
    assert parsed.matches.pattern == "foo"
    assert parsed.subject == b"bar"
    assert parsed.read_replacement() == b"vo/ing/"

    # With only two fields, the flow filter defaults to match-all.
    parsed = parse_modify_spec("/bar/voing", False)
    assert parsed.matches(tflow.tflow())
    assert parsed.subject == b"bar"
    assert parsed.read_replacement() == b"voing"

    with pytest.raises(ValueError, match="Invalid regular expression"):
        parse_modify_spec("/[/two", True)
class TestModifyHeaders:
    """Tests for the ModifyHeaders addon."""

    def test_configure(self):
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            # Malformed specs are rejected at configure time.
            with pytest.raises(Exception, match="Cannot parse modify_headers"):
                tctx.configure(mh, modify_headers=["/"])
            tctx.configure(mh, modify_headers=["/foo/bar/voing"])

    def test_modify_headers(self):
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            # Request-only (~q) and response-only (~s) specs each apply
            # to their own side.
            tctx.configure(mh, modify_headers=["/~q/one/two", "/~s/one/three"])
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.requestheaders(f)
            assert f.request.headers["one"] == "two"
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            mh.responseheaders(f)
            assert f.response.headers["one"] == "three"

            # Two specs for the same header accumulate values.
            tctx.configure(mh, modify_headers=["/~s/one/two", "/~s/one/three"])
            f = tflow.tflow(resp=True)
            f.request.headers["one"] = "xxx"
            f.response.headers["one"] = "xxx"
            mh.responseheaders(f)
            assert f.response.headers.get_all("one") == ["two", "three"]

            tctx.configure(mh, modify_headers=["/~q/one/two", "/~q/one/three"])
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.requestheaders(f)
            assert f.request.headers.get_all("one") == ["two", "three"]

            # test removal of existing headers
            tctx.configure(mh, modify_headers=["/~q/one/", "/~s/one/"])
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.requestheaders(f)
            assert "one" not in f.request.headers
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            mh.responseheaders(f)
            assert "one" not in f.response.headers

            # A spec without a flow filter removes on both sides.
            tctx.configure(mh, modify_headers=["/one/"])
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.requestheaders(f)
            assert "one" not in f.request.headers
            f = tflow.tflow(resp=True)
            f.response.headers["one"] = "xxx"
            mh.responseheaders(f)
            assert "one" not in f.response.headers

            # test modifying a header that is also part of the filter expression
            # https://github.com/mitmproxy/mitmproxy/issues/4245
            tctx.configure(
                mh,
                modify_headers=[
                    "/~hq ^user-agent:.+Mozilla.+$/user-agent/Definitely not Mozilla ;)"
                ],
            )
            f = tflow.tflow()
            f.request.headers["user-agent"] = "Hello, it's me, Mozilla"
            mh.requestheaders(f)
            assert "Definitely not Mozilla ;)" == f.request.headers["user-agent"]

    @pytest.mark.parametrize("take", [True, False])
    def test_taken(self, take):
        # "Taken" flows (already answered or killed) must not be modified.
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            tctx.configure(mh, modify_headers=["/content-length/42"])

            f = tflow.tflow()
            if take:
                f.response = tresp()
            mh.requestheaders(f)
            assert (f.request.headers["content-length"] == "42") ^ take

            f = tflow.tflow(resp=True)
            if take:
                f.kill()
            mh.responseheaders(f)
            assert (f.response.headers["content-length"] == "42") ^ take
class TestModifyHeadersFile:
    """Replacement values can be loaded from a file via the "@path" form."""

    def test_simple(self, tmpdir):
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            tmpfile = tmpdir.join("replacement")
            tmpfile.write("two")
            tctx.configure(mh, modify_headers=["/~q/one/@" + str(tmpfile)])
            f = tflow.tflow()
            f.request.headers["one"] = "xxx"
            mh.requestheaders(f)
            assert f.request.headers["one"] == "two"

    async def test_nonexistent(self, tmpdir, caplog):
        mh = ModifyHeaders()
        with taddons.context(mh) as tctx:
            # A missing replacement file is rejected at configure time ...
            with pytest.raises(
                Exception, match="Cannot parse modify_headers .* Invalid file path"
            ):
                tctx.configure(mh, modify_headers=["/~q/foo/@nonexistent"])

            # ... but a file that disappears afterwards only logs an error.
            tmpfile = tmpdir.join("replacement")
            tmpfile.write("bar")
            tctx.configure(mh, modify_headers=["/~q/foo/@" + str(tmpfile)])
            tmpfile.remove()
            f = tflow.tflow()
            f.request.content = b"foo"
            mh.requestheaders(f)
            assert "Could not read" in caplog.text
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_maplocal.py | test/mitmproxy/addons/test_maplocal.py | import sys
from pathlib import Path
import pytest
from mitmproxy.addons.maplocal import file_candidates
from mitmproxy.addons.maplocal import MapLocal
from mitmproxy.addons.maplocal import MapLocalSpec
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.utils.spec import parse_spec
@pytest.mark.parametrize(
    "url,spec,expected_candidates",
    [
        # trailing slashes
        ("https://example.com/foo", ":example.com/foo:/tmp", ["/tmp/index.html"]),
        ("https://example.com/foo/", ":example.com/foo:/tmp", ["/tmp/index.html"]),
        ("https://example.com/foo", ":example.com/foo:/tmp/", ["/tmp/index.html"]),
    ]
    + [
        # simple prefixes
        (
            "http://example.com/foo/bar.jpg",
            ":example.com/foo:/tmp",
            ["/tmp/bar.jpg", "/tmp/bar.jpg/index.html"],
        ),
        (
            "https://example.com/foo/bar.jpg",
            ":example.com/foo:/tmp",
            ["/tmp/bar.jpg", "/tmp/bar.jpg/index.html"],
        ),
        (
            "https://example.com/foo/bar.jpg?query",
            ":example.com/foo:/tmp",
            ["/tmp/bar.jpg", "/tmp/bar.jpg/index.html"],
        ),
        (
            "https://example.com/foo/bar/baz.jpg",
            ":example.com/foo:/tmp",
            ["/tmp/bar/baz.jpg", "/tmp/bar/baz.jpg/index.html"],
        ),
        ("https://example.com/foo/bar.jpg", ":/foo/bar.jpg:/tmp", ["/tmp/index.html"]),
    ]
    + [
        # URL decode and special characters
        (
            "http://example.com/foo%20bar.jpg",
            ":example.com:/tmp",
            [
                "/tmp/foo bar.jpg",
                "/tmp/foo bar.jpg/index.html",
                "/tmp/foo_bar.jpg",
                "/tmp/foo_bar.jpg/index.html",
            ],
        ),
        (
            "http://example.com/fóobår.jpg",
            ":example.com:/tmp",
            [
                "/tmp/fóobår.jpg",
                "/tmp/fóobår.jpg/index.html",
                "/tmp/f_ob_r.jpg",
                "/tmp/f_ob_r.jpg/index.html",
            ],
        ),
    ]
    + [
        # index.html
        ("https://example.com/foo", ":example.com/foo:/tmp", ["/tmp/index.html"]),
        ("https://example.com/foo/", ":example.com/foo:/tmp", ["/tmp/index.html"]),
        (
            "https://example.com/foo/bar",
            ":example.com/foo:/tmp",
            ["/tmp/bar", "/tmp/bar/index.html"],
        ),
        (
            "https://example.com/foo/bar/",
            ":example.com/foo:/tmp",
            ["/tmp/bar", "/tmp/bar/index.html"],
        ),
    ]
    + [
        # regex
        (
            "https://example/view.php?f=foo.jpg",
            ":example/view.php\\?f=(.+):/tmp",
            ["/tmp/foo.jpg", "/tmp/foo.jpg/index.html"],
        ),
        (
            "https://example/results?id=1&foo=2",
            ":example/(results\\?id=.+):/tmp",
            [
                "/tmp/results?id=1&foo=2",
                "/tmp/results?id=1&foo=2/index.html",
                "/tmp/results_id=1_foo=2",
                "/tmp/results_id=1_foo=2/index.html",
            ],
        ),
    ]
    + [
        # test directory traversal detection
        ("https://example.com/../../../../../../etc/passwd", ":example.com:/tmp", []),
        # this is slightly hacky, but werkzeug's behavior differs per system.
        (
            "https://example.com/C:\\foo.txt",
            ":example.com:/tmp",
            []
            if sys.platform == "win32"
            else [
                "/tmp/C:\\foo.txt",
                "/tmp/C:\\foo.txt/index.html",
                "/tmp/C__foo.txt",
                "/tmp/C__foo.txt/index.html",
            ],
        ),
        (
            "https://example.com//etc/passwd",
            ":example.com:/tmp",
            ["/tmp/etc/passwd", "/tmp/etc/passwd/index.html"],
        ),
    ],
)
def test_file_candidates(url, spec, expected_candidates):
    """file_candidates maps a request URL through a MapLocal spec to
    the ordered list of local paths that should be probed."""
    # we circumvent the path existence checks here to simplify testing
    filt, subj, repl = parse_spec(spec)
    spec = MapLocalSpec(filt, subj, Path(repl))
    candidates = file_candidates(url, spec)
    assert [x.as_posix() for x in candidates] == expected_candidates
class TestMapLocal:
    """Tests for the MapLocal addon, which answers URLs from local files."""

    def test_configure(self, tmpdir):
        ml = MapLocal()
        with taddons.context(ml) as tctx:
            tctx.configure(ml, map_local=["/foo/bar/" + str(tmpdir)])
            # Broken regex and nonexistent target path are rejected.
            with pytest.raises(Exception, match="Invalid regular expression"):
                tctx.configure(ml, map_local=["/foo/+/" + str(tmpdir)])
            with pytest.raises(Exception, match="Invalid file path"):
                tctx.configure(ml, map_local=["/foo/.+/three"])

    def test_simple(self, tmpdir):
        ml = MapLocal()
        with taddons.context(ml) as tctx:
            # Map a URL prefix onto a directory.
            tmpfile = tmpdir.join("foo.jpg")
            tmpfile.write("foo")
            tctx.configure(ml, map_local=["|//example.org/images|" + str(tmpdir)])
            f = tflow.tflow()
            f.request.url = b"https://example.org/images/foo.jpg"
            ml.request(f)
            assert f.response.content == b"foo"

            # The path below the prefix is resolved inside the directory.
            tmpfile = tmpdir.join("images", "bar.jpg")
            tmpfile.write("bar", ensure=True)
            tctx.configure(ml, map_local=["|//example.org|" + str(tmpdir)])
            f = tflow.tflow()
            f.request.url = b"https://example.org/images/bar.jpg"
            ml.request(f)
            assert f.response.content == b"bar"

            # A spec may also map directly onto a single file.
            tmpfile = tmpdir.join("foofoobar.jpg")
            tmpfile.write("foofoobar", ensure=True)
            tctx.configure(
                ml, map_local=["|example.org/foo/foo/bar.jpg|" + str(tmpfile)]
            )
            f = tflow.tflow()
            f.request.url = b"https://example.org/foo/foo/bar.jpg"
            ml.request(f)
            assert f.response.content == b"foofoobar"

    async def test_nonexistent_files(self, tmpdir, monkeypatch, caplog):
        caplog.set_level("INFO")
        ml = MapLocal()
        with taddons.context(ml) as tctx:
            # No candidate file exists -> 404 plus an info log.
            tctx.configure(ml, map_local=["|example.org/css|" + str(tmpdir)])
            f = tflow.tflow()
            f.request.url = b"https://example.org/css/nonexistent"
            ml.request(f)
            assert f.response.status_code == 404
            assert "None of the local file candidates exist" in caplog.text

            # Simulate a file vanishing between the existence check and
            # the read: the failed read is logged.
            tmpfile = tmpdir.join("foo.jpg")
            tmpfile.write("foo")
            tctx.configure(ml, map_local=["|//example.org/images|" + str(tmpfile)])
            tmpfile.remove()
            monkeypatch.setattr(Path, "is_file", lambda x: True)
            f = tflow.tflow()
            f.request.url = b"https://example.org/images/foo.jpg"
            ml.request(f)
            assert "Could not read" in caplog.text

    def test_is_killed(self, tmpdir):
        ml = MapLocal()
        with taddons.context(ml) as tctx:
            tmpfile = tmpdir.join("foo.jpg")
            tmpfile.write("foo")
            tctx.configure(ml, map_local=["|//example.org/images|" + str(tmpfile)])
            f = tflow.tflow()
            f.request.url = b"https://example.org/images/foo.jpg"
            # Killed flows must not receive a mapped response.
            f.kill()
            ml.request(f)
            assert not f.response
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_stickyauth.py | test/mitmproxy/addons/test_stickyauth.py | import pytest
from mitmproxy import exceptions
from mitmproxy.addons import stickyauth
from mitmproxy.test import taddons
from mitmproxy.test import tflow
def test_configure():
    """Filter option handling: a valid filter compiles, an invalid
    one raises, and None clears the filter."""
    addon = stickyauth.StickyAuth()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, stickyauth="~s")
        with pytest.raises(exceptions.OptionsError):
            tctx.configure(addon, stickyauth="~~")
        tctx.configure(addon, stickyauth=None)
        assert not addon.flt
def test_simple():
    """An authorization header seen once is remembered per host and
    replayed onto later requests."""
    addon = stickyauth.StickyAuth()
    with taddons.context(addon) as tctx:
        tctx.configure(addon, stickyauth=".*")
        flow = tflow.tflow(resp=True)
        flow.request.headers["authorization"] = "foo"
        addon.request(flow)
        assert "address" in addon.hosts

        flow = tflow.tflow(resp=True)
        addon.request(flow)
        assert flow.request.headers["authorization"] == "foo"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/addons/test_savehar.py | test/mitmproxy/addons/test_savehar.py | import json
import zlib
from pathlib import Path
import pytest
from mitmproxy import io
from mitmproxy import types
from mitmproxy import version
from mitmproxy.addons.save import Save
from mitmproxy.addons.savehar import SaveHar
from mitmproxy.connection import Server
from mitmproxy.exceptions import OptionsError
from mitmproxy.http import Headers
from mitmproxy.http import Request
from mitmproxy.http import Response
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy.test import tutils
test_dir = Path(__file__).parent.parent
def test_write_error():
    """Exporting into a nonexistent directory fails loudly."""
    addon = SaveHar()
    with pytest.raises(FileNotFoundError):
        addon.export_har([], types.Path("unknown_dir/testing_flow.har"))
@pytest.mark.parametrize(
    "header, expected",
    [
        (Headers([(b"cookie", b"foo=bar")]), [{"name": "foo", "value": "bar"}]),
        # Duplicate cookie names must be preserved as separate entries.
        (
            Headers([(b"cookie", b"foo=bar"), (b"cookie", b"foo=baz")]),
            [{"name": "foo", "value": "bar"}, {"name": "foo", "value": "baz"}],
        ),
    ],
)
def test_request_cookies(header: Headers, expected: list[dict]):
    """Request cookies are exported as HAR name/value dicts."""
    s = SaveHar()
    req = Request.make("GET", "https://exampls.com", "", header)
    assert s.format_multidict(req.cookies) == expected
@pytest.mark.parametrize(
    "header, expected",
    [
        # A single Set-Cookie with path/domain attributes.
        (
            Headers(
                [
                    (
                        b"set-cookie",
                        b"foo=bar; path=/; domain=.googls.com; priority=high",
                    )
                ]
            ),
            [
                {
                    "name": "foo",
                    "value": "bar",
                    "path": "/",
                    "domain": ".googls.com",
                    "httpOnly": False,
                    "secure": False,
                }
            ],
        ),
        # Multiple Set-Cookie headers, including Secure/HttpOnly/SameSite.
        (
            Headers(
                [
                    (
                        b"set-cookie",
                        b"foo=bar; path=/; domain=.googls.com; Secure; HttpOnly; priority=high",
                    ),
                    (
                        b"set-cookie",
                        b"fooz=baz; path=/; domain=.googls.com; priority=high; SameSite=none",
                    ),
                ]
            ),
            [
                {
                    "name": "foo",
                    "value": "bar",
                    "path": "/",
                    "domain": ".googls.com",
                    "httpOnly": True,
                    "secure": True,
                },
                {
                    "name": "fooz",
                    "value": "baz",
                    "path": "/",
                    "domain": ".googls.com",
                    "httpOnly": False,
                    "secure": False,
                    "sameSite": "none",
                },
            ],
        ),
    ],
)
def test_response_cookies(header: Headers, expected: list[dict]):
    """Set-Cookie headers are expanded into HAR cookie objects with
    path/domain/security attributes."""
    s = SaveHar()
    resp = Response.make(200, "", header)
    assert s.format_response_cookies(resp) == expected
def test_seen_server_conn():
    """If the server connection was already seen, connection setup is
    not attributed to this flow: connect/ssl timings are -1."""
    addon = SaveHar()
    flow = tflow.twebsocketflow()
    seen = {flow.server_conn}
    timings = addon.flow_entry(flow, seen)["timings"]
    assert timings["connect"] == -1.0
    assert timings["ssl"] == -1.0
def test_timestamp_end():
    """The send timing falls back to 0 when the request has no end timestamp."""
    addon = SaveHar()
    flow = tflow.twebsocketflow()
    assert addon.flow_entry(flow, set())["timings"]["send"] == 1000
    flow.request.timestamp_end = None
    no_servers: set[Server] = set()
    assert addon.flow_entry(flow, no_servers)["timings"]["send"] == 0
def test_tls_setup():
    """A missing TLS setup timestamp maps to an ssl timing of -1."""
    addon = SaveHar()
    flow = tflow.twebsocketflow()
    flow.server_conn.timestamp_tls_setup = None
    entry = addon.flow_entry(flow, set())
    assert entry["timings"]["ssl"] == -1.0
def test_binary_content():
    """Response bodies that are not valid text are emitted base64-encoded."""
    resp_content = SaveHar().make_har(
        [tflow.tflow(resp=tutils.tresp(content=b"foo" + b"\xff" * 10))]
    )["log"]["entries"][0]["response"]["content"]
    assert resp_content == {
        "compression": 0,
        "encoding": "base64",
        "mimeType": "",
        "size": 13,
        "text": "Zm9v/////////////w==",
    }
@pytest.mark.parametrize(
    "log_file", [pytest.param(x, id=x.stem) for x in test_dir.glob("data/flows/*.mitm")]
)
def test_savehar(log_file: Path, tmp_path: Path, monkeypatch):
    """Golden-file test: the export of each .mitm must match its recorded .har."""
    # Pin the version string so the generated HAR matches the recorded files.
    monkeypatch.setattr(version, "VERSION", "1.2.3")
    s = SaveHar()
    flows = io.read_flows_from_paths([log_file])
    s.export_har(flows, types.Path(tmp_path / "testing_flow.har"))
    expected_har = json.loads(log_file.with_suffix(".har").read_bytes())
    actual_har = json.loads(Path(tmp_path / "testing_flow.har").read_bytes())
    assert actual_har == expected_har
def test_flow_entry():
    """https://github.com/mitmproxy/mitmproxy/issues/6579"""
    connect_req = Request.make("CONNECT", "https://test.test/")
    flow = tflow.tflow(req=connect_req)
    entry = SaveHar().flow_entry(flow, set())
    assert entry["request"]["url"].startswith("https")
class TestHardumpOption:
    """Behavior of the `hardump` option: output target, filtering, cleanup."""

    def test_simple(self, capsys):
        """hardump="-" writes the collected HAR to stdout on done()."""
        s = SaveHar()
        with taddons.context(s) as tctx:
            tctx.configure(s, hardump="-")
            s.response(tflow.tflow())
            s.error(tflow.tflow())
            ws = tflow.twebsocketflow()
            s.response(ws)
            s.websocket_end(ws)
            s.done()
            out = json.loads(capsys.readouterr().out)
            # response flow + errored flow + websocket flow
            assert len(out["log"]["entries"]) == 3

    def test_filter(self, capsys):
        """save_stream_filter restricts which flows end up in the HAR."""
        s = SaveHar()
        with taddons.context(s, Save()) as tctx:
            tctx.configure(s, hardump="-", save_stream_filter="~b foo")
            # invalid filter expressions are rejected at configure time
            with pytest.raises(OptionsError):
                tctx.configure(s, save_stream_filter="~~")
            s.response(tflow.tflow(req=tflow.treq(content=b"foo")))
            s.response(tflow.tflow())
            s.done()
            out = json.loads(capsys.readouterr().out)
            assert len(out["log"]["entries"]) == 1

    def test_free(self):
        """Clearing the hardump option discards the buffered flows."""
        s = SaveHar()
        with taddons.context(s, Save()) as tctx:
            tctx.configure(s, hardump="-")
            s.response(tflow.tflow())
            assert s.flows
            tctx.configure(s, hardump="")
            assert not s.flows

    def test_compressed(self, tmp_path):
        """A .zhar target produces zlib-compressed HAR output."""
        s = SaveHar()
        with taddons.context(s, Save()) as tctx:
            tctx.configure(s, hardump=str(tmp_path / "out.zhar"))
            s.response(tflow.tflow())
            s.done()
            out = json.loads(zlib.decompress((tmp_path / "out.zhar").read_bytes()))
            assert len(out["log"]["entries"]) == 1
def test_content_raises():
    """Flows whose bodies cannot be decoded must still export cleanly."""
    bogus_encoding = ((b"content-encoding", b"utf8"),)
    flow = tflow.tflow(
        req=tutils.treq(content=b"foo", headers=bogus_encoding),
        resp=tutils.tresp(content=b"foo", headers=bogus_encoding),
    )
    # accessing the decoded content raises ...
    with pytest.raises(ValueError):
        _ = flow.request.content
    with pytest.raises(ValueError):
        _ = flow.response.content
    # ... but the HAR export must not propagate that error
    assert SaveHar().make_har([flow])
if __name__ == "__main__":
    # Regenerate the reference .har files next to the recorded .mitm flows.
    # Pin the version string so the regenerated files are deterministic.
    version.VERSION = "1.2.3"
    s = SaveHar()
    for file in test_dir.glob("data/flows/*.mitm"):
        # Use a context manager so the flow file is closed deterministically
        # (the original leaked the open file handle).
        with open(file, "rb") as fp:
            flows = list(io.FlowReader(fp).stream())
        s.export_har(flows, types.Path(test_dir / f"data/flows/{file.stem}.har"))
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/test_local_ip.py | test/mitmproxy/net/test_local_ip.py | from mitmproxy.net import local_ip
def test_get_local_ip():
    """Must never raise; may return None depending on host configuration."""
    local_ip.get_local_ip()
    for destination in ("0.0.0.0", "127.0.0.1", "invalid!"):
        local_ip.get_local_ip(destination)
def test_get_local_ip6():
    """Must never raise; may return None depending on host configuration."""
    local_ip.get_local_ip6()
    local_ip.get_local_ip6("::")
    local_ip.get_local_ip6("::1")
    # Fixed copy-paste bug: the original called get_local_ip here, leaving
    # the invalid-input path of get_local_ip6 untested.
    local_ip.get_local_ip6("invalid!")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/__init__.py | test/mitmproxy/net/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/test_check.py | test/mitmproxy/net/test_check.py | from mitmproxy.net import check
def test_is_valid_host():
    """Exhaustive table of accepted/rejected hostnames for check.is_valid_host."""
    assert not check.is_valid_host(b"")
    assert not check.is_valid_host(b"xn--ke.ws")
    assert check.is_valid_host(b"one.two")
    # total length limit (255 octets)
    assert not check.is_valid_host(b"one" * 255)
    # trailing dot (FQDN form) is allowed
    assert check.is_valid_host(b"one.two.")
    # Allow underscore
    assert check.is_valid_host(b"one_two")
    assert check.is_valid_host(b"::1")
    # IP Address Validations
    assert check.is_valid_host(b"127.0.0.1")
    assert check.is_valid_host(b"2001:0db8:85a3:0000:0000:8a2e:0370:7334")
    assert check.is_valid_host(b"2001:db8:85a3:0:0:8a2e:370:7334")
    assert check.is_valid_host(b"2001:db8:85a3::8a2e:370:7334")
    assert not check.is_valid_host(b"2001:db8::85a3::7334")
    assert check.is_valid_host(b"2001-db8-85a3-8d3-1319-8a2e-370-7348.ipv6-literal.net")
    # TLD must be between 2 and 63 chars
    assert check.is_valid_host(b"example.tl")
    assert check.is_valid_host(b"example.tld")
    assert check.is_valid_host(b"example." + b"x" * 63)
    assert not check.is_valid_host(b"example." + b"x" * 64)
    # misc characters test
    assert not check.is_valid_host(b"ex@mple")
    assert not check.is_valid_host(b"ex@mple.com")
    assert not check.is_valid_host(b"example..com")
    assert not check.is_valid_host(b".example.com")
    assert not check.is_valid_host(b"@.example.com")
    assert not check.is_valid_host(b"!.example.com")
    # Every label must be between 1 and 63 chars
    assert not check.is_valid_host(b".tld")
    assert check.is_valid_host(b"x" * 1 + b".tld")
    assert check.is_valid_host(b"x" * 30 + b".tld")
    assert not check.is_valid_host(b"x" * 64 + b".tld")
    assert check.is_valid_host(b"x" * 1 + b".example.tld")
    assert check.is_valid_host(b"x" * 30 + b".example.tld")
    assert not check.is_valid_host(b"x" * 64 + b".example.tld")
    # Misc Underscore Test Cases
    assert check.is_valid_host(b"_example")
    assert check.is_valid_host(b"_example_")
    assert check.is_valid_host(b"example_")
    assert check.is_valid_host(b"_a.example.tld")
    assert check.is_valid_host(b"a_.example.tld")
    assert check.is_valid_host(b"_a_.example.tld")
    # Misc Dash/Hyphen/Minus Test Cases
    assert check.is_valid_host(b"-example")
    assert check.is_valid_host(b"-example_")
    assert check.is_valid_host(b"example-")
    assert check.is_valid_host(b"-a.example.tld")
    assert check.is_valid_host(b"a-.example.tld")
    assert check.is_valid_host(b"-a-.example.tld")
    # Misc Combo Test Cases
    assert check.is_valid_host(b"api-.example.com")
    assert check.is_valid_host(b"__a.example-site.com")
    assert check.is_valid_host(b"_-a.example-site.com")
    assert check.is_valid_host(b"_a_.example-site.com")
    assert check.is_valid_host(b"-a-.example-site.com")
    assert check.is_valid_host(b"api-.a.example.com")
    assert check.is_valid_host(b"api-._a.example.com")
    assert check.is_valid_host(b"api-.a_.example.com")
    assert check.is_valid_host(b"api-.ab.example.com")
    # Test str
    assert check.is_valid_host("example.tld")
    assert not check.is_valid_host("foo..bar")  # cannot be idna-encoded.
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/test_free_port.py | test/mitmproxy/net/test_free_port.py | import socket
from mitmproxy.net import free_port
def _raise(*_, **__):
raise OSError
def test_get_free_port():
    """The OS should always hand out some port number."""
    port = free_port.get_free_port()
    assert port is not None
def test_never_raises(monkeypatch):
    """When binding fails, get_free_port falls back to 0 instead of raising."""
    monkeypatch.setattr(socket.socket, "bind", _raise)
    fallback = free_port.get_free_port()
    assert fallback == 0
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/test_encoding.py | test/mitmproxy/net/test_encoding.py | from unittest import mock
import pytest
from mitmproxy.net import encoding
@pytest.mark.parametrize("encoder", ["identity", "none"])
def test_identity(encoder):
    """Identity codecs pass bytes through unchanged; unknown codecs raise."""
    payload = b"string"
    assert encoding.decode(payload, encoder) == payload
    assert encoding.encode(payload, encoder) == payload
    with pytest.raises(ValueError):
        encoding.encode(payload, "nonexistent encoding")
@pytest.mark.parametrize("encoder", ["gzip", "GZIP", "br", "deflate", "zstd"])
def test_encoders(encoder):
    """Byte<->byte codecs: None/empty passthrough, round-trips, type errors."""
    assert encoding.decode(None, encoder) is None
    assert encoding.encode(None, encoder) is None
    assert encoding.decode(b"", encoder) == b""
    round_tripped = encoding.decode(encoding.encode(b"string", encoder), encoder)
    assert round_tripped == b"string"
    # str input is rejected by both directions
    with pytest.raises(TypeError):
        encoding.encode("string", encoder)
    with pytest.raises(TypeError):
        encoding.decode("string", encoder)
    # garbage bytes cannot be decoded
    with pytest.raises(ValueError):
        encoding.decode(b"foobar", encoder)
@pytest.mark.parametrize("encoder", ["utf8", "latin-1"])
def test_encoders_strings(encoder):
    """Character codecs: bytes->str decoding and str->bytes encoding."""
    assert encoding.decode(b"", encoder) == ""
    assert encoding.decode(encoding.encode("string", encoder), encoder) == "string"
    # directions are strict about their input types
    with pytest.raises(TypeError):
        encoding.encode(b"string", encoder)
    with pytest.raises(TypeError):
        encoding.decode("foobar", encoder)
class TestDecodeGzip:
    """decode_gzip accepts gzip streams, raw zlib streams, and truncated input."""

    def test_regular_gzip(self):
        # generated with gzip.compress(b"mitmproxy")
        data = bytes.fromhex(
            "1f8b0800e4a4106902ffcbcd2cc92d28caafa80400d21f9c9d09000000"
        )
        assert encoding.decode_gzip(data) == b"mitmproxy"

    def test_zlib(self):
        # generated with zlib.compress(b"mitmproxy")
        data = bytes.fromhex("789ccbcd2cc92d28caafa80400138e03fa")
        assert encoding.decode_gzip(data) == b"mitmproxy"

    def test_truncated(self):
        """https://github.com/mitmproxy/mitmproxy/issues/7795"""
        data = bytes.fromhex(
            "1f8b08000000000000ffaa564a2d2a72ce4f4955b2d235d551502a4a2df12d4e57"
            "b2527ab17efbb38d4d4f7b5a9fec58fb6cd3c267733a934a3353946a01000000ffff"
        )
        assert encoding.decode_gzip(data) == (
            b'{"errCode":-5, "retMsg":"\xe8\xaf\xb7\xe6\xb1\x82\xe5\x8c\x85\xe4'
            b'\xb8\xad\xe6\xb2\xa1\xe6\x9c\x89buid"}'
        )
def test_cache():
    """encode/decode results are memoized per (data, encoding) pair."""
    decode_gzip = mock.MagicMock()
    decode_gzip.return_value = b"decoded"
    encode_gzip = mock.MagicMock()
    encode_gzip.return_value = b"encoded"
    with mock.patch.dict(encoding.custom_decode, gzip=decode_gzip):
        with mock.patch.dict(encoding.custom_encode, gzip=encode_gzip):
            assert encoding.decode(b"encoded", "gzip") == b"decoded"
            assert decode_gzip.call_count == 1
            # should be cached
            assert encoding.decode(b"encoded", "gzip") == b"decoded"
            assert decode_gzip.call_count == 1
            # the other way around as well
            assert encoding.encode(b"decoded", "gzip") == b"encoded"
            assert encode_gzip.call_count == 0
            # different encoding
            decode_gzip.return_value = b"bar"
            assert encoding.encode(b"decoded", "deflate") != b"decoded"
            assert encode_gzip.call_count == 0
            # This is not in the cache anymore
            assert encoding.encode(b"decoded", "gzip") == b"encoded"
            assert encode_gzip.call_count == 1
def test_zstd():
    """Concatenated zstd frames must all be decoded, not just the first."""
    frame_size = 1024
    payload = ("a" * frame_size).encode()
    # a single compressed frame round-trips to the original length
    one_frame = encoding.encode_zstd(payload)
    assert len(encoding.decode_zstd(one_frame)) == frame_size
    # two back-to-back frames decode to twice the payload
    two_frames = one_frame + one_frame
    assert len(encoding.decode_zstd(two_frames)) == frame_size * 2
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/test_tls.py | test/mitmproxy/net/test_tls.py | from pathlib import Path
import pytest
from cryptography.hazmat.primitives.asymmetric import ec
from OpenSSL import SSL
from mitmproxy import certs
from mitmproxy.net import tls
@pytest.mark.parametrize("version", [tls.Version.UNBOUNDED, tls.Version.SSL3])
def test_supported(version):
    """Only UNBOUNDED should be supported (test envs are assumed to disable SSLv3)."""
    should_be_supported = version is tls.Version.UNBOUNDED
    assert tls.is_supported_version(version) == should_be_supported
def test_make_master_secret_logger():
    """A logger is only created when a filename is provided."""
    assert tls.make_master_secret_logger(None) is None
    logger = tls.make_master_secret_logger("filepath")
    assert isinstance(logger, tls.MasterSecretLogger)
def test_sslkeylogfile(tdata, monkeypatch):
    """A full client<->server TLS handshake emits master secrets to the key log."""
    keylog = []
    # capture secrets instead of writing them to disk
    monkeypatch.setattr(
        tls, "log_master_secret", lambda conn, secrets: keylog.append(secrets)
    )
    store = certs.CertStore.from_files(
        Path(tdata.path("mitmproxy/net/data/verificationcerts/trusted-root.pem")),
        Path(tdata.path("mitmproxy/net/data/dhparam.pem")),
    )
    entry = store.get_cert("example.com", [], None)
    # context for the proxy's outbound (client) side
    cctx = tls.create_proxy_server_context(
        method=tls.Method.TLS_CLIENT_METHOD,
        min_version=tls.DEFAULT_MIN_VERSION,
        max_version=tls.DEFAULT_MAX_VERSION,
        cipher_list=None,
        ecdh_curve=None,
        verify=tls.Verify.VERIFY_NONE,
        ca_path=None,
        ca_pemfile=None,
        client_cert=None,
        legacy_server_connect=False,
    )
    # context for the proxy's inbound (server) side
    sctx = tls.create_client_proxy_context(
        method=tls.Method.TLS_SERVER_METHOD,
        min_version=tls.DEFAULT_MIN_VERSION,
        max_version=tls.DEFAULT_MAX_VERSION,
        cipher_list=None,
        ecdh_curve=None,
        chain_file=entry.chain_file,
        alpn_select_callback=None,
        request_client_cert=False,
        extra_chain_certs=(),
        dhparams=store.dhparams,
    )
    server = SSL.Connection(sctx)
    server.set_accept_state()
    server.use_certificate(entry.cert.to_cryptography())
    server.use_privatekey(entry.privatekey)
    client = SSL.Connection(cctx)
    client.set_connect_state()
    # Pump the in-memory handshake: whenever one side needs more data,
    # hand it the other side's pending bytes and swap roles.
    read, write = client, server
    while True:
        try:
            read.do_handshake()
        except SSL.WantReadError:
            write.bio_write(read.bio_read(2**16))
        else:
            break
        read, write = write, read
    assert keylog
    assert keylog[0].startswith(b"SERVER_HANDSHAKE_TRAFFIC_SECRET")
def test_is_record_magic():
    """TLS records start with 0x16 0x03 followed by 0x00..0x03."""
    for good in (b"\x16\x03\x00", b"\x16\x03\x01", b"\x16\x03\x02", b"\x16\x03\x03"):
        assert tls.starts_like_tls_record(good)
    bad_prefixes = (
        b"POST /",
        b"\x16\x03\x04",
        b"",
        b"\x16",
        b"\x16\x03",
        bytes.fromhex("16fefe"),
    )
    for data in bad_prefixes:
        assert not tls.starts_like_tls_record(data)
def test_is_dtls_record_magic():
    """DTLS records start with 0x16 0xfe 0xfd/0xfe."""
    for good in ("16fefd", "16fefe"):
        assert tls.starts_like_dtls_record(bytes.fromhex(good))
    for bad in ("", "16", "16fe", "160300", "160304", "150301"):
        assert not tls.starts_like_dtls_record(bytes.fromhex(bad))
def test_get_curve():
    """Curve names resolve to cryptography EC curve instances."""
    curve = tls.get_curve("secp256r1")
    assert isinstance(curve, ec.SECP256R1)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/test_server_spec.py | test/mitmproxy/net/test_server_spec.py | import pytest
from mitmproxy.net import server_spec
@pytest.mark.parametrize(
    "spec,default_scheme,out",
    [
        ("example.com", "https", ("https", ("example.com", 443))),
        ("http://example.com", "https", ("http", ("example.com", 80))),
        ("smtp.example.com:25", "tcp", ("tcp", ("smtp.example.com", 25))),
        ("http://127.0.0.1", "https", ("http", ("127.0.0.1", 80))),
        ("http://[::1]", "https", ("http", ("::1", 80))),
        ("http://[::1]/", "https", ("http", ("::1", 80))),
        ("https://[::1]/", "https", ("https", ("::1", 443))),
        ("http://[::1]:8080", "https", ("http", ("::1", 8080))),
    ],
)
def test_parse(spec, default_scheme, out):
    """Server specs parse to (scheme, (host, port)), defaulting ports by scheme."""
    assert server_spec.parse(spec, default_scheme) == out
def test_parse_err():
    """Each malformed spec raises ValueError with a descriptive message."""
    bad_specs = [
        (":", "https", "Invalid server specification"),
        ("ftp://example.com", "https", "Invalid server scheme"),
        ("$$$", "https", "Invalid hostname"),
        ("example.com:999999", "https", "Invalid port"),
        # tcp has no default port, so one must be given explicitly
        ("example.com", "tcp", "Port specification missing"),
    ]
    for spec, scheme, message in bad_specs:
        with pytest.raises(ValueError, match=message):
            server_spec.parse(spec, scheme)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/test_https_records.py | test/mitmproxy/net/dns/test_https_records.py | import re
import struct
import pytest
from hypothesis import given
from hypothesis import strategies as st
from mitmproxy.net.dns import https_records
class TestHTTPSRecords:
    """Packing, unpacking and JSON conversion of DNS HTTPS (SVCB) records."""

    def test_simple(self):
        """SVCParamKeys maps between numeric values and symbolic names."""
        assert https_records.SVCParamKeys.ALPN.value == 1
        assert https_records.SVCParamKeys(1).name == "ALPN"

    def test_httpsrecord(self):
        """HTTPSRecord requires priority, target_name and params."""
        with pytest.raises(
            TypeError,
            match=re.escape(
                "HTTPSRecord.__init__() missing 3 required positional arguments: 'priority', 'target_name', and 'params'"
            ),
        ):
            https_records.HTTPSRecord()

    def test_unpack(self):
        """pack/unpack round-trips; malformed wire data raises struct.error."""
        params = {
            0: b"\x00\x04\x00\x06",
            1: b"\x02h2\x02h3",
            2: b"",
            3: b"\x01\xbb",
            4: b"\xb9\xc7l\x99\xb9\xc7m\x99\xb9\xc7n\x99\xb9\xc7o\x99",
            5: b"testbytes",
            6: b"&\x06P\xc0\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x01S",
        }
        record = https_records.HTTPSRecord(1, "example.com", params)
        assert https_records.unpack(https_records.pack(record)) == record
        with pytest.raises(
            struct.error, match=re.escape("unpack requires a buffer of 2 bytes")
        ):
            https_records.unpack(b"")
        with pytest.raises(
            struct.error,
            match=re.escape("unpack encountered an illegal characters at offset 3"),
        ):
            https_records.unpack(
                b"\x00\x01\x07exampl\x87\x03com\x00\x00\x01\x00\x06\x02h2\x02h3"
            )
        with pytest.raises(
            struct.error, match=re.escape("unpack requires a buffer of 25 bytes")
        ):
            https_records.unpack(
                b"\x00\x01\x07example\x03com\x00\x00\x01\x00\x06\x02h2"
            )
        with pytest.raises(
            struct.error, match=re.escape("unpack requires a label buffer of 7 bytes")
        ):
            https_records.unpack(b"\x00\x01\x07exa")

    def test_pack(self):
        """Records serialize to the expected wire format (incl. empty name)."""
        params = {
            0: b"\x00\x04\x00\x06",
            1: b"\x02h2\x02h3",
            2: b"",
            3: b"\x01\xbb",
            4: b"\xb9\xc7l\x99\xb9\xc7m\x99\xb9\xc7n\x99\xb9\xc7o\x99",
            5: b"testbytes",
            6: b"&\x06P\xc0\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x01S",
        }
        record = https_records.HTTPSRecord(1, "example.com", params)
        assert (
            https_records.pack(record)
            == b"\x00\x01\x07example\x03com\x00\x00\x00\x00\x04\x00\x04\x00\x06\x00\x01\x00\x06\x02h2\x02h3\x00\x02\x00\x00\x00\x03\x00\x02\x01\xbb\x00\x04\x00\x10\xb9\xc7l\x99\xb9\xc7m\x99\xb9\xc7n\x99\xb9\xc7o\x99\x00\x05\x00\ttestbytes\x00\x06\x00@&\x06P\xc0\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x02\x00\x00\x00\x00\x00\x00\x00\x00\x01S&\x06P\xc0\x80\x03\x00\x00\x00\x00\x00\x00\x00\x00\x01S"
        )
        record = https_records.HTTPSRecord(1, "", {})
        assert https_records.pack(record) == b"\x00\x01\x00"

    @given(st.binary())
    def test_fuzz_unpack(self, data: bytes):
        """Arbitrary input may be rejected, but only ever with struct.error."""
        try:
            https_records.unpack(data)
        except struct.error:
            pass

    def test_to_json(self):
        """Known param keys map to names; unknown keys stay numeric."""
        params = {
            0: b"\x00",
            1: b"\x01",
            2: b"",
            3: b"\x02",
            4: b"\x03",
            5: b"\x04",
            6: b"\x05",
        }
        record = https_records.HTTPSRecord(1, "example.com", params)
        assert record.to_json() == {
            "alpn": r"\x01",
            "ech": r"\x04",
            "ipv4hint": r"\x03",
            "ipv6hint": r"\x05",
            "mandatory": r"\x00",
            "no_default_alpn": "",
            "port": r"\x02",
            "priority": 1,
            "target_name": "example.com",
        }
        params = {111: b"\x00"}
        record = https_records.HTTPSRecord(1, "example.com", params)
        assert record.to_json() == {
            111: r"\x00",
            "priority": 1,
            "target_name": "example.com",
        }
        assert (
            str(record)
            == r"{'target_name': 'example.com', 'priority': 1, 111: '\\x00'}"
        )

    def test_from_json(self):
        """from_json restores param keys and escape-encoded values."""
        record = https_records.HTTPSRecord.from_json(
            {
                "mandatory": r"\x00",
                "no_default_alpn": "",
                "priority": 1,
                "target_name": "example.com",
            }
        )
        assert record.target_name == "example.com"
        assert record.priority == 1
        assert record.params == {0: b"\x00", 2: b""}
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/test_response_codes.py | test/mitmproxy/net/dns/test_response_codes.py | from mitmproxy.net.dns import response_codes
def test_to_str():
    """Known response codes render as names, unknown ones as RCODE(n)."""
    named = response_codes.to_str(response_codes.NOERROR)
    fallback = response_codes.to_str(100)
    assert named == "NOERROR"
    assert fallback == "RCODE(100)"
def test_from_str():
    """from_str inverts to_str for both named and numeric fallback forms."""
    assert response_codes.from_str("NOERROR") == response_codes.NOERROR
    numeric = response_codes.from_str("RCODE(100)")
    assert numeric == 100
def test_http_equiv_status_code():
    """A DNS NOERROR maps to HTTP 200."""
    status = response_codes.http_equiv_status_code(response_codes.NOERROR)
    assert status == 200
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/test_domain_names.py | test/mitmproxy/net/dns/test_domain_names.py | import re
import struct
import pytest
from mitmproxy.net.dns import domain_names
from mitmproxy.net.dns import types
def test_unpack_from_with_compression():
    """Pointer-compressed names unpack correctly; pointer loops are rejected."""
    assert domain_names.unpack_from_with_compression(
        b"\xff\x03www\x07example\x03org\x00", 1, domain_names.cache()
    ) == (
        "www.example.org",
        17,
    )
    # a name whose pointer refers back to itself must not recurse forever
    with pytest.raises(
        struct.error, match=re.escape("unpack encountered domain name loop")
    ):
        domain_names.unpack_from_with_compression(
            b"\x03www\xc0\x00", 0, domain_names.cache()
        )
    assert domain_names.unpack_from_with_compression(
        b"\xff\xff\xff\x07example\x03org\x00\xff\xff\xff\x03www\xc0\x03",
        19,
        domain_names.cache(),
    ) == ("www.example.org", 6)
def test_unpack():
    """unpack decodes plain names and rejects every malformed wire form."""
    assert domain_names.unpack(b"\x03www\x07example\x03org\x00") == "www.example.org"
    # trailing garbage after the terminating zero label
    with pytest.raises(
        struct.error, match=re.escape("unpack requires a buffer of 17 bytes")
    ):
        domain_names.unpack(b"\x03www\x07example\x03org\x00\xff")
    # compression pointers are not allowed in RDATA context
    with pytest.raises(
        struct.error,
        match=re.escape("unpack encountered a pointer which is not supported in RDATA"),
    ):
        domain_names.unpack(b"\x03www\x07example\x03org\xc0\x00")
    # label length claims more bytes than the buffer holds
    with pytest.raises(
        struct.error, match=re.escape("unpack requires a label buffer of 10 bytes")
    ):
        domain_names.unpack(b"\x0a")
    # single labels are limited to 63 octets
    with pytest.raises(
        struct.error, match=re.escape("unpack encountered a label of length 64")
    ):
        domain_names.unpack(b"\x40" + (b"a" * 64) + b"\x00")
    with pytest.raises(
        struct.error,
        match=re.escape("unpack encountered an illegal characters at offset 1"),
    ):
        domain_names.unpack(b"\x03\xff\xff\xff\00")
def test_pack():
    """pack serializes names and rejects empty or over-long labels."""
    assert domain_names.pack("") == b"\x00"
    with pytest.raises(
        ValueError, match=re.escape("domain name 'hello..world' contains empty labels")
    ):
        domain_names.pack("hello..world")
    # labels above 63 octets cannot be encoded
    label = "a" * 64
    name = f"www.{label}.com"
    with pytest.raises(
        ValueError,
        match="label too long",
    ):
        domain_names.pack(name)
    assert domain_names.pack("www.example.org") == b"\x03www\x07example\x03org\x00"
def test_record_data_can_have_compression():
    """Compression pointers are legal in NS record data but not in HTTPS."""
    for record_type, allowed in ((types.NS, True), (types.HTTPS, False)):
        assert bool(domain_names.record_data_can_have_compression(record_type)) is allowed
def test_decompress_from_record_data():
    """Compressed names inside RDATA are expanded to their full form."""
    buffer = (
        b"\x10}\x81\x80\x00\x01\x00\x01\x00\x00\x00\x01\x06google\x03com\x00\x00\x06\x00\x01\xc0\x0c\x00\x06\x00"
        + b"\x01\x00\x00\x00\x0c\x00&\x03ns1\xc0\x0c\tdns-admin\xc0\x0c&~gw\x00\x00\x03\x84\x00\x00\x03\x84\x00"
        + b"\x00\x07\x08\x00\x00\x00<\x00\x00)\x02\x00\x00\x00\x00\x00\x00\x00"
    )
    assert (
        domain_names.decompress_from_record_data(buffer, 40, 78, domain_names.cache())
        == b"\x03ns1\x06google\x03com\x00\tdns-admin\x06google\x03com\x00&~gw\x00\x00\x03\x84\x00\x00\x03\x84\x00"
        + b"\x00\x07\x08\x00\x00\x00<"
    )
def test_record_data_contains_fake_pointer():
    """Bytes that merely look like pointers must not be treated as such."""
    # \xd2\a2 and \xc2\x00 seem like domain name compression pointers but are actually part of some other data type
    buffer = (
        b"\xfc\xc7\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x06\x00\x01\xc0\x0c\x00\x06\x00"
        + b"\x01\x00\x00\x008\x00&\x03ns1\xc0\x0c\tdns-admin\xc0\x0c&\xd2\xa2\xc2\x00\x00\x03\x84\x00\x00\x03\x84\x00"
        + b"\x00\x07\x08\x00\x00\x00<"
    )
    assert (
        domain_names.decompress_from_record_data(buffer, 40, 78, domain_names.cache())
        == b"\x03ns1\x06google\x03com\x00\tdns-admin\x06google\x03com\x00&\xd2\xa2\xc2\x00\x00\x03\x84\x00\x00\x03"
        + b"\x84\x00\x00\x07\x08\x00\x00\x00<"
    )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/test_classes.py | test/mitmproxy/net/dns/test_classes.py | from mitmproxy.net.dns import classes
def test_to_str():
    """Known classes render as names; unknown ones fall back to CLASS(n)."""
    rendered = classes.to_str(classes.IN)
    assert rendered == "IN"
    assert classes.to_str(0) == "CLASS(0)"
def test_from_str():
    """from_str inverts to_str for named and numeric fallback forms."""
    assert classes.from_str("IN") == classes.IN
    numeric = classes.from_str("CLASS(0)")
    assert numeric == 0
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/test_types.py | test/mitmproxy/net/dns/test_types.py | from mitmproxy.net.dns import types
def test_to_str():
    """Known types render as names; unknown ones fall back to TYPE(n)."""
    rendered = types.to_str(types.A)
    assert rendered == "A"
    assert types.to_str(0) == "TYPE(0)"
def test_from_str():
    """from_str inverts to_str for named and numeric fallback forms."""
    assert types.from_str("A") == types.A
    numeric = types.from_str("TYPE(0)")
    assert numeric == 0
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/test_op_codes.py | test/mitmproxy/net/dns/test_op_codes.py | from mitmproxy.net.dns import op_codes
def test_to_str():
    """Known opcodes render as names; unknown ones fall back to OPCODE(n)."""
    rendered = op_codes.to_str(op_codes.QUERY)
    assert rendered == "QUERY"
    assert op_codes.to_str(100) == "OPCODE(100)"
def test_from_str():
    """from_str inverts to_str for named and numeric fallback forms."""
    assert op_codes.from_str("QUERY") == op_codes.QUERY
    numeric = op_codes.from_str("OPCODE(100)")
    assert numeric == 100
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/dns/__init__.py | test/mitmproxy/net/dns/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/data/verificationcerts/generate.py | test/mitmproxy/net/data/verificationcerts/generate.py | """
Generate SSL test certificates.
"""
import os
import shlex
import shutil
import subprocess
import textwrap
# Basename of the CA everything else is signed with.
ROOT_CA = "trusted-root"
# Common name / SAN used for the trusted leaf certificates.
SUBJECT = "example.mitmproxy.org"
def do(args):
    """Echo *args*, run it as a command (no shell), and return its stdout bytes.

    Raises subprocess.CalledProcessError on a nonzero exit status.
    """
    print(f"> {args}")  # f-string instead of dated %-formatting
    argv = shlex.split(args)
    return subprocess.check_output(argv)
def genrsa(cert: str):
    # Generate a 2048-bit RSA private key at <cert>.key.
    do(f"openssl genrsa -out {cert}.key 2048")
def sign(cert: str, *exts: str):
    """Sign <cert>.csr with the root CA, producing <cert>.crt.

    *exts* are extra x509 extension lines appended to a temporary
    openssl extension config, which is removed afterwards.
    """
    with open(f"openssl-{cert}.conf", "w") as f:
        f.write(
            textwrap.dedent(
                f"""
            authorityKeyIdentifier=keyid,issuer
            basicConstraints=CA:FALSE
            keyUsage = digitalSignature, keyEncipherment
            """
            )
            + "\n".join(exts)
        )
    do(
        f"openssl x509 -req -in {cert}.csr "
        f"-CA {ROOT_CA}.crt "
        f"-CAkey {ROOT_CA}.key "
        f"-CAcreateserial "
        f"-days 7300 "
        f"-sha256 "
        f'-extfile "openssl-{cert}.conf" '
        f"-out {cert}.crt"
    )
    # the extension config is only an intermediate artifact
    os.remove(f"openssl-{cert}.conf")
def mkcert(cert, subject: str, *exts: str):
    """Generate a key + CSR for *subject* and have the root CA sign it.

    *exts* are x509 extension lines added both to the CSR and to the
    signing config.
    """
    genrsa(cert)
    do(
        # plain string: this segment has no placeholders (stray f-prefix removed)
        "openssl req -new -nodes -batch "
        f"-key {cert}.key "
        f"-subj /CN={subject}/O=mitmproxy "
        + "".join(f'-addext "{ext}" ' for ext in exts)
        + f"-out {cert}.csr"
    )
    sign(cert, *exts)
    # the CSR is only an intermediate artifact
    os.remove(f"{cert}.csr")
# create trusted root CA
genrsa("trusted-root")
do(
    "openssl req -x509 -new -nodes -batch "
    "-key trusted-root.key "
    "-days 7300 "
    "-out trusted-root.crt"
)
# OpenSSL locates CAs by subject-hash filenames, so also store the root
# certificate under its hash name.
h = do("openssl x509 -hash -noout -in trusted-root.crt").decode("ascii").strip()
shutil.copyfile("trusted-root.crt", f"{h}.0")
# create trusted leaf certs.
mkcert(
    "trusted-leaf",
    SUBJECT,
    f"subjectAltName = DNS:{SUBJECT}",
    "crlDistributionPoints = URI:https://trusted-root/example.crl",
)
# plain string literal: no placeholders (stray f-prefix removed)
mkcert("trusted-leaf-ip", "192.0.2.42", "subjectAltName = IP:192.0.2.42")
mkcert(
    "trusted-client-cert",
    "client.mitmproxy.org",
    "subjectAltName = DNS:client.mitmproxy.org",
    "extendedKeyUsage = clientAuth",
)
# create self-signed cert
genrsa("self-signed")
do(
    "openssl req -x509 -new -nodes -batch "
    "-key self-signed.key "
    f'-addext "subjectAltName = DNS:{SUBJECT}" '
    "-days 7300 "
    "-out self-signed.crt"
)
# bundle each certificate with its private key into a single .pem
for x in [
    "self-signed",
    "trusted-leaf",
    "trusted-leaf-ip",
    "trusted-root",
    "trusted-client-cert",
]:
    with open(f"{x}.crt") as crt, open(f"{x}.key") as key, open(f"{x}.pem", "w") as pem:
        pem.write(crt.read())
        pem.write(key.read())
shutil.copyfile("trusted-leaf.pem", "example.mitmproxy.org.pem")
# cert/key pair that deliberately does not match (stray f-prefixes removed below)
with (
    open("trusted-leaf.crt") as crt,
    open("self-signed.key") as key,
    open("private-public-mismatch.pem", "w") as pem,
):
    pem.write(crt.read())
    pem.write(key.read())
# leaf + root chain bundle
with (
    open("trusted-leaf.pem") as crt1,
    open("trusted-root.crt") as crt2,
    open("trusted-chain.pem", "w") as pem,
):
    pem.write(crt1.read())
    pem.write(crt2.read())
# chain bundle containing one syntactically invalid certificate
with open("trusted-leaf.pem") as crt1, open("trusted-chain-invalid.pem", "w") as pem:
    pem.write(crt1.read())
    pem.write("-----BEGIN CERTIFICATE-----\nnotacert\n-----END CERTIFICATE-----\n")
# certificate with an unparseable CRL distribution point
mkcert("invalid-crl", SUBJECT, "crlDistributionPoints = URI://[")
os.remove("invalid-crl.key")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_user_agents.py | test/mitmproxy/net/http/test_user_agents.py | from mitmproxy.net.http import user_agents
def test_get_shortcut():
    """Shortcut 'c' resolves to chrome; unknown shortcuts resolve to nothing."""
    name, *_ = user_agents.get_by_shortcut("c")
    assert name == "chrome"
    assert not user_agents.get_by_shortcut("_")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_cookies.py | test/mitmproxy/net/http/test_cookies.py | import time
from unittest import mock
import pytest
from mitmproxy.net.http import cookies
# Shared fixtures: (Cookie header value, expected [name, value] pairs).
cookie_pairs = [
    ["=uno", [["", "uno"]]],
    ["", []],
    ["one=uno", [["one", "uno"]]],
    ["one", [["one", ""]]],
    ["one=uno; two=due", [["one", "uno"], ["two", "due"]]],
    ['one="uno"; two="\\due"', [["one", "uno"], ["two", "due"]]],
    ['one="un\\"o"', [["one", 'un"o']]],
    ['one="uno,due"', [["one", "uno,due"]]],
    ["one=uno; two; three=tre", [["one", "uno"], ["two", ""], ["three", "tre"]]],
    [
        "_lvs2=zHai1+Hq+Tc2vmc2r4GAbdOI5Jopg3EwsdUT9g=; _rcc2=53VdltWl+Ov6ordflA==;",
        [
            ["_lvs2", "zHai1+Hq+Tc2vmc2r4GAbdOI5Jopg3EwsdUT9g="],
            ["_rcc2", "53VdltWl+Ov6ordflA=="],
        ],
    ],
]
def test_read_key():
    """_read_key consumes a cookie key starting at an offset and reports the stop index."""
    cases = [
        (("foo", 0), ("foo", 3)),
        (("foo", 1), ("oo", 3)),
        ((" foo", 0), (" foo", 4)),
        ((" foo", 1), ("foo", 4)),
        ((" foo;", 1), ("foo", 4)),
        ((" foo=", 1), ("foo", 4)),
        ((" foo=bar", 1), ("foo", 4)),
    ]
    for args, expected in cases:
        assert cookies._read_key(*args) == expected
def test_read_quoted_string():
    """_read_quoted_string unescapes a quoted cookie value and reports the stop index."""
    cases = [
        (('"foo" x', 0), ("foo", 5)),
        (('"f\\oo" x', 0), ("foo", 6)),
        ((r'"f\\o" x', 0), (r"f\o", 6)),
        ((r'"f\\" x', 0), (r"f" + "\\", 5)),
        (('"fo\\"" x', 0), ('fo"', 6)),
        (('"foo" x', 7), ("", 8)),
    ]
    for args, expected in cases:
        assert cookies._read_quoted_string(*args) == expected
def test_read_cookie_pairs():
    """_read_cookie_pairs splits a Cookie header value into [name, value] pairs."""
    cases = [
        ("=uno", [["", "uno"]]),
        ("one", [["one", ""]]),
        ("one=two", [["one", "two"]]),
        ("one=", [["one", ""]]),
        ('one="two"', [["one", "two"]]),
        ('one="two"; three=four', [["one", "two"], ["three", "four"]]),
        (
            'one="two"; three=four; five',
            [["one", "two"], ["three", "four"], ["five", ""]],
        ),
        ('one="\\"two"; three=four', [["one", '"two'], ["three", "four"]]),
    ]
    for header, expected in cases:
        pairs, _offset = cookies._read_cookie_pairs(header)
        assert pairs == expected
def test_pairs_roundtrips():
    """Parsing, formatting and re-parsing cookie pairs is lossless."""
    for header, expected in cookie_pairs:
        parsed, _ = cookies._read_cookie_pairs(header)
        assert parsed == expected
        reformatted = cookies._format_pairs(expected)
        reparsed, _ = cookies._read_cookie_pairs(reformatted)
        assert reparsed == expected
def test_cookie_roundtrips():
    """parse/format_cookie_header round-trip the shared fixtures losslessly."""
    for header, expected in cookie_pairs:
        parsed = cookies.parse_cookie_header(header)
        assert parsed == expected
        reparsed = cookies.parse_cookie_header(cookies.format_cookie_header(expected))
        assert reparsed == expected
def test_parse_set_cookie_pairs():
    """_read_set_cookie_pairs splits a Set-Cookie value into (name, value)
    tuples (None for attributes without a value), and formatting the result
    re-parses to the same pairs."""
    pairs = [
        ["=", [[("", "")]]],
        ["=;foo=bar", [[("", ""), ("foo", "bar")]]],
        ["=;=;foo=bar", [[("", ""), ("", ""), ("foo", "bar")]]],
        ["=uno", [[("", "uno")]]],
        ["one=uno", [[("one", "uno")]]],
        ["one=un\x20", [[("one", "un\x20")]]],
        ["one=uno; foo", [[("one", "uno"), ("foo", None)]]],
        [
            "mun=1.390.f60; "
            "expires=sun, 11-oct-2015 12:38:31 gmt; path=/; "
            "domain=b.aol.com",
            [
                [
                    ("mun", "1.390.f60"),
                    ("expires", "sun, 11-oct-2015 12:38:31 gmt"),
                    ("path", "/"),
                    ("domain", "b.aol.com"),
                ]
            ],
        ],
        [
            r"rpb=190%3d1%2616726%3d1%2634832%3d1%2634874%3d1; "
            "domain=.rubiconproject.com; "
            "expires=mon, 11-may-2015 21:54:57 gmt; "
            "path=/",
            [
                [
                    ("rpb", r"190%3d1%2616726%3d1%2634832%3d1%2634874%3d1"),
                    ("domain", ".rubiconproject.com"),
                    ("expires", "mon, 11-may-2015 21:54:57 gmt"),
                    ("path", "/"),
                ]
            ],
        ],
    ]
    for s, expected in pairs:
        ret, off = cookies._read_set_cookie_pairs(s)
        assert ret == expected
        # Round-trip: formatting the first cookie's pairs must re-parse identically.
        s2 = cookies._format_set_cookie_pairs(expected[0])
        ret2, off = cookies._read_set_cookie_pairs(s2)
        assert ret2 == expected
def test_parse_set_cookie_header():
    """parse_set_cookie_header yields (name, value, attrs) triples, including
    for the comma-separated multi-cookie variant, and the result survives a
    format/parse round trip."""

    def set_cookie_equal(obs, exp):
        # Compare (name, value) directly and the attrs multidict item-by-item.
        assert obs[0] == exp[0]
        assert obs[1] == exp[1]
        assert obs[2].items(multi=True) == exp[2]

    vals = [
        ["", []],
        [";", []],
        ["=uno", [("", "uno", ())]],
        ["one=uno", [("one", "uno", ())]],
        ["one=uno; foo=bar", [("one", "uno", (("foo", "bar"),))]],
        [
            "one=uno; foo=bar; foo=baz",
            [("one", "uno", (("foo", "bar"), ("foo", "baz")))],
        ],
        # Comma Separated Variant of Set-Cookie Headers
        [
            "foo=bar, doo=dar",
            [
                ("foo", "bar", ()),
                ("doo", "dar", ()),
            ],
        ],
        [
            "foo=bar; path=/, doo=dar; roo=rar; zoo=zar",
            [
                ("foo", "bar", (("path", "/"),)),
                ("doo", "dar", (("roo", "rar"), ("zoo", "zar"))),
            ],
        ],
        [
            "foo=bar; expires=Mon, 24 Aug 2133",
            [
                ("foo", "bar", (("expires", "Mon, 24 Aug 2133"),)),
            ],
        ],
        [
            "foo=bar; expires=Mon, 24 Aug 2133 00:00:00 GMT, doo=dar",
            [
                ("foo", "bar", (("expires", "Mon, 24 Aug 2133 00:00:00 GMT"),)),
                ("doo", "dar", ()),
            ],
        ],
    ]
    for s, expected in vals:
        ret = cookies.parse_set_cookie_header(s)
        if expected:
            for i in range(len(expected)):
                set_cookie_equal(ret[i], expected[i])
            s2 = cookies.format_set_cookie_header(ret)
            ret2 = cookies.parse_set_cookie_header(s2)
            for i in range(len(expected)):
                set_cookie_equal(ret2[i], expected[i])
        else:
            assert not ret
def test_refresh_cookie():
    """refresh_set_cookie_header shifts absolute Expires dates by the given
    delta, drops unparseable dates, and tolerates odd-but-seen cookie names."""
    # Invalid expires format, sent to us by Reddit.
    c = "rfoo=bar; Domain=reddit.com; expires=Thu, 31 Dec 2133 23:59:59 GMT; Path=/"
    assert cookies.refresh_set_cookie_header(c, 60)
    c = "MOO=BAR; Expires=Tue, 08-Mar-2011 00:20:38 GMT; Path=foo.com; Secure"
    assert "00:21:38" in cookies.refresh_set_cookie_header(c, 60)
    # An Expires value without a time component cannot be refreshed: it is dropped.
    c = "rfoo=bar; Domain=reddit.com; expires=Thu, 31 Dec 2133; Path=/"
    assert "expires" not in cookies.refresh_set_cookie_header(c, 60)
    # A header that parses to no cookies at all is an error.
    c = "foo,bar"
    with pytest.raises(ValueError):
        cookies.refresh_set_cookie_header(c, 60)
    # https://github.com/mitmproxy/mitmproxy/issues/773
    c = ">=A"
    assert cookies.refresh_set_cookie_header(c, 60)
    # https://github.com/mitmproxy/mitmproxy/issues/1118
    c = "foo:bar=bla"
    assert cookies.refresh_set_cookie_header(c, 0)
    c = "foo/bar=bla"
    assert cookies.refresh_set_cookie_header(c, 0)
    # https://github.com/mitmproxy/mitmproxy/issues/2250
    c = ""
    assert cookies.refresh_set_cookie_header(c, 60) == ""
@mock.patch("time.time")
def test_get_expiration_ts(*args):
    """get_expiration_ts honors absolute Expires dates and Max-Age offsets
    relative to the (frozen) current time."""
    # Freeze time so Max-Age offsets are computed from a known instant.
    frozen_now = 17
    time.time.return_value = frozen_now

    attrs = cookies.CookieAttrs
    expiration = cookies.get_expiration_ts

    assert expiration(attrs([("Expires", "Thu, 01-Jan-1970 00:00:00 GMT")])) == 0
    assert (
        expiration(attrs([("Expires", "Mon, 24-Aug-2133 00:00:00 GMT")])) == 5164128000
    )
    assert expiration(attrs([("Max-Age", "0")])) == frozen_now
    assert expiration(attrs([("Max-Age", "31")])) == frozen_now + 31
def test_is_expired():
    """A cookie expires via a past Expires date, a zero Max-Age, or both."""
    attrs = cookies.CookieAttrs

    # Past absolute expiry date.
    assert cookies.is_expired(attrs([("Expires", "Thu, 01-Jan-1970 00:00:00 GMT")]))
    # Zero Max-Age.
    assert cookies.is_expired(attrs([("Max-Age", "0")]))
    # Both at once.
    assert cookies.is_expired(
        attrs([("Expires", "Thu, 01-Jan-1970 00:00:00 GMT"), ("Max-Age", "0")])
    )

    # Future dates and positive Max-Age keep the cookie alive.
    assert not cookies.is_expired(attrs([("Expires", "Mon, 24-Aug-2133 00:00:00 GMT")]))
    assert not cookies.is_expired(attrs([("Max-Age", "1")]))
    assert not cookies.is_expired(
        attrs([("Expires", "Wed, 15-Jul-2133 00:00:00 GMT"), ("Max-Age", "1")])
    )

    # Unparseable attribute values are treated as "not expired".
    assert not cookies.is_expired(attrs([("Max-Age", "nan")]))
    assert not cookies.is_expired(attrs([("Expires", "false")]))
def test_group_cookies():
    """group_cookies attaches attribute pairs to the cookie that precedes
    them, turning a flat pair list into (name, value, attrs) triples."""
    CA = cookies.CookieAttrs
    groups = [
        [
            "one=uno; foo=bar; foo=baz",
            [("one", "uno", CA([])), ("foo", "bar", CA([])), ("foo", "baz", CA([]))],
        ],
        [
            "one=uno; Path=/; foo=bar; Max-Age=0; foo=baz; expires=24-08-1993",
            [
                ("one", "uno", CA([("Path", "/")])),
                ("foo", "bar", CA([("Max-Age", "0")])),
                ("foo", "baz", CA([("expires", "24-08-1993")])),
            ],
        ],
        ["one=uno;", [("one", "uno", CA([]))]],
        [
            "one=uno; Path=/; Max-Age=0; Expires=24-08-1993",
            [
                (
                    "one",
                    "uno",
                    CA([("Path", "/"), ("Max-Age", "0"), ("Expires", "24-08-1993")]),
                )
            ],
        ],
        # A cookie literally named "path" must not be mistaken for an attribute.
        ["path=val; Path=/", [("path", "val", CA([("Path", "/")]))]],
    ]
    for c, expected in groups:
        observed = cookies.group_cookies(cookies.parse_cookie_header(c))
        assert observed == expected
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_multipart.py | test/mitmproxy/net/http/test_multipart.py | import pytest
from mitmproxy.net.http import multipart
def test_decode():
    """decode_multipart extracts (name, value) fields; a boundary mismatch or
    missing content type yields an empty list."""
    boundary = "somefancyboundary"
    content = (
        "--{0}\n"
        'Content-Disposition: form-data; name="field1"\n\n'
        "value1\n"
        "--{0}\n"
        'Content-Disposition: form-data; name="field2"\n\n'
        "value2\n"
        "--{0}--".format(boundary).encode()
    )
    form = multipart.decode_multipart(f"multipart/form-data; {boundary=!s}", content)
    assert len(form) == 2
    assert form[0] == (b"field1", b"value1")
    assert form[1] == (b"field2", b"value2")
    # A (non-ASCII) boundary that does not appear in the body: nothing decodes.
    boundary = "boundary茅莽"
    result = multipart.decode_multipart(f"multipart/form-data; {boundary=!s}", content)
    assert result == []
    # Missing/invalid content type: nothing decodes.
    assert multipart.decode_multipart("", content) == []
def test_encode():
    """encode_multipart joins form fields with the boundary taken from the
    content type; a value containing the boundary raises, and an unusable
    content type yields empty output."""
    data = [(b"file", b"shell.jpg"), (b"file_size", b"1000")]
    content = multipart.encode_multipart(
        "multipart/form-data; boundary=127824672498", data
    )
    assert b'Content-Disposition: form-data; name="file"' in content
    assert (
        b"Content-Type: text/plain; charset=utf-8\r\n\r\nshell.jpg\r\n\r\n--127824672498\r\n"
        in content
    )
    # Fixed: this was previously a bare bytes literal (always truthy) and
    # never actually inspected the encoded output.
    assert b"1000\r\n\r\n--127824672498--\r\n" in content
    assert len(content) == 252

    with pytest.raises(ValueError, match=r"boundary found in encoded string"):
        multipart.encode_multipart(
            "multipart/form-data; boundary=127824672498", [(b"key", b"--127824672498")]
        )

    # A non-ASCII boundary cannot be encoded: the result is empty.
    result = multipart.encode_multipart(
        "multipart/form-data; boundary=boundary茅莽", data
    )
    assert result == b""
    # Missing/invalid content type also yields empty output.
    assert multipart.encode_multipart("", data) == b""
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_url.py | test/mitmproxy/net/http/test_url.py | from typing import AnyStr
import pytest
from mitmproxy.net.http import url
from mitmproxy.net.http.url import parse_authority
def test_parse():
    """url.parse splits a URL into (scheme, host, port, path) as bytes,
    inferring default ports and rejecting malformed input."""
    with pytest.raises(ValueError):
        url.parse("")
    s, h, po, pa = url.parse(b"http://foo.com:8888/test")
    assert s == b"http"
    assert h == b"foo.com"
    assert po == 8888
    assert pa == b"/test"
    # str input is accepted as well; components still come back as bytes.
    s, h, po, pa = url.parse("http://foo/bar")
    assert s == b"http"
    assert h == b"foo"
    assert po == 80
    assert pa == b"/bar"
    # Userinfo is stripped from the host.
    s, h, po, pa = url.parse(b"http://user:pass@foo/bar")
    assert s == b"http"
    assert h == b"foo"
    assert po == 80
    assert pa == b"/bar"
    # An empty path defaults to "/".
    s, h, po, pa = url.parse(b"http://foo")
    assert pa == b"/"
    s, h, po, pa = url.parse(b"https://foo")
    assert po == 443
    with pytest.raises(ValueError):
        url.parse(b"https://foo:bar")
    # Invalid IDNA
    with pytest.raises(ValueError):
        url.parse("http://\xfafoo")
    # Invalid PATH
    with pytest.raises(ValueError):
        url.parse("http:/\xc6/localhost:56121")
    # Null byte in host
    with pytest.raises(ValueError):
        url.parse("http://foo\0")
    # Invalid IPv6 URL - see http://www.ietf.org/rfc/rfc2732.txt
    with pytest.raises(ValueError):
        url.parse("http://lo[calhost")
def test_ascii_check():
    """Non-ASCII query characters are percent-encoded while parsing."""
    raw = "https://xyz.tax-edu.net?flag=selectCourse&lc_id=42825&lc_name=茅莽莽猫氓猫氓".encode()
    scheme, host, port, full_path = url.parse(raw)
    assert (scheme, host, port) == (b"https", b"xyz.tax-edu.net", 443)
    assert (
        full_path
        == b"/?flag%3DselectCourse%26lc_id%3D42825%26lc_name%3D%E8%8C%85%E8%8E%BD%E8%8E"
        b"%BD%E7%8C%AB%E6%B0%93%E7%8C%AB%E6%B0%93"
    )
def test_parse_port_range():
    """Ports above 65535 are rejected."""
    with pytest.raises(ValueError):
        url.parse("http://foo:999999")
def test_unparse():
    """unparse omits default ports, keeps non-default ones, and accepts both
    str and bytes components."""
    cases = [
        (("http", "foo.com", 99, ""), "http://foo.com:99"),
        (("http", "foo.com", 80, "/bar"), "http://foo.com/bar"),
        (("https", "foo.com", 80, ""), "https://foo.com:80"),
        (("https", "foo.com", 443, ""), "https://foo.com"),
        ((b"http", b"foo.com", 99, b""), b"http://foo.com:99"),
    ]
    for args, expected in cases:
        assert url.unparse(*args) == expected
# Every byte value (except '~', see below) decoded with surrogateescape, so
# invalid UTF-8 survives as lone surrogates. Used to exercise quote/unquote.
# We ignore the byte 126: '~' because of an incompatibility in Python 3.6 and 3.7
# In 3.6 it is escaped as %7E
# In 3.7 it stays as ASCII character '~'
# https://bugs.python.org/issue16285
surrogates = (bytes(range(0, 126)) + bytes(range(127, 256))).decode(
    "utf8", "surrogateescape"
)
# The percent-encoded form of `surrogates` above.
surrogates_quoted = (
    "%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F"
    "%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F"
    "%20%21%22%23%24%25%26%27%28%29%2A%2B%2C-./"
    "0123456789%3A%3B%3C%3D%3E%3F%40"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "%5B%5C%5D%5E_%60"
    "abcdefghijklmnopqrstuvwxyz"
    "%7B%7C%7D%7F"  # 7E or ~ is excluded!
    "%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F"
    "%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F"
    "%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF"
    "%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF"
    "%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF"
    "%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF"
    "%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF"
    "%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF"
)
def test_empty_key_trailing_equal_sign():
    """
    Some HTTP clients don't send trailing equal signs for parameters without assigned value, e.g. they send
    foo=bar&baz&qux=quux
    instead of
    foo=bar&baz=&qux=quux
    The respective behavior of encode() should be driven by a reference string given in similar_to parameter
    """
    reference_without_equal = "key1=val1&key2&key3=val3"
    reference_with_equal = "key1=val1&key2=&key3=val3"
    post_data_empty_key_middle = [("one", "two"), ("emptykey", ""), ("three", "four")]
    post_data_empty_key_end = [("one", "two"), ("three", "four"), ("emptykey", "")]
    # Reference uses "key=": keep the trailing equal sign.
    assert (
        url.encode(post_data_empty_key_middle, similar_to=reference_with_equal)
        == "one=two&emptykey=&three=four"
    )
    assert (
        url.encode(post_data_empty_key_end, similar_to=reference_with_equal)
        == "one=two&three=four&emptykey="
    )
    # Reference uses bare "key": drop the trailing equal sign.
    assert (
        url.encode(post_data_empty_key_middle, similar_to=reference_without_equal)
        == "one=two&emptykey&three=four"
    )
    assert (
        url.encode(post_data_empty_key_end, similar_to=reference_without_equal)
        == "one=two&three=four&emptykey"
    )
def test_encode():
    """encode produces output for non-empty data; an empty field list encodes
    to nothing regardless of the reference string."""
    for fields in ([("foo", "bar")], [("foo", surrogates)]):
        assert url.encode(fields)
    assert not url.encode([], similar_to="justatext")
def test_decode():
    """decode splits a query string into its key/value pairs."""
    pairs = url.decode("one=two&three=four")
    assert len(pairs) == 2
    # Surrogate characters must not crash the decoder.
    assert url.decode(surrogates)
def test_quote():
    """quote percent-encodes reserved characters and leaves safe ones alone."""
    cases = [
        ("foo", "foo"),
        ("foo bar", "foo%20bar"),
        (surrogates, surrogates_quoted),
    ]
    for raw, quoted in cases:
        assert url.quote(raw) == quoted
def test_unquote():
    """unquote reverses percent-encoding."""
    cases = [
        ("foo", "foo"),
        ("foo%20bar", "foo bar"),
        (surrogates_quoted, surrogates),
    ]
    for quoted, raw in cases:
        assert url.unquote(quoted) == raw
def test_hostport():
    """hostport joins host and port into an authority string."""
    authority = url.hostport(b"https", b"foo.com", 8080)
    assert authority == b"foo.com:8080"
def test_default_port():
    """default_port maps known schemes (str or bytes) to their well-known
    port and returns None for unrecognized schemes."""
    assert url.default_port("http") == 80
    assert url.default_port(b"https") == 443
    assert url.default_port(b"qux") is None
@pytest.mark.parametrize(
    "authority,valid,out",
    [
        ["foo:42", True, ("foo", 42)],
        [b"foo:42", True, ("foo", 42)],
        ["127.0.0.1:443", True, ("127.0.0.1", 443)],
        ["[2001:db8:42::]:443", True, ("2001:db8:42::", 443)],
        [b"xn--aaa-pla.example:80", True, ("äaaa.example", 80)],
        [b"xn--r8jz45g.xn--zckzah:80", True, ("例え.テスト", 80)],
        ["foo", True, ("foo", None)],
        ["foo..bar", False, ("foo..bar", None)],
        ["foo:bar", False, ("foo:bar", None)],
        [b"foo:bar", False, ("foo:bar", None)],
        ["foo:999999999", False, ("foo:999999999", None)],
        [b"\xff", False, ("\udcff", None)],
    ],
)
def test_parse_authority(authority: AnyStr, valid: bool, out):
    """Non-strict parsing always yields a best-effort (host, port) tuple;
    strict parsing additionally raises ValueError for invalid authorities."""
    assert parse_authority(authority, False) == out
    if valid:
        assert parse_authority(authority, True) == out
    else:
        with pytest.raises(ValueError):
            parse_authority(authority, True)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/__init__.py | test/mitmproxy/net/http/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_validate.py | test/mitmproxy/net/http/test_validate.py | import pytest
from mitmproxy.http import Headers
from mitmproxy.http import Request
from mitmproxy.http import Response
from mitmproxy.net.http.validate import parse_content_length
from mitmproxy.net.http.validate import parse_transfer_encoding
from mitmproxy.net.http.validate import validate_headers
def test_parse_content_length_ok():
    """Valid decimal content-length values parse from both str and bytes."""
    for value, expected in (("0", 0), ("42", 42)):
        assert parse_content_length(value) == expected
        assert parse_content_length(value.encode()) == expected
@pytest.mark.parametrize(
    "cl", ["NaN", "", " ", "-1", "+1", "0x42", "010", "foo", "1, 1"]
)
def test_parse_content_length_invalid(cl):
    """Anything that is not a plain decimal integer is rejected, str or bytes."""
    for candidate in (cl, cl.encode()):
        with pytest.raises(ValueError, match="invalid content-length"):
            parse_content_length(candidate)
def test_parse_transfer_encoding_ok():
    """Valid transfer-encoding values normalize whitespace and accept bytes."""
    cases = [
        (b"chunked", "chunked"),
        ("chunked", "chunked"),
        ("gzip,chunked", "gzip,chunked"),
        ("gzip, chunked", "gzip,chunked"),
    ]
    for value, expected in cases:
        assert parse_transfer_encoding(value) == expected
@pytest.mark.parametrize(
    "te",
    [
        "unknown",
        "chunked,chunked",
        "chunked,gzip",
        "",
        "chunKed",
        "chun ked",
    ],
)
def test_parse_transfer_encoding_invalid(te):
    """Unknown, duplicated, misordered, or malformed encodings are rejected,
    whether given as str or bytes."""
    for candidate in (te, te.encode()):
        with pytest.raises(ValueError, match="transfer-encoding"):
            parse_transfer_encoding(candidate)
def test_validate_headers_ok():
    """A lone content-length response and a chunked request pass validation."""
    response = Response.make(headers=Headers(content_length="42"))
    validate_headers(response)

    request = Request.make(
        "POST", "https://example.com", headers=Headers(transfer_encoding="chunked")
    )
    validate_headers(request)
@pytest.mark.parametrize(
    "headers",
    [
        pytest.param(
            Headers(transfer_encoding="chunked", content_length="42"), id="cl.te"
        ),
        pytest.param(Headers([(b"content-length ", b"42")]), id="whitespace-key"),
        pytest.param(Headers([(b"content-length", b"42 ")]), id="whitespace-value"),
        pytest.param(Headers(content_length="-42"), id="invalid-cl"),
        pytest.param(Headers(transfer_encoding="unknown"), id="unknown-te"),
        pytest.param(
            Headers([(b"content-length", b"42"), (b"content-length", b"43")]),
            id="multi-cl",
        ),
        pytest.param(
            Headers([(b"transfer-encoding", b""), (b"transfer-encoding", b"chunked")]),
            id="multi-te",
        ),
    ],
)
def test_validate_headers_invalid(headers: Headers):
    """Each header set above is a request-smuggling vector and must be rejected."""
    resp = Response.make()
    resp.headers = (
        headers  # update manually as Response.make() fixes content-length headers.
    )
    with pytest.raises(ValueError):
        validate_headers(resp)
def test_validate_headers_te_forbidden_http10():
    """Transfer-Encoding is not allowed on HTTP/1.0 messages."""
    response = Response.make(headers=Headers(transfer_encoding="chunked"))
    response.http_version = "HTTP/1.0"
    with pytest.raises(ValueError):
        validate_headers(response)
def test_validate_headers_te_forbidden_204():
    """A 204 No Content response must not carry a Transfer-Encoding header."""
    response = Response.make(
        headers=Headers(transfer_encoding="chunked"), status_code=204
    )
    with pytest.raises(ValueError):
        validate_headers(response)
def test_validate_headers_te_forbidden_identity_request():
    """Requests may not declare the "identity" transfer encoding."""
    request = Request.make(
        "POST", "https://example.com", headers=Headers(transfer_encoding="identity")
    )
    with pytest.raises(ValueError):
        validate_headers(request)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_headers.py | test/mitmproxy/net/http/test_headers.py | import collections
import pytest
from mitmproxy.net.http.headers import assemble_content_type
from mitmproxy.net.http.headers import infer_content_encoding
from mitmproxy.net.http.headers import parse_content_type
def test_parse_content_type():
    """parse_content_type splits a media type into (type, subtype, params)."""
    assert parse_content_type("text/html") == ("text", "html", {})
    # A bare type without a subtype is not a valid content type.
    assert parse_content_type("text") is None
    parsed = parse_content_type("text/html; charset=UTF-8")
    assert parsed == ("text", "html", {"charset": "UTF-8"})
def test_assemble_content_type():
    """assemble_content_type renders type/subtype plus parameters in order."""
    assert assemble_content_type("text", "html", {}) == "text/html"
    assert (
        assemble_content_type("text", "html", {"charset": "utf8"})
        == "text/html; charset=utf8"
    )
    params = collections.OrderedDict([("charset", "utf8"), ("foo", "bar")])
    assert (
        assemble_content_type("text", "html", params)
        == "text/html; charset=utf8; foo=bar"
    )
@pytest.mark.parametrize(
"content_type,content,expected",
[
("", b"", "latin-1"),
("", b"foo", "latin-1"),
("", b"\xfc", "latin-1"),
("", b"\xf0\xe2", "latin-1"),
# bom
("", b"\xef\xbb\xbffoo", "utf-8-sig"),
("", b"\xff\xfef\x00o\x00o\x00", "utf-16le"),
("", b"\xfe\xff\x00f\x00o\x00o", "utf-16be"),
("", b"\xff\xfe\x00\x00f\x00\x00\x00o\x00\x00\x00o\x00\x00\x00", "utf-32le"),
("", b"\x00\x00\xfe\xff\x00\x00\x00f\x00\x00\x00o\x00\x00\x00o", "utf-32be"),
# content-type charset
("text/html; charset=latin1", b"\xc3\xbc", "latin1"),
("text/html; charset=utf8", b"\xc3\xbc", "utf8"),
# json
("application/json", b'"\xc3\xbc"', "utf8"),
# html meta charset
(
"text/html",
b'<meta charset="gb2312">\xe6\x98\x8e\xe4\xbc\xaf',
"gb18030",
),
(
"text/html",
b'<meta http-equiv="content-type" '
b'content="text/html;charset=gb2312">\xe6\x98\x8e\xe4\xbc\xaf',
"gb18030",
),
(
"text/html",
b"<html></html>",
"utf8",
),
# xml declaration encoding
(
"application/xml",
b'<?xml version="1.0" encoding="gb2312"?>'
b"<root>\xe6\x98\x8e\xe4\xbc\xaf</root>",
"gb18030",
),
(
"application/xml",
b'<?xml version="1.0"?>',
"utf8",
),
# css charset
(
"text/css",
b'\xef\xbb\xbf@charset "UTF-8";.\xe5\xb9\xb3\xe5\x92\x8c,#div2 {color: green;}',
"utf-8-sig",
),
(
"text/css",
b'@charset "gb2312";#foo::before {content: "\xe6\x98\x8e\xe4\xbc\xaf"}',
"gb18030",
),
(
"text/css",
b"h1 {}",
"utf8",
),
# js
("application/javascript", b"", "utf8"),
("application/ecmascript", b"", "utf8"),
("text/javascript", b"", "utf8"),
],
)
def test_infer_content_encoding(content_type, content, expected):
    """infer_content_encoding picks a charset from a BOM, the Content-Type
    header, or in-body declarations (HTML meta, XML decl, CSS @charset)."""
    # Additional test coverage in `test_http::TestMessageText`
    assert infer_content_encoding(content_type, content) == expected
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/test_status_codes.py | test/mitmproxy/net/http/test_status_codes.py | from mitmproxy.net.http import status_codes
def test_simple():
    """The teapot constant and its reason phrase are wired up."""
    assert status_codes.IM_A_TEAPOT == 418
    assert status_codes.RESPONSES[status_codes.IM_A_TEAPOT] == "I'm a teapot"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/http1/test_assemble.py | test/mitmproxy/net/http/http1/test_assemble.py | import pytest
from mitmproxy.http import Headers
from mitmproxy.net.http.http1.assemble import _assemble_request_headers
from mitmproxy.net.http.http1.assemble import _assemble_request_line
from mitmproxy.net.http.http1.assemble import _assemble_response_headers
from mitmproxy.net.http.http1.assemble import assemble_body
from mitmproxy.net.http.http1.assemble import assemble_request
from mitmproxy.net.http.http1.assemble import assemble_request_head
from mitmproxy.net.http.http1.assemble import assemble_response
from mitmproxy.net.http.http1.assemble import assemble_response_head
from mitmproxy.test.tutils import treq
from mitmproxy.test.tutils import tresp
def test_assemble_request():
    """A complete request serializes head plus body; missing content raises."""
    raw = assemble_request(treq())
    assert raw == (
        b"GET /path HTTP/1.1\r\nheader: qvalue\r\ncontent-length: 7\r\n\r\ncontent"
    )
    with pytest.raises(ValueError):
        assemble_request(treq(content=None))
def test_assemble_request_head():
    """The head contains the request line and headers but never the body."""
    head = assemble_request_head(treq(content=b"foo"))
    for expected in (b"GET", b"qvalue", b"content-length"):
        assert expected in head
    assert b"foo" not in head
def test_assemble_response():
    """Responses serialize with a plain body, or chunked with trailers;
    assembling without content raises."""
    assert assemble_response(tresp()) == (
        b"HTTP/1.1 200 OK\r\n"
        b"header-response: svalue\r\n"
        b"content-length: 7\r\n"
        b"\r\n"
        b"message"
    )
    # Chunked encoding with a trailer section after the final zero-chunk.
    resp = tresp()
    resp.headers["transfer-encoding"] = "chunked"
    resp.headers["trailer"] = "my-little-trailer"
    resp.trailers = Headers([(b"my-little-trailer", b"foobar")])
    assert assemble_response(resp) == (
        b"HTTP/1.1 200 OK\r\n"
        b"header-response: svalue\r\n"
        b"content-length: 7\r\n"
        b"transfer-encoding: chunked\r\n"
        b"trailer: my-little-trailer\r\n"
        b"\r\n7\r\n"
        b"message"
        b"\r\n0\r\n"
        b"my-little-trailer: foobar\r\n\r\n"
    )
    with pytest.raises(ValueError):
        assemble_response(tresp(content=None))
def test_assemble_response_head():
    """The head contains the status line and headers but not the body."""
    head = assemble_response_head(tresp())
    assert b"200" in head
    assert b"svalue" in head
    assert b"message" not in head
def test_assemble_body():
    """assemble_body passes plain bodies through, chunk-encodes when
    transfer-encoding is chunked (emitting trailers after the zero-chunk),
    and rejects trailers without chunked encoding."""
    c = list(assemble_body(Headers(), [b"body"], Headers()))
    assert c == [b"body"]
    c = list(
        assemble_body(
            Headers(transfer_encoding="chunked"), [b"123456789a", b""], Headers()
        )
    )
    assert c == [b"a\r\n123456789a\r\n", b"0\r\n\r\n"]
    c = list(
        assemble_body(Headers(transfer_encoding="chunked"), [b"123456789a"], Headers())
    )
    assert c == [b"a\r\n123456789a\r\n", b"0\r\n\r\n"]
    c = list(
        assemble_body(
            Headers(transfer_encoding="chunked"),
            [b"123456789a"],
            Headers(trailer="trailer"),
        )
    )
    assert c == [b"a\r\n123456789a\r\n", b"0\r\ntrailer: trailer\r\n\r\n"]
    # Trailers require chunked transfer-encoding.
    with pytest.raises(ValueError):
        list(assemble_body(Headers(), [b"body"], Headers(trailer="trailer")))
def test_assemble_request_line():
    """Origin-form, authority-form (CONNECT) and absolute-form request lines."""
    assert _assemble_request_line(treq().data) == b"GET /path HTTP/1.1"

    connect = treq(method=b"CONNECT", authority=b"address:22").data
    assert _assemble_request_line(connect) == b"CONNECT address:22 HTTP/1.1"

    absolute = treq(scheme=b"http", authority=b"address:22").data
    assert _assemble_request_line(absolute) == b"GET http://address:22/path HTTP/1.1"
def test_assemble_request_headers():
    """Transfer-Encoding must survive assembly even with an empty body.

    Regression test for https://github.com/mitmproxy/mitmproxy/issues/186.
    """
    req = treq(content=b"")
    req.headers["Transfer-Encoding"] = "chunked"
    assert b"Transfer-Encoding" in _assemble_request_headers(req.data)
def test_assemble_response_headers():
    """Transfer-Encoding must survive assembly even with an empty body.

    Regression test for https://github.com/mitmproxy/mitmproxy/issues/186.
    """
    resp = tresp(content=b"")
    resp.headers["Transfer-Encoding"] = "chunked"
    assert b"Transfer-Encoding" in _assemble_response_headers(resp)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/http1/__init__.py | test/mitmproxy/net/http/http1/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/net/http/http1/test_read.py | test/mitmproxy/net/http/http1/test_read.py | import pytest
from mitmproxy.http import Headers
from mitmproxy.net.http.http1.read import _read_headers
from mitmproxy.net.http.http1.read import _read_request_line
from mitmproxy.net.http.http1.read import _read_response_line
from mitmproxy.net.http.http1.read import connection_close
from mitmproxy.net.http.http1.read import expected_http_body_size
from mitmproxy.net.http.http1.read import get_header_tokens
from mitmproxy.net.http.http1.read import read_request_head
from mitmproxy.net.http.http1.read import read_response_head
from mitmproxy.test.tutils import treq
from mitmproxy.test.tutils import tresp
def test_get_header_tokens():
    """Tokens are split on commas and collected across repeated headers."""
    headers = Headers()
    assert get_header_tokens(headers, "foo") == []
    headers["foo"] = "bar"
    assert get_header_tokens(headers, "foo") == ["bar"]
    # Comma-separated values in a single header are split into tokens.
    headers["foo"] = "bar, voing"
    assert get_header_tokens(headers, "foo") == ["bar", "voing"]
    # Tokens from repeated header fields are concatenated in order.
    headers.set_all("foo", ["bar, voing", "oink"])
    assert get_header_tokens(headers, "foo") == ["bar", "voing", "oink"]
def test_connection_close():
    """HTTP/1.0 closes by default; HTTP/1.1+ stays open unless the Connection
    header carries an explicit "close" token."""
    headers = Headers()
    # No Connection header: the HTTP version decides.
    assert connection_close(b"HTTP/1.0", headers)
    assert not connection_close(b"HTTP/1.1", headers)
    assert not connection_close(b"HTTP/2.0", headers)
    headers["connection"] = "keep-alive"
    assert not connection_close(b"HTTP/1.1", headers)
    headers["connection"] = "close"
    assert connection_close(b"HTTP/1.1", headers)
    # Unknown tokens fall back to the per-version default.
    headers["connection"] = "foobar"
    assert connection_close(b"HTTP/1.0", headers)
    assert not connection_close(b"HTTP/1.1", headers)
def test_read_request_head():
    """The head parser fills method and headers but leaves content unread."""
    lines = [
        b"GET / HTTP/1.1\r\n",
        b"Content-Length: 4\r\n",
    ]
    req = read_request_head(lines)
    assert req.method == "GET"
    assert req.headers["Content-Length"] == "4"
    assert req.content is None
def test_read_response_head():
    """The head parser fills status code and headers but leaves content unread."""
    lines = [
        b"HTTP/1.1 418 I'm a teapot\r\n",
        b"Content-Length: 4\r\n",
    ]
    resp = read_response_head(lines)
    assert resp.status_code == 418
    assert resp.headers["Content-Length"] == "4"
    assert resp.content is None
def test_expected_http_body_size():
    """expected_http_body_size returns the byte count from content-length,
    None for chunked ("read until terminator"), -1 for "read until close",
    and 0 where no body is allowed; malformed framing headers raise."""
    # Expect: 100-continue
    assert (
        expected_http_body_size(
            treq(headers=Headers(expect="100-continue", content_length="42")),
        )
        == 42
    )
    # http://tools.ietf.org/html/rfc7230#section-3.3
    assert (
        expected_http_body_size(
            treq(method=b"HEAD"), tresp(headers=Headers(content_length="42"))
        )
        == 0
    )
    assert (
        expected_http_body_size(
            treq(method=b"CONNECT", headers=Headers()),
            None,
        )
        == 0
    )
    assert expected_http_body_size(treq(method=b"CONNECT"), tresp()) == 0
    for code in (100, 204, 304):
        assert expected_http_body_size(treq(), tresp(status_code=code)) == 0
    # chunked
    assert (
        expected_http_body_size(
            treq(headers=Headers(transfer_encoding="chunked")),
        )
        is None
    )
    assert (
        expected_http_body_size(
            treq(headers=Headers(transfer_encoding="gzip,\tchunked")),
        )
        is None
    )
    with pytest.raises(ValueError, match="invalid transfer-encoding header"):
        expected_http_body_size(
            treq(
                headers=Headers(transfer_encoding="chun\u212aed")
            ),  # "chunKed".lower() == "chunked": Unicode case-folding must not be applied
        )
    with pytest.raises(ValueError, match="unknown transfer-encoding header"):
        expected_http_body_size(
            treq(
                headers=Headers(transfer_encoding="chun ked")
            ),  # whitespace inside a token is not a valid encoding
        )
    with pytest.raises(ValueError, match="unknown transfer-encoding header"):
        expected_http_body_size(
            treq(headers=Headers(transfer_encoding="qux")),
        )
    # transfer-encoding: gzip
    assert (
        expected_http_body_size(
            treq(),
            tresp(headers=Headers(transfer_encoding="gzip")),
        )
        == -1
    )
    # requests with non-chunked transfer encoding.
    # technically invalid, but we want to maximize compatibility if validate_inbound_headers is false.
    assert (
        expected_http_body_size(
            treq(headers=Headers(transfer_encoding="identity", content_length="42")),
            None,
        )
        == 42
    )
    # Example of a misbehaving client:
    # https://github.com/tensorflow/tensorflow/blob/fd9471e7d48e8e86684c847c0e1897c76e737805/third_party/xla/xla/tsl/platform/cloud/curl_http_request.cc
    assert (
        expected_http_body_size(
            treq(headers=Headers(transfer_encoding="identity")), None
        )
        == 0
    )
    assert (
        expected_http_body_size(treq(headers=Headers(transfer_encoding="gzip")), None)
        == -1
    )
    # explicit length
    assert expected_http_body_size(treq(headers=Headers(content_length="42"))) == 42
    # invalid lengths
    with pytest.raises(ValueError):
        expected_http_body_size(treq(headers=Headers(content_length=b"foo")))
    with pytest.raises(ValueError):
        expected_http_body_size(
            treq(
                headers=Headers(
                    [(b"content-length", b"42"), (b"content-length", b"42")]
                )
            )
        )
    # no length
    assert expected_http_body_size(treq(headers=Headers())) == 0
    assert (
        expected_http_body_size(treq(headers=Headers()), tresp(headers=Headers())) == -1
    )
def test_read_request_line():
    """_read_request_line parses origin-, authority- and absolute-form request
    lines into (host, port, method, scheme, authority, path, http_version)."""

    def t(b):
        return _read_request_line(b)

    assert t(b"GET / HTTP/1.1") == ("", 0, b"GET", b"", b"", b"/", b"HTTP/1.1")
    assert t(b"OPTIONS * HTTP/1.1") == ("", 0, b"OPTIONS", b"", b"", b"*", b"HTTP/1.1")
    assert t(b"CONNECT foo:42 HTTP/1.1") == (
        "foo",
        42,
        b"CONNECT",
        b"",
        b"foo:42",
        b"",
        b"HTTP/1.1",
    )
    assert t(b"GET http://foo:42/bar HTTP/1.1") == (
        "foo",
        42,
        b"GET",
        b"http",
        b"foo:42",
        b"/bar",
        b"HTTP/1.1",
    )
    assert t(b"GET http://foo:42 HTTP/1.1") == (
        "foo",
        42,
        b"GET",
        b"http",
        b"foo:42",
        b"/",
        b"HTTP/1.1",
    )
    with pytest.raises(ValueError):
        t(b"GET / WTF/1.1")
    with pytest.raises(ValueError):
        t(b"CONNECT example.com HTTP/1.1")  # port missing
    with pytest.raises(ValueError):
        t(b"GET ws://example.com/ HTTP/1.1")  # port missing
    with pytest.raises(ValueError):
        t(b"this is not http")
    with pytest.raises(ValueError):
        t(b"")
def test_read_response_line():
    """_read_response_line parses a status line into (version, code, reason);
    the reason phrase is optional and may contain raw non-ASCII bytes."""

    def t(b):
        return _read_response_line(b)

    assert t(b"HTTP/1.1 200 OK") == (b"HTTP/1.1", 200, b"OK")
    assert t(b"HTTP/1.1 200") == (b"HTTP/1.1", 200, b"")
    # https://github.com/mitmproxy/mitmproxy/issues/784
    assert t(b"HTTP/1.1 200 Non-Autoris\xc3\xa9") == (
        b"HTTP/1.1",
        200,
        b"Non-Autoris\xc3\xa9",
    )
    with pytest.raises(ValueError):
        assert t(b"HTTP/1.1")
    with pytest.raises(ValueError):
        t(b"HTTP/1.1 OK OK")
    with pytest.raises(ValueError):
        t(b"WTF/1.1 200 OK")
    with pytest.raises(ValueError):
        t(b"")
class TestReadHeaders:
    """Tests for the low-level HTTP/1 header parser (_read_headers)."""

    @staticmethod
    def _read(data):
        # Feed the raw bytes line-by-line, as the read loop would.
        return _read_headers(data.splitlines(keepends=True))

    def test_read_simple(self):
        data = b"Header: one\r\nHeader2: two\r\n"
        headers = self._read(data)
        assert headers.fields == ((b"Header", b"one"), (b"Header2", b"two"))

    def test_read_multi(self):
        # Repeated header names are preserved as separate fields.
        data = b"Header: one\r\nHeader: two\r\n"
        headers = self._read(data)
        assert headers.fields == ((b"Header", b"one"), (b"Header", b"two"))

    def test_read_continued(self):
        # Obsolete line folding: a leading tab continues the previous value.
        data = b"Header: one\r\n\ttwo\r\nHeader2: three\r\n"
        headers = self._read(data)
        assert headers.fields == ((b"Header", b"one\r\n two"), (b"Header2", b"three"))

    def test_read_continued_err(self):
        # A continuation line with no preceding header is invalid.
        data = b"\tfoo: bar\r\n"
        with pytest.raises(ValueError):
            self._read(data)

    def test_read_err(self):
        # A line without a colon separator is invalid.
        data = b"foo"
        with pytest.raises(ValueError):
            self._read(data)

    def test_read_empty_name(self):
        data = b":foo"
        with pytest.raises(ValueError):
            self._read(data)

    def test_read_empty_value(self):
        # An empty value is legal.
        data = b"bar:"
        headers = self._read(data)
        assert headers.fields == ((b"bar", b""),)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/coretypes/test_multidict.py | test/mitmproxy/coretypes/test_multidict.py | import pytest
from mitmproxy.coretypes import multidict
class _TMulti:
@staticmethod
def _kconv(key):
return key.lower()
class TMultiDict(_TMulti, multidict.MultiDict):
pass
class TestMultiDict:
@staticmethod
def _multi():
return TMultiDict((("foo", "bar"), ("bar", "baz"), ("Bar", "bam")))
def test_init(self):
md = TMultiDict()
assert len(md) == 0
md = TMultiDict([("foo", "bar")])
assert len(md) == 1
assert md.fields == (("foo", "bar"),)
def test_repr(self):
assert repr(self._multi()) == (
"TMultiDict[('foo', 'bar'), ('bar', 'baz'), ('Bar', 'bam')]"
)
def test_getitem(self):
md = TMultiDict([("foo", "bar")])
assert "foo" in md
assert "Foo" in md
assert md["foo"] == "bar"
with pytest.raises(KeyError):
assert md["bar"]
md_multi = TMultiDict([("foo", "a"), ("foo", "b")])
assert md_multi["foo"] == "a"
def test_setitem(self):
md = TMultiDict()
md["foo"] = "bar"
assert md.fields == (("foo", "bar"),)
md["foo"] = "baz"
assert md.fields == (("foo", "baz"),)
md["bar"] = "bam"
assert md.fields == (("foo", "baz"), ("bar", "bam"))
def test_delitem(self):
md = self._multi()
del md["foo"]
assert "foo" not in md
assert "bar" in md
with pytest.raises(KeyError):
del md["foo"]
del md["bar"]
assert md.fields == ()
def test_iter(self):
md = self._multi()
assert list(md.__iter__()) == ["foo", "bar"]
def test_len(self):
md = TMultiDict()
assert len(md) == 0
md = self._multi()
assert len(md) == 2
def test_eq(self):
assert TMultiDict() == TMultiDict()
assert not (TMultiDict() == 42)
md1 = self._multi()
md2 = self._multi()
assert md1 == md2
md1.fields = md1.fields[1:] + md1.fields[:1]
assert not (md1 == md2)
def test_hash(self):
"""
If a class defines mutable objects and implements an __eq__() method,
it should not implement __hash__(), since the implementation of hashable
collections requires that a key's hash value is immutable.
"""
with pytest.raises(TypeError):
assert hash(TMultiDict())
def test_get_all(self):
md = self._multi()
assert md.get_all("foo") == ["bar"]
assert md.get_all("bar") == ["baz", "bam"]
assert md.get_all("baz") == []
def test_set_all(self):
md = TMultiDict()
md.set_all("foo", ["bar", "baz"])
assert md.fields == (("foo", "bar"), ("foo", "baz"))
md = TMultiDict(
(
("a", "b"),
("x", "x"),
("c", "d"),
("X", "X"),
("e", "f"),
)
)
md.set_all("x", ["1", "2", "3"])
assert md.fields == (
("a", "b"),
("x", "1"),
("c", "d"),
("X", "2"),
("e", "f"),
("x", "3"),
)
md.set_all("x", ["4"])
assert md.fields == (
("a", "b"),
("x", "4"),
("c", "d"),
("e", "f"),
)
def test_add(self):
md = self._multi()
md.add("foo", "foo")
assert md.fields == (
("foo", "bar"),
("bar", "baz"),
("Bar", "bam"),
("foo", "foo"),
)
def test_insert(self):
md = TMultiDict([("b", "b")])
md.insert(0, "a", "a")
md.insert(2, "c", "c")
assert md.fields == (("a", "a"), ("b", "b"), ("c", "c"))
def test_keys(self):
md = self._multi()
assert list(md.keys()) == ["foo", "bar"]
assert list(md.keys(multi=True)) == ["foo", "bar", "Bar"]
def test_values(self):
md = self._multi()
assert list(md.values()) == ["bar", "baz"]
assert list(md.values(multi=True)) == ["bar", "baz", "bam"]
def test_items(self):
md = self._multi()
assert list(md.items()) == [("foo", "bar"), ("bar", "baz")]
assert list(md.items(multi=True)) == [
("foo", "bar"),
("bar", "baz"),
("Bar", "bam"),
]
def test_state(self):
md = self._multi()
assert len(md.get_state()) == 3
assert md == TMultiDict.from_state(md.get_state())
md2 = TMultiDict()
assert md != md2
md2.set_state(md.get_state())
assert md == md2
class TParent:
def __init__(self):
self.vals = tuple()
def setter(self, vals):
self.vals = vals
def getter(self):
return self.vals
class TestMultiDictView:
def test_modify(self):
p = TParent()
tv = multidict.MultiDictView(p.getter, p.setter)
assert len(tv) == 0
tv["a"] = "b"
assert p.vals == (("a", "b"),)
tv["c"] = "b"
assert p.vals == (("a", "b"), ("c", "b"))
assert tv["a"] == "b"
def test_copy(self):
p = TParent()
tv = multidict.MultiDictView(p.getter, p.setter)
c = tv.copy()
assert isinstance(c, multidict.MultiDict)
assert tv.items() == c.items()
c["foo"] = "bar"
assert tv.items() != c.items()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/coretypes/test_serializable.py | test/mitmproxy/coretypes/test_serializable.py | from __future__ import annotations
import copy
import dataclasses
import enum
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Literal
import pytest
from mitmproxy.coretypes import serializable
from mitmproxy.coretypes.serializable import SerializableDataclass
class SerializableDummy(serializable.Serializable):
def __init__(self, i):
self.i = i
def get_state(self):
return copy.copy(self.i)
def set_state(self, i):
self.i = i
@classmethod
def from_state(cls, state):
return cls(state)
class TestSerializable:
def test_copy(self):
a = SerializableDummy(42)
assert a.i == 42
b = a.copy()
assert b.i == 42
a.set_state(1)
assert a.i == 1
assert b.i == 42
def test_copy_id(self):
a = SerializableDummy({"id": "foo", "foo": 42})
b = a.copy()
assert a.get_state()["id"] != b.get_state()["id"]
assert a.get_state()["foo"] == b.get_state()["foo"]
@dataclass
class Simple(SerializableDataclass):
x: int
y: str | None
@dataclass
class SerializableChild(SerializableDataclass):
foo: Simple
maybe_foo: Simple | None
@dataclass
class Inheritance(Simple):
z: bool
class TEnum(enum.Enum):
A = 1
B = 2
@dataclass
class TLiteral(SerializableDataclass):
lit: Literal["foo", "bar"]
@dataclass
class BuiltinChildren(SerializableDataclass):
a: list[int] | None
b: dict[str, int] | None
c: tuple[int, int] | None
d: list[Simple]
e: TEnum | None
@dataclass
class Defaults(SerializableDataclass):
z: int | None = 42
@dataclass
class Unsupported(SerializableDataclass):
a: Mapping[str, int]
@dataclass
class Addr(SerializableDataclass):
peername: tuple[str, int]
@dataclass(frozen=True)
class Frozen(SerializableDataclass):
x: int
@dataclass
class FrozenWrapper(SerializableDataclass):
f: Frozen
class TestSerializableDataclass:
@pytest.mark.parametrize(
"cls, state",
[
(Simple, {"x": 42, "y": "foo"}),
(Simple, {"x": 42, "y": None}),
(SerializableChild, {"foo": {"x": 42, "y": "foo"}, "maybe_foo": None}),
(
SerializableChild,
{"foo": {"x": 42, "y": "foo"}, "maybe_foo": {"x": 42, "y": "foo"}},
),
(Inheritance, {"x": 42, "y": "foo", "z": True}),
(
BuiltinChildren,
{
"a": [1, 2, 3],
"b": {"foo": 42},
"c": (1, 2),
"d": [{"x": 42, "y": "foo"}],
"e": 1,
},
),
(BuiltinChildren, {"a": None, "b": None, "c": None, "d": [], "e": None}),
(TLiteral, {"lit": "foo"}),
],
)
def test_roundtrip(self, cls, state):
a = cls.from_state(copy.deepcopy(state))
assert a.get_state() == state
def test_set(self):
s = SerializableChild(foo=Simple(x=42, y=None), maybe_foo=Simple(x=43, y=None))
s.set_state({"foo": {"x": 44, "y": None}, "maybe_foo": None})
assert s.foo.x == 44
assert s.maybe_foo is None
with pytest.raises(ValueError, match="Unexpected fields"):
Simple(0, "").set_state({"x": 42, "y": "foo", "z": True})
def test_invalid_none(self):
with pytest.raises(ValueError):
Simple.from_state({"x": None, "y": "foo"})
def test_defaults(self):
a = Defaults()
assert a.get_state() == {"z": 42}
def test_invalid_type(self):
with pytest.raises(ValueError):
Simple.from_state({"x": 42, "y": 42})
with pytest.raises(ValueError):
BuiltinChildren.from_state(
{"a": None, "b": None, "c": ("foo",), "d": [], "e": None}
)
def test_invalid_key(self):
with pytest.raises(ValueError):
Simple.from_state({"x": 42, "y": "foo", "z": True})
def test_invalid_type_in_list(self):
with pytest.raises(ValueError, match="Invalid value for x"):
BuiltinChildren.from_state(
{
"a": None,
"b": None,
"c": None,
"d": [{"x": "foo", "y": "foo"}],
"e": None,
}
)
def test_unsupported_type(self):
with pytest.raises(TypeError):
Unsupported.from_state({"a": "foo"})
def test_literal(self):
assert TLiteral.from_state({"lit": "foo"}).get_state() == {"lit": "foo"}
with pytest.raises(ValueError):
TLiteral.from_state({"lit": "unknown"})
def test_peername(self):
assert Addr.from_state({"peername": ("addr", 42)}).get_state() == {
"peername": ("addr", 42)
}
assert Addr.from_state({"peername": ("addr", 42, 0, 0)}).get_state() == {
"peername": ("addr", 42, 0, 0)
}
def test_set_immutable(self):
w = FrozenWrapper(Frozen(42))
with pytest.raises(dataclasses.FrozenInstanceError):
w.f.set_state({"x": 43})
w.set_state({"f": {"x": 43}})
assert w.f.x == 43
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/coretypes/test_bidi.py | test/mitmproxy/coretypes/test_bidi.py | import pytest
from mitmproxy.coretypes import bidi
def test_bidi():
b = bidi.BiDi(a=1, b=2)
assert b.a == 1
assert b.get_name(1) == "a"
assert b.get_name(5) is None
with pytest.raises(AttributeError):
getattr(b, "c")
with pytest.raises(ValueError):
bidi.BiDi(one=1, two=1)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/coretypes/__init__.py | test/mitmproxy/coretypes/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/utils/test_data.py | test/mitmproxy/utils/test_data.py | import pytest
from mitmproxy.utils import data
def test_pkg_data():
assert data.pkg_data.path("tools/console")
with pytest.raises(ValueError):
data.pkg_data.path("nonexistent")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/utils/test_bits.py | test/mitmproxy/utils/test_bits.py | # TODO: write tests
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/test/mitmproxy/utils/test_magisk.py | test/mitmproxy/utils/test_magisk.py | import os
from cryptography import x509
from mitmproxy.test import taddons
from mitmproxy.utils import magisk
def test_get_ca(tdata):
with taddons.context() as tctx:
tctx.options.confdir = tdata.path("mitmproxy/data/confdir")
ca = magisk.get_ca_from_files()
assert isinstance(ca, x509.Certificate)
def test_subject_hash_old(tdata):
# checks if the hash is the same as that comming form openssl
with taddons.context() as tctx:
tctx.options.confdir = tdata.path("mitmproxy/data/confdir")
ca = magisk.get_ca_from_files()
our_hash = magisk.subject_hash_old(ca)
assert our_hash == "efb15d7d"
def test_magisk_write(tdata, tmp_path):
# checks if the hash is the same as that comming form openssl
with taddons.context() as tctx:
tctx.options.confdir = tdata.path("mitmproxy/data/confdir")
magisk_path = tmp_path / "mitmproxy-magisk-module.zip"
magisk.write_magisk_module(magisk_path)
assert os.path.exists(magisk_path)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.