repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/window.py | mitmproxy/tools/console/window.py | import logging
import re
import urwid
from mitmproxy import flow
from mitmproxy.tools.console import commands
from mitmproxy.tools.console import common
from mitmproxy.tools.console import eventlog
from mitmproxy.tools.console import flowlist
from mitmproxy.tools.console import flowview
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import help
from mitmproxy.tools.console import keybindings
from mitmproxy.tools.console import options
from mitmproxy.tools.console import overlay
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import statusbar
class StackWidget(urwid.Frame):
    """
    A single pane in the window layout: the stack's top widget plus an
    optional title header reflecting focus state.
    """

    def __init__(self, window, widget, title, focus):
        self.is_focused = focus
        self.window = window
        if title:
            attr = "heading" if focus else "heading_inactive"
            header = urwid.AttrMap(urwid.Text(title), attr)
        else:
            header = None
        super().__init__(widget, header=header)

    def mouse_event(self, size, event, button, col, row, focus):
        # A left click on an unfocused pane switches focus to it first.
        if not self.is_focused and event == "mouse press" and button == 1:
            self.window.switch()
        return super().mouse_event(size, event, button, col, row, focus)

    def keypress(self, size, key):
        # Make sure that we don't propagate cursor events outside of the
        # widget. Otherwise, in a horizontal layout, urwid's Pile would
        # change the focused widget if we cannot scroll any further.
        unhandled = super().keypress(size, key)
        # awkward indexing: _command_map doesn't implement a full dict API
        mapped = self._command_map[unhandled]
        if mapped and mapped.startswith("cursor"):
            return None
        return unhandled
class WindowStack:
    """
    A stack of named windows for one layout pane, plus an optional overlay
    drawn on top of the current window.
    """

    def __init__(self, master, base):
        self.master = master
        # All windows this stack can display, keyed by window name.
        self.windows = dict(
            flowlist=flowlist.FlowListBox(master),
            flowview=flowview.FlowView(master),
            commands=commands.Commands(master),
            keybindings=keybindings.KeyBindings(master),
            options=options.Options(master),
            help=help.HelpView(master),
            eventlog=eventlog.EventLog(master),
            edit_focus_query=grideditor.QueryEditor(master),
            edit_focus_cookies=grideditor.CookieEditor(master),
            edit_focus_setcookies=grideditor.SetCookieEditor(master),
            edit_focus_setcookie_attrs=grideditor.CookieAttributeEditor(master),
            edit_focus_multipart_form=grideditor.RequestMultipartEditor(master),
            edit_focus_urlencoded_form=grideditor.RequestUrlEncodedEditor(master),
            edit_focus_path=grideditor.PathEditor(master),
            edit_focus_request_headers=grideditor.RequestHeaderEditor(master),
            edit_focus_response_headers=grideditor.ResponseHeaderEditor(master),
        )
        # Window names, bottom-to-top; the base window is never popped.
        self.stack = [base]
        # Active overlay widget, or None.
        self.overlay = None

    def set_overlay(self, o, **kwargs):
        # Wrap the overlay around whatever is currently on top.
        self.overlay = overlay.SimpleOverlay(
            self,
            o,
            self.top_widget(),
            o.width,
            **kwargs,
        )

    def top_window(self):
        """
        The current top window, ignoring overlays.
        """
        return self.windows[self.stack[-1]]

    def top_widget(self):
        """
        The current top widget - either a window or the active overlay.
        """
        if self.overlay:
            return self.overlay
        return self.top_window()

    def push(self, wname):
        # Pushing the window that is already on top is a no-op.
        if self.stack[-1] == wname:
            return
        prev = self.top_window()
        self.stack.append(wname)
        # Tell the new top window which window it covered.
        self.call("layout_pushed", prev)

    def pop(self, *args, **kwargs):
        """
        Pop off the stack. Return True if there is nothing left to pop
        (no overlay and only the base window remains), else None.
        """
        if not self.overlay and len(self.stack) == 1:
            # Nothing left to pop - the caller decides what to do (e.g. exit).
            return True
        self.call("layout_popping")
        # An overlay is always popped before any window beneath it.
        if self.overlay:
            self.overlay = None
        else:
            self.stack.pop()

    def call(self, name, *args, **kwargs):
        """
        Call a function on both the top window, and the overlay if there is
        one. If the widget has a key_responder, we call the function on the
        responder instead.
        """
        getattr(self.top_window(), name)(*args, **kwargs)
        if self.overlay:
            getattr(self.overlay, name)(*args, **kwargs)
class Window(urwid.Frame):
    """
    The top-level console window: one or two WindowStack panes in a
    configurable layout, with the status bar as the frame footer.
    """

    def __init__(self, master):
        self.statusbar = statusbar.StatusBar(master)
        super().__init__(
            None, header=None, footer=urwid.AttrMap(self.statusbar, "background")
        )
        self.master = master
        # Redraw on any change to the view or the focused flow.
        self.master.view.sig_view_refresh.connect(self.view_changed)
        self.master.view.sig_view_add.connect(self.view_changed)
        self.master.view.sig_view_remove.connect(self.view_changed)
        self.master.view.sig_view_update.connect(self.view_changed)
        self.master.view.focus.sig_change.connect(self.view_changed)
        self.master.view.focus.sig_change.connect(self.focus_changed)
        signals.focus.connect(self.sig_focus)
        signals.flow_change.connect(self.flow_changed)
        signals.pop_view_state.connect(self.pop)
        # Rebuild the layout when layout-related options change.
        self.master.options.subscribe(
            self.configure, ["console_layout", "console_layout_headers"]
        )
        # Index of the currently focused pane in self.stacks.
        self.pane = 0
        self.stacks = [WindowStack(master, "flowlist"), WindowStack(master, "eventlog")]

    def focus_stack(self):
        # The WindowStack of the currently focused pane.
        return self.stacks[self.pane]

    def configure(self, options, updated):
        # Option-change callback: just rebuild the layout.
        self.refresh()

    def refresh(self):
        """
        Redraw the layout.
        """
        c = self.master.options.console_layout
        if c == "single":
            self.pane = 0

        def wrapped(idx):
            # Wrap stack idx in a StackWidget with an optional title header.
            widget = self.stacks[idx].top_widget()
            if self.master.options.console_layout_headers:
                title = self.stacks[idx].top_window().title
            else:
                title = None
            return StackWidget(self, widget, title, self.pane == idx)

        w = None
        if c == "single":
            w = wrapped(0)
        elif c == "vertical":
            w = urwid.Pile(
                [wrapped(i) for i, s in enumerate(self.stacks)], focus_item=self.pane
            )
        else:
            # "horizontal": panes side by side, separated by one column.
            w = urwid.Columns(
                [wrapped(i) for i, s in enumerate(self.stacks)],
                dividechars=1,
                focus_column=self.pane,
            )
        self.body = urwid.AttrMap(w, "background")
        signals.window_refresh.send()

    def flow_changed(self, flow: flow.Flow) -> None:
        # Only react if the changed flow is the one currently focused.
        if self.master.view.focus.flow:
            if flow.id == self.master.view.focus.flow.id:
                self.focus_changed()

    def focus_changed(self, *args, **kwargs):
        """
        Triggered when the focus changes - either when it's modified, or
        when it changes to a different flow altogether.
        """
        for i in self.stacks:
            i.call("focus_changed")

    def view_changed(self, *args, **kwargs):
        """
        Triggered when the view list has changed.
        """
        for i in self.stacks:
            i.call("view_changed")

    def set_overlay(self, o, **kwargs):
        """
        Set an overlay on the currently focused stack.
        """
        self.focus_stack().set_overlay(o, **kwargs)
        self.refresh()

    def push(self, wname):
        """
        Push a window onto the currently focused stack.
        """
        self.focus_stack().push(wname)
        self.refresh()
        self.view_changed()
        self.focus_changed()

    def pop(self) -> None:
        """
        Pop a window from the currently focused stack. If there is only one
        window on the stack, this prompts for exit.
        """
        if self.focus_stack().pop():
            self.master.prompt_for_exit()
        else:
            self.refresh()
            self.view_changed()
            self.focus_changed()

    def stacks_sorted_by_focus(self):
        """
        Returns:
            self.stacks, with the focused stack first.
        """
        stacks = self.stacks.copy()
        stacks.insert(0, stacks.pop(self.pane))
        return stacks

    def current(self, keyctx):
        """
        Returns the active widget with a matching key context, including overlays.
        If multiple stacks have an active widget with a matching key context,
        the currently focused stack is preferred.
        """
        for s in self.stacks_sorted_by_focus():
            t = s.top_widget()
            if t.keyctx == keyctx:
                return t

    def current_window(self, keyctx):
        """
        Returns the active window with a matching key context, ignoring overlays.
        If multiple stacks have an active widget with a matching key context,
        the currently focused stack is preferred.
        """
        for s in self.stacks_sorted_by_focus():
            t = s.top_window()
            if t.keyctx == keyctx:
                return t

    def sig_focus(self, section):
        # Move urwid frame focus to the given section.
        # NOTE(review): section values come from the signal sender - confirm
        # they are valid urwid.Frame focus positions.
        self.focus_position = section

    def switch(self):
        """
        Switch between the two panes.
        """
        if self.master.options.console_layout == "single":
            self.pane = 0
        else:
            self.pane = (self.pane + 1) % len(self.stacks)
        self.refresh()

    def mouse_event(self, *args, **kwargs):
        # args: (size, event, button, col, row)
        k = super().mouse_event(*args, **kwargs)
        if not k:
            if args[1] == "mouse drag":
                signals.status_message.send(
                    message="Hold down fn, shift, alt or ctrl to select text or use the --set console_mouse=false parameter.",
                    expire=1,
                )
            elif args[1] == "mouse press" and args[2] == 4:
                # Scroll-wheel up/down are translated to cursor keys.
                self.keypress(args[0], "up")
            elif args[1] == "mouse press" and args[2] == 5:
                self.keypress(args[0], "down")
            else:
                return False
        return True

    def keypress(self, size, k):
        # Give the widget tree first shot at the key; anything unhandled
        # is routed through the keymap for the focused widget's context.
        k = super().keypress(size, k)
        if k:
            return self.master.keymap.handle(self.focus_stack().top_widget().keyctx, k)
class Screen(urwid.raw_display.Screen):
    """Raw-display screen with a workaround for WSL terminal artifacts."""

    def __init__(self) -> None:
        super().__init__()
        self.logger = logging.getLogger("urwid")

    def write(self, data):
        if not common.IS_WINDOWS_OR_WSL:
            super().write(data)
            return
        # replace urwid's SI/SO, which produce artifacts under WSL.
        # at some point we may figure out what they actually do.
        super().write(re.sub("[\x0e\x0f]", "", data))
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/help.py | mitmproxy/tools/console/help.py | import urwid
from mitmproxy import flowfilter
from mitmproxy.tools.console import common
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import tabs
class CListBox(urwid.ListBox):
    """ListBox with trailing padding and m_start/m_end navigation keys."""

    def __init__(self, contents):
        self.length = len(contents)
        # Pad with a few blank lines so the last entry scrolls fully into view.
        padded = contents[:] + [urwid.Text(["\n"])] * 5
        super().__init__(padded)

    def keypress(self, size, key):
        if key == "m_start":
            self.set_focus(0)
            return None
        if key == "m_end":
            self.set_focus(self.length - 1)
            return None
        return super().keypress(size, key)
class HelpView(tabs.Tabs, layoutwidget.LayoutWidget):
    """
    The help window: a tabbed view with the key binding list and a filter
    expression reference.
    """

    title = "Help"
    keyctx = "help"

    def __init__(self, master):
        self.master = master
        # Key context of the window help was opened from; selects which
        # view-specific bindings are listed.
        self.helpctx = ""
        super().__init__(
            [
                [self.keybindings_title, self.keybindings],
                [self.filtexp_title, self.filtexp],
            ]
        )

    def keybindings_title(self):
        return "Key Bindings"

    def format_keys(self, binds):
        """Format a list of bindings as aligned key/description widgets."""
        kvs = []
        for b in binds:
            k = b.key
            if b.key == " ":
                # Render the space key with a readable name.
                k = "space"
            kvs.append((k, b.help or b.command))
        return common.format_keyvals(kvs)

    def keybindings(self):
        """Build the content of the key bindings tab."""
        text = [urwid.Text([("title", "Common Keybindings")])]
        text.extend(self.format_keys(self.master.keymap.list("commonkey")))
        text.append(urwid.Text(["\n", ("title", "Keybindings for this view")]))
        if self.helpctx:
            text.extend(self.format_keys(self.master.keymap.list(self.helpctx)))
        text.append(
            urwid.Text(
                [
                    "\n",
                    ("title", "Global Keybindings"),
                ]
            )
        )
        text.extend(self.format_keys(self.master.keymap.list("global")))
        return CListBox(text)

    def filtexp_title(self):
        return "Filter Expressions"

    def filtexp(self):
        """Build the content of the filter expression reference tab."""
        text = []
        text.extend(common.format_keyvals(flowfilter.help, indent=4))
        text.append(
            urwid.Text(
                [
                    "\n",
                    ("text", " Regexes are Python-style.\n"),
                    ("text", " Regexes can be specified as quoted strings.\n"),
                    (
                        "text",
                        ' Header matching (~h, ~hq, ~hs) is against a string of the form "name: value".\n',
                    ),
                    (
                        "text",
                        " Expressions with no operators are regex matches against URL.\n",
                    ),
                    ("text", " Default binary operator is &.\n"),
                    ("head", "\n Examples:\n"),
                ]
            )
        )
        # Display text only. The previous version had an unbalanced quote
        # ('Url containing \"google.com') and leaked literal backslashes
        # from raw-string escapes into the rendered help.
        examples = [
            (r"google\.com", 'Url containing "google.com"'),
            ("~q ~b test", 'Requests where body contains "test"'),
            (
                '!(~q & ~t "text/html")',
                "Anything but requests with a text/html content type.",
            ),
        ]
        text.extend(common.format_keyvals(examples, indent=4))
        return CListBox(text)

    def layout_pushed(self, prev):
        """
        We are just about to push a window onto the stack.
        """
        # Remember which window we came from, so the bindings tab can show
        # that view's context-specific keys.
        self.helpctx = prev.keyctx
        self.show()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/consoleaddons.py | mitmproxy/tools/console/consoleaddons.py | import csv
import logging
import re
from collections.abc import Sequence
import mitmproxy.types
from mitmproxy import command
from mitmproxy import command_lexer
from mitmproxy import contentviews
from mitmproxy import ctx
from mitmproxy import dns
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import log
from mitmproxy import tcp
from mitmproxy import udp
from mitmproxy.contentviews import ContentviewMessage
from mitmproxy.exceptions import CommandError
from mitmproxy.log import ALERT
from mitmproxy.tools.console import keymap
from mitmproxy.tools.console import overlay
from mitmproxy.tools.console import signals
from mitmproxy.utils import strutils
logger = logging.getLogger(__name__)
console_palettes = [
"lowlight",
"lowdark",
"light",
"dark",
"solarized_light",
"solarized_dark",
]
view_orders = [
"time",
"method",
"url",
"size",
]
console_layouts = [
"single",
"vertical",
"horizontal",
]
console_flowlist_layout = ["default", "table", "list"]
class ConsoleAddon:
"""
An addon that exposes console-specific commands, and hooks into required
events.
"""
def __init__(self, master):
self.master = master
self.started = False
def load(self, loader):
loader.add_option(
"console_default_contentview",
str,
"auto",
"The default content view mode.",
choices=contentviews.registry.available_views(),
)
loader.add_option(
"console_eventlog_verbosity",
str,
"info",
"EventLog verbosity.",
choices=log.LogLevels,
)
loader.add_option(
"console_layout",
str,
"single",
"Console layout.",
choices=sorted(console_layouts),
)
loader.add_option(
"console_layout_headers",
bool,
True,
"Show layout component headers",
)
loader.add_option(
"console_focus_follow", bool, False, "Focus follows new flows."
)
loader.add_option(
"console_palette",
str,
"solarized_dark",
"Color palette.",
choices=sorted(console_palettes),
)
loader.add_option(
"console_palette_transparent",
bool,
True,
"Set transparent background for palette.",
)
loader.add_option("console_mouse", bool, True, "Console mouse interaction.")
loader.add_option(
"console_flowlist_layout",
str,
"default",
"Set the flowlist layout",
choices=sorted(console_flowlist_layout),
)
loader.add_option(
"console_strip_trailing_newlines",
bool,
False,
"Strip trailing newlines from edited request/response bodies.",
)
@command.command("console.layout.options")
def layout_options(self) -> Sequence[str]:
"""
Returns the available options for the console_layout option.
"""
return ["single", "vertical", "horizontal"]
@command.command("console.layout.cycle")
def layout_cycle(self) -> None:
    """
    Cycle through the console layout options.
    """
    # Compute the option list once (the original called layout_options()
    # twice), then advance to the next layout, wrapping at the end.
    opts = self.layout_options()
    off = opts.index(ctx.options.console_layout)
    ctx.options.update(console_layout=opts[(off + 1) % len(opts)])
@command.command("console.panes.next")
def panes_next(self) -> None:
"""
Go to the next layout pane.
"""
self.master.window.switch()
@command.command("console.panes.prev")
def panes_prev(self) -> None:
"""
Go to the previous layout pane.
"""
return self.panes_next()
@command.command("console.options.reset.focus")
def options_reset_current(self) -> None:
"""
Reset the current option in the options editor.
"""
fv = self.master.window.current("options")
if not fv:
raise exceptions.CommandError("Not viewing options.")
self.master.commands.call_strings("options.reset.one", [fv.current_name()])
@command.command("console.nav.start")
def nav_start(self) -> None:
"""
Go to the start of a list or scrollable.
"""
self.master.inject_key("m_start")
@command.command("console.nav.end")
def nav_end(self) -> None:
"""
Go to the end of a list or scrollable.
"""
self.master.inject_key("m_end")
@command.command("console.nav.next")
def nav_next(self) -> None:
"""
Go to the next navigatable item.
"""
self.master.inject_key("m_next")
@command.command("console.nav.select")
def nav_select(self) -> None:
"""
Select a navigable item for viewing or editing.
"""
self.master.inject_key("m_select")
@command.command("console.nav.up")
def nav_up(self) -> None:
"""
Go up.
"""
self.master.inject_key("up")
@command.command("console.nav.down")
def nav_down(self) -> None:
"""
Go down.
"""
self.master.inject_key("down")
@command.command("console.nav.pageup")
def nav_pageup(self) -> None:
"""
Go up.
"""
self.master.inject_key("page up")
@command.command("console.nav.pagedown")
def nav_pagedown(self) -> None:
"""
Go down.
"""
self.master.inject_key("page down")
@command.command("console.nav.left")
def nav_left(self) -> None:
"""
Go left.
"""
self.master.inject_key("left")
@command.command("console.nav.right")
def nav_right(self) -> None:
"""
Go right.
"""
self.master.inject_key("right")
@command.command("console.choose")
def console_choose(
self,
prompt: str,
choices: Sequence[str],
cmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.CmdArgs,
) -> None:
"""
Prompt the user to choose from a specified list of strings, then
invoke another command with all occurrences of {choice} replaced by
the choice the user made.
"""
def callback(opt):
# We're now outside of the call context...
repl = [arg.replace("{choice}", opt) for arg in args]
try:
self.master.commands.call_strings(cmd, repl)
except exceptions.CommandError as e:
logger.error(str(e))
self.master.overlay(overlay.Chooser(self.master, prompt, choices, "", callback))
@command.command("console.choose.cmd")
def console_choose_cmd(
self,
prompt: str,
choicecmd: mitmproxy.types.Cmd,
subcmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.CmdArgs,
) -> None:
"""
Prompt the user to choose from a list of strings returned by a
command, then invoke another command with all occurrences of {choice}
replaced by the choice the user made.
"""
choices = ctx.master.commands.execute(choicecmd)
def callback(opt):
# We're now outside of the call context...
repl = [arg.replace("{choice}", opt) for arg in args]
try:
self.master.commands.call_strings(subcmd, repl)
except exceptions.CommandError as e:
logger.error(str(e))
self.master.overlay(overlay.Chooser(self.master, prompt, choices, "", callback))
@command.command("console.command")
def console_command(self, *command_str: str) -> None:
    """
    Prompt the user to edit a command with a (possibly empty) starting value.
    """
    # Quote each part so the partial command round-trips through the lexer.
    partial_cmd = " ".join(command_lexer.quote(part) for part in command_str)
    if partial_cmd:
        # Trailing space so the user can immediately type the next argument.
        partial_cmd = partial_cmd + " "
    signals.status_prompt_command.send(partial=partial_cmd)
@command.command("console.command.confirm")
def console_command_confirm(
self,
prompt: str,
cmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.CmdArgs,
) -> None:
"""
Prompt the user before running the specified command.
"""
def callback(opt):
if opt == "n":
return
try:
self.master.commands.call_strings(cmd, args)
except exceptions.CommandError as e:
logger.exception(str(e))
self.master.prompt_for_user_choice(prompt, callback)
@command.command("console.command.set")
def console_command_set(self, option_name: str) -> None:
"""
Prompt the user to set an option.
"""
option_value = getattr(self.master.options, option_name, None) or ""
set_command = f"set {option_name} {option_value!r}"
cursor = len(set_command) - 1
signals.status_prompt_command.send(partial=set_command, cursor=cursor)
@command.command("console.view.keybindings")
def view_keybindings(self) -> None:
"""View the commands list."""
self.master.switch_view("keybindings")
@command.command("console.view.commands")
def view_commands(self) -> None:
"""View the commands list."""
self.master.switch_view("commands")
@command.command("console.view.options")
def view_options(self) -> None:
"""View the options editor."""
self.master.switch_view("options")
@command.command("console.view.eventlog")
def view_eventlog(self) -> None:
"""View the event log."""
self.master.switch_view("eventlog")
@command.command("console.view.help")
def view_help(self) -> None:
"""View help."""
self.master.switch_view("help")
@command.command("console.view.flow")
def view_flow(self, flow: flow.Flow) -> None:
"""View a flow."""
if isinstance(flow, (http.HTTPFlow, tcp.TCPFlow, udp.UDPFlow, dns.DNSFlow)):
self.master.switch_view("flowview")
else:
logger.warning(f"No detail view for {type(flow).__name__}.")
@command.command("console.exit")
def exit(self) -> None:
"""Exit mitmproxy."""
self.master.shutdown()
@command.command("console.view.pop")
def view_pop(self) -> None:
"""
Pop a view off the console stack. At the top level, this prompts the
user to exit mitmproxy.
"""
signals.pop_view_state.send()
@command.command("console.bodyview")
@command.argument("part", type=mitmproxy.types.Choice("console.bodyview.options"))
def bodyview(self, flow: flow.Flow, part: str) -> None:
"""
Spawn an external viewer for a flow request or response body based
on the detected MIME type. We use the mailcap system to find the
correct viewer, and fall back to the programs in $PAGER or $EDITOR
if necessary.
"""
fpart = getattr(flow, part, None)
if not fpart:
raise exceptions.CommandError(
"Part must be either request or response, not %s." % part
)
t = fpart.headers.get("content-type")
content = fpart.get_content(strict=False)
if not content:
raise exceptions.CommandError("No content to view.")
self.master.spawn_external_viewer(content, t)
@command.command("console.bodyview.options")
def bodyview_options(self) -> Sequence[str]:
"""
Possible parts for console.bodyview.
"""
return ["request", "response"]
@command.command("console.edit.focus.options")
def edit_focus_options(self) -> Sequence[str]:
"""
Possible components for console.edit.focus.
"""
flow = self.master.view.focus.flow
focus_options = []
try:
view_name = self.master.commands.call("console.flowview.mode")
except CommandError:
view_name = "auto"
def add_message_edit_option(
message_name: str, message: ContentviewMessage | None
) -> None:
if message is None:
return
data, _ = contentviews.get_data(message)
cv = contentviews.registry.get_view(
data or b"",
contentviews.make_metadata(message, flow),
view_name,
)
if isinstance(cv, contentviews.InteractiveContentview):
focus_options.append(f"{message_name} ({cv.name})")
if flow is None:
raise exceptions.CommandError("No flow selected.")
elif isinstance(flow, tcp.TCPFlow):
focus_options.append("tcp-message")
add_message_edit_option("tcp-message", flow.messages[-1])
elif isinstance(flow, udp.UDPFlow):
focus_options.append("udp-message")
add_message_edit_option("udp-message", flow.messages[-1])
elif isinstance(flow, http.HTTPFlow):
focus_options.extend(
[
"cookies",
"urlencoded form",
"multipart form",
"path",
"method",
"query",
"reason",
"request-headers",
"response-headers",
"request-body",
"response-body",
"status_code",
"set-cookies",
"url",
]
)
add_message_edit_option("request-body", flow.request)
add_message_edit_option("response-body", flow.response)
if flow.websocket:
add_message_edit_option(
"websocket-message", flow.websocket.messages[-1]
)
elif isinstance(flow, dns.DNSFlow):
raise exceptions.CommandError(
"Cannot edit DNS flows yet, please submit a patch."
)
return focus_options
@command.command("console.edit.focus")
@command.argument(
"flow_part", type=mitmproxy.types.Choice("console.edit.focus.options")
)
def edit_focus(self, flow_part: str) -> None:
"""
Edit a component of the currently focused flow.
"""
flow = self.master.view.focus.flow
# This shouldn't be necessary once this command is "console.edit @focus",
# but for now it is.
if not flow:
raise exceptions.CommandError("No flow selected.")
flow.backup()
require_dummy_response = (
flow_part in ("response-headers", "response-body", "set-cookies")
and flow.response is None
)
if require_dummy_response:
flow.response = http.Response.make()
if flow_part == "cookies":
self.master.switch_view("edit_focus_cookies")
elif flow_part == "urlencoded form":
self.master.switch_view("edit_focus_urlencoded_form")
elif flow_part == "multipart form":
self.master.switch_view("edit_focus_multipart_form")
elif flow_part == "path":
self.master.switch_view("edit_focus_path")
elif flow_part == "query":
self.master.switch_view("edit_focus_query")
elif flow_part == "request-headers":
self.master.switch_view("edit_focus_request_headers")
elif flow_part == "response-headers":
self.master.switch_view("edit_focus_response_headers")
elif m := re.match(
r"(?P<message>(request|response)-body|(tcp|udp|websocket)-message) \((?P<contentview>.+)\)",
flow_part,
):
match m["message"]:
case "request-body":
message = flow.request
case "response-body":
message = flow.response
case "tcp-message" | "udp-message":
message = flow.messages[-1]
case "websocket-message":
message = flow.websocket.messages[-1]
case _:
assert False, "should be exhaustive"
cv = contentviews.registry.get(m["contentview"])
if not cv or not isinstance(cv, contentviews.InteractiveContentview):
raise CommandError(
f"Contentview {m['contentview']} is not bidirectional."
)
pretty = contentviews.prettify_message(message, flow, cv.name)
prettified = self.master.spawn_editor(pretty.text)
message.content = contentviews.reencode_message(
prettified,
message,
flow,
cv.name,
)
elif flow_part in ("request-body", "response-body"):
if flow_part == "request-body":
message = flow.request
else:
message = flow.response
c = self.master.spawn_editor(message.get_content(strict=False) or b"")
# Many editors make it hard to save a file without a terminating
# newline on the last line. When editing message bodies, this can
# cause problems. We strip trailing newlines by default, but this
# behavior is configurable.
if self.master.options.console_strip_trailing_newlines:
c = c.rstrip(b"\n")
message.content = c
elif flow_part == "set-cookies":
self.master.switch_view("edit_focus_setcookies")
elif flow_part == "url":
url = flow.request.url.encode()
edited_url = self.master.spawn_editor(url)
url = edited_url.rstrip(b"\n")
flow.request.url = url.decode()
elif flow_part in ["method", "status_code", "reason"]:
self.master.commands.call_strings(
"console.command", ["flow.set", "@focus", flow_part]
)
elif flow_part in ["tcp-message", "udp-message", "websocket-message"]:
if flow_part == "websocket-message":
message = flow.websocket.messages[-1]
else:
message = flow.messages[-1]
c = self.master.spawn_editor(message.content or b"")
if self.master.options.console_strip_trailing_newlines:
c = c.rstrip(b"\n")
message.content = c.rstrip(b"\n")
def _grideditor(self):
gewidget = self.master.window.current("grideditor")
if not gewidget:
raise exceptions.CommandError("Not in a grideditor.")
return gewidget.key_responder()
@command.command("console.grideditor.add")
def grideditor_add(self) -> None:
"""
Add a row after the cursor.
"""
self._grideditor().cmd_add()
@command.command("console.grideditor.insert")
def grideditor_insert(self) -> None:
"""
Insert a row before the cursor.
"""
self._grideditor().cmd_insert()
@command.command("console.grideditor.delete")
def grideditor_delete(self) -> None:
"""
Delete row
"""
self._grideditor().cmd_delete()
@command.command("console.grideditor.load")
def grideditor_load(self, path: mitmproxy.types.Path) -> None:
    """
    Read a file into the current cell.
    """
    self._grideditor().cmd_read_file(path)
@command.command("console.grideditor.load_escaped")
def grideditor_load_escaped(self, path: mitmproxy.types.Path) -> None:
    """
    Read a file containing a Python-style escaped string into the
    current cell.
    """
    self._grideditor().cmd_read_file_escaped(path)
@command.command("console.grideditor.save")
def grideditor_save(self, path: mitmproxy.types.Path) -> None:
"""
Save data to file as a CSV.
"""
rows = self._grideditor().value
try:
with open(path, "w", newline="", encoding="utf8") as fp:
writer = csv.writer(fp)
for row in rows:
writer.writerow(
[strutils.always_str(x) or "" for x in row] # type: ignore
)
logger.log(ALERT, "Saved %s rows as CSV." % (len(rows)))
except OSError as e:
logger.error(str(e))
@command.command("console.grideditor.editor")
def grideditor_editor(self) -> None:
"""
Spawn an external editor on the current cell.
"""
self._grideditor().cmd_spawn_editor()
@command.command("console.flowview.mode.set")
@command.argument(
"mode", type=mitmproxy.types.Choice("console.flowview.mode.options")
)
def flowview_mode_set(self, mode: str) -> None:
"""
Set the display mode for the current flow view.
"""
fv = self.master.window.current_window("flowview")
if not fv:
raise exceptions.CommandError("Not viewing a flow.")
idx = fv.body.tab_offset
if mode.lower() not in self.flowview_mode_options():
raise exceptions.CommandError("Invalid flowview mode.")
try:
self.master.commands.call_strings(
"view.settings.setval", ["@focus", f"flowview_mode_{idx}", mode]
)
except exceptions.CommandError as e:
logger.error(str(e))
@command.command("console.flowview.mode.options")
def flowview_mode_options(self) -> Sequence[str]:
"""
Returns the valid options for the flowview mode.
"""
return contentviews.registry.available_views()
@command.command("console.flowview.mode")
def flowview_mode(self) -> str:
"""
Get the display mode for the current flow view.
"""
fv = self.master.window.current_window("flowview")
if not fv:
raise exceptions.CommandError("Not viewing a flow.")
idx = fv.body.tab_offset
return self.master.commands.call_strings(
"view.settings.getval",
[
"@focus",
f"flowview_mode_{idx}",
self.master.options.console_default_contentview,
],
)
@command.command("console.key.contexts")
def key_contexts(self) -> Sequence[str]:
"""
The available contexts for key binding.
"""
return list(sorted(keymap.Contexts))
@command.command("console.key.bind")
def key_bind(
self,
contexts: Sequence[str],
key: str,
cmd: mitmproxy.types.Cmd,
*args: mitmproxy.types.CmdArgs,
) -> None:
"""
Bind a shortcut key.
"""
try:
self.master.keymap.add(key, cmd + " " + " ".join(args), contexts, "")
except ValueError as v:
raise exceptions.CommandError(v)
@command.command("console.key.unbind")
def key_unbind(self, contexts: Sequence[str], key: str) -> None:
"""
Un-bind a shortcut key.
"""
try:
self.master.keymap.remove(key, contexts)
except ValueError as v:
raise exceptions.CommandError(v)
def _keyfocus(self):
kwidget = self.master.window.current("keybindings")
if not kwidget:
raise exceptions.CommandError("Not viewing key bindings.")
f = kwidget.get_focused_binding()
if not f:
raise exceptions.CommandError("No key binding focused")
return f
@command.command("console.key.unbind.focus")
def key_unbind_focus(self) -> None:
"""
Un-bind the shortcut key currently focused in the key binding viewer.
"""
b = self._keyfocus()
try:
self.master.keymap.remove(b.key, b.contexts)
except ValueError as v:
raise exceptions.CommandError(v)
@command.command("console.key.execute.focus")
def key_execute_focus(self) -> None:
"""
Execute the currently focused key binding.
"""
b = self._keyfocus()
self.console_command(b.command)
@command.command("console.key.edit.focus")
def key_edit_focus(self) -> None:
    """
    Edit the currently focused key binding.
    """
    # Pre-fill the command prompt with a console.key.bind invocation for
    # the focused binding, so the user can tweak and re-bind it.
    b = self._keyfocus()
    self.console_command(
        "console.key.bind",
        ",".join(b.contexts),
        b.key,
        b.command,
    )
def running(self):
self.started = True
def update(self, flows) -> None:
if not flows:
signals.update_settings.send()
for f in flows:
signals.flow_change.send(flow=f)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/options.py | mitmproxy/tools/console/options.py | from __future__ import annotations
import pprint
import textwrap
import typing
from collections.abc import Sequence
from typing import Optional
import urwid
from mitmproxy import exceptions
from mitmproxy import optmanager
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import overlay
from mitmproxy.tools.console import signals
HELP_HEIGHT = 5
def can_edit_inplace(opt):
    """
    Return True if *opt* can be edited inline in the options list.

    Options with a fixed set of choices get a chooser overlay instead,
    and only plain or optional str/int values are edited as text.
    """
    if opt.choices:
        return False
    # Explicit bool: the original fell off the end and returned None for
    # unsupported typespecs.
    return opt.typespec in [str, int, Optional[str], Optional[int]]
def fcol(s, width, attr):
    """Build a fixed-width urwid column tuple rendering *s* with *attr*."""
    return ("fixed", width, urwid.Text((attr, str(s))))
class OptionItem(urwid.WidgetWrap):
    """A single row in the options list: option name plus current value."""

    def __init__(self, walker, opt, focused, namewidth, editing):
        self.walker, self.opt, self.focused = walker, opt, focused
        # Column width reserved for the option name.
        self.namewidth = namewidth
        self.editing = editing
        super().__init__(self.get_widget())

    def get_widget(self):
        # Render the current value according to its type.
        val = self.opt.current()
        if self.opt.typespec is bool:
            displayval = "true" if val else "false"
        elif not val:
            displayval = ""
        elif self.opt.typespec == Sequence[str]:
            displayval = pprint.pformat(val, indent=1)
        else:
            displayval = str(val)

        # Highlight options that differ from their default, and the
        # focused row.
        changed = self.walker.master.options.has_changed(self.opt.name)
        if self.focused:
            valstyle = "option_active_selected" if changed else "option_selected"
        else:
            valstyle = "option_active" if changed else "text"

        if self.editing:
            valw = urwid.Edit(edit_text=displayval)
        else:
            valw = urwid.AttrMap(
                urwid.Padding(urwid.Text([(valstyle, displayval)])), valstyle
            )

        return urwid.Columns(
            [
                (
                    self.namewidth,
                    urwid.Text([("title", self.opt.name.ljust(self.namewidth))]),
                ),
                valw,
            ],
            dividechars=2,
            focus_column=1,
        )

    def get_edit_text(self):
        # Only meaningful while editing: the value column is an urwid.Edit.
        return self._w[1].get_edit_text()

    def selectable(self):
        return True

    def keypress(self, size, key):
        if self.editing:
            # While editing, all keys go to the embedded Edit widget.
            self._w[1].keypress(size, key)
            return
        return key
class OptionListWalker(urwid.ListWalker):
    """
    Walker over all registered options, sorted by name.

    Tracks a focus index and an "editing" flag; the focused row widget is
    rebuilt whenever focus or option values change.
    """

    def __init__(self, master, help_widget: OptionHelp):
        self.master = master
        self.help_widget = help_widget
        self.index = 0
        # Fixed typo: the original assigned "self.focusobj", a dead attribute
        # never read anywhere; the attribute actually used throughout is
        # "focus_obj" (set for real by set_focus(0) below).
        self.focus_obj = None
        self.opts = sorted(master.options.keys())
        self.maxlen = max(len(i) for i in self.opts)
        self.editing = False
        self.set_focus(0)
        self.master.options.changed.connect(self.sig_mod)

    def sig_mod(self, *args, **kwargs):
        # Option set changed: re-sort, recompute name width, refresh focus.
        self.opts = sorted(self.master.options.keys())
        self.maxlen = max(len(i) for i in self.opts)
        self._modified()
        self.set_focus(self.index)

    def start_editing(self):
        self.editing = True
        self.focus_obj = self._get(self.index, True)
        self._modified()

    def stop_editing(self):
        self.editing = False
        self.focus_obj = self._get(self.index, False)
        self.set_focus(self.index)
        self._modified()

    def get_edit_text(self):
        return self.focus_obj.get_edit_text()

    def _get(self, pos, editing):
        name = self.opts[pos]
        opt = self.master.options._options[name]
        return OptionItem(self, opt, pos == self.index, self.maxlen, editing)

    def get_focus(self):
        return self.focus_obj, self.index

    def set_focus(self, index):
        # Changing focus always cancels any in-progress edit.
        self.editing = False
        name = self.opts[index]
        opt = self.master.options._options[name]
        self.index = index
        self.focus_obj = self._get(self.index, self.editing)
        self.help_widget.update_help_text(opt.help)
        self._modified()

    def get_next(self, pos):
        if pos >= len(self.opts) - 1:
            return None, None
        pos = pos + 1
        return self._get(pos, False), pos

    def get_prev(self, pos):
        pos = pos - 1
        if pos < 0:
            return None, None
        return self._get(pos, False), pos

    def positions(self, reverse=False):
        if reverse:
            return reversed(range(len(self.opts)))
        else:
            return range(len(self.opts))
class OptionsList(urwid.ListBox):
    """Scrollable list of options with inline editing and type-specific overlays."""

    def __init__(self, master, help_widget: OptionHelp):
        self.master = master
        self.walker = OptionListWalker(master, help_widget)
        super().__init__(self.walker)

    def save_config(self, path):
        # Persist current options; surface failures in the status bar.
        try:
            optmanager.save(self.master.options, path)
        except exceptions.OptionsError as e:
            signals.status_message.send(message=str(e))

    def keypress(self, size, key):
        if self.walker.editing:
            # Edit mode: enter commits the value, esc cancels; everything
            # else falls through to the edit widget below.
            if key == "enter":
                foc, idx = self.get_focus()
                v = self.walker.get_edit_text()
                try:
                    self.master.options.set(f"{foc.opt.name}={v}")
                except exceptions.OptionsError as v:
                    signals.status_message.send(message=str(v))
                self.walker.stop_editing()
                return None
            elif key == "esc":
                self.walker.stop_editing()
                return None
        else:
            if key == "m_start":
                self.set_focus(0)
                self.walker._modified()
            elif key == "m_end":
                self.set_focus(len(self.walker.opts) - 1)
                self.walker._modified()
            elif key == "m_select":
                foc, idx = self.get_focus()
                # Dispatch on the option's type: toggle bools, edit scalars
                # inline, open a chooser for fixed choices, or a grid
                # editor overlay for string sequences.
                if foc.opt.typespec is bool:
                    self.master.options.toggler(foc.opt.name)()
                    # Bust the focus widget cache
                    self.set_focus(self.walker.index)
                elif can_edit_inplace(foc.opt):
                    self.walker.start_editing()
                    self.walker._modified()
                elif foc.opt.choices:
                    self.master.overlay(
                        overlay.Chooser(
                            self.master,
                            foc.opt.name,
                            foc.opt.choices,
                            foc.opt.current(),
                            self.master.options.setter(foc.opt.name),
                        )
                    )
                elif foc.opt.typespec in (Sequence[str], typing.Sequence[str]):
                    self.master.overlay(
                        overlay.OptionsOverlay(
                            self.master,
                            foc.opt.name,
                            foc.opt.current(),
                            HELP_HEIGHT + 5,
                        ),
                        valign="top",
                    )
                else:
                    raise NotImplementedError()
        return super().keypress(size, key)
class OptionHelp(urwid.Frame):
    """Bottom pane that shows help text for the currently focused option."""

    def __init__(self, master):
        self.master = master
        super().__init__(self.widget(""))
        self.set_active(False)

    def set_active(self, val):
        # Restyle the header to reflect whether this pane has focus.
        h = urwid.Text("Option Help")
        style = "heading" if val else "heading_inactive"
        self.header = urwid.AttrMap(h, style)

    def widget(self, txt):
        # Wrap the help text to the current terminal width.
        cols, _ = self.master.ui.get_cols_rows()
        return urwid.ListBox([urwid.Text(i) for i in textwrap.wrap(txt, cols)])

    def update_help_text(self, txt: str) -> None:
        self.body = self.widget(txt)
class Options(urwid.Pile, layoutwidget.LayoutWidget):
    """Options window: the option list stacked above the help pane."""

    title = "Options"
    keyctx = "options"
    focus_position: int

    def __init__(self, master):
        oh = OptionHelp(master)
        self.optionslist = OptionsList(master, oh)
        super().__init__(
            [
                self.optionslist,
                (HELP_HEIGHT, oh),
            ]
        )
        self.master = master

    def current_name(self):
        # Name of the currently focused option.
        foc, idx = self.optionslist.get_focus()
        return foc.opt.name

    def keypress(self, size, key):
        if key == "m_next":
            # Cycle focus between the options list and the help pane.
            self.focus_position = (self.focus_position + 1) % len(self.widget_list)
            self.widget_list[1].set_active(self.focus_position == 1)
            key = None
        # This is essentially a copypasta from urwid.Pile's keypress handler.
        # So much for "closed for modification, but open for extension".
        item_rows = None
        if len(size) == 2:
            item_rows = self.get_item_rows(size, focus=True)
        tsize = self.get_item_size(size, self.focus_position, True, item_rows)
        return self.focus.keypress(tsize, key)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/overlay.py | mitmproxy/tools/console/overlay.py | import math
import urwid
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import keymap
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import signals
class SimpleOverlay(urwid.Overlay, layoutwidget.LayoutWidget):
    """Centered overlay that delegates layout callbacks to the wrapped widget."""

    def __init__(self, master, widget, parent, width, valign="middle"):
        self.widget = widget
        self.master = master
        super().__init__(
            widget, parent, align="center", width=width, valign=valign, height="pack"
        )

    @property
    def keyctx(self):
        # The key context is whatever the wrapped widget declares.
        return getattr(self.widget, "keyctx")

    # mypy: Cannot override writeable attribute with read-only property
    @keyctx.setter
    def keyctx(self, value):
        raise RuntimeError  # pragma: no cover

    def key_responder(self):
        return self.widget.key_responder()

    def focus_changed(self):
        return self.widget.focus_changed()

    def view_changed(self):
        return self.widget.view_changed()

    def layout_popping(self):
        return self.widget.layout_popping()
class Choice(urwid.WidgetWrap):
    """A single selectable row in a chooser list."""

    def __init__(self, txt, focus, current, shortcut):
        # Prefix the entry with its shortcut key, or indent it when none exists.
        if shortcut:
            key_style = "option_selected_key" if focus else "key"
            markup = [(key_style, shortcut), ") ", txt]
        else:
            markup = " " + txt
        if current:
            row_style = "option_active_selected" if focus else "option_active"
        else:
            row_style = "option_selected" if focus else "text"
        widget = urwid.AttrMap(
            urwid.Padding(urwid.Text(markup)),
            row_style,
        )
        super().__init__(widget)

    def selectable(self):
        return True

    def keypress(self, size, key):
        # Keys are handled by the enclosing chooser, not individual rows.
        return key
class ChooserListWalker(urwid.ListWalker):
    """Walker over a fixed choice list; each entry is addressable by a shortcut."""

    # Note: "q" is deliberately absent — it dismisses the chooser.
    shortcuts = "123456789abcdefghijklmnoprstuvwxyz"

    def __init__(self, choices, current):
        self.index = 0
        self.choices = choices
        self.current = current

    def _get(self, idx, focus):
        entry = self.choices[idx]
        shortcut = self.shortcuts[idx : idx + 1]
        return Choice(entry, focus, entry == self.current, shortcut)

    def set_focus(self, index):
        self.index = index

    def get_focus(self):
        return self._get(self.index, True), self.index

    def get_next(self, pos):
        nxt = pos + 1
        if nxt >= len(self.choices):
            return None, None
        return self._get(nxt, False), nxt

    def get_prev(self, pos):
        prv = pos - 1
        if prv < 0:
            return None, None
        return self._get(prv, False), prv

    def choice_by_shortcut(self, shortcut):
        # Entries beyond the shortcut alphabet simply have no shortcut.
        for idx, entry in enumerate(self.choices):
            if self.shortcuts[idx : idx + 1] == shortcut:
                return entry
        return None
class Chooser(urwid.WidgetWrap, layoutwidget.LayoutWidget):
    """Popup overlay that lets the user pick one value from a list."""

    keyctx = "chooser"

    def __init__(self, master, title, choices, current, callback):
        self.master = master
        self.choices = choices
        self.callback = callback
        # Width fits the longest choice or the title, plus box/shortcut chrome.
        choicewidth = max(len(i) for i in choices)
        self.width = max(choicewidth, len(title)) + 7
        self.walker = ChooserListWalker(choices, current)
        super().__init__(
            urwid.AttrMap(
                urwid.LineBox(
                    urwid.BoxAdapter(urwid.ListBox(self.walker), len(choices)),
                    title=title,
                ),
                "background",
            )
        )

    def selectable(self):
        return True

    def keypress(self, size, key):
        key = self.master.keymap.handle_only("chooser", key)
        # A shortcut letter selects its entry directly.
        choice = self.walker.choice_by_shortcut(key)
        if choice:
            self.callback(choice)
            signals.pop_view_state.send()
            return
        if key == "m_select":
            self.callback(self.choices[self.walker.index])
            signals.pop_view_state.send()
            return
        elif key in ["q", "esc"]:
            # Dismiss without selecting anything.
            signals.pop_view_state.send()
            return
        binding = self.master.keymap.get("global", key)
        # This is extremely awkward. We need a better way to match nav keys only.
        if binding and binding.command.startswith("console.nav"):
            self.master.keymap.handle("global", key)
        elif key in keymap.navkeys:
            return super().keypress(size, key)
class OptionsOverlay(urwid.WidgetWrap, layoutwidget.LayoutWidget):
    """Overlay hosting a grid editor for a sequence-valued option."""

    keyctx = "grideditor"

    def __init__(self, master, name, vals, vspace):
        """
        name: the option being edited; also used as the box title.
        vals: the option's current values.
        vspace: how much vertical space to keep clear
        """
        cols, rows = master.ui.get_cols_rows()
        self.ge = grideditor.OptionsEditor(master, name, vals)
        super().__init__(
            urwid.AttrMap(
                urwid.LineBox(urwid.BoxAdapter(self.ge, rows - vspace), title=name),
                "background",
            )
        )
        # The overlay takes up 80% of the terminal width.
        self.width = math.ceil(cols * 0.8)

    def key_responder(self):
        return self.ge.key_responder()

    def layout_popping(self):
        return self.ge.layout_popping()
class DataViewerOverlay(urwid.WidgetWrap, layoutwidget.LayoutWidget):
    """Overlay hosting a read-only grid view of arbitrary data."""

    keyctx = "dataviewer"

    def __init__(self, master, vals):
        """
        vals: the data to display, passed through to grideditor.DataViewer.
        """
        # (The original docstring documented a "vspace" parameter that does
        # not exist here; the vertical margin is fixed at 5 rows below.)
        cols, rows = master.ui.get_cols_rows()
        self.ge = grideditor.DataViewer(master, vals)
        super().__init__(
            urwid.AttrMap(
                urwid.LineBox(urwid.BoxAdapter(self.ge, rows - 5), title="Data viewer"),
                "background",
            )
        )
        # The overlay takes up 80% of the terminal width.
        self.width = math.ceil(cols * 0.8)

    def key_responder(self):
        return self.ge.key_responder()

    def layout_popping(self):
        return self.ge.layout_popping()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/col_subgrid.py | mitmproxy/tools/console/grideditor/col_subgrid.py | import urwid
from mitmproxy.net.http import cookies
from mitmproxy.tools.console import signals
from mitmproxy.tools.console.grideditor import base
class Column(base.Column):
    """
    Grid column whose cells are edited in a nested grid editor
    (used for Set-Cookie attribute lists).
    """

    def __init__(self, heading, subeditor):
        super().__init__(heading)
        self.subeditor = subeditor

    def Edit(self, data):
        raise RuntimeError("SubgridColumn should handle edits itself")

    def Display(self, data):
        return Display(data)

    def blank(self):
        return []

    def keypress(self, key: str, editor):
        # Inline editing keys are rejected; select opens the subeditor view.
        if key in "rRe":
            signals.status_message.send(message="Press enter to edit this field.")
            return
        elif key == "m_select":
            self.subeditor.grideditor = editor
            editor.master.switch_view("edit_focus_setcookie_attrs")
        else:
            return key
class Display(base.Cell):
    """Read-only rendering of a cookie attribute list, one pair per line."""

    def __init__(self, data):
        p = cookies._format_pairs(data, sep="\n")
        w = urwid.Text(p)
        super().__init__(w)

    def get_data(self):
        # Subgrid cells are never read back directly; the nested editor
        # writes values through the parent editor instead.
        pass
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/col_text.py | mitmproxy/tools/console/grideditor/col_text.py | """
Welcome to the encoding dance!
In a nutshell, text columns are actually a proxy class for byte columns,
which just encode/decodes contents.
"""
from mitmproxy.tools.console import signals
from mitmproxy.tools.console.grideditor import col_bytes
class Column(col_bytes.Column):
    """A text column: a bytes column that encodes/decodes transparently."""

    def __init__(self, heading, encoding="utf8", errors="surrogateescape"):
        super().__init__(heading)
        # (encoding, errors) passed to str.encode/bytes.decode in the cells.
        self.encoding_args = encoding, errors

    def Display(self, data):
        return TDisplay(data, self.encoding_args)

    def Edit(self, data):
        return TEdit(data, self.encoding_args)

    def blank(self):
        return ""
# This is the same for both edit and display.
class EncodingMixin:
    """
    Wraps a bytes-based cell so callers work with str: values are encoded
    on the way in and decoded on the way out using (encoding, errors).
    """

    def __init__(self, data, encoding_args):
        self.encoding_args = encoding_args
        super().__init__(str(data).encode(*self.encoding_args))  # type: ignore

    def get_data(self):
        data = super().get_data()  # type: ignore
        try:
            return data.decode(*self.encoding_args)
        except ValueError:
            # Surface the problem in the status bar, then re-raise so the
            # grid editor keeps the edit open.
            signals.status_message.send(message="Invalid encoding.")
            raise
# urwid forces a different name for a subclass.
class TDisplay(EncodingMixin, col_bytes.Display):
    # Text display cell: bytes Display with transparent encode/decode.
    pass
class TEdit(EncodingMixin, col_bytes.Edit):
    # Text edit cell: bytes Edit with transparent encode/decode.
    pass
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/col_bytes.py | mitmproxy/tools/console/grideditor/col_bytes.py | import urwid
from mitmproxy.tools.console import signals
from mitmproxy.tools.console.grideditor import base
from mitmproxy.utils import strutils
class Column(base.Column):
    """Grid column holding raw bytes, rendered as escaped strings."""

    def Display(self, data):
        return Display(data)

    def Edit(self, data):
        return Edit(data)

    def blank(self):
        return b""

    def keypress(self, key, editor):
        # Select begins inline editing; all other keys bubble up unchanged.
        if key == "m_select":
            editor.walker.start_edit()
            return None
        return key
class Display(base.Cell):
    """Read-only cell showing bytes as a Python-style escaped string."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        text_widget = urwid.Text(strutils.bytes_to_escaped_str(data), wrap="any")
        super().__init__(text_widget)

    def get_data(self) -> bytes:
        return self.data
class Edit(base.Cell):
    """Editable cell for bytes, edited as a Python-style escaped string."""

    def __init__(self, data: bytes) -> None:
        d = strutils.bytes_to_escaped_str(data)
        w = urwid.Edit(edit_text=d, wrap="any", multiline=True)
        w = urwid.AttrMap(w, "editfield")
        super().__init__(w)

    def get_data(self) -> bytes:
        # Strip surrounding whitespace and unescape back to raw bytes.
        txt = self._w.base_widget.get_text()[0].strip()
        try:
            return strutils.escaped_str_to_bytes(txt)
        except ValueError:
            # Report and re-raise so the grid editor keeps the edit open.
            signals.status_message.send(message="Invalid data.")
            raise
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/editors.py | mitmproxy/tools/console/grideditor/editors.py | from typing import Any
import urwid
from mitmproxy import exceptions
from mitmproxy.http import Headers
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import signals
from mitmproxy.tools.console.grideditor import base
from mitmproxy.tools.console.grideditor import col_bytes
from mitmproxy.tools.console.grideditor import col_subgrid
from mitmproxy.tools.console.grideditor import col_text
from mitmproxy.tools.console.grideditor import col_viewany
class QueryEditor(base.FocusEditor):
    """Edit the focused flow's request query parameters."""

    title = "Edit Query"
    columns = [col_text.Column("Key"), col_text.Column("Value")]

    def get_data(self, flow):
        # multi=True preserves repeated keys.
        return flow.request.query.items(multi=True)

    def set_data(self, vals, flow):
        flow.request.query = vals
class HeaderEditor(base.FocusEditor):
    """Shared column setup for the request/response header editors."""

    # Headers are edited as raw bytes so non-ASCII values roundtrip.
    columns = [col_bytes.Column("Key"), col_bytes.Column("Value")]
class RequestHeaderEditor(HeaderEditor):
    """Edit the focused flow's request headers."""

    title = "Edit Request Headers"

    def get_data(self, flow):
        return flow.request.headers.fields

    def set_data(self, vals, flow):
        flow.request.headers = Headers(vals)
class ResponseHeaderEditor(HeaderEditor):
    """Edit the focused flow's response headers."""

    title = "Edit Response Headers"

    def get_data(self, flow):
        return flow.response.headers.fields

    def set_data(self, vals, flow):
        flow.response.headers = Headers(vals)
class RequestMultipartEditor(base.FocusEditor):
    """Edit the focused flow's multipart form body."""

    title = "Edit Multipart Form"
    columns = [col_bytes.Column("Key"), col_bytes.Column("Value")]

    def get_data(self, flow):
        # multi=True preserves repeated keys.
        return flow.request.multipart_form.items(multi=True)

    def set_data(self, vals, flow):
        flow.request.multipart_form = vals
class RequestUrlEncodedEditor(base.FocusEditor):
    """Edit the focused flow's urlencoded form body."""

    title = "Edit UrlEncoded Form"
    columns = [col_text.Column("Key"), col_text.Column("Value")]

    def get_data(self, flow):
        # multi=True preserves repeated keys.
        return flow.request.urlencoded_form.items(multi=True)

    def set_data(self, vals, flow):
        flow.request.urlencoded_form = vals
class PathEditor(base.FocusEditor):
    """Edit the focused flow's request path, one component per row."""

    # TODO: Next row on enter?
    title = "Edit Path Components"
    columns = [
        col_text.Column("Component"),
    ]

    def data_in(self, data):
        # Wrap each path component into a single-column row.
        return [[component] for component in data]

    def data_out(self, data):
        # Unwrap single-column rows back into a flat component list.
        return [row[0] for row in data]

    def get_data(self, flow):
        return self.data_in(flow.request.path_components)

    def set_data(self, vals, flow):
        flow.request.path_components = self.data_out(vals)
class CookieEditor(base.FocusEditor):
    """Edit the focused flow's request cookies."""

    title = "Edit Cookies"
    columns = [
        col_text.Column("Name"),
        col_text.Column("Value"),
    ]

    def get_data(self, flow):
        # multi=True preserves repeated cookie names.
        return flow.request.cookies.items(multi=True)

    def set_data(self, vals, flow):
        flow.request.cookies = vals
class CookieAttributeEditor(base.FocusEditor):
    """
    Subeditor for a single Set-Cookie header's attributes, opened from the
    SetCookieEditor's "Attributes" column.
    """

    title = "Editing Set-Cookie attributes"
    columns = [
        col_text.Column("Name"),
        col_text.Column("Value"),
    ]
    # Set by col_subgrid.Column.keypress before this view is switched in.
    grideditor: base.BaseGridEditor

    def data_in(self, data):
        # Valueless attributes (e.g. HttpOnly) are stored as None; show "".
        return [(k, v or "") for k, v in data]

    def data_out(self, data):
        # Convert empty strings back to None for valueless attributes.
        ret = []
        for i in data:
            if not i[1]:
                ret.append([i[0], None])
            else:
                ret.append(i)
        return ret

    def layout_pushed(self, prev):
        # Edit the parent editor's currently focused attribute list in a
        # nested grid editor; results are written back through the parent.
        if self.grideditor.master.view.focus.flow:
            self._w = base.BaseGridEditor(
                self.grideditor.master,
                self.title,
                self.columns,
                self.grideditor.walker.get_current_value(),
                self.grideditor.set_subeditor_value,
                self.grideditor.walker.focus,
                self.grideditor.walker.focus_col,
            )
        else:
            # No focused flow: show an empty placeholder.
            self._w = urwid.Pile([])
class SetCookieEditor(base.FocusEditor):
    """Edit response Set-Cookie headers as (name, value, attributes) rows."""

    title = "Edit SetCookie Header"
    columns = [
        col_text.Column("Name"),
        col_text.Column("Value"),
        col_subgrid.Column("Attributes", CookieAttributeEditor),
    ]

    def data_in(self, data):
        # Flatten (name, (value, attrs)) pairs into three-column rows.
        return [
            [name, value, attrs.items(multi=True)]
            for name, (value, attrs) in data
        ]

    def data_out(self, data):
        # Re-nest the three-column rows into (name, (value, attrs)) pairs.
        return [[name, (value, attrs)] for name, value, attrs in data]

    def get_data(self, flow):
        return self.data_in(flow.response.cookies.items(multi=True))

    def set_data(self, vals, flow):
        flow.response.cookies = self.data_out(vals)
class OptionsEditor(base.GridEditor, layoutwidget.LayoutWidget):
    """Grid editor for a sequence-valued option; one value per row."""

    title = ""
    columns = [col_text.Column("")]

    def __init__(self, master, name, vals):
        self.name = name
        super().__init__(master, [[i] for i in vals], self.callback)

    def callback(self, vals) -> None:
        # Write the edited values back to the option; report failures in
        # the status bar rather than crashing the UI.
        try:
            setattr(self.master.options, self.name, [i[0] for i in vals])
        except exceptions.OptionsError as v:
            signals.status_message.send(message=str(v))

    def is_error(self, col, val):
        # Validation happens in the options framework, not per cell.
        pass
class DataViewer(base.GridEditor, layoutwidget.LayoutWidget):
    """Read-only grid display for arbitrary data."""

    title = ""

    def __init__(
        self,
        master,
        vals: (list[list[Any]] | list[Any] | Any),
    ) -> None:
        if vals is not None:
            # Whatever vals is, make it a list of rows containing lists of
            # column values.
            if not isinstance(vals, list):
                vals = [vals]
            # Guard against an empty list: the original indexed vals[0]
            # unconditionally, raising IndexError for vals == [].
            if vals and not isinstance(vals[0], list):
                vals = [[i] for i in vals]
            ncols = len(vals[0]) if vals else 0
            self.columns = [col_viewany.Column("")] * ncols
        super().__init__(master, vals, self.callback)

    def callback(self, vals):
        # The viewer is read-only; edits are never committed.
        pass

    def is_error(self, col, val):
        # No validation for displayed data.
        pass
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/__init__.py | mitmproxy/tools/console/grideditor/__init__.py | from . import base
from .editors import CookieAttributeEditor
from .editors import CookieEditor
from .editors import DataViewer
from .editors import OptionsEditor
from .editors import PathEditor
from .editors import QueryEditor
from .editors import RequestHeaderEditor
from .editors import RequestMultipartEditor
from .editors import RequestUrlEncodedEditor
from .editors import ResponseHeaderEditor
from .editors import SetCookieEditor
__all__ = [
"base",
"QueryEditor",
"RequestHeaderEditor",
"ResponseHeaderEditor",
"RequestMultipartEditor",
"RequestUrlEncodedEditor",
"PathEditor",
"CookieEditor",
"CookieAttributeEditor",
"SetCookieEditor",
"OptionsEditor",
"DataViewer",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/base.py | mitmproxy/tools/console/grideditor/base.py | import abc
import copy
import os
from collections.abc import Callable
from collections.abc import Container
from collections.abc import Iterable
from collections.abc import MutableSequence
from collections.abc import Sequence
from typing import Any
from typing import ClassVar
from typing import Literal
from typing import overload
import urwid
import mitmproxy.tools.console.master
from mitmproxy import exceptions
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import signals
from mitmproxy.utils import strutils
@overload
def read_file(filename: str, escaped: Literal[True]) -> bytes: ...
@overload
def read_file(filename: str, escaped: Literal[False]) -> bytes: ...
def read_file(filename: str, escaped: bool) -> bytes:
    """
    Read a file's contents for insertion into a grid cell.

    With escaped=True the file is read as text and interpreted as a
    Python-style escaped string; otherwise the raw bytes are returned.
    Either way the result is bytes — the previous overload declared a str
    return for escaped=False, but "rb" mode always yields bytes.

    Raises:
        exceptions.CommandError: if the file cannot be read or the
            escape syntax is invalid.
    """
    filename = os.path.expanduser(filename)
    try:
        with open(filename, "r" if escaped else "rb") as f:
            d = f.read()
    except OSError as v:
        raise exceptions.CommandError(v)
    if escaped:
        try:
            d = strutils.escaped_str_to_bytes(d)
        except ValueError:
            raise exceptions.CommandError("Invalid Python-style string encoding.")
    return d
class Cell(urwid.WidgetWrap):
    """A single cell in a grid row; concrete columns provide subclasses."""

    def get_data(self):
        """
        Return the cell's current value.

        Raises:
            ValueError, if the current content is invalid.
        """
        raise NotImplementedError()

    def selectable(self):
        return True
class Column(metaclass=abc.ABCMeta):
    """Describes one grid column: how to display, edit, and blank a value."""

    # Set by subgrid columns (col_subgrid) to redirect editing into a nested
    # editor; None means the column is edited inline.
    subeditor: urwid.Edit | None = None

    def __init__(self, heading):
        self.heading = heading

    @abc.abstractmethod
    def Display(self, data) -> Cell:
        pass

    @abc.abstractmethod
    def Edit(self, data) -> Cell:
        pass

    @abc.abstractmethod
    def blank(self) -> Any:
        pass

    def keypress(self, key: str, editor: "GridEditor") -> str | None:
        # Default: the column does not consume any keys.
        return key
class GridRow(urwid.WidgetWrap):
    """
    One grid row: a Columns widget of Display cells, with at most one Edit
    cell when the focused column is being edited.
    """

    def __init__(
        self,
        focused: int | None,
        editing: bool,
        editor: "GridEditor",
        values: tuple[Iterable[bytes], Container[int]],
    ) -> None:
        self.focused = focused
        self.editor = editor
        self.edit_col: Cell | None = None
        # values is (row values, set of column offsets that hold errors).
        errors = values[1]
        self.fields: Sequence[Any] = []
        for i, v in enumerate(values[0]):
            if focused == i and editing:
                self.edit_col = self.editor.columns[i].Edit(v)
                self.fields.append(self.edit_col)
            else:
                w = self.editor.columns[i].Display(v)
                # Style the cell according to its focus/error state.
                if focused == i:
                    if i in errors:
                        w = urwid.AttrMap(w, "focusfield_error")
                    else:
                        w = urwid.AttrMap(w, "focusfield")
                elif i in errors:
                    w = urwid.AttrMap(w, "field_error")
                self.fields.append(w)
        fspecs = self.fields[:]
        if len(self.fields) > 1:
            # The first column is fixed-width so rows line up.
            fspecs[0] = ("fixed", self.editor.first_width + 2, fspecs[0])
        w = urwid.Columns(fspecs, dividechars=2)
        if focused is not None:
            w.focus_position = focused
        super().__init__(w)

    def keypress(self, s, k):
        # While editing, forward keys to the edit cell with its column width.
        if self.edit_col:
            w = self._w.column_widths(s)[self.focused]
            k = self.edit_col.keypress((w,), k)
        return k

    def selectable(self):
        return True
class GridWalker(urwid.ListWalker):
    """
    Stores rows as a list of (rows, errors) tuples, where rows is a list
    and errors is a set with an entry of each offset in rows that is an
    error.
    """

    def __init__(self, lst: Iterable[list], editor: "GridEditor") -> None:
        self.lst: MutableSequence[tuple[Any, set]] = [(i, set()) for i in lst]
        self.editor = editor
        self.focus = 0
        self.focus_col = 0
        self.edit_row: GridRow | None = None

    def _modified(self):
        # Keep the editor's "no values" footer in sync with the data.
        self.editor.show_empty_msg()
        return super()._modified()

    def add_value(self, lst):
        self.lst.append((lst[:], set()))
        self._modified()

    def get_current_value(self):
        if self.lst:
            return self.lst[self.focus][0][self.focus_col]

    def set_current_value(self, val) -> None:
        # Validate through the editor and track per-cell error state.
        errors = self.lst[self.focus][1]
        emsg = self.editor.is_error(self.focus_col, val)
        if emsg:
            signals.status_message.send(message=emsg)
            errors.add(self.focus_col)
        else:
            errors.discard(self.focus_col)
        self.set_value(val, self.focus, self.focus_col, errors)

    def set_value(self, val, focus, focus_col, errors=None):
        if not errors:
            errors = set()
        row = list(self.lst[focus][0])
        row[focus_col] = val
        self.lst[focus] = [tuple(row), errors]  # type: ignore
        self._modified()

    def delete_focus(self):
        if self.lst:
            del self.lst[self.focus]
            # Clamp focus to the remaining rows.
            self.focus = min(len(self.lst) - 1, self.focus)
            self._modified()

    def _insert(self, pos):
        # Insert a blank row at pos and immediately start editing it.
        self.focus = pos
        self.lst.insert(self.focus, ([c.blank() for c in self.editor.columns], set()))
        self.focus_col = 0
        self.start_edit()

    def insert(self):
        return self._insert(self.focus)

    def add(self):
        return self._insert(min(self.focus + 1, len(self.lst)))

    def start_edit(self):
        col = self.editor.columns[self.focus_col]
        # Subgrid columns open their own editor; don't edit inline.
        if self.lst and not col.subeditor:
            self.edit_row = GridRow(
                self.focus_col, True, self.editor, self.lst[self.focus]
            )
            self._modified()

    def stop_edit(self):
        if self.edit_row and self.edit_row.edit_col:
            try:
                val = self.edit_row.edit_col.get_data()
            except ValueError:
                # Invalid content: stay in edit mode.
                return
            self.edit_row = None
            self.set_current_value(val)

    def left(self):
        self.focus_col = max(self.focus_col - 1, 0)
        self._modified()

    def right(self):
        self.focus_col = min(self.focus_col + 1, len(self.editor.columns) - 1)
        self._modified()

    def tab_next(self):
        # Advance to the next cell, wrapping to the next row's first column.
        self.stop_edit()
        if self.focus_col < len(self.editor.columns) - 1:
            self.focus_col += 1
        elif self.focus != len(self.lst) - 1:
            self.focus_col = 0
            self.focus += 1
        self._modified()

    def get_focus(self):
        if self.edit_row:
            return self.edit_row, self.focus
        elif self.lst:
            return (
                GridRow(self.focus_col, False, self.editor, self.lst[self.focus]),
                self.focus,
            )
        else:
            return None, None

    def set_focus(self, focus):
        self.stop_edit()
        self.focus = focus
        self._modified()

    def get_next(self, pos):
        if pos + 1 >= len(self.lst):
            return None, None
        return GridRow(None, False, self.editor, self.lst[pos + 1]), pos + 1

    def get_prev(self, pos):
        if pos - 1 < 0:
            return None, None
        return GridRow(None, False, self.editor, self.lst[pos - 1]), pos - 1
class GridListBox(urwid.ListBox):
    """ListBox over a GridWalker; a distinct type for styling and hooks."""

    def __init__(self, lw):
        super().__init__(lw)
# Cap on the computed width of the first (fixed-width) grid column.
FIRST_WIDTH_MAX = 40
class BaseGridEditor(urwid.WidgetWrap):
    """
    Core grid editor widget: a header of column headings over a list of
    editable rows, with edited data delivered through a callback when the
    window is popped.
    """

    title: str = ""
    keyctx: ClassVar[str] = "grideditor"

    def __init__(
        self,
        master: "mitmproxy.tools.console.master.ConsoleMaster",
        title,
        columns,
        value: Any,
        callback: Callable[..., None],
        *cb_args,
        **cb_kwargs,
    ) -> None:
        # Deep-copy so edits never mutate the caller's data in place.
        value = self.data_in(copy.deepcopy(value))
        self.master = master
        self.title = title
        self.columns = columns
        self.value = value
        self.callback = callback
        self.cb_args = cb_args
        self.cb_kwargs = cb_kwargs
        first_width = 20
        if value:
            for r in value:
                assert len(r) == len(self.columns)
                # NOTE(review): len(r) is the column count, not the width of
                # the first cell — this looks like it was meant to be
                # len(r[0]); as written first_width effectively stays at 20.
                # Left unchanged since r[0] may not support len().
                first_width = max(len(r), first_width)
        self.first_width = min(first_width, FIRST_WIDTH_MAX)
        # Build the heading row only if at least one column has a heading.
        h = None
        if any(col.heading for col in self.columns):
            headings = []
            for i, col in enumerate(self.columns):
                c = urwid.Text(col.heading)
                if i == 0 and len(self.columns) > 1:
                    headings.append(("fixed", first_width + 2, c))
                else:
                    headings.append(c)
            h = urwid.Columns(headings, dividechars=2)
            h = urwid.AttrMap(h, "heading")
        self.walker = GridWalker(self.value, self)
        self.lb = GridListBox(self.walker)
        w = urwid.Frame(self.lb, header=h)
        super().__init__(w)
        self.show_empty_msg()

    def layout_popping(self):
        # Deliver all error-free, non-empty rows through the callback.
        res = []
        for i in self.walker.lst:
            if not i[1] and any([x for x in i[0]]):
                res.append(i[0])
        self.callback(self.data_out(res), *self.cb_args, **self.cb_kwargs)

    def show_empty_msg(self):
        # Show a hint footer when the grid has no rows.
        if self.walker.lst:
            self._w.footer = None
        else:
            self._w.footer = urwid.Text(
                [
                    ("highlight", "No values - you should add some. Press "),
                    ("key", "?"),
                    ("highlight", " for help."),
                ]
            )

    def set_subeditor_value(self, val, focus, focus_col):
        # Write-back hook used by nested (subgrid) editors.
        self.walker.set_value(val, focus, focus_col)

    def keypress(self, size, key):
        if self.walker.edit_row:
            # Edit mode: esc cancels, tab advances (re-entering edit mode
            # when moving within the same row), all else goes to the cell.
            if key == "esc":
                self.walker.stop_edit()
            elif key == "tab":
                pf, pfc = self.walker.focus, self.walker.focus_col
                self.walker.tab_next()
                if self.walker.focus == pf and self.walker.focus_col != pfc:
                    self.walker.start_edit()
            else:
                self._w.keypress(size, key)
            return None
        column = self.columns[self.walker.focus_col]
        if key == "m_start":
            self.walker.set_focus(0)
        elif key == "m_next":
            self.walker.tab_next()
        elif key == "m_end":
            self.walker.set_focus(len(self.walker.lst) - 1)
        elif key == "left":
            self.walker.left()
        elif key == "right":
            self.walker.right()
        elif column.keypress(key, self) and not self.handle_key(key):
            # Neither the column nor the editor consumed the key.
            return self._w.keypress(size, key)

    def data_out(self, data: Sequence[list]) -> Any:
        """
        Called on raw list data, before data is returned through the
        callback.
        """
        return data

    def data_in(self, data: Any) -> Iterable[list]:
        """
        Called to prepare provided data.
        """
        return data

    def is_error(self, col: int, val: Any) -> str | None:
        """
        Return None, or a string error message.
        """
        return None

    def handle_key(self, key):
        if key == "?":
            signals.pop_view_state.send()
            return False

    def cmd_add(self):
        self.walker.add()

    def cmd_insert(self):
        self.walker.insert()

    def cmd_delete(self):
        self.walker.delete_focus()

    def cmd_read_file(self, path):
        self.walker.set_current_value(read_file(path, False))

    def cmd_read_file_escaped(self, path):
        self.walker.set_current_value(read_file(path, True))

    def cmd_spawn_editor(self):
        # Round-trip the current cell value through an external editor.
        o = self.walker.get_current_value()
        if o is not None:
            n = self.master.spawn_editor(o)
            n = strutils.clean_hanging_newline(n)
            self.walker.set_current_value(n)
class GridEditor(BaseGridEditor):
    """BaseGridEditor variant configured via class attributes (title/columns)."""

    title = ""
    columns: Sequence[Column] = ()
    keyctx: ClassVar[str] = "grideditor"

    def __init__(
        self,
        master: "mitmproxy.tools.console.master.ConsoleMaster",
        value: Any,
        callback: Callable[..., None],
        *cb_args,
        **cb_kwargs,
    ) -> None:
        super().__init__(
            master, self.title, self.columns, value, callback, *cb_args, **cb_kwargs
        )
class FocusEditor(urwid.WidgetWrap, layoutwidget.LayoutWidget):
    """
    A specialised GridEditor that edits the current focused flow.
    """

    keyctx: ClassVar[str] = "grideditor"

    def __init__(self, master):
        self.master = master

    def call(self, v, name, *args, **kwargs):
        # Call a method on v only if it exists (duck-typed delegation).
        f = getattr(v, name, None)
        if f:
            f(*args, **kwargs)

    def get_data(self, flow):
        """
        Retrieve the data to edit from the current flow.
        """
        raise NotImplementedError

    def set_data(self, vals, flow):
        """
        Set the current data on the flow.
        """
        raise NotImplementedError

    def set_data_update(self, vals, flow) -> None:
        # Write the data back, then notify the UI of the flow change.
        self.set_data(vals, flow)
        signals.flow_change.send(flow=flow)

    def key_responder(self):
        return self._w

    def layout_popping(self):
        self.call(self._w, "layout_popping")

    def layout_pushed(self, prev):
        # (Re)build the inner editor for the currently focused flow, or
        # show an empty placeholder when there is none.
        if self.master.view.focus.flow:
            self._w = BaseGridEditor(
                self.master,
                self.title,
                self.columns,
                self.get_data(self.master.view.focus.flow),
                self.set_data_update,
                self.master.view.focus.flow,
            )
        else:
            self._w = urwid.Pile([])
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/grideditor/col_viewany.py | mitmproxy/tools/console/grideditor/col_viewany.py | """
A display-only column that displays any data type.
"""
from typing import Any
import urwid
from mitmproxy.tools.console.grideditor import base
from mitmproxy.utils import strutils
class Column(base.Column):
    """Display-only column that can render any data type."""

    def Display(self, data):
        return Display(data)

    # Viewer cells are never truly edited; reuse the display cell.
    Edit = Display

    def blank(self):
        return ""
class Display(base.Cell):
    """Read-only cell that renders any value as text."""

    def __init__(self, data: Any) -> None:
        self.data = data
        # Render bytes as an escaped string; anything else non-str via repr.
        if isinstance(data, bytes):
            shown = strutils.bytes_to_escaped_str(data)
        elif isinstance(data, str):
            shown = data
        else:
            shown = repr(data)
        super().__init__(urwid.Text(shown, wrap="any"))

    def get_data(self) -> Any:
        return self.data
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/commander/__init__.py | mitmproxy/tools/console/commander/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/console/commander/commander.py | mitmproxy/tools/console/commander/commander.py | import abc
from collections.abc import Sequence
from typing import NamedTuple
import urwid
from urwid.text_layout import calc_coords
import mitmproxy.command
import mitmproxy.flow
import mitmproxy.master
import mitmproxy.types
class Completer:
    """Interface for completion providers used by the command prompt."""

    @abc.abstractmethod
    def cycle(self, forward: bool = True) -> str:
        """Return the next (or previous, if not *forward*) completion candidate."""
        raise NotImplementedError()
class ListCompleter(Completer):
    """Cycles through the sorted subset of *options* that begin with *start*."""

    def __init__(
        self,
        start: str,
        options: Sequence[str],
    ) -> None:
        self.start = start
        # Only candidates sharing the typed prefix are offered, in sorted order.
        self.options: list[str] = sorted(o for o in options if o.startswith(start))
        # -1 means "no candidate selected yet".
        self.pos = -1

    def cycle(self, forward: bool = True) -> str:
        if not self.options:
            # Nothing matches: leave the typed prefix untouched.
            return self.start
        if self.pos == -1:
            # First invocation lands on the first (or last) candidate.
            self.pos = 0 if forward else len(self.options) - 1
        else:
            step = 1 if forward else -1
            self.pos = (self.pos + step) % len(self.options)
        return self.options[self.pos]
class CompletionState(NamedTuple):
    """Snapshot of an in-progress tab completion."""

    completer: Completer  # yields candidates for the part being completed
    parsed: Sequence[mitmproxy.command.ParseResult]  # already-parsed prefix preceding it
class CommandBuffer:
    """Editable text buffer backing the command prompt: cursor handling,
    syntax-aware rendering, and tab completion."""

    def __init__(self, master: mitmproxy.master.Master, start: str = "") -> None:
        self.master = master
        self.text = start
        # Cursor is always within the range [0:len(buffer)].
        self._cursor = len(self.text)
        self.completion: CompletionState | None = None

    @property
    def cursor(self) -> int:
        return self._cursor

    @cursor.setter
    def cursor(self, x) -> None:
        # Clamp to the valid range instead of raising on out-of-bounds moves.
        if x < 0:
            self._cursor = 0
        elif x > len(self.text):
            self._cursor = len(self.text)
        else:
            self._cursor = x

    def set_text(self, text: str) -> None:
        """Replace the buffer contents and move the cursor to the end."""
        self.text = text
        self._cursor = len(self.text)
        self.render()

    def render(self):
        """Return urwid text markup for the current buffer contents.

        Valid parts are highlighted by type, invalid parts are flagged, and
        hints for still-missing parameters are appended.
        """
        parts, remaining = self.master.commands.parse_partial(self.text)
        ret = []
        if not parts:
            # Means we just received the leader, so we need to give a blank
            # text to the widget to render or it crashes
            ret.append(("text", ""))
        else:
            for p in parts:
                if p.valid:
                    if p.type == mitmproxy.types.Cmd:
                        ret.append(("commander_command", p.value))
                    else:
                        ret.append(("text", p.value))
                elif p.value:
                    ret.append(("commander_invalid", p.value))
            if remaining:
                # Separate the hints from the last typed part if needed.
                if parts[-1].type != mitmproxy.types.Space:
                    ret.append(("text", " "))
                for param in remaining:
                    ret.append(("commander_hint", f"{param} "))
        return ret

    def left(self) -> None:
        self.cursor = self.cursor - 1

    def right(self) -> None:
        self.cursor = self.cursor + 1

    def cycle_completion(self, forward: bool = True) -> None:
        """Advance tab completion, lazily creating the completer on first use."""
        if not self.completion:
            parts, remaining = self.master.commands.parse_partial(
                self.text[: self.cursor]
            )
            if parts and parts[-1].type != mitmproxy.types.Space:
                # Complete the partially-typed last part.
                type_to_complete = parts[-1].type
                cycle_prefix = parts[-1].value
                parsed = parts[:-1]
            elif remaining:
                # Cursor sits after a separator: complete the next parameter.
                type_to_complete = remaining[0].type
                cycle_prefix = ""
                parsed = parts
            else:
                return
            ct = mitmproxy.types.CommandTypes.get(type_to_complete, None)
            if ct:
                self.completion = CompletionState(
                    completer=ListCompleter(
                        cycle_prefix,
                        ct.completion(
                            self.master.commands, type_to_complete, cycle_prefix
                        ),
                    ),
                    parsed=parsed,
                )
        if self.completion:
            nxt = self.completion.completer.cycle(forward)
            # Rebuild the buffer from the untouched prefix plus the candidate.
            buf = "".join([i.value for i in self.completion.parsed]) + nxt
            self.text = buf
            self.cursor = len(self.text)

    def backspace(self) -> None:
        if self.cursor == 0:
            return
        self.text = self.text[: self.cursor - 1] + self.text[self.cursor :]
        self.cursor = self.cursor - 1
        # Any edit invalidates the current completion cycle.
        self.completion = None

    def delete(self) -> None:
        if self.cursor == len(self.text):
            return
        self.text = self.text[: self.cursor] + self.text[self.cursor + 1 :]
        self.completion = None

    def insert(self, k: str) -> None:
        """
        Inserts text at the cursor.
        """
        # We don't want to insert a space before the command
        if k == " " and self.text[0 : self.cursor].strip() == "":
            return
        self.text = self.text[: self.cursor] + k + self.text[self.cursor :]
        self.cursor += len(k)
        self.completion = None
class CommandEdit(urwid.WidgetWrap):
    """Single-line command prompt widget (": ...") with emacs-style editing
    keys, history navigation, and tab completion."""

    leader = ": "

    def __init__(self, master: mitmproxy.master.Master, text: str) -> None:
        super().__init__(urwid.Text(self.leader))
        self.master = master
        # True while history navigation is filtered by the partially-typed command.
        self.active_filter = False
        self.filter_str = ""
        self.cbuf = CommandBuffer(master, text)
        self.update()

    def keypress(self, size, key) -> None:
        if key == "delete":
            self.cbuf.delete()
        elif key == "ctrl a" or key == "home":
            self.cbuf.cursor = 0
        elif key == "ctrl e" or key == "end":
            self.cbuf.cursor = len(self.cbuf.text)
        elif key == "meta b":
            # Previous word boundary; rfind's -1 is clamped to 0 by the setter.
            self.cbuf.cursor = self.cbuf.text.rfind(" ", 0, self.cbuf.cursor)
        elif key == "meta f":
            # Next word boundary, or end of buffer.
            pos = self.cbuf.text.find(" ", self.cbuf.cursor + 1)
            if pos == -1:
                pos = len(self.cbuf.text)
            self.cbuf.cursor = pos
        elif key == "ctrl w":
            # Delete the word before the cursor.
            prev_cursor = self.cbuf.cursor
            pos = self.cbuf.text.rfind(" ", 0, self.cbuf.cursor - 1)
            if pos == -1:
                new_text = self.cbuf.text[self.cbuf.cursor :]
                cursor_pos = 0
            else:
                txt_after = self.cbuf.text[self.cbuf.cursor :]
                txt_before = self.cbuf.text[0:pos]
                new_text = f"{txt_before} {txt_after}"
                cursor_pos = prev_cursor - (prev_cursor - pos) + 1
            self.cbuf.set_text(new_text)
            self.cbuf.cursor = cursor_pos
        elif key == "backspace":
            self.cbuf.backspace()
            if self.cbuf.text == "":
                # Buffer emptied: drop any active history filter.
                self.active_filter = False
                self.master.commands.call("commands.history.filter", "")
                self.filter_str = ""
        elif key == "left" or key == "ctrl b":
            self.cbuf.left()
        elif key == "right" or key == "ctrl f":
            self.cbuf.right()
        elif key == "up" or key == "ctrl p":
            # First "up" starts filtering history by the current input.
            if self.active_filter is False:
                self.active_filter = True
                self.filter_str = self.cbuf.text
                self.master.commands.call("commands.history.filter", self.cbuf.text)
            cmd = self.master.commands.execute("commands.history.prev")
            self.cbuf = CommandBuffer(self.master, cmd)
        elif key == "down" or key == "ctrl n":
            prev_cmd = self.cbuf.text
            cmd = self.master.commands.execute("commands.history.next")
            if cmd == "":
                if prev_cmd == self.filter_str:
                    self.cbuf = CommandBuffer(self.master, prev_cmd)
                else:
                    # Walked past the newest entry: reset filter and buffer.
                    self.active_filter = False
                    self.master.commands.call("commands.history.filter", "")
                    self.filter_str = ""
                    self.cbuf = CommandBuffer(self.master, "")
            else:
                self.cbuf = CommandBuffer(self.master, cmd)
        elif key == "shift tab":
            self.cbuf.cycle_completion(False)
        elif key == "tab":
            self.cbuf.cycle_completion()
        elif len(key) == 1:
            self.cbuf.insert(key)
        self.update()

    def update(self) -> None:
        """Redraw the prompt from the current buffer contents."""
        self._w.set_text([self.leader, self.cbuf.render()])

    def render(self, size, focus=False) -> urwid.Canvas:
        (maxcol,) = size
        canv = self._w.render((maxcol,))
        canv = urwid.CompositeCanvas(canv)
        # Overlay the text cursor at the logical buffer position.
        canv.cursor = self.get_cursor_coords((maxcol,))
        return canv

    def get_cursor_coords(self, size) -> tuple[int, int]:
        """Translate the logical cursor position into screen coordinates."""
        p = self.cbuf.cursor + len(self.leader)
        trans = self._w.get_line_translation(size[0])
        x, y = calc_coords(self._w.get_text()[0], trans, p)
        return x, y

    def get_edit_text(self) -> str:
        return self.cbuf.text
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/web/master.py | mitmproxy/tools/web/master.py | import errno
import logging
from typing import cast
import tornado.httpserver
import tornado.ioloop
from mitmproxy import addons
from mitmproxy import flow
from mitmproxy import log
from mitmproxy import master
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy.addons import errorcheck
from mitmproxy.addons import eventstore
from mitmproxy.addons import intercept
from mitmproxy.addons import readfile
from mitmproxy.addons import view
from mitmproxy.addons.proxyserver import Proxyserver
from mitmproxy.tools.web import app
from mitmproxy.tools.web import static_viewer
from mitmproxy.tools.web import webaddons
logger = logging.getLogger(__name__)
class WebMaster(master.Master):
    """Master for mitmweb: connects the flow view and event store to
    websocket broadcasts and runs the tornado web server."""

    def __init__(self, opts: options.Options, with_termlog: bool = True):
        super().__init__(opts, with_termlog=with_termlog)
        self.view = view.View()
        # Mirror every view change to connected web clients.
        self.view.sig_view_add.connect(self._sig_view_add)
        self.view.sig_view_remove.connect(self._sig_view_remove)
        self.view.sig_view_update.connect(self._sig_view_update)
        self.view.sig_view_refresh.connect(self._sig_view_refresh)

        self.events = eventstore.EventStore()
        self.events.sig_add.connect(self._sig_events_add)
        self.events.sig_refresh.connect(self._sig_events_refresh)

        self.options.changed.connect(self._sig_options_update)

        self.addons.add(*addons.default_addons())
        self.addons.add(
            webaddons.WebAddon(),
            webaddons.WebAuth(),
            intercept.Intercept(),
            readfile.ReadFileStdin(),
            static_viewer.StaticViewer(),
            self.view,
            self.events,
            errorcheck.ErrorCheck(),
        )
        self.app = app.Application(self, self.options.web_debug)
        self.proxyserver: Proxyserver = self.addons.get("proxyserver")
        self.proxyserver.servers.changed.connect(self._sig_servers_changed)

    def _sig_view_add(self, flow: flow.Flow) -> None:
        app.ClientConnection.broadcast_flow("flows/add", flow)

    def _sig_view_update(self, flow: flow.Flow) -> None:
        app.ClientConnection.broadcast_flow("flows/update", flow)

    def _sig_view_remove(self, flow: flow.Flow, index: int) -> None:
        # Only the id is needed to remove the flow client-side.
        app.ClientConnection.broadcast(
            type="flows/remove",
            payload=flow.id,
        )

    def _sig_view_refresh(self) -> None:
        app.ClientConnection.broadcast_flow_reset()

    def _sig_events_add(self, entry: log.LogEntry) -> None:
        app.ClientConnection.broadcast(
            type="events/add",
            payload=app.logentry_to_json(entry),
        )

    def _sig_events_refresh(self) -> None:
        app.ClientConnection.broadcast(
            type="events/reset",
        )

    def _sig_options_update(self, updated: set[str]) -> None:
        # Only push the options that actually changed.
        options_dict = optmanager.dump_dicts(self.options, updated)
        app.ClientConnection.broadcast(
            type="options/update",
            payload=options_dict,
        )

    def _sig_servers_changed(self) -> None:
        app.ClientConnection.broadcast(
            type="state/update",
            payload={
                "servers": {
                    s.mode.full_spec: s.to_json() for s in self.proxyserver.servers
                }
            },
        )

    @property
    def web_url(self) -> str:
        """The URL (incl. auth token if applicable) where the UI is served."""
        return cast(webaddons.WebAuth, self.addons.get("webauth")).web_url

    async def running(self):
        # Register tornado with the current event loop
        tornado.ioloop.IOLoop.current()

        # Add our web app.
        http_server = tornado.httpserver.HTTPServer(
            self.app, max_buffer_size=2**32
        )  # 4GB
        try:
            http_server.listen(self.options.web_port, self.options.web_host)
        except OSError as e:
            message = f"Web server failed to listen on {self.options.web_host or '*'}:{self.options.web_port} with {e}"
            if e.errno == errno.EADDRINUSE:
                message += f"\nTry specifying a different port by using `--set web_port={self.options.web_port + 2}`."
            raise OSError(e.errno, message, e.filename) from e

        logger.info(f"Web server listening at {self.web_url}")

        return await super().running()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/web/__init__.py | mitmproxy/tools/web/__init__.py | from mitmproxy.tools.web import master
__all__ = ["master"]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/web/web_columns.py | mitmproxy/tools/web/web_columns.py | # Auto-generated by web/gen/web_columns.py
AVAILABLE_WEB_COLUMNS = [
"icon",
"index",
"method",
"version",
"path",
"quickactions",
"size",
"status",
"time",
"timestamp",
"tls",
"comment",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/web/webaddons.py | mitmproxy/tools/web/webaddons.py | from __future__ import annotations
import hmac
import logging
import secrets
import webbrowser
from collections.abc import Sequence
from typing import TYPE_CHECKING
import argon2
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy.tools.web.web_columns import AVAILABLE_WEB_COLUMNS
if TYPE_CHECKING:
from mitmproxy.tools.web.master import WebMaster
logger = logging.getLogger(__name__)
class WebAuth:
    """Authentication addon protecting the mitmweb user interface.

    Passwords starting with "$" are treated as argon2 hashes; anything else is
    a plaintext password. If nothing is configured, a random token is generated
    on startup and embedded into the URL that is printed/opened.
    """

    _password: str
    _hasher: argon2.PasswordHasher

    def __init__(self):
        # Random fallback token; replaced in configure() if web_password is set.
        self._password = secrets.token_hex(16)
        self._hasher = argon2.PasswordHasher()

    def load(self, loader):
        # Fix: the original help text was missing spaces between the
        # concatenated string fragments ("...on startup.For automated..."),
        # producing garbled option documentation.
        loader.add_option(
            "web_password",
            str,
            "",
            "Password to protect the mitmweb user interface. "
            "Values starting with `$` are interpreted as an argon2 hash, "
            "everything else is considered a plaintext password. "
            "If no password is provided, a random token is generated on startup. "
            "For automated calls, you can pass the password as token query parameter "
            "or as `Authorization: Bearer ...` header.",
        )

    def configure(self, updated) -> None:
        if "web_password" in updated:
            if ctx.options.web_password.startswith("$"):
                # Validate the hash eagerly so a bad value fails at startup.
                try:
                    argon2.extract_parameters(ctx.options.web_password)
                except argon2.exceptions.InvalidHashError:
                    raise exceptions.OptionsError(
                        "`web_password` starts with `$`, but it's not a valid argon2 hash."
                    )
            elif ctx.options.web_password:
                logger.warning(
                    "Using a plaintext password to protect the mitmweb user interface. "
                    "Consider using an argon2 hash for `web_password` instead."
                )
            self._password = ctx.options.web_password or secrets.token_hex(16)

    @property
    def web_url(self) -> str:
        """URL to reach the web UI, including the auth token when safe to show."""
        if ctx.options.web_password:
            auth = ""  # We don't want to print plaintext passwords (and it doesn't work for argon2 anyhow).
        else:
            auth = f"?token={self._password}"
        web_host = ctx.options.web_host
        if ":" in web_host:  # ipv6
            web_host = f"[{web_host}]"
        # noinspection HttpUrlsUsage
        return f"http://{web_host}:{ctx.options.web_port}/{auth}"

    @staticmethod
    def auth_cookie_name() -> str:
        # Port-scoped so that multiple mitmweb instances don't clobber each other.
        return f"mitmproxy-auth-{ctx.options.web_port}"

    def is_valid_password(self, password: str) -> bool:
        """Check *password* against the configured hash or plaintext secret."""
        if self._password.startswith("$"):
            try:
                return self._hasher.verify(self._password, password)
            except argon2.exceptions.VerificationError:
                return False
        else:
            # Constant-time comparison to avoid timing side channels.
            return hmac.compare_digest(
                self._password,
                password,
            )
class WebAddon:
    """Registers mitmweb's core options and opens a browser on startup."""

    def load(self, loader):
        loader.add_option("web_open_browser", bool, True, "Start a browser.")
        loader.add_option("web_debug", bool, False, "Enable mitmweb debugging.")
        loader.add_option("web_port", int, 8081, "Web UI port.")
        loader.add_option("web_host", str, "127.0.0.1", "Web UI host.")
        loader.add_option(
            "web_columns",
            Sequence[str],
            ["tls", "icon", "path", "method", "status", "size", "time"],
            f"Columns to show in the flow list. Can be one of the following: {', '.join(AVAILABLE_WEB_COLUMNS)}",
        )

    def running(self):
        # hasattr guard: options may not be loaded when running under a
        # different tool/master (e.g. in tests).
        if hasattr(ctx.options, "web_open_browser") and ctx.options.web_open_browser:
            master: WebMaster = ctx.master  # type: ignore
            success = open_browser(master.web_url)
            if not success:
                logger.info(
                    f"No web browser found. Please open a browser and point it to {master.web_url}",
                )
            if not success and not ctx.options.web_password:
                logger.info(
                    f"You can configure a fixed authentication token by setting the `web_password` option "
                    f"(https://docs.mitmproxy.org/stable/concepts-options/#web_password).",
                )
def open_browser(url: str) -> bool:
    """
    Try to open *url* in a real browser window.

    In contrast to webbrowser.open, only a curated list of GUI browsers is
    tried, so this gracefully degrades to a no-op on headless servers (where
    webbrowser.open might otherwise launch a text-mode browser such as lynx).

    Returns:
        True if a browser was launched, False if no suitable browser was found.
    """
    candidates = (
        "windows-default",
        "macosx",
        "wslview %s",
        "gio",
        "x-www-browser",
        "gnome-open %s",
        "xdg-open",
        "google-chrome",
        "chrome",
        "chromium",
        "chromium-browser",
        "firefox",
        "opera",
        "safari",
    )
    for name in candidates:
        try:
            controller = webbrowser.get(name)
        except webbrowser.Error:
            continue
        if controller.open(url):
            return True
    return False
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/web/app.py | mitmproxy/tools/web/app.py | from __future__ import annotations
import asyncio
import functools
import hashlib
import json
import logging
import os.path
import re
import secrets
import sys
from collections.abc import Callable
from collections.abc import Sequence
from io import BytesIO
from typing import Any
from typing import Awaitable
from typing import ClassVar
from typing import Concatenate
from typing import Literal
from typing import Optional
import tornado.escape
import tornado.web
import tornado.websocket
import mitmproxy.flow
import mitmproxy.tools.web.master
import mitmproxy_rs
from mitmproxy import certs
from mitmproxy import command
from mitmproxy import contentviews
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import optmanager
from mitmproxy import version
from mitmproxy.dns import DNSFlow
from mitmproxy.http import HTTPFlow
from mitmproxy.tcp import TCPFlow
from mitmproxy.tcp import TCPMessage
from mitmproxy.tools.web.webaddons import WebAuth
from mitmproxy.udp import UDPFlow
from mitmproxy.udp import UDPMessage
from mitmproxy.utils import asyncio_utils
from mitmproxy.utils.emoji import emoji
from mitmproxy.utils.strutils import always_str
from mitmproxy.utils.strutils import cut_after_n_lines
from mitmproxy.websocket import WebSocketMessage
TRANSPARENT_PNG = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08"
b"\x04\x00\x00\x00\xb5\x1c\x0c\x02\x00\x00\x00\x0bIDATx\xdac\xfc\xff\x07"
b"\x00\x02\x00\x01\xfc\xa8Q\rh\x00\x00\x00\x00IEND\xaeB`\x82"
)
logger = logging.getLogger(__name__)
def cert_to_json(certs: Sequence[certs.Cert]) -> dict | None:
    """Serialize the first (leaf) certificate of a chain for the web UI.

    Returns None when the chain is empty.
    """
    if not certs:
        return None
    leaf = certs[0]
    return {
        "keyinfo": leaf.keyinfo,
        "sha256": leaf.fingerprint().hex(),
        "notbefore": int(leaf.notbefore.timestamp()),
        "notafter": int(leaf.notafter.timestamp()),
        "serial": str(leaf.serial),
        "subject": leaf.subject,
        "issuer": leaf.issuer,
        "altnames": [str(x.value) for x in leaf.altnames],
    }
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
    """
    Remove flow message content and cert to save transmission space.

    Args:
        flow: The original flow.

    Sync with web/src/flow.ts.
    """
    f = {
        "id": flow.id,
        "intercepted": flow.intercepted,
        "is_replay": flow.is_replay,
        "type": flow.type,
        "modified": flow.modified(),
        "marked": emoji.get(flow.marked, "🔴") if flow.marked else "",
        "comment": flow.comment,
        "timestamp_created": flow.timestamp_created,
    }

    if flow.client_conn:
        f["client_conn"] = {
            "id": flow.client_conn.id,
            "peername": flow.client_conn.peername,
            "sockname": flow.client_conn.sockname,
            "tls_established": flow.client_conn.tls_established,
            "cert": cert_to_json(flow.client_conn.certificate_list),
            "sni": flow.client_conn.sni,
            "cipher": flow.client_conn.cipher,
            "alpn": always_str(flow.client_conn.alpn, "ascii", "backslashreplace"),
            "tls_version": flow.client_conn.tls_version,
            "timestamp_start": flow.client_conn.timestamp_start,
            "timestamp_tls_setup": flow.client_conn.timestamp_tls_setup,
            "timestamp_end": flow.client_conn.timestamp_end,
        }

    if flow.server_conn:
        f["server_conn"] = {
            "id": flow.server_conn.id,
            "peername": flow.server_conn.peername,
            "sockname": flow.server_conn.sockname,
            "address": flow.server_conn.address,
            "tls_established": flow.server_conn.tls_established,
            "cert": cert_to_json(flow.server_conn.certificate_list),
            "sni": flow.server_conn.sni,
            "cipher": flow.server_conn.cipher,
            "alpn": always_str(flow.server_conn.alpn, "ascii", "backslashreplace"),
            "tls_version": flow.server_conn.tls_version,
            "timestamp_start": flow.server_conn.timestamp_start,
            "timestamp_tcp_setup": flow.server_conn.timestamp_tcp_setup,
            "timestamp_tls_setup": flow.server_conn.timestamp_tls_setup,
            "timestamp_end": flow.server_conn.timestamp_end,
        }
    if flow.error:
        f["error"] = flow.error.get_state()

    if isinstance(flow, HTTPFlow):
        content_length: int | None
        content_hash: str | None

        # Content itself is never sent; the UI fetches it on demand and uses
        # the hash to detect changes.
        if flow.request.raw_content is not None:
            content_length = len(flow.request.raw_content)
            content_hash = hashlib.sha256(flow.request.raw_content).hexdigest()
        else:
            content_length = None
            content_hash = None
        f["request"] = {
            "method": flow.request.method,
            "scheme": flow.request.scheme,
            "host": flow.request.host,
            "port": flow.request.port,
            "path": flow.request.path,
            "http_version": flow.request.http_version,
            "headers": tuple(flow.request.headers.items(True)),
            "contentLength": content_length,
            "contentHash": content_hash,
            "timestamp_start": flow.request.timestamp_start,
            "timestamp_end": flow.request.timestamp_end,
            "pretty_host": flow.request.pretty_host,
        }
        if flow.response:
            if flow.response.raw_content is not None:
                content_length = len(flow.response.raw_content)
                content_hash = hashlib.sha256(flow.response.raw_content).hexdigest()
            else:
                content_length = None
                content_hash = None
            f["response"] = {
                "http_version": flow.response.http_version,
                "status_code": flow.response.status_code,
                "reason": flow.response.reason,
                "headers": tuple(flow.response.headers.items(True)),
                "contentLength": content_length,
                "contentHash": content_hash,
                "timestamp_start": flow.response.timestamp_start,
                "timestamp_end": flow.response.timestamp_end,
            }
            if flow.response.data.trailers:
                f["response"]["trailers"] = tuple(
                    flow.response.data.trailers.items(True)
                )
        if flow.websocket:
            # Only aggregate metadata; individual messages are fetched separately.
            f["websocket"] = {
                "messages_meta": {
                    "contentLength": sum(
                        len(x.content) for x in flow.websocket.messages
                    ),
                    "count": len(flow.websocket.messages),
                    "timestamp_last": flow.websocket.messages[-1].timestamp
                    if flow.websocket.messages
                    else None,
                },
                "closed_by_client": flow.websocket.closed_by_client,
                "close_code": flow.websocket.close_code,
                "close_reason": flow.websocket.close_reason,
                "timestamp_end": flow.websocket.timestamp_end,
            }
    elif isinstance(flow, (TCPFlow, UDPFlow)):
        f["messages_meta"] = {
            "contentLength": sum(len(x.content) for x in flow.messages),
            "count": len(flow.messages),
            "timestamp_last": flow.messages[-1].timestamp if flow.messages else None,
        }
    elif isinstance(flow, DNSFlow):
        f["request"] = flow.request.to_json()
        if flow.response:
            f["response"] = flow.response.to_json()

    return f
def logentry_to_json(e: log.LogEntry) -> dict:
    """Convert a log entry into its JSON-serializable dict representation."""
    return dict(
        id=id(e),  # we just need some kind of id.
        message=e.msg,
        level=e.level,
    )
class APIError(tornado.web.HTTPError):
    """HTTP error whose log_message is returned verbatim as the response body
    (see RequestHandler.write_error)."""

    pass
class AuthRequestHandler(tornado.web.RequestHandler):
    """Base handler that enforces mitmweb authentication on every request
    by wrapping all HTTP verb methods of subclasses."""

    # Value of the signed session cookie once a client has authenticated.
    AUTH_COOKIE_VALUE = b"y"

    def __init_subclass__(cls, **kwargs):
        """Automatically wrap all request handlers with `_require_auth`."""
        for method in cls.SUPPORTED_METHODS:
            method = method.lower()
            fn = getattr(cls, method)
            if fn is not tornado.web.RequestHandler._unimplemented_method:
                setattr(cls, method, AuthRequestHandler._require_auth(fn))

    def auth_fail(self, invalid_password: bool) -> None:
        """
        Will be called when returning a 403.
        May write a login form as the response.
        """

    @staticmethod
    def _require_auth[**P, R](
        fn: Callable[Concatenate[AuthRequestHandler, P], R],
    ) -> Callable[Concatenate[AuthRequestHandler, P], R | None]:
        @functools.wraps(fn)
        def wrapper(
            self: AuthRequestHandler, *args: P.args, **kwargs: P.kwargs
        ) -> R | None:
            if not self.current_user:
                # Accept the password either as `Authorization: Bearer ...`
                # header or as `?token=...` query parameter.
                password = ""
                if auth_header := self.request.headers.get("Authorization"):
                    auth_scheme, _, auth_params = auth_header.partition(" ")
                    if auth_scheme == "Bearer":
                        password = auth_params
                if not password:
                    password = self.get_argument("token", default="")
                if not self.settings["is_valid_password"](password):
                    self.set_status(403)
                    self.auth_fail(bool(password))
                    return None
                # Valid password: remember the session via a signed cookie.
                self.set_signed_cookie(
                    self.settings["auth_cookie_name"](),
                    self.AUTH_COOKIE_VALUE,
                    expires_days=400,
                    httponly=True,
                    samesite="Strict",
                )
            return fn(self, *args, **kwargs)

        return wrapper

    def get_current_user(self) -> bool:
        return (
            self.get_signed_cookie(self.settings["auth_cookie_name"](), min_version=2)
            == self.AUTH_COOKIE_VALUE
        )
class RequestHandler(AuthRequestHandler):
    """Base class for mitmweb's HTTP API handlers: CSRF protection, JSON
    helpers, security headers, and flow lookup."""

    application: Application

    def prepare(self):
        # CSRF protection: reject state-changing cross-site requests based on
        # the Sec-Fetch-Site header sent by modern browsers.
        if (
            self.request.method not in ("GET", "HEAD", "OPTIONS")
            and "Sec-Fetch-Site" in self.request.headers
            and self.request.headers["Sec-Fetch-Site"] not in ("same-origin", "none")
        ):
            # Fix: raise tornado.web.HTTPError so tornado renders a proper 403
            # response. tornado.httpclient.HTTPError is the HTTP *client*
            # exception; tornado.web does not catch it, so it would surface as
            # an unhandled error (HTTP 500 with a stack trace) instead.
            raise tornado.web.HTTPError(403)

    def write(self, chunk: str | bytes | dict | list):
        # Writing arrays on the top level is ok nowadays.
        # http://flask.pocoo.org/docs/0.11/security/#json-security
        if isinstance(chunk, list):
            chunk = tornado.escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        super().write(chunk)

    def set_default_headers(self):
        super().set_default_headers()
        self.set_header("Server", version.MITMPROXY)
        self.set_header("X-Frame-Options", "DENY")
        self.add_header("X-XSS-Protection", "1; mode=block")
        self.add_header("X-Content-Type-Options", "nosniff")
        self.add_header(
            "Content-Security-Policy",
            "default-src 'self'; "
            "connect-src 'self' ws:; "
            "img-src 'self' data:; "
            "style-src 'self' 'unsafe-inline'",
        )

    @property
    def json(self):
        """The request body parsed as JSON. Raises APIError on bad input."""
        if not self.request.headers.get("Content-Type", "").startswith(
            "application/json"
        ):
            raise APIError(400, "Invalid Content-Type, expected application/json.")
        try:
            return json.loads(self.request.body.decode())
        except Exception as e:
            raise APIError(400, f"Malformed JSON: {e}")

    @property
    def filecontents(self):
        """
        Accept either a multipart/form file upload or just take the plain request body.
        """
        if self.request.files:
            return next(iter(self.request.files.values()))[0].body
        else:
            return self.request.body

    @property
    def view(self) -> mitmproxy.addons.view.View:
        return self.application.master.view

    @property
    def master(self) -> mitmproxy.tools.web.master.WebMaster:
        return self.application.master

    @property
    def flow(self) -> mitmproxy.flow.Flow:
        """The flow addressed by the `flow_id` path argument (404 if absent)."""
        flow_id = str(self.path_kwargs["flow_id"])
        # FIXME: Add a facility to addon.view to safely access the store
        flow = self.view.get_by_id(flow_id)
        if flow:
            return flow
        else:
            raise APIError(404, "Flow not found.")

    def write_error(self, status_code: int, **kwargs):
        # Render APIError messages as plain text instead of tornado's default
        # HTML error page.
        if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
            self.finish(kwargs["exc_info"][1].log_message)
        else:
            super().write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
    """Serves the single-page app; shows the login form on auth failure."""

    def auth_fail(self, invalid_password: bool) -> None:
        self.render("login.html", invalid_password=invalid_password)

    def get(self):
        self.render("../index.html")

    post = get  # login form
class FilterHelp(RequestHandler):
    """GET: expose the flow filter syntax help as JSON."""

    def get(self):
        self.write({"commands": flowfilter.help})
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler, AuthRequestHandler):
    """Base class for websocket handlers that broadcast messages to all
    connected clients through a per-connection send queue."""

    # raise an error if inherited class doesn't specify its own instance.
    connections: ClassVar[set[WebSocketEventBroadcaster]]

    _send_queue: asyncio.Queue[bytes]
    _send_task: asyncio.Task[None]

    def prepare(self) -> Optional[Awaitable[None]]:
        token = self.xsrf_token  # https://github.com/tornadoweb/tornado/issues/645
        assert token
        return None

    def open(self, *args, **kwargs):
        self.connections.add(self)
        self._send_queue = asyncio.Queue()
        # Python 3.13+: use _send_queue.shutdown() and we can use keep_ref=True here.
        self._send_task = asyncio_utils.create_task(
            self.send_task(),
            name="WebSocket send task",
            keep_ref=False,
        )

    def on_close(self):
        self.connections.discard(self)
        self._send_task.cancel()

    @classmethod
    def broadcast(cls, **kwargs):
        # Serialize once, then enqueue for every connection.
        message = cls._json_dumps(kwargs)
        for conn in cls.connections:
            conn.send(message)

    def send(self, message: bytes):
        self._send_queue.put_nowait(message)

    async def send_task(self):
        """Drain this connection's send queue until the socket closes."""
        while True:
            message = await self._send_queue.get()
            try:
                await self.write_message(message)
            except tornado.websocket.WebSocketClosedError:
                self.on_close()

    @staticmethod
    def _json_dumps(d):
        # surrogateescape tolerates non-UTF8 bytes smuggled through str fields.
        return json.dumps(d, ensure_ascii=False).encode("utf8", "surrogateescape")
class ClientConnection(WebSocketEventBroadcaster):
    """Websocket connection of a single mitmweb UI client, tracking the
    client's active flow filters."""

    connections: ClassVar[set[ClientConnection]] = set()  # type: ignore
    application: Application

    def __init__(self, application: Application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self.filters: dict[str, flowfilter.TFilter] = {}  # filters per connection

    @classmethod
    def broadcast_flow_reset(cls) -> None:
        for conn in cls.connections:
            conn.send(cls._json_dumps({"type": "flows/reset"}))
            # Re-evaluate every active filter against the fresh view.
            for name, expr in conn.filters.copy().items():
                conn.update_filter(name, expr.pattern)

    @classmethod
    def broadcast_flow(
        cls,
        type: Literal["flows/add", "flows/update"],
        f: mitmproxy.flow.Flow,
    ) -> None:
        flow_json = flow_to_json(f)
        for conn in cls.connections:
            conn._broadcast_flow(type, f, flow_json)

    def _broadcast_flow(
        self,
        type: Literal["flows/add", "flows/update"],
        f: mitmproxy.flow.Flow,
        flow_json: dict,  # Passing the flow_json dictionary to avoid recalculating it for each client
    ) -> None:
        # Tell the client which of its filters this flow matches.
        filters = {name: bool(expr(f)) for name, expr in self.filters.items()}
        message = self._json_dumps(
            {
                "type": type,
                "payload": {
                    "flow": flow_json,
                    "matching_filters": filters,
                },
            },
        )
        self.send(message)

    def update_filter(self, name: str, expr: str) -> None:
        """Set (or clear, if *expr* is empty) a named filter and report the
        currently matching flow ids back to the client."""
        if expr:
            filt = flowfilter.parse(expr)
            self.filters[name] = filt
            matching_flow_ids = [f.id for f in self.application.master.view if filt(f)]
        else:
            self.filters.pop(name, None)
            matching_flow_ids = None
        message = self._json_dumps(
            {
                "type": "flows/filterUpdate",
                "payload": {
                    "name": name,
                    "matching_flow_ids": matching_flow_ids,
                },
            },
        )
        self.send(message=message)

    async def on_message(self, message: str | bytes):
        try:
            data = json.loads(message)
            match data["type"]:
                case "flows/updateFilter":
                    self.update_filter(data["payload"]["name"], data["payload"]["expr"])
                case other:
                    raise ValueError(f"Unsupported command: {other}")
        except Exception as e:
            logger.error(f"Error processing message from {self}: {e}")
            # 1011 = internal server error per RFC 6455.
            self.close(code=1011, reason="Internal server error.")
class Flows(RequestHandler):
    """GET: dump all flows in the view as JSON."""

    def get(self):
        self.write([flow_to_json(f) for f in self.view])
class DumpFlows(RequestHandler):
    """GET: download flows (optionally filtered) in mitmproxy's dump format.
    POST: replace the current view with flows from an uploaded dump."""

    def get(self) -> None:
        self.set_header("Content-Disposition", "attachment; filename=flows")
        self.set_header("Content-Type", "application/octet-stream")

        match: Callable[[mitmproxy.flow.Flow], bool]
        try:
            match = flowfilter.parse(self.request.arguments["filter"][0].decode())
        except ValueError:  # thrown by flowfilter.parse if filter is invalid
            # Fix: removed stray f-prefix from a string without placeholders.
            raise APIError(400, "Invalid filter argument / regex")
        except (
            KeyError,
            IndexError,
        ):  # Key+Index: ["filter"][0] can fail, if it's not set
            # No filter given: include every flow.
            def match(_) -> bool:
                return True

        with BytesIO() as bio:
            fw = io.FlowWriter(bio)
            for f in self.view:
                if match(f):
                    fw.add(f)
            self.write(bio.getvalue())

    async def post(self):
        self.view.clear()
        bio = BytesIO(self.filecontents)
        for f in io.FlowReader(bio).stream():
            await self.master.load_flow(f)
        bio.close()
class ClearAll(RequestHandler):
    """POST: drop all captured flows and event log entries."""

    def post(self):
        self.view.clear()
        self.master.events.clear()
class ResumeFlows(RequestHandler):
    """POST: resume every currently intercepted flow."""

    def post(self):
        for f in self.view:
            if f.intercepted:
                f.resume()
                self.view.update([f])
class KillFlows(RequestHandler):
    """POST: abort every flow that can still be killed."""

    def post(self):
        for f in self.view:
            if not f.killable:
                continue
            f.kill()
            self.view.update([f])
class ResumeFlow(RequestHandler):
    """POST: resume a single intercepted flow."""

    def post(self, flow_id):
        self.flow.resume()
        self.view.update([self.flow])
class KillFlow(RequestHandler):
    """POST: abort a single flow if it can still be killed."""

    def post(self, flow_id):
        if self.flow.killable:
            self.flow.kill()
            self.view.update([self.flow])
class FlowHandler(RequestHandler):
    """DELETE: remove a flow. PUT: apply a partial JSON update to a flow."""

    def delete(self, flow_id):
        # Abort the live connection (if any) before removing the flow.
        if self.flow.killable:
            self.flow.kill()
        self.view.remove([self.flow])

    def put(self, flow_id) -> None:
        """Update request/response fields, marked state, or comment from the
        JSON request body; reverts all changes if any part is invalid."""
        flow: mitmproxy.flow.Flow = self.flow
        flow.backup()
        try:
            for a, b in self.json.items():
                if a == "request" and hasattr(flow, "request"):
                    request: mitmproxy.http.Request = flow.request
                    for k, v in b.items():
                        if k in ["method", "scheme", "host", "path", "http_version"]:
                            setattr(request, k, str(v))
                        elif k == "port":
                            request.port = int(v)
                        elif k == "headers":
                            request.headers.clear()
                            for header in v:
                                request.headers.add(*header)
                        elif k == "trailers":
                            if request.trailers is not None:
                                request.trailers.clear()
                            else:
                                request.trailers = mitmproxy.http.Headers()
                            for trailer in v:
                                request.trailers.add(*trailer)
                        elif k == "content":
                            request.text = v
                        else:
                            raise APIError(400, f"Unknown update request.{k}: {v}")
                elif a == "response" and hasattr(flow, "response"):
                    response: mitmproxy.http.Response = flow.response
                    for k, v in b.items():
                        if k in ["msg", "http_version"]:
                            setattr(response, k, str(v))
                        elif k == "code":
                            response.status_code = int(v)
                        elif k == "headers":
                            response.headers.clear()
                            for header in v:
                                response.headers.add(*header)
                        elif k == "trailers":
                            if response.trailers is not None:
                                response.trailers.clear()
                            else:
                                response.trailers = mitmproxy.http.Headers()
                            for trailer in v:
                                response.trailers.add(*trailer)
                        elif k == "content":
                            response.text = v
                        else:
                            raise APIError(400, f"Unknown update response.{k}: {v}")
                elif a == "marked":
                    flow.marked = b
                elif a == "comment":
                    flow.comment = b
                else:
                    raise APIError(400, f"Unknown update {a}: {b}")
        except APIError:
            # Atomic update: roll everything back on any invalid field.
            flow.revert()
            raise
        self.view.update([flow])
class DuplicateFlow(RequestHandler):
    """Create a copy of a flow and return the new flow's id."""

    def post(self, flow_id):
        duplicate = self.flow.copy()
        self.view.add([duplicate])
        self.write(duplicate.id)
class RevertFlow(RequestHandler):
    """Revert a flow to its last backed-up state, if it was modified."""

    def post(self, flow_id):
        target = self.flow
        if target.modified():
            target.revert()
            self.view.update([target])
class ReplayFlow(RequestHandler):
    """Trigger a client-side replay of the given flow."""

    def post(self, flow_id):
        flows = [self.flow]
        self.master.commands.call("replay.client", flows)
class FlowContent(RequestHandler):
    """Upload (POST) or download (GET) the raw content of a flow message."""

    def post(self, flow_id, message):
        # Replace the message body with the uploaded file contents.
        self.flow.backup()
        message = getattr(self.flow, message)
        message.content = self.filecontents
        self.view.update([self.flow])

    def get(self, flow_id, message):
        """Serve the message body as a file download.

        The filename is taken from the message's Content-Disposition header if
        one matches, otherwise derived from the request path; it is sanitized
        before being echoed back in our own Content-Disposition header.
        """
        message = getattr(self.flow, message)

        assert isinstance(self.flow, HTTPFlow)

        original_cd = message.headers.get("Content-Disposition", None)
        filename = None
        if original_cd:
            if m := re.search(r'filename=([-\w" .()]+)', original_cd):
                filename = m.group(1)
        if not filename:
            # Fall back to the last request path segment, query string stripped.
            filename = self.flow.request.path.split("?")[0].split("/")[-1]

        # Keep only characters from the same safe set used to match above.
        filename = re.sub(r'[^-\w" .()]', "", filename)
        cd = f"attachment; {filename=!s}"
        self.set_header("Content-Disposition", cd)
        self.set_header("Content-Type", "application/text")
        # Prevent content sniffing / framing of the served body.
        self.set_header("X-Content-Type-Options", "nosniff")
        self.set_header("X-Frame-Options", "DENY")
        self.write(message.get_content(strict=False))
class FlowContentView(RequestHandler):
    """Serve pretty-printed (content-view) renderings of flow messages."""

    def message_to_json(
        self,
        view_name: str,
        message: http.Message | TCPMessage | UDPMessage | WebSocketMessage,
        flow: HTTPFlow | TCPFlow | UDPFlow,
        max_lines: int | None = None,
        from_client: bool | None = None,
        timestamp: float | None = None,
    ):
        """Prettify a single message into a JSON-serializable dict.

        If max_lines is set, the rendered text is cut after that many lines.
        from_client/timestamp are only added when supplied (streamed messages).
        """
        # Normalize the case-insensitive "auto" view name.
        if view_name and view_name.lower() == "auto":
            view_name = "auto"
        pretty = contentviews.prettify_message(message, flow, view_name=view_name)
        if max_lines:
            pretty.text = cut_after_n_lines(pretty.text, max_lines)
        ret: dict[str, Any] = dict(
            text=pretty.text,
            view_name=pretty.view_name,
            syntax_highlight=pretty.syntax_highlight,
            description=pretty.description,
        )
        if from_client is not None:
            ret["from_client"] = from_client
        if timestamp is not None:
            ret["timestamp"] = timestamp
        return ret

    def get(self, flow_id, message, content_view) -> None:
        flow = self.flow
        assert isinstance(flow, (HTTPFlow, TCPFlow, UDPFlow))

        # Optional ?lines=N argument caps the total rendered output.
        if self.request.arguments.get("lines"):
            max_lines = int(self.request.arguments["lines"][0])
        else:
            max_lines = None

        if message == "messages":
            messages: list[TCPMessage] | list[UDPMessage] | list[WebSocketMessage]
            if isinstance(flow, HTTPFlow) and flow.websocket:
                messages = flow.websocket.messages
            elif isinstance(flow, (TCPFlow, UDPFlow)):
                messages = flow.messages
            else:
                # fixed: was an f-string without placeholders (ruff F541)
                raise APIError(400, "This flow has no messages.")
            msgs = []
            for m in messages:
                d = self.message_to_json(
                    view_name=content_view,
                    message=m,
                    flow=flow,
                    max_lines=max_lines,
                    from_client=m.from_client,
                    timestamp=m.timestamp,
                )
                msgs.append(d)
                if max_lines:
                    # Spread the remaining line budget over successive
                    # messages and stop once it is exhausted.
                    max_lines -= d["text"].count("\n") + 1
                    assert max_lines is not None
                    if max_lines <= 0:
                        break
            self.write(msgs)
        else:
            message = getattr(self.flow, message)
            self.write(self.message_to_json(content_view, message, flow, max_lines))
class Commands(RequestHandler):
    """Serve a JSON description of every registered mitmproxy command."""

    def get(self) -> None:
        available = {}
        for name, cmd in self.master.commands.commands.items():
            params = [
                {
                    "name": p.name,
                    "type": command.typename(p.type),
                    "kind": str(p.kind),
                }
                for p in cmd.parameters
            ]
            if cmd.return_type:
                ret_type = command.typename(cmd.return_type)
            else:
                ret_type = None
            available[name] = {
                "help": cmd.help,
                "parameters": params,
                "return_type": ret_type,
                "signature_help": cmd.signature_help(),
            }
        self.write(available)
class ExecuteCommand(RequestHandler):
    """Invoke a single mitmproxy command with string arguments."""

    def post(self, cmd: str):
        # TODO: We should parse query strings here, this API is painful.
        try:
            args = self.json["arguments"]
        except APIError:
            # No JSON body / no "arguments" key: call without arguments.
            args = []
        try:
            result = self.master.commands.call_strings(cmd, args)
        except Exception as e:
            self.write({"error": str(e)})
            return
        self.write(
            {
                "value": result,
                # "type": command.typename(type(result)) if result is not None else "none"
            }
        )
class Events(RequestHandler):
    """Return the buffered proxy event log as JSON."""

    def get(self):
        entries = self.master.events.data
        self.write([logentry_to_json(entry) for entry in entries])
class Options(RequestHandler):
    """Read (GET) or update (PUT) the mitmproxy option store."""

    def get(self):
        self.write(optmanager.dump_dicts(self.master.options))

    def put(self):
        try:
            self.master.options.update(**self.json)
        except Exception as err:
            # Surface option-validation failures as a client error.
            raise APIError(400, f"{err}")
class SaveOptions(RequestHandler):
    """Placeholder endpoint: persisting options server-side is currently disabled."""

    def post(self):
        # try:
        #     optmanager.save(self.master.options, CONFIG_PATH, True)
        # except Exception as err:
        #     raise APIError(400, "{}".format(err))
        pass
class State(RequestHandler):
# Separate method for testability.
@staticmethod
def get_json(master: mitmproxy.tools.web.master.WebMaster):
return {
"version": version.VERSION,
"contentViews": [
v for v in contentviews.registry.available_views() if v != "query"
],
"servers": {
s.mode.full_spec: s.to_json() for s in master.proxyserver.servers
},
"platform": sys.platform,
"localModeUnavailable": mitmproxy_rs.local.LocalRedirector.unavailable_reason(),
}
def get(self):
self.write(State.get_json(self.master))
class ProcessList(RequestHandler):
    """List currently running executables (for local-mode process pickers)."""

    @staticmethod
    def get_json():
        return [
            {
                "is_visible": proc.is_visible,
                "executable": str(proc.executable),
                "is_system": proc.is_system,
                "display_name": proc.display_name,
            }
            for proc in mitmproxy_rs.process_info.active_executables()
        ]

    def get(self):
        self.write(ProcessList.get_json())
class ProcessImage(RequestHandler):
    """Serve the icon of an executable, falling back to a transparent PNG."""

    def get(self):
        path = self.get_query_argument("path", None)
        if not path:
            raise APIError(400, "Missing 'path' parameter.")
        try:
            icon = mitmproxy_rs.process_info.executable_icon(path)
        except Exception:
            # Icon extraction is best-effort; serve a placeholder on failure.
            icon = TRANSPARENT_PNG
        self.set_header("Content-Type", "image/png")
        self.set_header("X-Content-Type-Options", "nosniff")
        self.set_header("Cache-Control", "max-age=604800")
        self.write(icon)
class GZipContentAndFlowFiles(tornado.web.GZipContentEncoding):
    """gzip transform that additionally compresses raw flow content downloads,
    which are served as application/octet-stream."""

    CONTENT_TYPES = {
        "application/octet-stream",
        *tornado.web.GZipContentEncoding.CONTENT_TYPES,
    }
# URL routing table for the mitmweb API. Tornado matches patterns in order,
# and named groups are passed to handler methods as keyword arguments.
handlers = [
    (r"/", IndexHandler),
    (r"/filter-help(?:\.json)?", FilterHelp),
    (r"/updates", ClientConnection),
    (r"/commands(?:\.json)?", Commands),
    (r"/commands/(?P<cmd>[a-z.]+)", ExecuteCommand),
    (r"/events(?:\.json)?", Events),
    (r"/flows(?:\.json)?", Flows),
    (r"/flows/dump", DumpFlows),
    (r"/flows/resume", ResumeFlows),
    (r"/flows/kill", KillFlows),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/resume", ResumeFlow),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/kill", KillFlow),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response|messages)/content.data", FlowContent),
    (r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response|messages)/content/(?P<content_view>[0-9a-zA-Z\-\_%]+)(?:\.json)?", FlowContentView),
    (r"/clear", ClearAll),
    (r"/options(?:\.json)?", Options),
    (r"/options/save", SaveOptions),
    (r"/state(?:\.json)?", State),
    (r"/processes", ProcessList),
    (r"/executable-icon", ProcessImage),
]  # fmt: skip
class Application(tornado.web.Application):
    """The mitmweb tornado application: routes, templates, auth and XSRF setup."""

    master: mitmproxy.tools.web.master.WebMaster

    def __init__(
        self, master: mitmproxy.tools.web.master.WebMaster, debug: bool
    ) -> None:
        self.master = master
        auth_addon: WebAuth = master.addons.get("webauth")
        super().__init__(
            handlers=handlers,  # type: ignore # https://github.com/tornadoweb/tornado/pull/3455
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            xsrf_cookies=True,
            xsrf_cookie_kwargs=dict(samesite="Strict"),
            # Fresh random secret per startup; signed cookies do not survive
            # a restart.
            cookie_secret=secrets.token_bytes(32),
            debug=debug,
            autoreload=False,
            transforms=[GZipContentAndFlowFiles],
            # Custom settings read back by the webauth addon's handlers —
            # TODO confirm against WebAuth.
            is_valid_password=auth_addon.is_valid_password,
            auth_cookie_name=auth_addon.auth_cookie_name,
            compiled_template_cache=False,  # Vite
        )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/tools/web/static_viewer.py | mitmproxy/tools/web/static_viewer.py | import json
import logging
import os.path
import pathlib
import shutil
import time
from collections.abc import Iterable
from typing import Optional
from mitmproxy import contentviews
from mitmproxy import ctx
from mitmproxy import flow
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import version
from mitmproxy.tools.web.app import flow_to_json
web_dir = pathlib.Path(__file__).absolute().parent
def save_static(path: pathlib.Path) -> None:
    """
    Save the files for the static web view.
    """
    static_dir = path / "static"
    # We want to overwrite the static files to keep track of the update.
    if static_dir.exists():
        shutil.rmtree(str(static_dir))
    shutil.copytree(str(web_dir / "static"), str(static_dir))
    shutil.copyfile(str(web_dir / "index.html"), str(path / "index.html"))
    # Marker read by the frontend to switch into static (read-only) mode.
    (static_dir / "static.js").write_text("MITMWEB_STATIC = true;")
def save_filter_help(path: pathlib.Path) -> None:
    """Write the flow-filter syntax help to ``filter-help.json``."""
    payload = {"commands": flowfilter.help}
    with (path / "filter-help.json").open("w") as f:
        json.dump(payload, f)
def save_settings(path: pathlib.Path) -> None:
    """Write the mitmproxy version to ``settings.json``."""
    payload = {"version": version.VERSION}
    with (path / "settings.json").open("w") as f:
        json.dump(payload, f)
def save_flows(path: pathlib.Path, flows: Iterable[flow.Flow]) -> None:
    """Serialize all flows to ``flows.json`` in the mitmweb JSON format."""
    serialized = [flow_to_json(f) for f in flows]
    with (path / "flows.json").open("w") as f:
        json.dump(serialized, f)
def save_flows_content(path: pathlib.Path, flows: Iterable[flow.Flow]) -> None:
    """Write raw and prettified request/response bodies for each flow.

    The directory layout mirrors the mitmweb API:
    ``flows/<id>/<message>/content.data`` holds the raw body and
    ``flows/<id>/<message>/content/Auto.json`` the rendered content view.
    """
    for f in flows:
        assert isinstance(f, http.HTTPFlow)
        for m in ("request", "response"):
            message = getattr(f, m)
            message_path = path / "flows" / f.id / m
            os.makedirs(str(message_path / "content"), exist_ok=True)
            with open(str(message_path / "content.data"), "wb") as content_file:
                # don't use raw_content here as this is served with a default content type
                if message:
                    content_file.write(message.content)
                else:
                    content_file.write(b"No content.")

            # content_view
            t = time.time()  # used below to log slow renders
            if message:
                pretty = contentviews.prettify_message(
                    message=message,
                    flow=f,
                )
            else:
                # Placeholder result for flows without this message part.
                pretty = contentviews.ContentviewResult(
                    text="No content.",
                    syntax_highlight="none",
                    view_name="/",
                    description="",
                )
            if time.time() - t > 0.1:
                logging.info(
                    f"Slow content view: {pretty.view_name} took {round(time.time() - t, 1)}s",
                )
            with (message_path / "content" / "Auto.json").open(
                "w"
            ) as content_view_file:
                json.dump(
                    dict(
                        text=pretty.text,
                        syntax_highlight=pretty.syntax_highlight,
                        view_name=pretty.view_name,
                        description=pretty.description,
                    ),
                    content_view_file,
                )
class StaticViewer:
    """Addon that exports a self-contained, static mitmweb snapshot."""

    # TODO: make this a command at some point.

    def load(self, loader):
        loader.add_option(
            "web_static_viewer",
            Optional[str],
            "",
            "The path to output a static viewer.",
        )

    def configure(self, updated):
        # Export once whenever the option is (re)set to a non-empty path.
        if "web_static_viewer" in updated and ctx.options.web_static_viewer:
            flows = io.read_flows_from_paths([ctx.options.rfile])
            p = pathlib.Path(ctx.options.web_static_viewer).expanduser()
            self.export(p, flows)

    def export(self, path: pathlib.Path, flows: Iterable[flow.Flow]) -> None:
        """Write static assets, filter help, the flow index and flow contents."""
        save_static(path)
        save_filter_help(path)
        save_flows(path, flows)
        save_flows_content(path, flows)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layer.py | mitmproxy/proxy/layer.py | """
Base class for protocol layers.
"""
import collections
import textwrap
from abc import abstractmethod
from collections.abc import Callable
from collections.abc import Generator
from dataclasses import dataclass
from logging import DEBUG
from typing import Any
from typing import ClassVar
from typing import NamedTuple
from typing import TypeVar
from mitmproxy.connection import Connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy.commands import Command
from mitmproxy.proxy.commands import StartHook
from mitmproxy.proxy.context import Context
T = TypeVar("T")
CommandGenerator = Generator[Command, Any, T]
"""
A function annotated with CommandGenerator[bool] may yield commands and ultimately return a boolean value.
"""
MAX_LOG_STATEMENT_SIZE = 2048
"""Maximum size of individual log statements before they will be truncated."""
class Paused(NamedTuple):
    """
    State of a layer that's paused because it is waiting for a command reply.
    """

    command: commands.Command  # the blocking command whose reply we await
    generator: CommandGenerator  # suspended _handle_event generator to resume
class Layer:
"""
The base class for all protocol layers.
Layers interface with their child layer(s) by calling .handle_event(event),
which returns a list (more precisely: a generator) of commands.
Most layers do not implement .directly, but instead implement ._handle_event, which
is called by the default implementation of .handle_event.
The default implementation of .handle_event allows layers to emulate blocking code:
When ._handle_event yields a command that has its blocking attribute set to True, .handle_event pauses
the execution of ._handle_event and waits until it is called with the corresponding CommandCompleted event.
All events encountered in the meantime are buffered and replayed after execution is resumed.
The result is code that looks like blocking code, but is not blocking:
def _handle_event(self, event):
err = yield OpenConnection(server) # execution continues here after a connection has been established.
Technically this is very similar to how coroutines are implemented.
"""
__last_debug_message: ClassVar[str] = ""
context: Context
_paused: Paused | None
"""
If execution is currently paused, this attribute stores the paused coroutine
and the command for which we are expecting a reply.
"""
_paused_event_queue: collections.deque[events.Event]
"""
All events that have occurred since execution was paused.
These will be replayed to ._child_layer once we resume.
"""
debug: str | None = None
"""
Enable debug logging by assigning a prefix string for log messages.
Different amounts of whitespace for different layers work well.
"""
def __init__(self, context: Context) -> None:
self.context = context
self.context.layers.append(self)
self._paused = None
self._paused_event_queue = collections.deque()
show_debug_output = getattr(context.options, "proxy_debug", False)
if show_debug_output: # pragma: no cover
self.debug = " " * len(context.layers)
def __repr__(self):
statefun = getattr(self, "state", self._handle_event)
state = getattr(statefun, "__name__", "")
state = state.replace("state_", "")
if state == "_handle_event":
state = ""
else:
state = f"state: {state}"
return f"{type(self).__name__}({state})"
def __debug(self, message):
"""yield a Log command indicating what message is passing through this layer."""
if len(message) > MAX_LOG_STATEMENT_SIZE:
message = message[:MAX_LOG_STATEMENT_SIZE] + "…"
if Layer.__last_debug_message == message:
message = message.split("\n", 1)[0].strip()
if len(message) > 256:
message = message[:256] + "…"
else:
Layer.__last_debug_message = message
assert self.debug is not None
return commands.Log(textwrap.indent(message, self.debug), DEBUG)
@property
def stack_pos(self) -> str:
"""repr() for this layer and all its parent layers, only useful for debugging."""
try:
idx = self.context.layers.index(self)
except ValueError:
return repr(self)
else:
return " >> ".join(repr(x) for x in self.context.layers[: idx + 1])
@abstractmethod
def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
"""Handle a proxy server event"""
yield from () # pragma: no cover
def handle_event(self, event: events.Event) -> CommandGenerator[None]:
if self._paused:
# did we just receive the reply we were waiting for?
pause_finished = (
isinstance(event, events.CommandCompleted)
and event.command is self._paused.command
)
if self.debug is not None:
yield self.__debug(f"{'>>' if pause_finished else '>!'} {event}")
if pause_finished:
assert isinstance(event, events.CommandCompleted)
yield from self.__continue(event)
else:
self._paused_event_queue.append(event)
else:
if self.debug is not None:
yield self.__debug(f">> {event}")
command_generator = self._handle_event(event)
send = None
# inlined copy of __process to reduce call stack.
# <✂✂✂>
try:
# Run ._handle_event to the next yield statement.
# If you are not familiar with generators and their .send() method,
# https://stackoverflow.com/a/12638313/934719 has a good explanation.
command = command_generator.send(send)
except StopIteration:
return
while True:
if self.debug is not None:
if not isinstance(command, commands.Log):
yield self.__debug(f"<< {command}")
if command.blocking is True:
# We only want this layer to block, the outer layers should not block.
# For example, take an HTTP/2 connection: If we intercept one particular request,
# we don't want all other requests in the connection to be blocked a well.
# We signal to outer layers that this command is already handled by assigning our layer to
# `.blocking` here (upper layers explicitly check for `is True`).
command.blocking = self
self._paused = Paused(
command,
command_generator,
)
yield command
return
else:
yield command
try:
command = next(command_generator)
except StopIteration:
return
# </✂✂✂>
def __process(self, command_generator: CommandGenerator, send=None):
"""
Yield commands from a generator.
If a command is blocking, execution is paused and this function returns without
processing any further commands.
"""
try:
# Run ._handle_event to the next yield statement.
# If you are not familiar with generators and their .send() method,
# https://stackoverflow.com/a/12638313/934719 has a good explanation.
command = command_generator.send(send)
except StopIteration:
return
while True:
if self.debug is not None:
if not isinstance(command, commands.Log):
yield self.__debug(f"<< {command}")
if command.blocking is True:
# We only want this layer to block, the outer layers should not block.
# For example, take an HTTP/2 connection: If we intercept one particular request,
# we don't want all other requests in the connection to be blocked a well.
# We signal to outer layers that this command is already handled by assigning our layer to
# `.blocking` here (upper layers explicitly check for `is True`).
command.blocking = self
self._paused = Paused(
command,
command_generator,
)
yield command
return
else:
yield command
try:
command = next(command_generator)
except StopIteration:
return
def __continue(self, event: events.CommandCompleted):
"""
Continue processing events after being paused.
The tricky part here is that events in the event queue may trigger commands which again pause the execution,
so we may not be able to process the entire queue.
"""
assert self._paused is not None
command_generator = self._paused.generator
self._paused = None
yield from self.__process(command_generator, event.reply)
while not self._paused and self._paused_event_queue:
ev = self._paused_event_queue.popleft()
if self.debug is not None:
yield self.__debug(f"!> {ev}")
command_generator = self._handle_event(ev)
yield from self.__process(command_generator)
mevents = (
events # alias here because autocomplete above should not have aliased version.
)
class NextLayer(Layer):
layer: Layer | None
"""The next layer. To be set by an addon."""
events: list[mevents.Event]
"""All events that happened before a decision was made."""
_ask_on_start: bool
def __init__(self, context: Context, ask_on_start: bool = False) -> None:
super().__init__(context)
self.context.layers.remove(self)
self.layer = None
self.events = []
self._ask_on_start = ask_on_start
self._handle: Callable[[mevents.Event], CommandGenerator[None]] | None = None
def __repr__(self):
return f"NextLayer:{self.layer!r}"
def handle_event(self, event: mevents.Event):
if self._handle is not None:
yield from self._handle(event)
else:
yield from super().handle_event(event)
def _handle_event(self, event: mevents.Event):
self.events.append(event)
# We receive new data. Let's find out if we can determine the next layer now?
if self._ask_on_start and isinstance(event, events.Start):
yield from self._ask()
elif (
isinstance(event, mevents.ConnectionClosed)
and event.connection == self.context.client
):
# If we have not determined the next protocol yet and the client already closes the connection,
# we abort everything.
yield commands.CloseConnection(self.context.client)
elif isinstance(event, mevents.DataReceived):
# For now, we only ask if we have received new data to reduce hook noise.
yield from self._ask()
def _ask(self):
"""
Manually trigger a next_layer hook.
The only use at the moment is to make sure that the top layer is initialized.
"""
yield NextLayerHook(self)
# Has an addon decided on the next layer yet?
if self.layer:
if self.debug:
yield commands.Log(f"{self.debug}[nextlayer] {self.layer!r}", DEBUG)
for e in self.events:
yield from self.layer.handle_event(e)
self.events.clear()
# Why do we need three assignments here?
# 1. When this function here is invoked we may have paused events. Those should be
# forwarded to the sublayer right away, so we reassign ._handle_event.
# 2. This layer is not needed anymore, so we directly reassign .handle_event.
# 3. Some layers may however still have a reference to the old .handle_event.
# ._handle is just an optimization to reduce the callstack in these cases.
self.handle_event = self.layer.handle_event # type: ignore
self._handle_event = self.layer.handle_event # type: ignore
self._handle = self.layer.handle_event
# Utility methods for whoever decides what the next layer is going to be.
def data_client(self):
return self._data(self.context.client)
def data_server(self):
return self._data(self.context.server)
def _data(self, connection: Connection):
data = (
e.data
for e in self.events
if isinstance(e, mevents.DataReceived) and e.connection == connection
)
return b"".join(data)
@dataclass
class NextLayerHook(StartHook):
    """
    Network layers are being switched. You may change which layer will be used by setting data.layer.

    (by default, this is done by mitmproxy.addons.NextLayer)
    """

    # The NextLayer instance whose .layer attribute addons may assign.
    data: NextLayer
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/server_hooks.py | mitmproxy/proxy/server_hooks.py | from dataclasses import dataclass
from . import commands
from mitmproxy import connection
@dataclass
class ClientConnectedHook(commands.StartHook):
"""
A client has connected to mitmproxy. Note that a connection can
correspond to multiple HTTP requests.
Setting client.error kills the connection.
"""
client: connection.Client
@dataclass
class ClientDisconnectedHook(commands.StartHook):
"""
A client connection has been closed (either by us or the client).
"""
client: connection.Client
@dataclass
class ServerConnectionHookData:
"""Event data for server connection event hooks."""
server: connection.Server
"""The server connection this hook is about."""
client: connection.Client
"""The client on the other end."""
@dataclass
class ServerConnectHook(commands.StartHook):
"""
Mitmproxy is about to connect to a server.
Note that a connection can correspond to multiple requests.
Setting data.server.error kills the connection.
"""
data: ServerConnectionHookData
@dataclass
class ServerConnectedHook(commands.StartHook):
"""
Mitmproxy has connected to a server.
"""
data: ServerConnectionHookData
@dataclass
class ServerDisconnectedHook(commands.StartHook):
"""
A server connection has been closed (either by us or the server).
"""
data: ServerConnectionHookData
@dataclass
class ServerConnectErrorHook(commands.StartHook):
"""
Mitmproxy failed to connect to a server.
Every server connection will receive either a server_connected or a server_connect_error event, but not both.
"""
data: ServerConnectionHookData
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/tunnel.py | mitmproxy/proxy/tunnel.py | import time
from enum import auto
from enum import Enum
from typing import Union
from mitmproxy import connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.layer import Layer
class TunnelState(Enum):
    """Handshake lifecycle of a TunnelLayer."""

    INACTIVE = auto()  # no handshake started yet
    ESTABLISHING = auto()  # handshake in progress
    OPEN = auto()  # handshake completed successfully
    CLOSED = auto()  # connection closed (also set after a failed handshake)
class TunnelLayer(layer.Layer):
"""
A specialized layer that simplifies the implementation of tunneling protocols such as SOCKS, upstream HTTP proxies,
or TLS.
"""
child_layer: layer.Layer
tunnel_connection: connection.Connection
"""The 'outer' connection which provides the tunnel protocol I/O"""
conn: connection.Connection
"""The 'inner' connection which provides data I/O"""
tunnel_state: TunnelState = TunnelState.INACTIVE
command_to_reply_to: commands.OpenConnection | None = None
_event_queue: list[events.Event]
"""
If the connection already exists when we receive the start event,
we buffer commands until we have established the tunnel.
"""
def __init__(
self,
context: context.Context,
tunnel_connection: connection.Connection,
conn: connection.Connection,
):
super().__init__(context)
self.tunnel_connection = tunnel_connection
self.conn = conn
self.child_layer = layer.NextLayer(self.context)
self._event_queue = []
def __repr__(self):
return f"{type(self).__name__}({self.tunnel_state.name.lower()})"
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
if isinstance(event, events.Start):
if self.tunnel_connection.state is not connection.ConnectionState.CLOSED:
# we might be in the interesting state here where the connection is already half-closed,
# for example because next_layer buffered events and the client disconnected in the meantime.
# we still expect a close event to arrive, so we carry on here as normal for now.
self.tunnel_state = TunnelState.ESTABLISHING
yield from self.start_handshake()
yield from self.event_to_child(event)
elif (
isinstance(event, events.ConnectionEvent)
and event.connection == self.tunnel_connection
):
if isinstance(event, events.DataReceived):
if self.tunnel_state is TunnelState.ESTABLISHING:
done, err = yield from self.receive_handshake_data(event.data)
if done:
if self.conn != self.tunnel_connection:
self.conn.state = connection.ConnectionState.OPEN
self.conn.timestamp_start = time.time()
if err:
if self.conn != self.tunnel_connection:
self.conn.state = connection.ConnectionState.CLOSED
self.conn.timestamp_start = time.time()
yield from self.on_handshake_error(err)
if done or err:
yield from self._handshake_finished(err)
else:
yield from self.receive_data(event.data)
elif isinstance(event, events.ConnectionClosed):
if self.conn != self.tunnel_connection:
self.conn.state &= ~connection.ConnectionState.CAN_READ
self.conn.timestamp_end = time.time()
if self.tunnel_state is TunnelState.OPEN:
yield from self.receive_close()
elif self.tunnel_state is TunnelState.ESTABLISHING:
err = "connection closed"
yield from self.on_handshake_error(err)
yield from self._handshake_finished(err)
self.tunnel_state = TunnelState.CLOSED
else: # pragma: no cover
raise AssertionError(f"Unexpected event: {event}")
else:
yield from self.event_to_child(event)
def _handshake_finished(self, err: str | None) -> layer.CommandGenerator[None]:
if err:
self.tunnel_state = TunnelState.CLOSED
else:
self.tunnel_state = TunnelState.OPEN
if self.command_to_reply_to:
yield from self.event_to_child(
events.OpenConnectionCompleted(self.command_to_reply_to, err)
)
self.command_to_reply_to = None
else:
for evt in self._event_queue:
yield from self.event_to_child(evt)
self._event_queue.clear()
def _handle_command(
self, command: commands.Command
) -> layer.CommandGenerator[None]:
if (
isinstance(command, commands.ConnectionCommand)
and command.connection == self.conn
):
if isinstance(command, commands.SendData):
yield from self.send_data(command.data)
elif isinstance(command, commands.CloseConnection):
if self.conn != self.tunnel_connection:
self.conn.state &= ~connection.ConnectionState.CAN_WRITE
command.connection = self.tunnel_connection
yield from self.send_close(command)
elif isinstance(command, commands.OpenConnection):
# create our own OpenConnection command object that blocks here.
self.command_to_reply_to = command
self.tunnel_state = TunnelState.ESTABLISHING
err = yield commands.OpenConnection(self.tunnel_connection)
if err:
yield from self.event_to_child(
events.OpenConnectionCompleted(command, err)
)
self.tunnel_state = TunnelState.CLOSED
else:
yield from self.start_handshake()
else: # pragma: no cover
raise AssertionError(f"Unexpected command: {command}")
else:
yield command
def event_to_child(self, event: events.Event) -> layer.CommandGenerator[None]:
if (
self.tunnel_state is TunnelState.ESTABLISHING
and not self.command_to_reply_to
):
self._event_queue.append(event)
return
for command in self.child_layer.handle_event(event):
yield from self._handle_command(command)
def start_handshake(self) -> layer.CommandGenerator[None]:
yield from self._handle_event(events.DataReceived(self.tunnel_connection, b""))
def receive_handshake_data(
self, data: bytes
) -> layer.CommandGenerator[tuple[bool, str | None]]:
"""returns a (done, err) tuple"""
yield from ()
return True, None
def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
"""Called if either receive_handshake_data returns an error or we receive a close during handshake."""
yield commands.CloseConnection(self.tunnel_connection)
def receive_data(self, data: bytes) -> layer.CommandGenerator[None]:
yield from self.event_to_child(events.DataReceived(self.conn, data))
def receive_close(self) -> layer.CommandGenerator[None]:
yield from self.event_to_child(events.ConnectionClosed(self.conn))
def send_data(self, data: bytes) -> layer.CommandGenerator[None]:
yield commands.SendData(self.tunnel_connection, data)
def send_close(
self, command: commands.CloseConnection
) -> layer.CommandGenerator[None]:
yield command
class LayerStack:
    """Builder for chains of layers, composed left-to-right with ``/``."""

    def __init__(self) -> None:
        self._stack: list[Layer] = []

    def __getitem__(self, item: int) -> Layer:
        return self._stack[item]

    def __truediv__(self, other: Union[Layer, "LayerStack"]) -> "LayerStack":
        if isinstance(other, Layer):
            # Append one layer, wiring it up as the current tail's child.
            if self._stack:
                self._stack[-1].child_layer = other  # type: ignore
            self._stack.append(other)
        else:
            # Splice in a whole stack: its head becomes the tail's child.
            if self._stack:
                self._stack[-1].child_layer = other[0]  # type: ignore
            self._stack.extend(other._stack)
        return self
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/commands.py | mitmproxy/proxy/commands.py | """
Commands make it possible for layers to communicate with the "outer world",
e.g. to perform IO or to ask the master.
A command is issued by a proxy layer and is then passed upwards to the proxy server, and from there
possibly to the master and addons.
The counterpart to commands are events.
"""
import logging
import warnings
from typing import TYPE_CHECKING
from typing import Union
import mitmproxy.hooks
from mitmproxy.connection import Connection
from mitmproxy.connection import Server
if TYPE_CHECKING:
import mitmproxy.proxy.layer
class Command:
    """
    Base class for all commands
    """

    blocking: Union[bool, "mitmproxy.proxy.layer.Layer"] = False
    """
    Determines if the command blocks until it has been completed.

    For practical purposes, this attribute should be thought of as a boolean value,
    layers may swap out `True` with a reference to themselves to signal to outer layers
    that they do not need to block as well.

    Example:

        reply = yield Hook("requestheaders", flow)  # blocking command
        yield Log("hello world", "info")  # non-blocking
    """

    def __repr__(self):
        # Show instance attributes only; `blocking` is noise in command dumps.
        attrs = dict(self.__dict__)
        attrs.pop("blocking", None)
        return f"{type(self).__name__}({attrs!r})"
class RequestWakeup(Command):
    """
    Request a `Wakeup` event after the specified amount of seconds.
    """

    delay: float  # seconds to wait before the Wakeup event is delivered

    def __init__(self, delay: float):
        self.delay = delay
class ConnectionCommand(Command):
"""
Commands involving a specific connection
"""
connection: Connection
def __init__(self, connection: Connection):
self.connection = connection
class SendData(ConnectionCommand):
    """
    Transmit `data` to the remote peer of the given connection.
    """

    data: bytes

    def __init__(self, connection: Connection, data: bytes):
        super().__init__(connection)
        self.data = data

    def __repr__(self):
        # e.g. "client(...)" -> "client"
        conn_type = str(self.connection).split("(", 1)[0].lower()
        return f"SendData({conn_type}, {self.data!r})"
class OpenConnection(ConnectionCommand):
    """
    Open a new connection
    """

    connection: Server
    # Layers must wait for the connection attempt to finish; the completion
    # event (OpenConnectionCompleted) carries an error message or None.
    blocking = True


class CloseConnection(ConnectionCommand):
    """
    Close a connection. If the client connection is closed,
    all other connections will ultimately be closed during cleanup.
    """


class CloseTcpConnection(CloseConnection):
    half_close: bool
    """
    If True, only close our half of the connection by sending a FIN packet.
    This is required by some protocols which close their end to signal completion and then continue reading,
    for example HTTP/1.0 without Content-Length header.
    """

    def __init__(self, connection: Connection, half_close: bool = False):
        super().__init__(connection)
        self.half_close = half_close
class StartHook(Command, mitmproxy.hooks.Hook):
    """
    Start an event hook in the mitmproxy core.
    This triggers a particular function (derived from the class name) in all addons.
    """

    name = ""
    blocking = True

    def __new__(cls, *args, **kwargs):
        # StartHook itself is abstract: only concrete hook subclasses may be created.
        if cls is StartHook:
            raise TypeError("StartHook may not be instantiated directly.")
        return super().__new__(cls, *args, **kwargs)
class Log(Command):
    """
    Log a message.

    Layers could technically call `logging.log` directly; routing the message
    through a command instead lets playbook tests assert that a particular log
    line is a direct consequence of a particular I/O event. This could also be
    implemented with more playbook magic in the future, but for now we keep
    the fully sans-io approach.
    """

    message: str
    level: int

    def __init__(self, message: str, level: int = logging.INFO):
        if isinstance(level, str):  # pragma: no cover
            # Backwards compatibility: translate e.g. "info" -> logging.INFO.
            warnings.warn(
                "commands.Log() now expects an integer log level, not a string.",
                DeprecationWarning,
                stacklevel=2,
            )
            level = getattr(logging, level.upper())
        self.message = message
        self.level = level

    def __repr__(self):
        level_name = logging.getLevelName(self.level).lower()
        return f"Log({self.message!r}, {level_name})"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/events.py | mitmproxy/proxy/events.py | """
When IO actions occur at the proxy server, they are passed down to layers as events.
Events represent the only way for layers to receive new data from sockets.
The counterpart to events are commands.
"""
import typing
import warnings
from dataclasses import dataclass
from dataclasses import is_dataclass
from typing import Any
from typing import Generic
from typing import TypeVar
from mitmproxy import flow
from mitmproxy.connection import Connection
from mitmproxy.proxy import commands
class Event:
    """
    Common base class for everything the proxy server passes down to layers.
    """

    def __repr__(self):
        state = self.__dict__
        return f"{type(self).__name__}({state!r})"
class Start(Event):
    """
    Every layer initially receives a start event.
    This is useful to emit events on startup.
    """


@dataclass
class ConnectionEvent(Event):
    """
    Base class for all events involving connection IO.
    """

    connection: Connection
@dataclass
class DataReceived(ConnectionEvent):
    """
    The remote peer has sent us some data.
    """

    data: bytes

    def __repr__(self):
        # e.g. "Client" -> "client"
        peer = type(self.connection).__name__.lower()
        return f"DataReceived({peer}, {self.data!r})"
class ConnectionClosed(ConnectionEvent):
    """
    Remote has closed a connection.
    """


class CommandCompleted(Event):
    """
    Emitted when a command has been finished, e.g.
    when the master has replied or when we have established a server connection.

    Concrete subclasses are registered in `command_reply_subclasses` (below),
    keyed by the command type they complete.
    """

    command: commands.Command
    reply: Any

    def __new__(cls, *args, **kwargs):
        # CommandCompleted itself is abstract; only dataclass subclasses may be created.
        if cls is CommandCompleted:
            raise TypeError("CommandCompleted may not be instantiated directly.")
        assert is_dataclass(cls)
        return super().__new__(cls)

    def __init_subclass__(cls, **kwargs):
        # Each subclass must annotate `command` with a unique, concrete
        # commands.Command subclass; warn (don't crash) on violations.
        command_cls = typing.get_type_hints(cls).get("command", None)
        valid_command_subclass = (
            isinstance(command_cls, type)
            and issubclass(command_cls, commands.Command)
            and command_cls is not commands.Command
        )
        if not valid_command_subclass:
            warnings.warn(
                f"{cls} needs a properly annotated command attribute.",
                RuntimeWarning,
            )
        if command_cls in command_reply_subclasses:
            other = command_reply_subclasses[command_cls]
            warnings.warn(
                f"Two conflicting subclasses for {command_cls}: {cls} and {other}",
                RuntimeWarning,
            )
        command_reply_subclasses[command_cls] = cls  # type: ignore

    def __repr__(self):
        return f"Reply({self.command!r}, {self.reply!r})"


# Maps command types to the event type that signals their completion.
command_reply_subclasses: dict[type[commands.Command], type[CommandCompleted]] = {}
@dataclass(repr=False)
class OpenConnectionCompleted(CommandCompleted):
    command: commands.OpenConnection
    reply: str | None
    """error message, or None if the connection was established successfully"""


@dataclass(repr=False)
class HookCompleted(CommandCompleted):
    command: commands.StartHook
    reply: None = None


T = TypeVar("T")


@dataclass
class MessageInjected(Event, Generic[T]):
    """
    The user has injected a custom WebSocket/TCP/... message.
    """

    flow: flow.Flow
    message: T


@dataclass
class Wakeup(CommandCompleted):
    """
    Event sent to layers that requested a wakeup using RequestWakeup.
    """

    command: commands.RequestWakeup
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/utils.py | mitmproxy/proxy/utils.py | """
Utility decorators that help build state machines
"""
import functools
from mitmproxy.proxy import events
def expect(*event_types):
    """
    Decorator that restricts a handler to the given event types.
    Passing any other event raises an AssertionError.
    """

    def decorator(f):
        if not __debug__:  # pragma: no cover
            # With assertions disabled (-O), skip the runtime check entirely.
            return f

        @functools.wraps(f)
        def _check_event_type(self, event: events.Event):
            if not isinstance(event, event_types):
                allowed = "|".join(t.__name__ for t in event_types) or "no events"
                raise AssertionError(
                    f"Unexpected event type at {f.__qualname__}: "
                    f"Expected {allowed}, got {event}."
                )
            return f(self, event)

        return _check_event_type

    return decorator
class ReceiveBuffer:
    """
    Accumulates stream contents chunk by chunk in O(n) total:
    appends are O(1) and the payload is only joined on demand.
    """

    _chunks: list[bytes]
    _len: int

    def __init__(self):
        self._chunks = []
        self._len = 0

    def __iadd__(self, other: bytes):
        assert isinstance(other, bytes)
        self._len += len(other)
        self._chunks.append(other)
        return self

    def __len__(self):
        return self._len

    def __bytes__(self):
        return b"".join(self._chunks)

    def __bool__(self):
        return bool(self._len)

    def clear(self):
        self._len = 0
        self._chunks.clear()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/mode_specs.py | mitmproxy/proxy/mode_specs.py | """
This module is responsible for parsing proxy mode specifications such as
`"regular"`, `"reverse:https://example.com"`, or `"socks5@1234"`. The general syntax is
mode [: mode_configuration] [@ [listen_addr:]listen_port]
For a full example, consider `reverse:https://example.com@127.0.0.1:443`.
This would spawn a reverse proxy on port 443 bound to localhost.
The mode is `reverse`, and the mode data is `https://example.com`.
Examples:
mode = ProxyMode.parse("regular@1234")
assert mode.listen_port == 1234
assert isinstance(mode, RegularMode)
ProxyMode.parse("reverse:example.com@invalid-port") # ValueError
RegularMode.parse("regular") # ok
RegularMode.parse("socks5") # ValueError
"""
from __future__ import annotations
import dataclasses
import platform
import re
import sys
from abc import ABCMeta
from abc import abstractmethod
from dataclasses import dataclass
from functools import cache
from typing import ClassVar
from typing import Literal
import mitmproxy_rs
from mitmproxy.coretypes.serializable import Serializable
from mitmproxy.net import server_spec
if sys.version_info < (3, 11):
from typing_extensions import Self # pragma: no cover
else:
from typing import Self
@dataclass(frozen=True)  # type: ignore
class ProxyMode(Serializable, metaclass=ABCMeta):
    """
    Parsed representation of a proxy mode spec. Subclassed for each specific mode,
    which then does its own data validation.
    """

    full_spec: str
    """The full proxy mode spec as entered by the user."""
    data: str
    """The (raw) mode data, i.e. the part after the mode name."""
    custom_listen_host: str | None
    """A custom listen host, if specified in the spec."""
    custom_listen_port: int | None
    """A custom listen port, if specified in the spec."""
    type_name: ClassVar[
        str
    ]  # automatically derived from the class name in __init_subclass__
    """The unique name for this proxy mode, e.g. "regular" or "reverse"."""

    # Registry of all known modes, keyed by type_name; filled by __init_subclass__.
    __types: ClassVar[dict[str, type[ProxyMode]]] = {}

    def __init_subclass__(cls, **kwargs):
        cls.type_name = cls.__name__.removesuffix("Mode").lower()
        assert cls.type_name not in ProxyMode.__types
        ProxyMode.__types[cls.type_name] = cls

    def __repr__(self):
        return f"ProxyMode.parse({self.full_spec!r})"

    @abstractmethod
    def __post_init__(self) -> None:
        """Validation of data happens here."""

    @property
    @abstractmethod
    def description(self) -> str:
        """The mode description that will be used in server logs and UI."""

    @property
    def default_port(self) -> int | None:
        """
        Default listen port of servers for this mode, see `ProxyMode.listen_port()`.
        """
        return 8080

    @property
    @abstractmethod
    def transport_protocol(self) -> Literal["tcp", "udp", "both"]:
        """The transport protocol used by this mode's server."""

    @classmethod
    @cache
    def parse(cls, spec: str) -> Self:
        """
        Parse a proxy mode specification and return the corresponding `ProxyMode` instance.

        Raises:
            ValueError: if the spec is malformed, names an unknown mode,
                or names a mode that is not a subclass of `cls`.
        """
        head, _, listen_at = spec.rpartition("@")
        if not head:
            # No "@" in the spec: everything is the mode[:data] part.
            head = listen_at
            listen_at = ""

        mode, _, data = head.partition(":")

        if listen_at:
            if ":" in listen_at:
                host, _, port_str = listen_at.rpartition(":")
            else:
                host = None
                port_str = listen_at
            try:
                port = int(port_str)
                if port < 0 or 65535 < port:
                    raise ValueError
            except ValueError:
                raise ValueError(f"invalid port: {port_str}")
        else:
            host = None
            port = None

        try:
            mode_cls = ProxyMode.__types[mode.lower()]
        except KeyError:
            # Include the offending name so typos are easy to spot.
            raise ValueError(f"unknown mode: {mode!r}")
        if not issubclass(mode_cls, cls):
            raise ValueError(f"{mode!r} is not a spec for a {cls.type_name} mode")
        return mode_cls(
            full_spec=spec, data=data, custom_listen_host=host, custom_listen_port=port
        )

    def listen_host(self, default: str | None = None) -> str:
        """
        Return the address a server for this mode should listen on. This can be either directly
        specified in the spec or taken from a user-configured global default (`options.listen_host`).
        By default, return an empty string to listen on all hosts.
        """
        if self.custom_listen_host is not None:
            return self.custom_listen_host
        elif default is not None:
            return default
        else:
            return ""

    def listen_port(self, default: int | None = None) -> int | None:
        """
        Return the port a server for this mode should listen on. This can be either directly
        specified in the spec, taken from a user-configured global default (`options.listen_port`),
        or from `ProxyMode.default_port`.
        May be `None` for modes that don't bind to a specific address, e.g. local redirect mode.
        """
        if self.custom_listen_port is not None:
            return self.custom_listen_port
        elif default is not None:
            return default
        else:
            return self.default_port

    @classmethod
    def from_state(cls, state):
        return ProxyMode.parse(state)

    def get_state(self):
        return self.full_spec

    def set_state(self, state):
        # Instances are immutable: only a no-op "set" to the same spec is allowed.
        if state != self.full_spec:
            raise dataclasses.FrozenInstanceError("Proxy modes are immutable.")
# Shorthand constants for the `transport_protocol` attribute of the mode classes below.
TCP: Literal["tcp", "udp", "both"] = "tcp"
UDP: Literal["tcp", "udp", "both"] = "udp"
BOTH: Literal["tcp", "udp", "both"] = "both"
def _check_empty(data):
if data:
raise ValueError("mode takes no arguments")
class RegularMode(ProxyMode):
    """A regular HTTP(S) proxy that is interfaced with `HTTP CONNECT` calls (or absolute-form HTTP requests)."""

    description = "HTTP(S) proxy"
    transport_protocol = TCP

    def __post_init__(self) -> None:
        _check_empty(self.data)


class TransparentMode(ProxyMode):
    """A transparent proxy, see https://docs.mitmproxy.org/dev/howto-transparent/"""

    description = "Transparent Proxy"
    transport_protocol = TCP

    def __post_init__(self) -> None:
        _check_empty(self.data)


class UpstreamMode(ProxyMode):
    """A regular HTTP(S) proxy, but all connections are forwarded to a second upstream HTTP(S) proxy."""

    description = "HTTP(S) proxy (upstream mode)"
    transport_protocol = TCP
    scheme: Literal["http", "https"]
    address: tuple[str, int]

    # noinspection PyDataclass
    def __post_init__(self) -> None:
        # "http" is the default scheme, i.e. "upstream:proxy:8080" == "upstream:http://proxy:8080".
        scheme, self.address = server_spec.parse(self.data, default_scheme="http")
        if scheme != "http" and scheme != "https":
            raise ValueError("invalid upstream proxy scheme")
        self.scheme = scheme
class ReverseMode(ProxyMode):
    """A reverse proxy. This acts like a normal server, but redirects all requests to a fixed target."""

    description = "reverse proxy"
    transport_protocol = TCP
    scheme: Literal[
        "http", "https", "http3", "tls", "dtls", "tcp", "udp", "dns", "quic"
    ]
    address: tuple[str, int]

    # noinspection PyDataclass
    def __post_init__(self) -> None:
        # "https" is the default scheme, i.e. "reverse:example.com" == "reverse:https://example.com".
        self.scheme, self.address = server_spec.parse(self.data, default_scheme="https")
        # UDP-only schemes switch the listener's transport protocol accordingly;
        # dns/https targets listen on both TCP and UDP.
        if self.scheme in ("http3", "dtls", "udp", "quic"):
            self.transport_protocol = UDP
        elif self.scheme in ("dns", "https"):
            self.transport_protocol = BOTH
        self.description = f"{self.description} to {self.data}"

    @property
    def default_port(self) -> int | None:
        # DNS targets listen on the well-known DNS port instead of 8080.
        if self.scheme == "dns":
            return 53
        return super().default_port


class Socks5Mode(ProxyMode):
    """A SOCKSv5 proxy."""

    description = "SOCKS v5 proxy"
    default_port = 1080
    transport_protocol = TCP

    def __post_init__(self) -> None:
        _check_empty(self.data)


class DnsMode(ProxyMode):
    """A DNS server."""

    description = "DNS server"
    default_port = 53
    transport_protocol = BOTH

    def __post_init__(self) -> None:
        _check_empty(self.data)
# class Http3Mode(ProxyMode):
# """
# A regular HTTP3 proxy that is interfaced with absolute-form HTTP requests.
# (This class will be merged into `RegularMode` once the UDP implementation is deemed stable enough.)
# """
#
# description = "HTTP3 proxy"
# transport_protocol = UDP
#
# def __post_init__(self) -> None:
# _check_empty(self.data)
class WireGuardMode(ProxyMode):
    """Proxy Server based on WireGuard"""

    description = "WireGuard server"
    default_port = 51820
    transport_protocol = UDP

    def __post_init__(self) -> None:
        # Mode data (an optional path to a configuration file) is validated on server start.
        pass


class LocalMode(ProxyMode):
    """OS-level transparent proxy."""

    description = "Local redirector"
    transport_protocol = BOTH
    default_port = None

    def __post_init__(self) -> None:
        # should not raise
        mitmproxy_rs.local.LocalRedirector.describe_spec(self.data)


class TunMode(ProxyMode):
    """A Tun interface."""

    description = "TUN interface"
    default_port = None
    transport_protocol = BOTH

    def __post_init__(self) -> None:
        invalid_tun_name = self.data and (
            # The Rust side is Linux only for the moment, but eventually we may need this.
            platform.system() == "Darwin" and not re.match(r"^utun\d+$", self.data)
        )
        if invalid_tun_name:  # pragma: no cover
            raise ValueError(
                f"Invalid tun name: {self.data}. "
                f"On macOS, the tun name must be the form utunx where x is a number, such as utun3."
            )


class OsProxyMode(ProxyMode):  # pragma: no cover
    """Deprecated alias for LocalMode"""

    description = "Deprecated alias for LocalMode"
    transport_protocol = BOTH
    default_port = None

    def __post_init__(self) -> None:
        # Always reject: "osproxy" specs must be migrated to "local".
        raise ValueError(
            "osproxy mode has been renamed to local mode. Thanks for trying our experimental features!"
        )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/mode_servers.py | mitmproxy/proxy/mode_servers.py | """
This module defines "server instances", which manage
the TCP/UDP servers spawned by mitmproxy as specified by the proxy mode.
Example:
mode = ProxyMode.parse("reverse:https://example.com")
inst = ServerInstance.make(mode, manager_that_handles_callbacks)
await inst.start()
# TCP server is running now.
"""
from __future__ import annotations
import asyncio
import errno
import json
import logging
import os
import socket
import sys
import textwrap
import typing
from abc import ABCMeta
from abc import abstractmethod
from contextlib import contextmanager
from pathlib import Path
from typing import cast
from typing import ClassVar
from typing import Generic
from typing import get_args
from typing import TYPE_CHECKING
from typing import TypeVar
import mitmproxy_rs
from mitmproxy import ctx
from mitmproxy import flow
from mitmproxy import platform
from mitmproxy.connection import Address
from mitmproxy.net import local_ip
from mitmproxy.net.free_port import get_free_port
from mitmproxy.proxy import commands
from mitmproxy.proxy import layers
from mitmproxy.proxy import mode_specs
from mitmproxy.proxy import server
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layer import Layer
from mitmproxy.utils import human
if sys.version_info < (3, 11):
from typing_extensions import Self # pragma: no cover
else:
from typing import Self
if TYPE_CHECKING:
from mitmproxy.master import Master
logger = logging.getLogger(__name__)
class ProxyConnectionHandler(server.LiveConnectionHandler):
    """Connection handler that forwards proxy-core hooks to the mitmproxy master/addons."""

    master: Master

    def __init__(self, master, r, w, options, mode):
        self.master = master
        super().__init__(r, w, options, mode)
        self.log_prefix = f"{human.format_address(self.client.peername)}: "

    async def handle_hook(self, hook: commands.StartHook) -> None:
        # Suspend the connection timeout while addons process the hook.
        with self.timeout_watchdog.disarm():
            # We currently only support single-argument hooks.
            (data,) = hook.args()
            await self.master.addons.handle_lifecycle(hook)
            if isinstance(data, flow.Flow):
                # Intercepted flows block here until the user resumes them.
                await data.wait_for_resume()  # pragma: no cover
# Type variable bound to the proxy mode a server instance handles.
M = TypeVar("M", bound=mode_specs.ProxyMode)


class ServerManager(typing.Protocol):
    """Structural type for the object that tracks live proxy connections."""

    # temporary workaround: for UDP, we use the 4-tuple because we don't have a uuid.
    connections: dict[tuple | str, ProxyConnectionHandler]

    @contextmanager
    def register_connection(
        self, connection_id: tuple | str, handler: ProxyConnectionHandler
    ): ...  # pragma: no cover
class ServerInstance(Generic[M], metaclass=ABCMeta):
    """
    Manages the lifecycle (start/stop/logging) of the server(s) for one proxy mode.
    """

    # Registry mapping mode type names (e.g. "regular") to the implementing
    # ServerInstance subclass; populated by __init_subclass__.
    __modes: ClassVar[dict[str, type[ServerInstance]]] = {}

    # Exception raised by the most recent failed start/stop; None after success.
    last_exception: Exception | None = None

    def __init__(self, mode: M, manager: ServerManager):
        self.mode: M = mode
        self.manager: ServerManager = manager

    def __init_subclass__(cls, **kwargs):
        """Register all subclasses so that make() finds them."""
        # extract mode from Generic[Mode].
        mode = get_args(cls.__orig_bases__[0])[0]  # type: ignore
        if not isinstance(mode, TypeVar):
            assert issubclass(mode, mode_specs.ProxyMode)
            assert mode.type_name not in ServerInstance.__modes
            ServerInstance.__modes[mode.type_name] = cls

    @classmethod
    def make(
        cls,
        mode: mode_specs.ProxyMode | str,
        manager: ServerManager,
    ) -> Self:
        """Instantiate the registered ServerInstance subclass for the given mode (spec)."""
        if isinstance(mode, str):
            mode = mode_specs.ProxyMode.parse(mode)
        inst = ServerInstance.__modes[mode.type_name](mode, manager)
        if not isinstance(inst, cls):
            raise ValueError(f"{mode!r} is not a spec for a {cls.__name__} server.")
        return inst

    @property
    @abstractmethod
    def is_running(self) -> bool:
        pass

    async def start(self) -> None:
        """Start the server, remembering any failure in `last_exception`, then log the outcome."""
        try:
            await self._start()
        except Exception as e:
            self.last_exception = e
            raise
        else:
            self.last_exception = None
        if self.listen_addrs:
            addrs = " and ".join({human.format_address(a) for a in self.listen_addrs})
            logger.info(f"{self.mode.description} listening at {addrs}.")
        else:
            logger.info(f"{self.mode.description} started.")

    async def stop(self) -> None:
        """Stop the server, remembering any failure in `last_exception`, then log the outcome."""
        # Capture the addresses before _stop() tears them down.
        listen_addrs = self.listen_addrs
        try:
            await self._stop()
        except Exception as e:
            self.last_exception = e
            raise
        else:
            self.last_exception = None
        if listen_addrs:
            addrs = " and ".join({human.format_address(a) for a in listen_addrs})
            logger.info(f"{self.mode.description} at {addrs} stopped.")
        else:
            logger.info(f"{self.mode.description} stopped.")

    @abstractmethod
    async def _start(self) -> None:
        pass

    @abstractmethod
    async def _stop(self) -> None:
        pass

    @property
    @abstractmethod
    def listen_addrs(self) -> tuple[Address, ...]:
        pass

    @abstractmethod
    def make_top_layer(self, context: Context) -> Layer:
        pass

    def to_json(self) -> dict:
        """Return a JSON-serializable summary of this instance."""
        return {
            "type": self.mode.type_name,
            "description": self.mode.description,
            "full_spec": self.mode.full_spec,
            "is_running": self.is_running,
            "last_exception": str(self.last_exception) if self.last_exception else None,
            "listen_addrs": self.listen_addrs,
        }

    async def handle_stream(
        self,
        reader: asyncio.StreamReader | mitmproxy_rs.Stream,
        writer: asyncio.StreamWriter | mitmproxy_rs.Stream | None = None,
    ) -> None:
        """Entry point invoked for each accepted client connection/stream."""
        if writer is None:
            # A single mitmproxy_rs.Stream serves as both reader and writer.
            assert isinstance(reader, mitmproxy_rs.Stream)
            writer = reader
        handler = ProxyConnectionHandler(
            ctx.master, reader, writer, ctx.options, self.mode
        )
        handler.layer = self.make_top_layer(handler.layer.context)
        if isinstance(self.mode, mode_specs.TransparentMode):
            # Recover the original destination from the redirected socket.
            assert isinstance(writer, asyncio.StreamWriter)
            s = cast(socket.socket, writer.get_extra_info("socket"))
            try:
                assert platform.original_addr
                original_dst = platform.original_addr(s)
            except Exception as e:
                logger.error(f"Transparent mode failure: {e!r}")
                writer.close()
                return
            else:
                handler.layer.context.client.sockname = original_dst
                handler.layer.context.server.address = original_dst
        elif isinstance(
            self.mode,
            (mode_specs.WireGuardMode, mode_specs.LocalMode, mode_specs.TunMode),
        ):  # pragma: no cover on platforms without wg-test-client
            handler.layer.context.server.address = writer.get_extra_info(
                "remote_endpoint", handler.layer.context.client.sockname
            )
        with self.manager.register_connection(handler.layer.context.client.id, handler):
            await handler.handle_client()
class AsyncioServerInstance(ServerInstance[M], metaclass=ABCMeta):
    """
    Base class for server instances that listen on TCP and/or UDP sockets.
    """

    _servers: list[
        asyncio.Server
        | mitmproxy_rs.udp.UdpServer
        | mitmproxy_rs.wireguard.WireGuardServer
    ]

    def __init__(self, *args, **kwargs) -> None:
        self._servers = []
        super().__init__(*args, **kwargs)

    @property
    def is_running(self) -> bool:
        return bool(self._servers)

    @property
    def listen_addrs(self) -> tuple[Address, ...]:
        addrs = []
        for s in self._servers:
            if isinstance(
                s, (mitmproxy_rs.udp.UdpServer, mitmproxy_rs.wireguard.WireGuardServer)
            ):
                addrs.append(s.getsockname())
            else:
                # asyncio.Server may own several sockets (e.g. IPv4 + IPv6).
                try:
                    addrs.extend(sock.getsockname() for sock in s.sockets)
                except OSError:  # pragma: no cover
                    pass  # this can fail during shutdown, see https://github.com/mitmproxy/mitmproxy/issues/6529
        return tuple(addrs)

    async def _start(self) -> None:
        assert not self._servers
        host = self.mode.listen_host(ctx.options.listen_host)
        port = self.mode.listen_port(ctx.options.listen_port)
        assert port is not None
        try:
            self._servers = await self.listen(host, port)
        except OSError as e:
            message = f"{self.mode.description} failed to listen on {host or '*'}:{port} with {e}"
            if e.errno == errno.EADDRINUSE and self.mode.custom_listen_port is None:
                assert (
                    self.mode.custom_listen_host is None
                )  # since [@ [listen_addr:]listen_port]
                message += f"\nTry specifying a different port by using `--mode {self.mode.full_spec}@{port + 2}`."
            raise OSError(e.errno, message, e.filename) from e

    async def _stop(self) -> None:
        assert self._servers
        try:
            for s in self._servers:
                s.close()
            # https://github.com/python/cpython/issues/104344
            # await asyncio.gather(*[s.wait_closed() for s in self._servers])
        finally:
            # we always reset _server and ignore failures
            self._servers = []

    async def listen(
        self, host: str, port: int
    ) -> list[
        asyncio.Server
        | mitmproxy_rs.udp.UdpServer
        | mitmproxy_rs.wireguard.WireGuardServer
    ]:
        """Open the TCP and/or UDP listeners required by this mode's transport protocol."""
        if self.mode.transport_protocol not in ("tcp", "udp", "both"):
            raise AssertionError(self.mode.transport_protocol)
        # workaround for https://github.com/python/cpython/issues/89856:
        # We want both IPv4 and IPv6 sockets to bind to the same port.
        # This may fail (https://github.com/mitmproxy/mitmproxy/pull/5542#issuecomment-1222803291),
        # so we try to cover the 99% case and then give up and fall back to what asyncio does.
        if port == 0:
            try:
                return await self.listen(host, get_free_port())
            except Exception as e:
                logger.debug(
                    f"Failed to listen on a single port ({e!r}), falling back to default behavior."
                )
        servers: list[
            asyncio.Server
            | mitmproxy_rs.udp.UdpServer
            | mitmproxy_rs.wireguard.WireGuardServer
        ] = []
        if self.mode.transport_protocol in ("tcp", "both"):
            servers.append(await asyncio.start_server(self.handle_stream, host, port))
        if self.mode.transport_protocol in ("udp", "both"):
            # we start two servers for dual-stack support.
            # On Linux, this would also be achievable by toggling IPV6_V6ONLY off, but this here works cross-platform.
            if host == "":
                ipv4 = await self.start_udp_based_server("0.0.0.0", port)
                servers.append(ipv4)
                try:
                    # Reuse the port the IPv4 socket actually bound to.
                    ipv6 = await self.start_udp_based_server(
                        "::", ipv4.getsockname()[1]
                    )
                    servers.append(ipv6)  # pragma: no cover
                except Exception:  # pragma: no cover
                    logger.debug("Failed to listen on '::', listening on IPv4 only.")
            else:
                servers.append(await self.start_udp_based_server(host, port))
        return servers

    async def start_udp_based_server(
        self, host, port
    ) -> mitmproxy_rs.udp.UdpServer | mitmproxy_rs.wireguard.WireGuardServer:
        """Start a plain UDP server; WireGuardServerInstance overrides this."""
        return await mitmproxy_rs.udp.start_udp_server(
            host,
            port,
            self.handle_stream,
        )
class WireGuardServerInstance(AsyncioServerInstance[mode_specs.WireGuardMode]):
    """
    WireGuard-based proxy server. Keys are read from (or generated into) a JSON
    configuration file; a matching client configuration is logged on startup.
    """

    server_key: str
    client_key: str
    pubkey: str

    def make_top_layer(
        self, context: Context
    ) -> Layer:  # pragma: no cover on platforms without wg-test-client
        return layers.modes.TransparentProxy(context)

    async def _start(self) -> None:
        # Mode data optionally points at a custom configuration file.
        if self.mode.data:
            conf_path = Path(self.mode.data).expanduser()
        else:
            conf_path = Path(ctx.options.confdir).expanduser() / "wireguard.conf"
        if not conf_path.exists():
            # First run: generate a fresh keypair for server and client.
            conf_path.parent.mkdir(parents=True, exist_ok=True)
            conf_path.write_text(
                json.dumps(
                    {
                        "server_key": mitmproxy_rs.wireguard.genkey(),
                        "client_key": mitmproxy_rs.wireguard.genkey(),
                    },
                    indent=4,
                )
            )
        try:
            c = json.loads(conf_path.read_text())
            self.server_key = c["server_key"]
            self.client_key = c["client_key"]
        except Exception as e:
            raise ValueError(f"Invalid configuration file ({conf_path}): {e}") from e
        # error early on invalid keys
        self.pubkey = mitmproxy_rs.wireguard.pubkey(self.client_key)
        _ = mitmproxy_rs.wireguard.pubkey(self.server_key)

        await super()._start()

        conf = self.client_conf()
        assert conf
        logger.info("-" * 60 + "\n" + conf + "\n" + "-" * 60)

    async def start_udp_based_server(
        self, host, port
    ) -> mitmproxy_rs.wireguard.WireGuardServer:
        return await mitmproxy_rs.wireguard.start_wireguard_server(
            host,
            port,
            self.server_key,
            [self.pubkey],
            self.handle_stream,
            self.handle_stream,
        )

    def client_conf(self) -> str | None:
        """Render the WireGuard client configuration, or None if not running."""
        if not self._servers:
            return None
        host = (
            self.mode.listen_host(ctx.options.listen_host)
            or local_ip.get_local_ip()
            or local_ip.get_local_ip6()
        )
        port = self.mode.listen_port(ctx.options.listen_port)
        return textwrap.dedent(
            f"""
            [Interface]
            PrivateKey = {self.client_key}
            Address = 10.0.0.1/32
            DNS = 10.0.0.53
            [Peer]
            PublicKey = {mitmproxy_rs.wireguard.pubkey(self.server_key)}
            AllowedIPs = 0.0.0.0/0
            Endpoint = {host}:{port}
            """
        ).strip()

    def to_json(self) -> dict:
        return {"wireguard_conf": self.client_conf(), **super().to_json()}
class LocalRedirectorInstance(ServerInstance[mode_specs.LocalMode]):
    """
    OS-level transparent proxy. At most one instance may be active at a time;
    the underlying redirector daemon is started once and reused.
    """

    _server: ClassVar[mitmproxy_rs.local.LocalRedirector | None] = None
    """The local redirector daemon. Will be started once and then reused for all future instances."""
    _instance: ClassVar[LocalRedirectorInstance | None] = None
    """The current LocalRedirectorInstance. Will be unset again if an instance is stopped."""
    listen_addrs = ()

    @property
    def is_running(self) -> bool:
        return self._instance is not None

    def make_top_layer(self, context: Context) -> Layer:
        return layers.modes.TransparentProxy(context)

    @classmethod
    async def redirector_handle_stream(
        cls,
        stream: mitmproxy_rs.Stream,
    ) -> None:
        # Forward streams from the shared daemon to the currently active instance.
        if cls._instance is not None:
            await cls._instance.handle_stream(stream)

    async def _start(self) -> None:
        if self._instance:
            raise RuntimeError("Cannot spawn more than one local redirector.")

        # Always exclude mitmproxy's own pid from interception.
        if self.mode.data:
            spec = f"{self.mode.data},!{os.getpid()}"
        else:
            spec = f"!{os.getpid()}"

        cls = self.__class__
        cls._instance = self  # assign before awaiting to avoid races
        if cls._server is None:
            try:
                cls._server = await mitmproxy_rs.local.start_local_redirector(
                    cls.redirector_handle_stream,
                    cls.redirector_handle_stream,
                )
            except Exception:
                cls._instance = None
                raise

        cls._server.set_intercept(spec)

    async def _stop(self) -> None:
        assert self._instance
        assert self._server
        self.__class__._instance = None
        # We're not shutting down the server because we want to avoid additional UAC prompts.
        self._server.set_intercept("")
class RegularInstance(AsyncioServerInstance[mode_specs.RegularMode]):
    """Server instance for the explicit HTTP(S) proxy."""

    def make_top_layer(self, context: Context) -> Layer:
        return layers.modes.HttpProxy(context)


class UpstreamInstance(AsyncioServerInstance[mode_specs.UpstreamMode]):
    """Server instance for the HTTP(S) proxy that forwards to an upstream proxy."""

    def make_top_layer(self, context: Context) -> Layer:
        return layers.modes.HttpUpstreamProxy(context)


class TransparentInstance(AsyncioServerInstance[mode_specs.TransparentMode]):
    """Server instance for the transparent proxy."""

    def make_top_layer(self, context: Context) -> Layer:
        return layers.modes.TransparentProxy(context)


class ReverseInstance(AsyncioServerInstance[mode_specs.ReverseMode]):
    """Server instance for the reverse proxy."""

    def make_top_layer(self, context: Context) -> Layer:
        return layers.modes.ReverseProxy(context)


class Socks5Instance(AsyncioServerInstance[mode_specs.Socks5Mode]):
    """Server instance for the SOCKSv5 proxy."""

    def make_top_layer(self, context: Context) -> Layer:
        return layers.modes.Socks5Proxy(context)


class DnsInstance(AsyncioServerInstance[mode_specs.DnsMode]):
    """Server instance for the DNS server."""

    def make_top_layer(self, context: Context) -> Layer:
        return layers.DNSLayer(context)
class TunInstance(ServerInstance[mode_specs.TunMode]):
    """Server instance backed by a TUN network interface."""

    _server: mitmproxy_rs.tun.TunInterface | None = None
    listen_addrs = ()

    def make_top_layer(
        self, context: Context
    ) -> Layer:  # pragma: no cover mocked in tests
        return layers.modes.TransparentProxy(context)

    @property
    def is_running(self) -> bool:
        return self._server is not None

    @property
    def tun_name(self) -> str | None:
        # The interface name (e.g. "utun3"), or None while stopped.
        if self._server:
            return self._server.tun_name()
        else:
            return None

    def to_json(self) -> dict:
        return {"tun_name": self.tun_name, **super().to_json()}

    async def _start(self) -> None:
        assert self._server is None
        self._server = await mitmproxy_rs.tun.create_tun_interface(
            self.handle_stream,
            self.handle_stream,
            # mode data optionally carries a fixed interface name.
            tun_name=self.mode.data or None,
        )
        logger.info(f"TUN interface created: {self._server.tun_name()}")

    async def _stop(self) -> None:
        assert self._server is not None
        try:
            self._server.close()
            await self._server.wait_closed()
        finally:
            self._server = None
# class Http3Instance(AsyncioServerInstance[mode_specs.Http3Mode]):
# def make_top_layer(self, context: Context) -> Layer:
# return layers.modes.HttpProxy(context)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/__init__.py | mitmproxy/proxy/__init__.py | """
This module contains mitmproxy's core network proxy.
The most important primitives are:
- Layers: represent protocol layers, e.g. one for TCP, TLS, and so on. Layers are nested, so
a typical configuration might be ReverseProxy/TLS/TCP.
Most importantly, layers are implemented using the sans-io pattern (https://sans-io.readthedocs.io/).
This means that calls return immediately, there is no blocking sync or async code.
- Server: the proxy server handles all I/O. This is implemented using `asyncio`, but could be done any other way.
The `ConnectionHandler` is subclassed in the `Proxyserver` addon, which handles the communication with the
rest of mitmproxy.
- Events: When I/O actions occur at the proxy server, they are passed to the outermost layer as events,
e.g. `DataReceived` or `ConnectionClosed`.
- Commands: In the other direction, layers can emit commands to higher layers or the proxy server.
This is used to e.g. send data, request for new connections to be opened, or to call mitmproxy's
event hooks.
- Context: The context is the connection context each layer is provided with, which is always a client connection
and sometimes also a server connection.
"""
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/context.py | mitmproxy/proxy/context.py | from typing import TYPE_CHECKING
from mitmproxy import connection
from mitmproxy.options import Options
if TYPE_CHECKING:
import mitmproxy.proxy.layer
class Context:
"""
The context object provided to each protocol layer in the proxy core.
"""
client: connection.Client
"""The client connection."""
server: connection.Server
"""
The server connection.
For practical reasons this attribute is always set, even if there is not server connection yet.
In this case the server address is `None`.
"""
options: Options
"""
Provides access to options for proxy layers. Not intended for use by addons, use `mitmproxy.ctx.options` instead.
"""
layers: list["mitmproxy.proxy.layer.Layer"]
"""
The protocol layer stack.
"""
def __init__(
self,
client: connection.Client,
options: Options,
) -> None:
self.client = client
self.options = options
self.server = connection.Server(
address=None, transport_protocol=client.transport_protocol
)
self.layers = []
def fork(self) -> "Context":
ret = Context(self.client, self.options)
ret.server = self.server
ret.layers = self.layers.copy()
return ret
def __repr__(self):
return (
f"Context(\n"
f" {self.client!r},\n"
f" {self.server!r},\n"
f" layers=[{self.layers!r}]\n"
f")"
)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/server.py | mitmproxy/proxy/server.py | """
Proxy Server Implementation using asyncio.
The very high level overview is as follows:
- Spawn one coroutine per client connection and create a reverse proxy layer to example.com
- Process any commands from layer (such as opening a server connection)
- Wait for any IO and send it as events to top layer.
"""
import abc
import asyncio
import collections
import logging
import time
from collections.abc import Awaitable
from collections.abc import Callable
from collections.abc import MutableMapping
from contextlib import contextmanager
from dataclasses import dataclass
from types import TracebackType
from typing import Literal
from OpenSSL import SSL
import mitmproxy_rs
from mitmproxy import http
from mitmproxy import options as moptions
from mitmproxy import tls
from mitmproxy.connection import Address
from mitmproxy.connection import Client
from mitmproxy.connection import Connection
from mitmproxy.connection import ConnectionState
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy import layers
from mitmproxy.proxy import mode_specs
from mitmproxy.proxy import server_hooks
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.utils import asyncio_utils
from mitmproxy.utils import human
from mitmproxy.utils.data import pkg_data
logger = logging.getLogger(__name__)
UDP_TIMEOUT = 20
class TimeoutWatchdog:
last_activity: float
timeout: int
can_timeout: asyncio.Event
blocker: int
def __init__(self, timeout: int, callback: Callable[[], Awaitable]):
self.timeout = timeout
self.callback = callback
self.last_activity = time.time()
self.can_timeout = asyncio.Event()
self.can_timeout.set()
self.blocker = 0
def register_activity(self):
self.last_activity = time.time()
async def watch(self):
try:
while True:
await self.can_timeout.wait()
await asyncio.sleep(self.timeout - (time.time() - self.last_activity))
if self.last_activity + self.timeout < time.time():
await self.callback()
return
except asyncio.CancelledError:
return
@contextmanager
def disarm(self):
self.can_timeout.clear()
self.blocker += 1
try:
yield
finally:
self.blocker -= 1
if self.blocker == 0:
self.register_activity()
self.can_timeout.set()
@dataclass
class ConnectionIO:
handler: asyncio.Task | None = None
reader: asyncio.StreamReader | mitmproxy_rs.Stream | None = None
writer: asyncio.StreamWriter | mitmproxy_rs.Stream | None = None
class ConnectionHandler(metaclass=abc.ABCMeta):
transports: MutableMapping[Connection, ConnectionIO]
timeout_watchdog: TimeoutWatchdog
client: Client
max_conns: collections.defaultdict[Address, asyncio.Semaphore]
layer: "layer.Layer"
wakeup_timer: set[asyncio.Task]
def __init__(self, context: Context) -> None:
self.client = context.client
self.transports = {}
self.max_conns = collections.defaultdict(lambda: asyncio.Semaphore(5))
self.wakeup_timer = set()
# Ask for the first layer right away.
# In a reverse proxy scenario, this is necessary as we would otherwise hang
# on protocols that start with a server greeting.
self.layer = layer.NextLayer(context, ask_on_start=True)
if self.client.transport_protocol == "tcp":
timeout = context.options.tcp_timeout
else:
timeout = UDP_TIMEOUT
self.timeout_watchdog = TimeoutWatchdog(timeout, self.on_timeout)
self._server_event_lock = asyncio.Lock()
# workaround for https://bugs.python.org/issue40124 / https://bugs.python.org/issue29930
self._drain_lock = asyncio.Lock()
async def handle_client(self) -> None:
asyncio_utils.set_current_task_debug_info(
name=f"client handler",
client=self.client.peername,
)
watch = asyncio_utils.create_task(
self.timeout_watchdog.watch(),
name="timeout watchdog",
keep_ref=False,
client=self.client.peername,
)
self.log("client connect")
await self.handle_hook(server_hooks.ClientConnectedHook(self.client))
if self.client.error:
self.log("client kill connection")
writer = self.transports.pop(self.client).writer
assert writer
writer.close()
else:
await self.server_event(events.Start())
handler = asyncio_utils.create_task(
self.handle_connection(self.client),
name=f"client connection handler",
keep_ref=False,
client=self.client.peername,
)
self.transports[self.client].handler = handler
await asyncio.wait([handler])
if not handler.cancelled() and (e := handler.exception()):
self.log(
f"connection handler has crashed: {e}",
logging.ERROR,
exc_info=(type(e), e, e.__traceback__),
)
watch.cancel()
while self.wakeup_timer:
timer = self.wakeup_timer.pop()
timer.cancel()
self.log("client disconnect")
self.client.timestamp_end = time.time()
await self.handle_hook(server_hooks.ClientDisconnectedHook(self.client))
if self.transports:
self.log("closing transports...", logging.DEBUG)
for io in self.transports.values():
if io.handler:
io.handler.cancel("client disconnected")
await asyncio.wait(
[x.handler for x in self.transports.values() if x.handler]
)
self.log("transports closed!", logging.DEBUG)
async def open_connection(self, command: commands.OpenConnection) -> None:
if not command.connection.address:
self.log(f"Cannot open connection, no hostname given.")
await self.server_event(
events.OpenConnectionCompleted(
command, f"Cannot open connection, no hostname given."
)
)
return
hook_data = server_hooks.ServerConnectionHookData(
client=self.client, server=command.connection
)
await self.handle_hook(server_hooks.ServerConnectHook(hook_data))
if err := command.connection.error:
self.log(
f"server connection to {human.format_address(command.connection.address)} killed before connect: {err}"
)
await self.handle_hook(server_hooks.ServerConnectErrorHook(hook_data))
await self.server_event(
events.OpenConnectionCompleted(command, f"Connection killed: {err}")
)
return
async with self.max_conns[command.connection.address]:
reader: asyncio.StreamReader | mitmproxy_rs.Stream
writer: asyncio.StreamWriter | mitmproxy_rs.Stream
try:
command.connection.timestamp_start = time.time()
if command.connection.transport_protocol == "tcp":
reader, writer = await asyncio.open_connection(
*command.connection.address,
local_addr=command.connection.sockname,
)
elif command.connection.transport_protocol == "udp":
reader = writer = await mitmproxy_rs.udp.open_udp_connection(
*command.connection.address,
local_addr=command.connection.sockname,
)
else:
raise AssertionError(command.connection.transport_protocol)
except (OSError, asyncio.CancelledError) as e:
err = str(e)
if not err: # str(CancelledError()) returns empty string.
err = "connection cancelled"
self.log(f"error establishing server connection: {err}")
command.connection.error = err
await self.handle_hook(server_hooks.ServerConnectErrorHook(hook_data))
await self.server_event(events.OpenConnectionCompleted(command, err))
if isinstance(e, asyncio.CancelledError):
# From https://docs.python.org/3/library/asyncio-exceptions.html#asyncio.CancelledError:
# > In almost all situations the exception must be re-raised.
# It is not really defined what almost means here, but we play safe.
raise
else:
if command.connection.transport_protocol == "tcp":
# TODO: Rename to `timestamp_setup` and make it agnostic for both TCP (SYN/ACK) and UDP (DNS resl.)
command.connection.timestamp_tcp_setup = time.time()
command.connection.state = ConnectionState.OPEN
command.connection.peername = writer.get_extra_info("peername")
command.connection.sockname = writer.get_extra_info("sockname")
self.transports[command.connection] = ConnectionIO(
handler=asyncio.current_task(),
reader=reader,
writer=writer,
)
assert command.connection.peername
if command.connection.address[0] != command.connection.peername[0]:
addr = f"{human.format_address(command.connection.address)} ({human.format_address(command.connection.peername)})"
else:
addr = human.format_address(command.connection.address)
self.log(f"server connect {addr}")
await self.handle_hook(server_hooks.ServerConnectedHook(hook_data))
await self.server_event(events.OpenConnectionCompleted(command, None))
try:
await self.handle_connection(command.connection)
finally:
self.log(f"server disconnect {addr}")
command.connection.timestamp_end = time.time()
await self.handle_hook(
server_hooks.ServerDisconnectedHook(hook_data)
)
async def wakeup(self, request: commands.RequestWakeup) -> None:
await asyncio.sleep(request.delay)
task = asyncio.current_task()
assert task is not None
self.wakeup_timer.discard(task)
await self.server_event(events.Wakeup(request))
async def handle_connection(self, connection: Connection) -> None:
"""
Handle a connection for its entire lifetime.
This means we read until EOF,
but then possibly also keep on waiting for our side of the connection to be closed.
"""
cancelled = None
reader = self.transports[connection].reader
assert reader
while True:
try:
data = await reader.read(65535)
if not data:
raise OSError("Connection closed by peer.")
except OSError:
break
except asyncio.CancelledError as e:
cancelled = e
break
await self.server_event(events.DataReceived(connection, data))
try:
await self.drain_writers()
except asyncio.CancelledError as e:
cancelled = e
break
if cancelled is None and connection.transport_protocol == "tcp":
# TCP connections can be half-closed.
connection.state &= ~ConnectionState.CAN_READ
else:
connection.state = ConnectionState.CLOSED
await self.server_event(events.ConnectionClosed(connection))
if connection.state is ConnectionState.CAN_WRITE:
# we may still use this connection to *send* stuff,
# even though the remote has closed their side of the connection.
# to make this work we keep this task running and wait for cancellation.
try:
await asyncio.Event().wait()
except asyncio.CancelledError as e:
cancelled = e
try:
writer = self.transports[connection].writer
assert writer
writer.close()
except OSError:
pass
self.transports.pop(connection)
if cancelled:
raise cancelled
async def drain_writers(self):
"""
Drain all writers to create some backpressure. We won't continue reading until there's space available in our
write buffers, so if we cannot write fast enough our own read buffers run full and the TCP recv stream is throttled.
"""
async with self._drain_lock:
for transport in list(self.transports.values()):
if transport.writer is not None:
try:
await transport.writer.drain()
except OSError as e:
if transport.handler is not None:
transport.handler.cancel(f"Error sending data: {e}")
async def on_timeout(self) -> None:
try:
handler = self.transports[self.client].handler
except KeyError: # pragma: no cover
# there is a super short window between connection close and watchdog cancellation
pass
else:
if self.client.transport_protocol == "tcp":
self.log(f"Closing connection due to inactivity: {self.client}")
assert handler
handler.cancel("timeout")
async def hook_task(self, hook: commands.StartHook) -> None:
await self.handle_hook(hook)
if hook.blocking:
await self.server_event(events.HookCompleted(hook))
@abc.abstractmethod
async def handle_hook(self, hook: commands.StartHook) -> None:
pass
def log(
self,
message: str,
level: int = logging.INFO,
exc_info: Literal[True]
| tuple[type[BaseException], BaseException, TracebackType | None]
| None = None,
) -> None:
logger.log(
level, message, extra={"client": self.client.peername}, exc_info=exc_info
)
async def server_event(self, event: events.Event) -> None:
# server_event is supposed to be completely sync without any `await` that could pause execution.
# However, create_task with an [eager task factory] will schedule tasks immediately,
# which causes [reentrancy issues]. So we put the entire thing behind a lock.
#
# [eager task factory]: https://docs.python.org/3/library/asyncio-task.html#eager-task-factory
# [reentrancy issues]: https://github.com/mitmproxy/mitmproxy/issues/7027.
async with self._server_event_lock:
# No `await` beyond this point.
self.timeout_watchdog.register_activity()
try:
layer_commands = self.layer.handle_event(event)
for command in layer_commands:
if isinstance(command, commands.OpenConnection):
assert command.connection not in self.transports
handler = asyncio_utils.create_task(
self.open_connection(command),
name=f"server connection handler {command.connection.address}",
keep_ref=False,
client=self.client.peername,
)
self.transports[command.connection] = ConnectionIO(
handler=handler
)
elif isinstance(command, commands.RequestWakeup):
task = asyncio_utils.create_task(
self.wakeup(command),
name=f"wakeup timer ({command.delay:.1f}s)",
keep_ref=False,
client=self.client.peername,
)
assert task is not None
self.wakeup_timer.add(task)
elif (
isinstance(command, commands.ConnectionCommand)
and command.connection not in self.transports
):
pass # The connection has already been closed.
elif isinstance(command, commands.SendData):
writer = self.transports[command.connection].writer
assert writer
if not writer.is_closing():
writer.write(command.data)
elif isinstance(command, commands.CloseTcpConnection):
self.close_connection(command.connection, command.half_close)
elif isinstance(command, commands.CloseConnection):
self.close_connection(command.connection, False)
elif isinstance(command, commands.StartHook):
asyncio_utils.create_task(
self.hook_task(command),
name=f"handle_hook({command.name})",
keep_ref=True,
client=self.client.peername,
)
elif isinstance(command, commands.Log):
self.log(command.message, command.level)
else:
raise RuntimeError(f"Unexpected command: {command}")
except Exception:
self.log(f"mitmproxy has crashed!", logging.ERROR, exc_info=True)
def close_connection(
self, connection: Connection, half_close: bool = False
) -> None:
if half_close:
if not connection.state & ConnectionState.CAN_WRITE:
return
self.log(f"half-closing {connection}", logging.DEBUG)
try:
writer = self.transports[connection].writer
assert writer
if not writer.is_closing():
writer.write_eof()
except OSError:
# if we can't write to the socket anymore we presume it completely dead.
connection.state = ConnectionState.CLOSED
else:
connection.state &= ~ConnectionState.CAN_WRITE
else:
connection.state = ConnectionState.CLOSED
if connection.state is ConnectionState.CLOSED:
handler = self.transports[connection].handler
assert handler
handler.cancel("closed by command")
class LiveConnectionHandler(ConnectionHandler, metaclass=abc.ABCMeta):
def __init__(
self,
reader: asyncio.StreamReader | mitmproxy_rs.Stream,
writer: asyncio.StreamWriter | mitmproxy_rs.Stream,
options: moptions.Options,
mode: mode_specs.ProxyMode,
) -> None:
client = Client(
transport_protocol=writer.get_extra_info("transport_protocol", "tcp"),
peername=writer.get_extra_info("peername"),
sockname=writer.get_extra_info("sockname"),
timestamp_start=time.time(),
proxy_mode=mode,
state=ConnectionState.OPEN,
)
context = Context(client, options)
super().__init__(context)
self.transports[client] = ConnectionIO(
handler=None, reader=reader, writer=writer
)
class SimpleConnectionHandler(LiveConnectionHandler): # pragma: no cover
"""Simple handler that does not really process any hooks."""
hook_handlers: dict[str, Callable]
def __init__(self, reader, writer, options, mode, hook_handlers):
super().__init__(reader, writer, options, mode)
self.hook_handlers = hook_handlers
async def handle_hook(self, hook: commands.StartHook) -> None:
if hook.name in self.hook_handlers:
self.hook_handlers[hook.name](*hook.args())
if __name__ == "__main__": # pragma: no cover
# simple standalone implementation for testing.
loop = asyncio.get_event_loop()
opts = moptions.Options()
# options duplicated here to simplify testing setup
opts.add_option(
"store_streamed_bodies",
bool,
False,
"",
)
opts.add_option(
"connection_strategy",
str,
"lazy",
"Determine when server connections should be established.",
choices=("eager", "lazy"),
)
opts.add_option(
"keep_host_header",
bool,
False,
"""
Reverse Proxy: Keep the original host header instead of rewriting it
to the reverse proxy target.
""",
)
async def handle(reader, writer):
layer_stack = [
# lambda ctx: layers.ServerTLSLayer(ctx),
# lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular),
# lambda ctx: setattr(ctx.server, "tls", True) or layers.ServerTLSLayer(ctx),
# lambda ctx: layers.ClientTLSLayer(ctx),
lambda ctx: layers.modes.ReverseProxy(ctx),
lambda ctx: layers.HttpLayer(ctx, HTTPMode.transparent),
]
def next_layer(nl: layer.NextLayer):
layr = layer_stack.pop(0)(nl.context)
layr.debug = " " * len(nl.context.layers)
nl.layer = layr
def request(flow: http.HTTPFlow):
if "cached" in flow.request.path:
flow.response = http.Response.make(418, f"(cached) {flow.request.text}")
if "toggle-tls" in flow.request.path:
if flow.request.url.startswith("https://"):
flow.request.url = flow.request.url.replace("https://", "http://")
else:
flow.request.url = flow.request.url.replace("http://", "https://")
if "redirect" in flow.request.path:
flow.request.host = "httpbin.org"
def tls_start_client(tls_start: tls.TlsData):
# INSECURE
ssl_context = SSL.Context(SSL.SSLv23_METHOD)
ssl_context.use_privatekey_file(
pkg_data.path(
"../test/mitmproxy/data/verificationcerts/trusted-leaf.key"
)
)
ssl_context.use_certificate_chain_file(
pkg_data.path(
"../test/mitmproxy/data/verificationcerts/trusted-leaf.crt"
)
)
tls_start.ssl_conn = SSL.Connection(ssl_context)
tls_start.ssl_conn.set_accept_state()
def tls_start_server(tls_start: tls.TlsData):
# INSECURE
ssl_context = SSL.Context(SSL.SSLv23_METHOD)
tls_start.ssl_conn = SSL.Connection(ssl_context)
tls_start.ssl_conn.set_connect_state()
if tls_start.context.client.sni is not None:
tls_start.ssl_conn.set_tlsext_host_name(
tls_start.context.client.sni.encode()
)
await SimpleConnectionHandler(
reader,
writer,
opts,
mode_specs.ProxyMode.parse("reverse:http://127.0.0.1:3000/"),
{
"next_layer": next_layer,
"request": request,
"tls_start_client": tls_start_client,
"tls_start_server": tls_start_server,
},
).handle_client()
coro = asyncio.start_server(handle, "127.0.0.1", 8080, loop=loop)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
assert server.sockets
print(f"Serving on {human.format_address(server.sockets[0].getsockname())}")
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/tls.py | mitmproxy/proxy/layers/tls.py | import struct
import time
import typing
from collections.abc import Iterator
from dataclasses import dataclass
from logging import DEBUG
from logging import ERROR
from logging import INFO
from logging import WARNING
from OpenSSL import SSL
from mitmproxy import certs
from mitmproxy import connection
from mitmproxy.connection import TlsVersion
from mitmproxy.net.tls import starts_like_dtls_record
from mitmproxy.net.tls import starts_like_tls_record
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.commands import StartHook
from mitmproxy.proxy.layers import tcp
from mitmproxy.proxy.layers import udp
from mitmproxy.tls import ClientHello
from mitmproxy.tls import ClientHelloData
from mitmproxy.tls import TlsData
from mitmproxy.utils import human
def handshake_record_contents(data: bytes) -> Iterator[bytes]:
"""
Returns a generator that yields the bytes contained in each handshake record.
This will raise an error on the first non-handshake record, so fully exhausting this
generator is a bad idea.
"""
offset = 0
while True:
if len(data) < offset + 5:
return
record_header = data[offset : offset + 5]
if not starts_like_tls_record(record_header):
raise ValueError(f"Expected TLS record, got {record_header!r} instead.")
record_size = struct.unpack("!H", record_header[3:])[0]
if record_size == 0:
raise ValueError("Record must not be empty.")
offset += 5
if len(data) < offset + record_size:
return
record_body = data[offset : offset + record_size]
yield record_body
offset += record_size
def get_client_hello(data: bytes) -> bytes | None:
"""
Read all TLS records that contain the initial ClientHello.
Returns the raw handshake packet bytes, without TLS record headers.
"""
client_hello = b""
for d in handshake_record_contents(data):
client_hello += d
if len(client_hello) >= 4:
client_hello_size = struct.unpack("!I", b"\x00" + client_hello[1:4])[0] + 4
if len(client_hello) >= client_hello_size:
return client_hello[:client_hello_size]
return None
def parse_client_hello(data: bytes) -> ClientHello | None:
"""
Check if the supplied bytes contain a full ClientHello message,
and if so, parse it.
Returns:
- A ClientHello object on success
- None, if the TLS record is not complete
Raises:
- A ValueError, if the passed ClientHello is invalid
"""
# Check if ClientHello is complete
client_hello = get_client_hello(data)
if client_hello:
try:
return ClientHello(client_hello[4:])
except EOFError as e:
raise ValueError("Invalid ClientHello") from e
return None
def dtls_handshake_record_contents(data: bytes) -> Iterator[bytes]:
"""
Returns a generator that yields the bytes contained in each handshake record.
This will raise an error on the first non-handshake record, so fully exhausting this
generator is a bad idea.
"""
offset = 0
while True:
# DTLS includes two new fields, totaling 8 bytes, between Version and Length
if len(data) < offset + 13:
return
record_header = data[offset : offset + 13]
if not starts_like_dtls_record(record_header):
raise ValueError(f"Expected DTLS record, got {record_header!r} instead.")
# Length fields starts at 11
record_size = struct.unpack("!H", record_header[11:])[0]
if record_size == 0:
raise ValueError("Record must not be empty.")
offset += 13
if len(data) < offset + record_size:
return
record_body = data[offset : offset + record_size]
yield record_body
offset += record_size
def get_dtls_client_hello(data: bytes) -> bytes | None:
"""
Read all DTLS records that contain the initial ClientHello.
Returns the raw handshake packet bytes, without TLS record headers.
"""
client_hello = b""
for d in dtls_handshake_record_contents(data):
client_hello += d
if len(client_hello) >= 13:
# comment about slicing: we skip the epoch and sequence number
client_hello_size = (
struct.unpack("!I", b"\x00" + client_hello[9:12])[0] + 12
)
if len(client_hello) >= client_hello_size:
return client_hello[:client_hello_size]
return None
def dtls_parse_client_hello(data: bytes) -> ClientHello | None:
"""
Check if the supplied bytes contain a full ClientHello message,
and if so, parse it.
Returns:
- A ClientHello object on success
- None, if the TLS record is not complete
Raises:
- A ValueError, if the passed ClientHello is invalid
"""
# Check if ClientHello is complete
client_hello = get_dtls_client_hello(data)
if client_hello:
try:
return ClientHello(client_hello[12:], dtls=True)
except EOFError as e:
raise ValueError("Invalid ClientHello") from e
return None
HTTP1_ALPNS = (b"http/1.1", b"http/1.0", b"http/0.9")
HTTP2_ALPN = b"h2"
HTTP3_ALPN = b"h3"
HTTP_ALPNS = (HTTP3_ALPN, HTTP2_ALPN, *HTTP1_ALPNS)
# We need these classes as hooks can only have one argument at the moment.
@dataclass
class TlsClienthelloHook(StartHook):
"""
Mitmproxy has received a TLS ClientHello message.
This hook decides whether a server connection is needed
to negotiate TLS with the client (data.establish_server_tls_first)
"""
data: ClientHelloData
@dataclass
class TlsStartClientHook(StartHook):
"""
TLS negotation between mitmproxy and a client is about to start.
An addon is expected to initialize data.ssl_conn.
(by default, this is done by `mitmproxy.addons.tlsconfig`)
"""
data: TlsData
@dataclass
class TlsStartServerHook(StartHook):
"""
TLS negotation between mitmproxy and a server is about to start.
An addon is expected to initialize data.ssl_conn.
(by default, this is done by `mitmproxy.addons.tlsconfig`)
"""
data: TlsData
@dataclass
class TlsEstablishedClientHook(StartHook):
"""
The TLS handshake with the client has been completed successfully.
"""
data: TlsData
@dataclass
class TlsEstablishedServerHook(StartHook):
"""
The TLS handshake with the server has been completed successfully.
"""
data: TlsData
@dataclass
class TlsFailedClientHook(StartHook):
"""
The TLS handshake with the client has failed.
"""
data: TlsData
@dataclass
class TlsFailedServerHook(StartHook):
"""
The TLS handshake with the server has failed.
"""
data: TlsData
class TLSLayer(tunnel.TunnelLayer):
tls: SSL.Connection = None # type: ignore
"""The OpenSSL connection object"""
def __init__(self, context: context.Context, conn: connection.Connection):
super().__init__(
context,
tunnel_connection=conn,
conn=conn,
)
conn.tls = True
def __repr__(self):
return (
super().__repr__().replace(")", f" {self.conn.sni!r} {self.conn.alpn!r})")
)
@property
def is_dtls(self):
return self.conn.transport_protocol == "udp"
@property
def proto_name(self):
return "DTLS" if self.is_dtls else "TLS"
def start_tls(self) -> layer.CommandGenerator[None]:
assert not self.tls
tls_start = TlsData(self.conn, self.context, is_dtls=self.is_dtls)
if self.conn == self.context.client:
yield TlsStartClientHook(tls_start)
else:
yield TlsStartServerHook(tls_start)
if not tls_start.ssl_conn:
yield commands.Log(
f"No {self.proto_name} context was provided, failing connection.", ERROR
)
yield commands.CloseConnection(self.conn)
return
assert tls_start.ssl_conn
self.tls = tls_start.ssl_conn
def tls_interact(self) -> layer.CommandGenerator[None]:
while True:
try:
data = self.tls.bio_read(65535)
except SSL.WantReadError:
return # Okay, nothing more waiting to be sent.
else:
yield commands.SendData(self.conn, data)
def receive_handshake_data(
self, data: bytes
) -> layer.CommandGenerator[tuple[bool, str | None]]:
# bio_write errors for b"", so we need to check first if we actually received something.
if data:
self.tls.bio_write(data)
try:
self.tls.do_handshake()
except SSL.WantReadError:
yield from self.tls_interact()
return False, None
except SSL.Error as e:
# provide more detailed information for some errors.
last_err = (
e.args and isinstance(e.args[0], list) and e.args[0] and e.args[0][-1]
)
if last_err in [
(
"SSL routines",
"tls_process_server_certificate",
"certificate verify failed",
),
("SSL routines", "", "certificate verify failed"), # OpenSSL 3+
]:
verify_result = SSL._lib.SSL_get_verify_result(self.tls._ssl) # type: ignore
error = SSL._ffi.string( # type: ignore
SSL._lib.X509_verify_cert_error_string(verify_result) # type: ignore
).decode()
err = f"Certificate verify failed: {error}"
elif last_err in [
("SSL routines", "ssl3_read_bytes", "tlsv1 alert unknown ca"),
("SSL routines", "ssl3_read_bytes", "sslv3 alert bad certificate"),
("SSL routines", "ssl3_read_bytes", "ssl/tls alert bad certificate"),
("SSL routines", "", "tlsv1 alert unknown ca"), # OpenSSL 3+
("SSL routines", "", "sslv3 alert bad certificate"), # OpenSSL 3+
("SSL routines", "", "ssl/tls alert bad certificate"), # OpenSSL 3.2+
]:
assert isinstance(last_err, tuple)
err = last_err[2]
elif (
last_err
in [
("SSL routines", "ssl3_get_record", "wrong version number"),
("SSL routines", "", "wrong version number"), # OpenSSL 3+
("SSL routines", "", "packet length too long"), # OpenSSL 3+
("SSL routines", "", "record layer failure"), # OpenSSL 3+
]
and data[:4].isascii()
):
err = f"The remote server does not speak TLS."
elif last_err in [
("SSL routines", "ssl3_read_bytes", "tlsv1 alert protocol version"),
("SSL routines", "", "tlsv1 alert protocol version"), # OpenSSL 3+
]:
err = (
f"The remote server and mitmproxy cannot agree on a TLS version to use. "
f"You may need to adjust mitmproxy's tls_version_server_min option."
)
else:
err = f"OpenSSL {e!r}"
return False, err
else:
# Here we set all attributes that are only known *after* the handshake.
# Get all peer certificates.
# https://www.openssl.org/docs/man1.1.1/man3/SSL_get_peer_cert_chain.html
# If called on the client side, the stack also contains the peer's certificate; if called on the server
# side, the peer's certificate must be obtained separately using SSL_get_peer_certificate(3).
all_certs = self.tls.get_peer_cert_chain() or []
if self.conn == self.context.client:
cert = self.tls.get_peer_certificate()
if cert:
all_certs.insert(0, cert)
self.conn.certificate_list = []
for cert in all_certs:
try:
# This may fail for weird certs, https://github.com/mitmproxy/mitmproxy/issues/6968.
parsed_cert = certs.Cert.from_pyopenssl(cert)
except ValueError as e:
yield commands.Log(
f"{self.debug}[tls] failed to parse certificate: {e}", WARNING
)
else:
self.conn.certificate_list.append(parsed_cert)
self.conn.timestamp_tls_setup = time.time()
self.conn.alpn = self.tls.get_alpn_proto_negotiated()
self.conn.cipher = self.tls.get_cipher_name()
self.conn.tls_version = typing.cast(
TlsVersion, self.tls.get_protocol_version_name()
)
if self.debug:
yield commands.Log(
f"{self.debug}[tls] tls established: {self.conn}", DEBUG
)
if self.conn == self.context.client:
yield TlsEstablishedClientHook(
TlsData(self.conn, self.context, self.tls)
)
else:
yield TlsEstablishedServerHook(
TlsData(self.conn, self.context, self.tls)
)
yield from self.receive_data(b"")
return True, None
def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
self.conn.error = err
if self.conn == self.context.client:
yield TlsFailedClientHook(TlsData(self.conn, self.context, self.tls))
else:
yield TlsFailedServerHook(TlsData(self.conn, self.context, self.tls))
yield from super().on_handshake_error(err)
def receive_data(self, data: bytes) -> layer.CommandGenerator[None]:
if data:
self.tls.bio_write(data)
plaintext = bytearray()
close = False
while True:
try:
plaintext.extend(self.tls.recv(65535))
except SSL.WantReadError:
break
except SSL.ZeroReturnError:
close = True
break
except SSL.Error as e:
# This may be happening because the other side send an alert.
# There's somewhat ugly behavior with Firefox on Android here,
# which upon mistrusting a certificate still completes the handshake
# and then sends an alert in the next packet. At this point we have unfortunately
# already fired out `tls_established_client` hook.
yield commands.Log(f"TLS Error: {e}", WARNING)
break
# Can we send something?
# Note that this must happen after `recv()`, which may have advanced the state machine.
# https://github.com/mitmproxy/mitmproxy/discussions/7550
yield from self.tls_interact()
if plaintext:
yield from self.event_to_child(
events.DataReceived(self.conn, bytes(plaintext))
)
if close:
self.conn.state &= ~connection.ConnectionState.CAN_READ
if self.debug:
yield commands.Log(f"{self.debug}[tls] close_notify {self.conn}", DEBUG)
yield from self.event_to_child(events.ConnectionClosed(self.conn))
def receive_close(self) -> layer.CommandGenerator[None]:
if self.tls.get_shutdown() & SSL.RECEIVED_SHUTDOWN:
pass # We have already dispatched a ConnectionClosed to the child layer.
else:
yield from super().receive_close()
def send_data(self, data: bytes) -> layer.CommandGenerator[None]:
try:
self.tls.sendall(data)
except (SSL.ZeroReturnError, SSL.SysCallError):
# The other peer may still be trying to send data over, which we discard here.
pass
yield from self.tls_interact()
def send_close(
self, command: commands.CloseConnection
) -> layer.CommandGenerator[None]:
# We should probably shutdown the TLS connection properly here.
yield from super().send_close(command)
class ServerTLSLayer(TLSLayer):
    """
    This layer establishes TLS for a single server connection.
    """

    # True while the server-side handshake is deferred until the client's
    # ClientHello has been parsed by the ClientTLS child layer.
    wait_for_clienthello: bool = False

    def __init__(self, context: context.Context, conn: connection.Server | None = None):
        super().__init__(context, conn or context.server)

    def start_handshake(self) -> layer.CommandGenerator[None]:
        """Either start TLS right away, or defer until the ClientHello is known."""
        wait_for_clienthello = (
            # if command_to_reply_to is set, we've been instructed to open the connection from the child layer.
            # in that case any potential ClientHello is already parsed (by the ClientTLS child layer).
            not self.command_to_reply_to
            # if command_to_reply_to is not set, the connection was already open when this layer received its Start
            # event (eager connection strategy). We now want to establish TLS right away, _unless_ we already know
            # that there's TLS on the client side as well (we check if our immediate child layer is set to be ClientTLS)
            # In this case we want to wait for ClientHello to be parsed, so that we can incorporate SNI/ALPN from there.
            and isinstance(self.child_layer, ClientTLSLayer)
        )
        if wait_for_clienthello:
            self.wait_for_clienthello = True
            self.tunnel_state = tunnel.TunnelState.CLOSED
        else:
            yield from self.start_tls()
            if self.tls:
                # Kick the handshake state machine with an empty read.
                yield from self.receive_handshake_data(b"")

    def event_to_child(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Relay events to the child layer; while we are waiting for the
        ClientHello, intercept the child's OpenConnection for our own conn."""
        if self.wait_for_clienthello:
            for command in super().event_to_child(event):
                if (
                    isinstance(command, commands.OpenConnection)
                    and command.connection == self.conn
                ):
                    self.wait_for_clienthello = False
                    # swallow OpenConnection here by not re-yielding it.
                else:
                    yield command
        else:
            yield from super().event_to_child(event)

    def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
        yield commands.Log(f"Server TLS handshake failed. {err}", level=WARNING)
        yield from super().on_handshake_error(err)
class ClientTLSLayer(TLSLayer):
    """
    This layer establishes TLS on a single client connection.

    ┌─────┐
    │Start│
    └┬────┘
     ↓
    ┌────────────────────┐
    │Wait for ClientHello│
    └┬───────────────────┘
     ↓
    ┌────────────────┐
    │Process messages│
    └────────────────┘
    """

    # Buffers raw bytes until a complete ClientHello can be parsed from them.
    recv_buffer: bytearray
    # Whether a ServerTLSLayer sits directly above us in the layer stack.
    server_tls_available: bool
    client_hello_parsed: bool = False

    def __init__(self, context: context.Context):
        if context.client.tls:
            # In the case of TLS-over-TLS, we already have client TLS. As the outer TLS connection between client
            # and proxy isn't that interesting to us, we just unset the attributes here and keep the inner TLS
            # session's attributes.
            # Alternatively we could create a new Client instance,
            # but for now we keep it simple. There is a proof-of-concept at
            # https://github.com/mitmproxy/mitmproxy/commit/9b6e2a716888b7787514733b76a5936afa485352.
            context.client.alpn = None
            context.client.cipher = None
            context.client.sni = None
            context.client.timestamp_tls_setup = None
            context.client.tls_version = None
            context.client.certificate_list = []
            context.client.mitmcert = None
            context.client.alpn_offers = []
            context.client.cipher_list = []

        super().__init__(context, context.client)
        self.server_tls_available = isinstance(self.context.layers[-2], ServerTLSLayer)
        self.recv_buffer = bytearray()

    def start_handshake(self) -> layer.CommandGenerator[None]:
        # Nothing to send proactively: the client initiates with its ClientHello.
        yield from ()

    def receive_handshake_data(
        self, data: bytes
    ) -> layer.CommandGenerator[tuple[bool, str | None]]:
        """Buffer handshake bytes until the ClientHello is complete, run the
        tls_clienthello hook, then start the actual TLS handshake.

        Returns (done, error-message-or-None), like the base implementation.
        """
        if self.client_hello_parsed:
            # ClientHello has been handled already; continue the regular handshake.
            return (yield from super().receive_handshake_data(data))
        self.recv_buffer.extend(data)
        try:
            if self.is_dtls:
                client_hello = dtls_parse_client_hello(self.recv_buffer)
            else:
                client_hello = parse_client_hello(self.recv_buffer)
        except ValueError:
            return False, f"Cannot parse ClientHello: {self.recv_buffer.hex()}"

        if client_hello:
            self.client_hello_parsed = True
        else:
            # Incomplete ClientHello: wait for more data.
            return False, None

        # Make SNI/ALPN offers available to hooks and upstream layers.
        self.conn.sni = client_hello.sni
        self.conn.alpn_offers = client_hello.alpn_protocols

        tls_clienthello = ClientHelloData(self.context, client_hello)
        yield TlsClienthelloHook(tls_clienthello)

        if tls_clienthello.ignore_connection:
            # we've figured out that we don't want to intercept this connection, so we assign fake connection objects
            # to all TLS layers. This makes the real connection contents just go through.
            self.conn = self.tunnel_connection = connection.Client(
                peername=("ignore-conn", 0), sockname=("ignore-conn", 0)
            )
            parent_layer = self.context.layers[self.context.layers.index(self) - 1]
            if isinstance(parent_layer, ServerTLSLayer):
                parent_layer.conn = parent_layer.tunnel_connection = connection.Server(
                    address=None
                )
            if self.is_dtls:
                self.child_layer = udp.UDPLayer(self.context, ignore=True)
            else:
                self.child_layer = tcp.TCPLayer(self.context, ignore=True)
            # Replay the buffered ClientHello bytes into the passthrough layer.
            yield from self.event_to_child(
                events.DataReceived(self.context.client, bytes(self.recv_buffer))
            )
            self.recv_buffer.clear()
            return True, None

        if (
            tls_clienthello.establish_server_tls_first
            and not self.context.server.tls_established
        ):
            err = yield from self.start_server_tls()
            if err:
                yield commands.Log(
                    f"Unable to establish {self.proto_name} connection with server ({err}). "
                    f"Trying to establish {self.proto_name} with client anyway. "
                    f"If you plan to redirect requests away from this server, "
                    f"consider setting `connection_strategy` to `lazy` to suppress early connections."
                )

        yield from self.start_tls()
        if not self.conn.connected:
            return False, "connection closed early"

        # Feed the buffered ClientHello into the now-initialized TLS object.
        ret = yield from super().receive_handshake_data(bytes(self.recv_buffer))
        self.recv_buffer.clear()
        return ret

    def start_server_tls(self) -> layer.CommandGenerator[str | None]:
        """
        We often need information from the upstream connection to establish TLS with the client.
        For example, we need to check if the client does ALPN or not.
        """
        if not self.server_tls_available:
            return f"No server {self.proto_name} available."
        err = yield commands.OpenConnection(self.context.server)
        return err

    def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
        """Translate common handshake failures into actionable log messages."""
        if self.conn.sni:
            dest = self.conn.sni
        else:
            dest = human.format_address(self.context.server.address)
        level: int = WARNING
        if err.startswith("Cannot parse ClientHello"):
            pass
        elif (
            "('SSL routines', 'tls_early_post_process_client_hello', 'unsupported protocol')"
            in err
            or "('SSL routines', '', 'unsupported protocol')" in err  # OpenSSL 3+
        ):
            err = (
                f"Client and mitmproxy cannot agree on a TLS version to use. "
                f"You may need to adjust mitmproxy's tls_version_client_min option."
            )
        elif (
            "unknown ca" in err
            or "bad certificate" in err
            or "certificate unknown" in err
        ):
            err = (
                f"The client does not trust the proxy's certificate for {dest} ({err})"
            )
        elif err == "connection closed":
            err = (
                f"The client disconnected during the handshake. If this happens consistently for {dest}, "
                f"this may indicate that the client does not trust the proxy's certificate."
            )
            level = INFO
        elif err == "connection closed early":
            pass
        else:
            err = f"The client may not trust the proxy's certificate for {dest} ({err})"
        if err != "connection closed early":
            yield commands.Log(f"Client TLS handshake failed. {err}", level=level)
        yield from super().on_handshake_error(err)
        # From now on, drop all events for this broken connection.
        self.event_to_child = self.errored  # type: ignore

    def errored(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Terminal event sink after a failed handshake."""
        if self.debug is not None:
            yield commands.Log(
                f"{self.debug}[tls] Swallowing {event} as handshake failed.", DEBUG
            )
class MockTLSLayer(TLSLayer):
    """Mock layer to disable actual TLS and use cleartext in tests.

    Use like so:
        monkeypatch.setattr(tls, "ServerTLSLayer", tls.MockTLSLayer)
    """

    def __init__(self, ctx: context.Context):
        # A dummy server connection keeps the TLSLayer machinery satisfied
        # without performing any real TLS.
        super().__init__(ctx, connection.Server(address=None))
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/websocket.py | mitmproxy/proxy/layers/websocket.py | import time
from collections.abc import Iterator
from dataclasses import dataclass
import wsproto.extensions
import wsproto.frame_protocol
import wsproto.utilities
from wsproto import ConnectionState
from wsproto.frame_protocol import Opcode
from mitmproxy import connection
from mitmproxy import http
from mitmproxy import websocket
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.commands import StartHook
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import MessageInjected
from mitmproxy.proxy.utils import expect
@dataclass
class WebsocketStartHook(StartHook):
    """
    A WebSocket connection has commenced.
    """

    # The HTTP flow that carries the WebSocket session (flow.websocket is set).
    flow: http.HTTPFlow
@dataclass
class WebsocketMessageHook(StartHook):
    """
    Called when a WebSocket message is received from the client or
    server. The most recent message will be flow.messages[-1]. The
    message is user-modifiable. Currently there are two types of
    messages, corresponding to the BINARY and TEXT frame types.
    """

    flow: http.HTTPFlow
@dataclass
class WebsocketEndHook(StartHook):
    """
    A WebSocket connection has ended.
    You can check `flow.websocket.close_code` to determine why it ended.
    """

    flow: http.HTTPFlow
class WebSocketMessageInjected(MessageInjected[websocket.WebSocketMessage]):
    """
    The user has injected a custom WebSocket message.
    """
class WebsocketConnection(wsproto.Connection):
    """
    A very thin wrapper around wsproto.Connection:

    - we keep the underlying connection as an attribute for easy access.
    - we add a framebuffer for incomplete messages
    - we wrap .send() so that we can directly yield it.
    """

    conn: connection.Connection
    # Payloads of the frames of the message currently being received;
    # the last element accumulates the (possibly incomplete) current frame.
    frame_buf: list[bytes]

    def __init__(self, *args, conn: connection.Connection, **kwargs):
        super().__init__(*args, **kwargs)
        self.conn = conn
        self.frame_buf = [b""]

    def send2(self, event: wsproto.events.Event) -> commands.SendData:
        """Serialize a wsproto event into a SendData command for this connection."""
        data = self.send(event)
        return commands.SendData(self.conn, data)

    def __repr__(self):
        return f"WebsocketConnection<{self.state.name}, {self.conn}>"
class WebsocketLayer(layer.Layer):
    """
    WebSocket layer that intercepts and relays messages.
    """

    flow: http.HTTPFlow
    client_ws: WebsocketConnection
    server_ws: WebsocketConnection

    def __init__(self, context: Context, flow: http.HTTPFlow):
        super().__init__(context)
        self.flow = flow

    @expect(events.Start)
    def start(self, _) -> layer.CommandGenerator[None]:
        """Set up wsproto state machines for both sides and fire websocket_start."""
        client_extensions = []
        server_extensions = []

        # Parse extension headers. We only support deflate at the moment and ignore everything else.
        assert self.flow.response  # satisfy type checker
        ext_header = self.flow.response.headers.get("Sec-WebSocket-Extensions", "")
        if ext_header:
            for ext in wsproto.utilities.split_comma_header(
                ext_header.encode("ascii", "replace")
            ):
                ext_name = ext.split(";", 1)[0].strip()
                if ext_name == wsproto.extensions.PerMessageDeflate.name:
                    # Separate extension instances per side: each wsproto
                    # connection keeps its own compression state.
                    client_deflate = wsproto.extensions.PerMessageDeflate()
                    client_deflate.finalize(ext)
                    client_extensions.append(client_deflate)
                    server_deflate = wsproto.extensions.PerMessageDeflate()
                    server_deflate.finalize(ext)
                    server_extensions.append(server_deflate)
                else:
                    yield commands.Log(
                        f"Ignoring unknown WebSocket extension {ext_name!r}."
                    )

        # We act as the server towards the client and as the client towards the server.
        self.client_ws = WebsocketConnection(
            wsproto.ConnectionType.SERVER, client_extensions, conn=self.context.client
        )
        self.server_ws = WebsocketConnection(
            wsproto.ConnectionType.CLIENT, server_extensions, conn=self.context.server
        )

        yield WebsocketStartHook(self.flow)

        self._handle_event = self.relay_messages

    _handle_event = start

    @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)
    def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Feed incoming bytes/injections into the source-side wsproto machine
        and forward resulting messages, pings and closes to the other side."""
        assert self.flow.websocket  # satisfy type checker

        if isinstance(event, events.ConnectionEvent):
            from_client = event.connection == self.context.client
            injected = False
        elif isinstance(event, WebSocketMessageInjected):
            from_client = event.message.from_client
            injected = True
        else:
            raise AssertionError(f"Unexpected event: {event}")

        from_str = "client" if from_client else "server"
        if from_client:
            src_ws = self.client_ws
            dst_ws = self.server_ws
        else:
            src_ws = self.server_ws
            dst_ws = self.client_ws

        if isinstance(event, events.DataReceived):
            src_ws.receive_data(event.data)
        elif isinstance(event, events.ConnectionClosed):
            src_ws.receive_data(None)
        elif isinstance(event, WebSocketMessageInjected):
            # Synthesize wsproto events for the injected payload, as if the
            # source side had sent them itself.
            fragmentizer = Fragmentizer([], event.message.type == Opcode.TEXT)
            src_ws._events.extend(fragmentizer(event.message.content))
        else:  # pragma: no cover
            raise AssertionError(f"Unexpected event: {event}")

        for ws_event in src_ws.events():
            if isinstance(ws_event, wsproto.events.Message):
                is_text = isinstance(ws_event.data, str)
                # Accumulate the frame payload; text is re-encoded so the
                # buffer is uniformly bytes.
                if is_text:
                    typ = Opcode.TEXT
                    src_ws.frame_buf[-1] += ws_event.data.encode()
                else:
                    typ = Opcode.BINARY
                    src_ws.frame_buf[-1] += ws_event.data

                if ws_event.message_finished:
                    content = b"".join(src_ws.frame_buf)

                    # Remember the original frame sizes so unmodified messages
                    # keep their original fragmentation when re-sent.
                    fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)
                    src_ws.frame_buf = [b""]

                    message = websocket.WebSocketMessage(
                        typ, from_client, content, injected=injected
                    )
                    self.flow.websocket.messages.append(message)
                    yield WebsocketMessageHook(self.flow)

                    if not message.dropped:
                        for msg in fragmentizer(message.content):
                            yield dst_ws.send2(msg)

                elif ws_event.frame_finished:
                    # Frame boundary within a message: start a new buffer slot.
                    src_ws.frame_buf.append(b"")

            elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):
                yield commands.Log(
                    f"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} "
                    f"(payload: {bytes(ws_event.payload)!r})"
                )
                yield dst_ws.send2(ws_event)
            elif isinstance(ws_event, wsproto.events.CloseConnection):
                self.flow.websocket.timestamp_end = time.time()
                self.flow.websocket.closed_by_client = from_client
                self.flow.websocket.close_code = ws_event.code
                self.flow.websocket.close_reason = ws_event.reason

                for ws in [self.server_ws, self.client_ws]:
                    if ws.state in {
                        ConnectionState.OPEN,
                        ConnectionState.REMOTE_CLOSING,
                    }:
                        # response == original event, so no need to differentiate here.
                        yield ws.send2(ws_event)
                    yield commands.CloseConnection(ws.conn)
                yield WebsocketEndHook(self.flow)
                self.flow.live = False
                self._handle_event = self.done
            else:  # pragma: no cover
                raise AssertionError(f"Unexpected WebSocket event: {ws_event}")

    @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)
    def done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state: the WebSocket session has ended, drop everything.
        yield from ()
class Fragmentizer:
    """
    Theory (RFC 6455):
        Unless specified otherwise by an extension, frames have no semantic
        meaning. An intermediary might coalesce and/or split frames, [...]
    Practice:
        Some WebSocket servers reject large payload sizes.
        Other WebSocket servers reject CONTINUATION frames.

    As a workaround we either replay the original frame boundaries or, when the
    payload no longer matches them (i.e. it was modified), emit ~4kB chunks.
    Addons that must talk to servers without CONTINUATION support need to
    monkeypatch FRAGMENT_SIZE if they want to modify messages.
    """

    # A bit less than 4kB, leaving room for frame headers.
    FRAGMENT_SIZE = 4000

    def __init__(self, fragments: list[bytes], is_text: bool):
        self.fragment_lengths = [len(frag) for frag in fragments]
        self.is_text = is_text

    def msg(self, data: bytes, message_finished: bool):
        """Wrap a chunk into the wsproto message event matching the frame type."""
        if not self.is_text:
            return wsproto.events.BytesMessage(data, message_finished=message_finished)
        return wsproto.events.TextMessage(
            data.decode(errors="replace"), message_finished=message_finished
        )

    def __call__(self, content: bytes) -> Iterator[wsproto.events.Message]:
        if sum(self.fragment_lengths) == len(content):
            # Unchanged total length: reuse the original frame sizes.
            pos = 0
            for size in self.fragment_lengths[:-1]:
                yield self.msg(content[pos : pos + size], False)
                pos += size
            yield self.msg(content[pos:], True)
        else:
            # Payload was modified: re-chunk into FRAGMENT_SIZE'd frames.
            pos = 0
            while len(content) - pos > self.FRAGMENT_SIZE:
                yield self.msg(content[pos : pos + self.FRAGMENT_SIZE], False)
                pos += self.FRAGMENT_SIZE
            yield self.msg(content[pos:], True)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/tcp.py | mitmproxy/proxy/layers/tcp.py | from dataclasses import dataclass
from mitmproxy import flow
from mitmproxy import tcp
from mitmproxy.connection import Connection
from mitmproxy.connection import ConnectionState
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.commands import StartHook
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import MessageInjected
from mitmproxy.proxy.utils import expect
@dataclass
class TcpStartHook(StartHook):
    """
    A TCP connection has started.
    """

    flow: tcp.TCPFlow
@dataclass
class TcpMessageHook(StartHook):
    """
    A TCP connection has received a message. The most recent message
    will be flow.messages[-1]. The message is user-modifiable.
    """

    flow: tcp.TCPFlow
@dataclass
class TcpEndHook(StartHook):
    """
    A TCP connection has ended.
    """

    flow: tcp.TCPFlow
@dataclass
class TcpErrorHook(StartHook):
    """
    A TCP error has occurred.

    Every TCP flow will receive either a tcp_error or a tcp_end event, but not both.
    """

    flow: tcp.TCPFlow
class TcpMessageInjected(MessageInjected[tcp.TCPMessage]):
    """
    The user has injected a custom TCP message.
    """
class TCPLayer(layer.Layer):
    """
    Simple TCP layer that just relays messages right now.
    """

    # None when the connection is ignored (passthrough without hooks).
    flow: tcp.TCPFlow | None

    def __init__(self, context: Context, ignore: bool = False):
        super().__init__(context)
        if ignore:
            self.flow = None
        else:
            self.flow = tcp.TCPFlow(self.context.client, self.context.server, True)

    @expect(events.Start)
    def start(self, _) -> layer.CommandGenerator[None]:
        """Fire tcp_start and open the upstream connection if not yet connected."""
        if self.flow:
            yield TcpStartHook(self.flow)

        if self.context.server.timestamp_start is None:
            err = yield commands.OpenConnection(self.context.server)
            if err:
                if self.flow:
                    self.flow.error = flow.Error(str(err))
                    yield TcpErrorHook(self.flow)
                yield commands.CloseConnection(self.context.client)
                self._handle_event = self.done
                return
        self._handle_event = self.relay_messages

    _handle_event = start

    @expect(events.DataReceived, events.ConnectionClosed, TcpMessageInjected)
    def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Forward data between client and server, firing tcp_message hooks."""
        if isinstance(event, TcpMessageInjected):
            # we just spoof that we received data here and then process that regularly.
            event = events.DataReceived(
                self.context.client
                if event.message.from_client
                else self.context.server,
                event.message.content,
            )

        assert isinstance(event, events.ConnectionEvent)

        from_client = event.connection == self.context.client
        send_to: Connection
        if from_client:
            send_to = self.context.server
        else:
            send_to = self.context.client

        if isinstance(event, events.DataReceived):
            if self.flow:
                tcp_message = tcp.TCPMessage(from_client, event.data)
                self.flow.messages.append(tcp_message)
                yield TcpMessageHook(self.flow)
                # Send the (possibly hook-modified) message content.
                yield commands.SendData(send_to, tcp_message.content)
            else:
                yield commands.SendData(send_to, event.data)

        elif isinstance(event, events.ConnectionClosed):
            # We're done once neither side can produce any more data.
            all_done = not (
                (self.context.client.state & ConnectionState.CAN_READ)
                or (self.context.server.state & ConnectionState.CAN_READ)
            )
            if all_done:
                self._handle_event = self.done
                if self.context.server.state is not ConnectionState.CLOSED:
                    yield commands.CloseConnection(self.context.server)
                if self.context.client.state is not ConnectionState.CLOSED:
                    yield commands.CloseConnection(self.context.client)
                if self.flow:
                    yield TcpEndHook(self.flow)
                    self.flow.live = False
            else:
                # Propagate EOF to the peer while keeping the reverse
                # direction open (TCP half-close).
                yield commands.CloseTcpConnection(send_to, half_close=True)
        else:
            raise AssertionError(f"Unexpected event: {event}")

    @expect(events.DataReceived, events.ConnectionClosed, TcpMessageInjected)
    def done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state: swallow any leftover events.
        yield from ()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/dns.py | mitmproxy/proxy/layers/dns.py | import struct
import time
from dataclasses import dataclass
from typing import List
from typing import Literal
from mitmproxy import dns
from mitmproxy import flow as mflow
from mitmproxy.net.dns import response_codes
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.utils import expect
# Big-endian 16-bit length prefix used to frame DNS messages over TCP.
_LENGTH_LABEL = struct.Struct("!H")
@dataclass
class DnsRequestHook(commands.StartHook):
    """
    A DNS query has been received.
    """

    flow: dns.DNSFlow
@dataclass
class DnsResponseHook(commands.StartHook):
    """
    A DNS response has been received or set.
    """

    flow: dns.DNSFlow
@dataclass
class DnsErrorHook(commands.StartHook):
    """
    A DNS error has occurred.
    """

    flow: dns.DNSFlow
def pack_message(
    message: dns.DNSMessage, transport_protocol: Literal["tcp", "udp"]
) -> bytes:
    """
    Serialize a DNS message for the given transport.

    Over TCP, DNS messages are prefixed with a big-endian 16-bit length field;
    over UDP the raw packet is sent as-is.
    """
    packed = message.packed
    if transport_protocol == "tcp":
        # Use the module's pre-compiled _LENGTH_LABEL Struct, for consistency
        # with the framing parser in DNSLayer.unpack_message.
        return _LENGTH_LABEL.pack(len(packed)) + packed
    else:
        return packed
class DNSLayer(layer.Layer):
    """
    Layer that handles resolving DNS queries.
    """

    # In-flight flows keyed by DNS message ID, so responses match their queries.
    flows: dict[int, dns.DNSFlow]
    # Reassembly buffers for the TCP length-prefixed framing (request/response side).
    req_buf: bytearray
    resp_buf: bytearray

    def __init__(self, context: Context):
        super().__init__(context)
        self.flows = {}
        self.req_buf = bytearray()
        self.resp_buf = bytearray()

    def handle_request(
        self, flow: dns.DNSFlow, msg: dns.DNSMessage
    ) -> layer.CommandGenerator[None]:
        """Fire dns_request; then either use a hook-provided response/error,
        or forward the query upstream."""
        flow.request = msg  # if already set, continue and query upstream again
        yield DnsRequestHook(flow)
        if flow.response:
            # A hook short-circuited the query with a response.
            yield from self.handle_response(flow, flow.response)
        elif flow.error:
            yield from self.handle_error(flow, flow.error.msg)
        elif not self.context.server.address:
            yield from self.handle_error(
                flow, "No hook has set a response and there is no upstream server."
            )
        else:
            if not self.context.server.connected:
                err = yield commands.OpenConnection(self.context.server)
                if err:
                    yield from self.handle_error(flow, str(err))
                    # cannot recover from this
                    return
            packed = pack_message(flow.request, flow.server_conn.transport_protocol)
            yield commands.SendData(self.context.server, packed)

    def handle_response(
        self, flow: dns.DNSFlow, msg: dns.DNSMessage
    ) -> layer.CommandGenerator[None]:
        """Fire dns_response and relay the (possibly modified) answer to the client."""
        flow.response = msg
        yield DnsResponseHook(flow)
        if flow.response:
            packed = pack_message(flow.response, flow.client_conn.transport_protocol)
            yield commands.SendData(self.context.client, packed)

    def handle_error(self, flow: dns.DNSFlow, err: str) -> layer.CommandGenerator[None]:
        """Record the error, fire dns_error, and answer the client with SERVFAIL."""
        flow.error = mflow.Error(err)
        yield DnsErrorHook(flow)
        servfail = flow.request.fail(response_codes.SERVFAIL)
        yield commands.SendData(
            self.context.client,
            pack_message(servfail, flow.client_conn.transport_protocol),
        )

    def unpack_message(self, data: bytes, from_client: bool) -> List[dns.DNSMessage]:
        """Parse zero or more DNS messages from `data`.

        UDP datagrams each carry exactly one message; over TCP, messages are
        length-prefixed and may be split/coalesced, so we buffer partial data.
        NOTE(review): framing is chosen by the *client's* transport protocol
        for both directions — assumes client and server transports match.
        """
        msgs: List[dns.DNSMessage] = []
        buf = self.req_buf if from_client else self.resp_buf
        if self.context.client.transport_protocol == "udp":
            msgs.append(dns.DNSMessage.unpack(data, timestamp=time.time()))
        elif self.context.client.transport_protocol == "tcp":
            buf.extend(data)
            size = len(buf)
            offset = 0
            while True:
                if size - offset < _LENGTH_LABEL.size:
                    break  # incomplete length prefix, wait for more data
                (expected_size,) = _LENGTH_LABEL.unpack_from(buf, offset)
                offset += _LENGTH_LABEL.size
                if expected_size == 0:
                    raise struct.error("Message length field cannot be zero")
                if size - offset < expected_size:
                    # Incomplete message body: rewind past the prefix and wait.
                    offset -= _LENGTH_LABEL.size
                    break
                data = bytes(buf[offset : expected_size + offset])
                offset += expected_size
                msgs.append(dns.DNSMessage.unpack(data, timestamp=time.time()))
            # Drop the consumed bytes, keeping any partial trailing message.
            del buf[:offset]
        return msgs

    @expect(events.Start)
    def state_start(self, _) -> layer.CommandGenerator[None]:
        self._handle_event = self.state_query
        yield from ()

    @expect(events.DataReceived, events.ConnectionClosed)
    def state_query(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Main state: parse incoming messages and dispatch by direction."""
        assert isinstance(event, events.ConnectionEvent)
        from_client = event.connection is self.context.client
        if isinstance(event, events.DataReceived):
            msgs: List[dns.DNSMessage] = []
            try:
                msgs = self.unpack_message(event.data, from_client)
            except struct.error as e:
                yield commands.Log(f"{event.connection} sent an invalid message: {e}")
                yield commands.CloseConnection(event.connection)
                self._handle_event = self.state_done
            else:
                for msg in msgs:
                    try:
                        flow = self.flows[msg.id]
                    except KeyError:
                        flow = dns.DNSFlow(
                            self.context.client, self.context.server, live=True
                        )
                        self.flows[msg.id] = flow
                    if from_client:
                        yield from self.handle_request(flow, msg)
                    else:
                        yield from self.handle_response(flow, msg)

        elif isinstance(event, events.ConnectionClosed):
            other_conn = self.context.server if from_client else self.context.client
            if other_conn.connected:
                yield commands.CloseConnection(other_conn)
            self._handle_event = self.state_done
            for flow in self.flows.values():
                flow.live = False

        else:
            raise AssertionError(f"Unexpected event: {event}")

    @expect(events.DataReceived, events.ConnectionClosed)
    def state_done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state: ignore all further events.
        yield from ()

    _handle_event = state_start
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/udp.py | mitmproxy/proxy/layers/udp.py | from dataclasses import dataclass
from mitmproxy import flow
from mitmproxy import udp
from mitmproxy.connection import Connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.commands import StartHook
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.events import MessageInjected
from mitmproxy.proxy.utils import expect
@dataclass
class UdpStartHook(StartHook):
    """
    A UDP connection has started.
    """

    flow: udp.UDPFlow
@dataclass
class UdpMessageHook(StartHook):
    """
    A UDP connection has received a message. The most recent message
    will be flow.messages[-1]. The message is user-modifiable.
    """

    flow: udp.UDPFlow
@dataclass
class UdpEndHook(StartHook):
    """
    A UDP connection has ended.
    """

    flow: udp.UDPFlow
@dataclass
class UdpErrorHook(StartHook):
    """
    A UDP error has occurred.

    Every UDP flow will receive either a udp_error or a udp_end event, but not both.
    """

    flow: udp.UDPFlow
class UdpMessageInjected(MessageInjected[udp.UDPMessage]):
    """
    The user has injected a custom UDP message.
    """
class UDPLayer(layer.Layer):
    """
    Simple UDP layer that just relays messages right now.
    """

    # None when the connection is ignored (passthrough without hooks).
    flow: udp.UDPFlow | None

    def __init__(self, context: Context, ignore: bool = False):
        super().__init__(context)
        if ignore:
            self.flow = None
        else:
            self.flow = udp.UDPFlow(self.context.client, self.context.server, True)

    @expect(events.Start)
    def start(self, _) -> layer.CommandGenerator[None]:
        """Fire udp_start and open the upstream connection if not yet connected."""
        if self.flow:
            yield UdpStartHook(self.flow)

        if self.context.server.timestamp_start is None:
            err = yield commands.OpenConnection(self.context.server)
            if err:
                if self.flow:
                    self.flow.error = flow.Error(str(err))
                    yield UdpErrorHook(self.flow)
                yield commands.CloseConnection(self.context.client)
                self._handle_event = self.done
                return
        self._handle_event = self.relay_messages

    _handle_event = start

    @expect(events.DataReceived, events.ConnectionClosed, UdpMessageInjected)
    def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Forward datagrams between client and server, firing udp_message hooks."""
        if isinstance(event, UdpMessageInjected):
            # we just spoof that we received data here and then process that regularly.
            event = events.DataReceived(
                self.context.client
                if event.message.from_client
                else self.context.server,
                event.message.content,
            )

        assert isinstance(event, events.ConnectionEvent)

        from_client = event.connection == self.context.client
        send_to: Connection
        if from_client:
            send_to = self.context.server
        else:
            send_to = self.context.client

        if isinstance(event, events.DataReceived):
            if self.flow:
                udp_message = udp.UDPMessage(from_client, event.data)
                self.flow.messages.append(udp_message)
                yield UdpMessageHook(self.flow)
                # Send the (possibly hook-modified) message content.
                yield commands.SendData(send_to, udp_message.content)
            else:
                yield commands.SendData(send_to, event.data)

        elif isinstance(event, events.ConnectionClosed):
            # UDP has no half-close: one side closing ends the whole flow.
            self._handle_event = self.done
            yield commands.CloseConnection(send_to)
            if self.flow:
                yield UdpEndHook(self.flow)
                self.flow.live = False

        else:
            raise AssertionError(f"Unexpected event: {event}")

    @expect(events.DataReceived, events.ConnectionClosed, UdpMessageInjected)
    def done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state: swallow any leftover events.
        yield from ()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/__init__.py | mitmproxy/proxy/layers/__init__.py | from . import modes
from .dns import DNSLayer
from .http import HttpLayer
from .quic import ClientQuicLayer
from .quic import QuicStreamLayer
from .quic import RawQuicLayer
from .quic import ServerQuicLayer
from .tcp import TCPLayer
from .tls import ClientTLSLayer
from .tls import ServerTLSLayer
from .udp import UDPLayer
from .websocket import WebsocketLayer
# Names re-exported as the public interface of mitmproxy.proxy.layers.
__all__ = [
    "modes",
    "DNSLayer",
    "HttpLayer",
    "QuicStreamLayer",
    "RawQuicLayer",
    "TCPLayer",
    "UDPLayer",
    "ClientQuicLayer",
    "ClientTLSLayer",
    "ServerQuicLayer",
    "ServerTLSLayer",
    "WebsocketLayer",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/modes.py | mitmproxy/proxy/layers/modes.py | from __future__ import annotations
import socket
import struct
import sys
from abc import ABCMeta
from collections.abc import Callable
from dataclasses import dataclass
from mitmproxy import connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.commands import StartHook
from mitmproxy.proxy.mode_specs import ReverseMode
from mitmproxy.proxy.utils import expect
if sys.version_info < (3, 11):
from typing_extensions import assert_never
else:
from typing import assert_never
class HttpProxy(layer.Layer):
    """Entry layer for HTTP proxy mode: immediately delegates to NextLayer."""

    @expect(events.Start)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        child_layer = layer.NextLayer(self.context)
        # All subsequent events bypass this method and go straight to the child.
        self._handle_event = child_layer.handle_event
        yield from child_layer.handle_event(event)
class HttpUpstreamProxy(layer.Layer):
    """Entry layer for upstream HTTP proxy mode: immediately delegates to NextLayer."""

    @expect(events.Start)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        child_layer = layer.NextLayer(self.context)
        # All subsequent events bypass this method and go straight to the child.
        self._handle_event = child_layer.handle_event
        yield from child_layer.handle_event(event)
class DestinationKnown(layer.Layer, metaclass=ABCMeta):
    """Base layer for layers that gather connection destination info and then delegate."""

    child_layer: layer.Layer

    def finish_start(self) -> layer.CommandGenerator[str | None]:
        """Optionally open the server connection eagerly, then hand control to
        the child layer.

        Returns an error string if the eager connection attempt failed, else None.
        """
        if (
            self.context.options.connection_strategy == "eager"
            and self.context.server.address
            and self.context.server.transport_protocol == "tcp"
        ):
            err = yield commands.OpenConnection(self.context.server)
            if err:
                self._handle_event = self.done  # type: ignore
                return err

        self._handle_event = self.child_layer.handle_event  # type: ignore
        yield from self.child_layer.handle_event(events.Start())
        return None

    @expect(events.DataReceived, events.ConnectionClosed)
    def done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state: silently drop all further events.
        yield from ()
class ReverseProxy(DestinationKnown):
    """Reverse proxy mode: the destination comes from the configured mode spec."""

    @expect(events.Start)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        spec = self.context.client.proxy_mode
        assert isinstance(spec, ReverseMode)
        self.context.server.address = spec.address
        self.child_layer = layer.NextLayer(self.context)

        # For secure protocols, set SNI if keep_host_header is false
        match spec.scheme:
            case "http3" | "quic" | "https" | "tls" | "dtls":
                if not self.context.options.keep_host_header:
                    self.context.server.sni = spec.address[0]
            case "tcp" | "http" | "udp" | "dns":
                pass
            case _:  # pragma: no cover
                assert_never(spec.scheme)

        err = yield from self.finish_start()
        if err:
            yield commands.CloseConnection(self.context.client)
class TransparentProxy(DestinationKnown):
    """Transparent proxy mode: the server address must already be set on the context."""

    @expect(events.Start)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        assert self.context.server.address, "No server address set."
        self.child_layer = layer.NextLayer(self.context)
        failure = yield from self.finish_start()
        if failure:
            # Eager connect failed; close the client side as well.
            yield commands.CloseConnection(self.context.client)
# SOCKS5 wire-protocol constants (RFC 1928, RFC 1929).
SOCKS5_VERSION = 0x05
# Authentication methods offered/selected during the greeting.
SOCKS5_METHOD_NO_AUTHENTICATION_REQUIRED = 0x00
SOCKS5_METHOD_USER_PASSWORD_AUTHENTICATION = 0x02
SOCKS5_METHOD_NO_ACCEPTABLE_METHODS = 0xFF
# Address types (ATYP) in the CONNECT request.
SOCKS5_ATYP_IPV4_ADDRESS = 0x01
SOCKS5_ATYP_DOMAINNAME = 0x03
SOCKS5_ATYP_IPV6_ADDRESS = 0x04
# Reply codes sent back to the client on failure.
SOCKS5_REP_HOST_UNREACHABLE = 0x04
SOCKS5_REP_COMMAND_NOT_SUPPORTED = 0x07
SOCKS5_REP_ADDRESS_TYPE_NOT_SUPPORTED = 0x08
@dataclass
class Socks5AuthData:
    """Username/password credentials received in the SOCKS5 subnegotiation."""
    # Connection the credentials arrived on.
    client_conn: connection.Client
    username: str
    password: str
    # Addons set this to True (via Socks5AuthHook) to accept the credentials.
    valid: bool = False
@dataclass
class Socks5AuthHook(StartHook):
    """
    Mitmproxy has received username/password SOCKS5 credentials.
    This hook decides whether they are valid by setting `data.valid`.
    """
    # Credentials plus the mutable `valid` flag an addon is expected to set.
    data: Socks5AuthData
class Socks5Proxy(DestinationKnown):
    """SOCKS5 proxy mode: parse greeting/auth/connect messages, then delegate."""
    # Accumulates raw client bytes until each handshake message is complete.
    buf: bytes = b""
    def socks_err(
        self,
        message: str,
        reply_code: int | None = None,
    ) -> layer.CommandGenerator[None]:
        # Optionally send a SOCKS5 error reply, then close, log, and go inert.
        if reply_code is not None:
            yield commands.SendData(
                self.context.client,
                bytes([SOCKS5_VERSION, reply_code])
                + b"\x00\x01\x00\x00\x00\x00\x00\x00",
            )
        yield commands.CloseConnection(self.context.client)
        yield commands.Log(message)
        self._handle_event = self.done
    @expect(events.Start, events.DataReceived, events.ConnectionClosed)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        if isinstance(event, events.Start):
            pass
        elif isinstance(event, events.DataReceived):
            # Buffer incoming bytes and advance the handshake state machine.
            self.buf += event.data
            yield from self.state()
        elif isinstance(event, events.ConnectionClosed):
            if self.buf:
                yield commands.Log(
                    f"Client closed connection before completing SOCKS5 handshake: {self.buf!r}"
                )
            yield commands.CloseConnection(event.connection)
        else:
            raise AssertionError(f"Unknown event: {event}")
    def state_greet(self) -> layer.CommandGenerator[None]:
        # Greeting message: VER, NMETHODS, METHODS... (RFC 1928 §3).
        if len(self.buf) < 2:
            return
        if self.buf[0] != SOCKS5_VERSION:
            # Three uppercase ASCII bytes look like an HTTP method (GET, PUT, ...).
            if self.buf[:3].isupper():
                guess = "Probably not a SOCKS request but a regular HTTP request. "
            else:
                guess = ""
            yield from self.socks_err(
                guess + "Invalid SOCKS version. Expected 0x05, got 0x%x" % self.buf[0]
            )
            return
        n_methods = self.buf[1]
        if len(self.buf) < 2 + n_methods:
            return
        # Pick the auth method depending on whether proxyauth is configured.
        if "proxyauth" in self.context.options and self.context.options.proxyauth:
            method = SOCKS5_METHOD_USER_PASSWORD_AUTHENTICATION
            self.state = self.state_auth
        else:
            method = SOCKS5_METHOD_NO_AUTHENTICATION_REQUIRED
            self.state = self.state_connect
        if method not in self.buf[2 : 2 + n_methods]:
            method_str = (
                "user/password"
                if method == SOCKS5_METHOD_USER_PASSWORD_AUTHENTICATION
                else "no"
            )
            yield from self.socks_err(
                f"Client does not support SOCKS5 with {method_str} authentication.",
                SOCKS5_METHOD_NO_ACCEPTABLE_METHODS,
            )
            return
        yield commands.SendData(self.context.client, bytes([SOCKS5_VERSION, method]))
        self.buf = self.buf[2 + n_methods :]
        # More data may already be buffered; process it right away.
        yield from self.state()
    # Current handshake state; the handshake starts with the greeting.
    state: Callable[..., layer.CommandGenerator[None]] = state_greet
    def state_auth(self) -> layer.CommandGenerator[None]:
        # Username/password subnegotiation (RFC 1929): VER, ULEN, UNAME, PLEN, PASSWD.
        if len(self.buf) < 3:
            return
        # Parsing username and password, which is somewhat atrocious
        user_len = self.buf[1]
        if len(self.buf) < 3 + user_len:
            return
        pass_len = self.buf[2 + user_len]
        if len(self.buf) < 3 + user_len + pass_len:
            return
        user = self.buf[2 : (2 + user_len)].decode("utf-8", "backslashreplace")
        password = self.buf[(3 + user_len) : (3 + user_len + pass_len)].decode(
            "utf-8", "backslashreplace"
        )
        # Let addons decide whether the credentials are valid.
        data = Socks5AuthData(self.context.client, user, password)
        yield Socks5AuthHook(data)
        if not data.valid:
            # The VER field contains the current **version of the subnegotiation**, which is X'01'.
            yield commands.SendData(self.context.client, b"\x01\x01")
            yield from self.socks_err("authentication failed")
            return
        yield commands.SendData(self.context.client, b"\x01\x00")
        self.buf = self.buf[3 + user_len + pass_len :]
        self.state = self.state_connect
        yield from self.state()
    def state_connect(self) -> layer.CommandGenerator[None]:
        # Parse Connect Request
        if len(self.buf) < 5:
            return
        # Only VER=5, CMD=CONNECT, RSV=0 is supported.
        if self.buf[:3] != b"\x05\x01\x00":
            yield from self.socks_err(
                f"Unsupported SOCKS5 request: {self.buf!r}",
                SOCKS5_REP_COMMAND_NOT_SUPPORTED,
            )
            return
        # Determine message length
        atyp = self.buf[3]
        message_len: int
        if atyp == SOCKS5_ATYP_IPV4_ADDRESS:
            message_len = 4 + 4 + 2
        elif atyp == SOCKS5_ATYP_IPV6_ADDRESS:
            message_len = 4 + 16 + 2
        elif atyp == SOCKS5_ATYP_DOMAINNAME:
            # Domain names carry a one-byte length prefix.
            message_len = 4 + 1 + self.buf[4] + 2
        else:
            yield from self.socks_err(
                f"Unknown address type: {atyp}", SOCKS5_REP_ADDRESS_TYPE_NOT_SUPPORTED
            )
            return
        # Do we have enough bytes yet?
        if len(self.buf) < message_len:
            return
        # Parse host and port
        msg, self.buf = self.buf[:message_len], self.buf[message_len:]
        host: str
        if atyp == SOCKS5_ATYP_IPV4_ADDRESS:
            host = socket.inet_ntop(socket.AF_INET, msg[4:-2])
        elif atyp == SOCKS5_ATYP_IPV6_ADDRESS:
            host = socket.inet_ntop(socket.AF_INET6, msg[4:-2])
        else:
            host_bytes = msg[5:-2]
            host = host_bytes.decode("ascii", "replace")
        (port,) = struct.unpack("!H", msg[-2:])
        # We now have all we need, let's get going.
        self.context.server.address = (host, port)
        self.child_layer = layer.NextLayer(self.context)
        # this already triggers the child layer's Start event,
        # but that's not a problem in practice...
        err = yield from self.finish_start()
        if err:
            # REP=0x04 (host unreachable) with a zeroed BND address.
            yield commands.SendData(
                self.context.client, b"\x05\x04\x00\x01\x00\x00\x00\x00\x00\x00"
            )
            yield commands.CloseConnection(self.context.client)
        else:
            # REP=0x00 (succeeded) with a zeroed BND address.
            yield commands.SendData(
                self.context.client, b"\x05\x00\x00\x01\x00\x00\x00\x00\x00\x00"
            )
            if self.buf:
                # Forward any bytes the client pipelined after the request.
                yield from self.child_layer.handle_event(
                    events.DataReceived(self.context.client, self.buf)
                )
            del self.buf
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/_hooks.py | mitmproxy/proxy/layers/quic/_hooks.py | from __future__ import annotations
from dataclasses import dataclass
from dataclasses import field
from ssl import VerifyMode
from aioquic.tls import CipherSuite
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric import rsa
from mitmproxy.proxy import commands
from mitmproxy.tls import TlsData
@dataclass
class QuicTlsSettings:
    """
    Settings necessary to establish QUIC's TLS context.
    """
    alpn_protocols: list[str] | None = None
    """A list of supported ALPN protocols."""
    certificate: x509.Certificate | None = None
    """The certificate to use for the connection."""
    certificate_chain: list[x509.Certificate] = field(default_factory=list)
    """A list of additional certificates to send to the peer."""
    certificate_private_key: (
        dsa.DSAPrivateKey | ec.EllipticCurvePrivateKey | rsa.RSAPrivateKey | None
    ) = None
    """The certificate's private key."""
    cipher_suites: list[CipherSuite] | None = None
    """An optional list of allowed/advertised cipher suites."""
    # The following three fields control peer-certificate verification.
    ca_path: str | None = None
    """An optional path to a directory that contains the necessary information to verify the peer certificate."""
    ca_file: str | None = None
    """An optional path to a PEM file that will be used to verify the peer certificate."""
    verify_mode: VerifyMode | None = None
    """An optional flag that specifies how/if the peer's certificate should be validated."""
@dataclass
class QuicTlsData(TlsData):
    """
    Event data for `quic_start_client` and `quic_start_server` event hooks.
    """
    # Defaults to None; addons are expected to populate it in the hook.
    settings: QuicTlsSettings | None = None
    """
    The associated `QuicTlsSettings` object.
    This will be set by an addon in the `quic_start_*` event hooks.
    """
@dataclass
class QuicStartClientHook(commands.StartHook):
    """
    TLS negotiation between mitmproxy and a client over QUIC is about to start.
    An addon is expected to initialize data.settings.
    (by default, this is done by `mitmproxy.addons.tlsconfig`)
    """
    # Carries the connection/context plus the settings slot addons fill in.
    data: QuicTlsData
@dataclass
class QuicStartServerHook(commands.StartHook):
    """
    TLS negotiation between mitmproxy and a server over QUIC is about to start.
    An addon is expected to initialize data.settings.
    (by default, this is done by `mitmproxy.addons.tlsconfig`)
    """
    # Carries the connection/context plus the settings slot addons fill in.
    data: QuicTlsData
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/_events.py | mitmproxy/proxy/layers/quic/_events.py | from __future__ import annotations
from dataclasses import dataclass
from mitmproxy import connection
from mitmproxy.proxy import events
@dataclass
class QuicStreamEvent(events.ConnectionEvent):
    """Base class for all QUIC stream events."""
    # Inherits `connection` from ConnectionEvent.
    stream_id: int
    """The ID of the stream the event was fired for."""
@dataclass
class QuicStreamDataReceived(QuicStreamEvent):
    """Event that is fired whenever data is received on a stream."""

    data: bytes
    """The data which was received."""
    end_stream: bool
    """Whether the STREAM frame had the FIN bit set."""

    def __repr__(self):
        # e.g. "QuicStreamDataReceived(client on 0, [end_stream] b'...')"
        conn_kind = repr(self.connection).partition("(")[0].lower()
        fin_marker = "[end_stream] " if self.end_stream else ""
        return (
            f"QuicStreamDataReceived({conn_kind} on {self.stream_id}, "
            f"{fin_marker}{self.data!r})"
        )
@dataclass
class QuicStreamReset(QuicStreamEvent):
    """Event that is fired when the remote peer resets a stream."""
    # Application-level error code from the RESET_STREAM frame.
    error_code: int
    """The error code that triggered the reset."""
@dataclass
class QuicStreamStopSending(QuicStreamEvent):
    """Event that is fired when the remote peer sends a STOP_SENDING frame."""
    # Application-level error code from the STOP_SENDING frame.
    error_code: int
    """The application protocol error code."""
class QuicConnectionClosed(events.ConnectionClosed):
    """QUIC connection has been closed."""

    error_code: int
    "Error code given when the connection was closed."
    frame_type: int | None
    "Frame type that caused the close, or `None`."
    reason_phrase: str
    "Human-readable reason the connection was closed."

    def __init__(
        self,
        conn: connection.Connection,
        error_code: int,
        frame_type: int | None,
        reason_phrase: str,
    ) -> None:
        """Record the CONNECTION_CLOSE details alongside the closed connection."""
        super().__init__(conn)
        self.reason_phrase = reason_phrase
        self.frame_type = frame_type
        self.error_code = error_code
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/_stream_layers.py | mitmproxy/proxy/layers/quic/_stream_layers.py | """
This module contains the client and server proxy layers for QUIC streams
which decrypt and encrypt traffic. Decrypted stream data is then forwarded
to either the raw layers, or the HTTP/3 client in ../http/_http3.py.
"""
from __future__ import annotations
import time
from collections.abc import Callable
from logging import DEBUG
from logging import ERROR
from logging import WARNING
from aioquic.buffer import Buffer as QuicBuffer
from aioquic.h3.connection import ErrorCode as H3ErrorCode
from aioquic.quic import events as quic_events
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import QuicConnection
from aioquic.quic.connection import QuicConnectionState
from aioquic.quic.connection import QuicErrorCode
from aioquic.quic.packet import encode_quic_version_negotiation
from aioquic.quic.packet import PACKET_TYPE_INITIAL
from aioquic.quic.packet import pull_quic_header
from cryptography import x509
from ._client_hello_parser import quic_parse_client_hello_from_datagrams
from ._commands import CloseQuicConnection
from ._commands import QuicStreamCommand
from ._commands import ResetQuicStream
from ._commands import SendQuicStreamData
from ._commands import StopSendingQuicStream
from ._events import QuicConnectionClosed
from ._events import QuicStreamDataReceived
from ._events import QuicStreamReset
from ._events import QuicStreamStopSending
from ._hooks import QuicStartClientHook
from ._hooks import QuicStartServerHook
from ._hooks import QuicTlsData
from ._hooks import QuicTlsSettings
from mitmproxy import certs
from mitmproxy import connection
from mitmproxy import ctx
from mitmproxy.net import tls
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.layers.tls import TlsClienthelloHook
from mitmproxy.proxy.layers.tls import TlsEstablishedClientHook
from mitmproxy.proxy.layers.tls import TlsEstablishedServerHook
from mitmproxy.proxy.layers.tls import TlsFailedClientHook
from mitmproxy.proxy.layers.tls import TlsFailedServerHook
from mitmproxy.proxy.layers.udp import UDPLayer
from mitmproxy.tls import ClientHelloData
SUPPORTED_QUIC_VERSIONS_SERVER = QuicConfiguration(is_client=False).supported_versions
class QuicLayer(tunnel.TunnelLayer):
    """
    Common base for client and server QUIC layers: drives an aioquic
    `QuicConnection` over the tunnel connection and translates between
    aioquic events and mitmproxy's command/event machinery.
    """

    quic: QuicConnection | None = None
    tls: QuicTlsSettings | None = None

    def __init__(
        self,
        context: context.Context,
        conn: connection.Connection,
        time: Callable[[], float] | None,
    ) -> None:
        super().__init__(context, tunnel_connection=conn, conn=conn)
        self.child_layer = layer.NextLayer(self.context, ask_on_start=True)
        self._time = time or ctx.master.event_loop.time
        # Maps outstanding wakeup commands to their scheduled absolute time.
        self._wakeup_commands: dict[commands.RequestWakeup, float] = dict()
        conn.tls = True

    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        if isinstance(event, events.Wakeup) and event.command in self._wakeup_commands:
            # TunnelLayer has no understanding of wakeups, so we turn this into an empty DataReceived event
            # which TunnelLayer recognizes as belonging to our connection.
            assert self.quic
            scheduled_time = self._wakeup_commands.pop(event.command)
            if self.quic._state is not QuicConnectionState.TERMINATED:
                # weird quirk: asyncio sometimes returns a bit ahead of time.
                now = max(scheduled_time, self._time())
                self.quic.handle_timer(now)
                yield from super()._handle_event(
                    events.DataReceived(self.tunnel_connection, b"")
                )
        else:
            yield from super()._handle_event(event)

    def event_to_child(self, event: events.Event) -> layer.CommandGenerator[None]:
        # the parent will call _handle_command multiple times, we transmit cumulative afterwards
        # this will reduce the number of sends, especially if data=b"" and end_stream=True
        yield from super().event_to_child(event)
        if self.quic:
            yield from self.tls_interact()

    def _handle_command(
        self, command: commands.Command
    ) -> layer.CommandGenerator[None]:
        """Turns stream commands into aioquic connection invocations."""
        if isinstance(command, QuicStreamCommand) and command.connection is self.conn:
            assert self.quic
            if isinstance(command, SendQuicStreamData):
                self.quic.send_stream_data(
                    command.stream_id, command.data, command.end_stream
                )
            elif isinstance(command, ResetQuicStream):
                stream = self.quic._get_or_create_stream_for_send(command.stream_id)
                existing_reset_error_code = stream.sender._reset_error_code
                if existing_reset_error_code is None:
                    self.quic.reset_stream(command.stream_id, command.error_code)
                elif self.debug:  # pragma: no cover
                    yield commands.Log(
                        f"{self.debug}[quic] stream {stream.stream_id} already reset ({existing_reset_error_code=}, {command.error_code=})",
                        DEBUG,
                    )
            elif isinstance(command, StopSendingQuicStream):
                # the stream might have already been closed, check before stopping
                if command.stream_id in self.quic._streams:
                    self.quic.stop_stream(command.stream_id, command.error_code)
            else:
                raise AssertionError(f"Unexpected stream command: {command!r}")
        else:
            yield from super()._handle_command(command)

    def start_tls(
        self, original_destination_connection_id: bytes | None
    ) -> layer.CommandGenerator[None]:
        """Initiates the aioquic connection."""
        # must only be called if QUIC is uninitialized
        assert not self.quic
        assert not self.tls
        # query addons to provide the necessary TLS settings
        tls_data = QuicTlsData(self.conn, self.context)
        if self.conn is self.context.client:
            yield QuicStartClientHook(tls_data)
        else:
            yield QuicStartServerHook(tls_data)
        if not tls_data.settings:
            # Note: plain string literal here, the message has no placeholders.
            yield commands.Log(
                "No QUIC context was provided, failing connection.", ERROR
            )
            yield commands.CloseConnection(self.conn)
            return
        # build the aioquic connection
        configuration = tls_settings_to_configuration(
            settings=tls_data.settings,
            is_client=self.conn is self.context.server,
            server_name=self.conn.sni,
        )
        self.quic = QuicConnection(
            configuration=configuration,
            original_destination_connection_id=original_destination_connection_id,
        )
        self.tls = tls_data.settings
        # if we act as client, connect to upstream
        if original_destination_connection_id is None:
            self.quic.connect(self.conn.peername, now=self._time())
            yield from self.tls_interact()

    def tls_interact(self) -> layer.CommandGenerator[None]:
        """Retrieves all pending outgoing packets from aioquic and sends the data."""
        # send all queued datagrams
        assert self.quic
        now = self._time()
        for data, addr in self.quic.datagrams_to_send(now=now):
            assert addr == self.conn.peername
            yield commands.SendData(self.tunnel_connection, data)
        timer = self.quic.get_timer()
        if timer is not None:
            # smooth wakeups a bit.
            smoothed = timer + 0.002
            # request a new wakeup if all pending requests trigger at a later time
            if not any(
                existing <= smoothed for existing in self._wakeup_commands.values()
            ):
                command = commands.RequestWakeup(timer - now)
                self._wakeup_commands[command] = timer
                yield command

    def receive_handshake_data(
        self, data: bytes
    ) -> layer.CommandGenerator[tuple[bool, str | None]]:
        """Feeds `data` to aioquic and processes pre-handshake events.

        Returns (handshake_done, error_message-or-None).
        """
        assert self.quic
        # forward incoming data to aioquic
        if data:
            self.quic.receive_datagram(data, self.conn.peername, now=self._time())
        # handle pre-handshake events
        while event := self.quic.next_event():
            if isinstance(event, quic_events.ConnectionTerminated):
                err = event.reason_phrase or error_code_to_str(event.error_code)
                return False, err
            elif isinstance(event, quic_events.HandshakeCompleted):
                # concatenate all peer certificates
                all_certs: list[x509.Certificate] = []
                if self.quic.tls._peer_certificate:
                    all_certs.append(self.quic.tls._peer_certificate)
                all_certs.extend(self.quic.tls._peer_certificate_chain)
                # set the connection's TLS properties
                self.conn.timestamp_tls_setup = time.time()
                if event.alpn_protocol:
                    self.conn.alpn = event.alpn_protocol.encode("ascii")
                self.conn.certificate_list = [certs.Cert(cert) for cert in all_certs]
                assert self.quic.tls.key_schedule
                self.conn.cipher = self.quic.tls.key_schedule.cipher_suite.name
                self.conn.tls_version = "QUICv1"
                # log the result and report the success to addons
                if self.debug:
                    yield commands.Log(
                        f"{self.debug}[quic] tls established: {self.conn}", DEBUG
                    )
                if self.conn is self.context.client:
                    yield TlsEstablishedClientHook(
                        QuicTlsData(self.conn, self.context, settings=self.tls)
                    )
                else:
                    yield TlsEstablishedServerHook(
                        QuicTlsData(self.conn, self.context, settings=self.tls)
                    )
                yield from self.tls_interact()
                return True, None
            elif isinstance(
                event,
                (
                    quic_events.ConnectionIdIssued,
                    quic_events.ConnectionIdRetired,
                    quic_events.PingAcknowledged,
                    quic_events.ProtocolNegotiated,
                ),
            ):
                pass
            else:
                raise AssertionError(f"Unexpected event: {event!r}")
        # transmit buffered data and re-arm timer
        yield from self.tls_interact()
        return False, None

    def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
        """Records the error on the connection and notifies addons of the failure."""
        self.conn.error = err
        if self.conn is self.context.client:
            yield TlsFailedClientHook(
                QuicTlsData(self.conn, self.context, settings=self.tls)
            )
        else:
            yield TlsFailedServerHook(
                QuicTlsData(self.conn, self.context, settings=self.tls)
            )
        yield from super().on_handshake_error(err)

    def receive_data(self, data: bytes) -> layer.CommandGenerator[None]:
        """Feeds `data` to aioquic and relays post-handshake events to the child layer."""
        assert self.quic
        # forward incoming data to aioquic
        if data:
            self.quic.receive_datagram(data, self.conn.peername, now=self._time())
        # handle post-handshake events
        while event := self.quic.next_event():
            if isinstance(event, quic_events.ConnectionTerminated):
                if self.debug:
                    reason = event.reason_phrase or error_code_to_str(event.error_code)
                    yield commands.Log(
                        f"{self.debug}[quic] close_notify {self.conn} ({reason=!s})",
                        DEBUG,
                    )
                # We don't rely on `ConnectionTerminated` to dispatch `QuicConnectionClosed`, because
                # after aioquic receives a termination frame, it still waits for the next `handle_timer`
                # before returning `ConnectionTerminated` in `next_event`. In the meantime, the underlying
                # connection could be closed. Therefore, we instead dispatch on `ConnectionClosed` and simply
                # close the connection here.
                yield commands.CloseConnection(self.tunnel_connection)
                return  # we don't handle any further events, nor do/can we transmit data, so exit
            elif isinstance(event, quic_events.DatagramFrameReceived):
                yield from self.event_to_child(
                    events.DataReceived(self.conn, event.data)
                )
            elif isinstance(event, quic_events.StreamDataReceived):
                yield from self.event_to_child(
                    QuicStreamDataReceived(
                        self.conn, event.stream_id, event.data, event.end_stream
                    )
                )
            elif isinstance(event, quic_events.StreamReset):
                yield from self.event_to_child(
                    QuicStreamReset(self.conn, event.stream_id, event.error_code)
                )
            elif isinstance(event, quic_events.StopSendingReceived):
                yield from self.event_to_child(
                    QuicStreamStopSending(self.conn, event.stream_id, event.error_code)
                )
            elif isinstance(
                event,
                (
                    quic_events.ConnectionIdIssued,
                    quic_events.ConnectionIdRetired,
                    quic_events.PingAcknowledged,
                    quic_events.ProtocolNegotiated,
                ),
            ):
                pass
            else:
                raise AssertionError(f"Unexpected event: {event!r}")
        # transmit buffered data and re-arm timer
        yield from self.tls_interact()

    def receive_close(self) -> layer.CommandGenerator[None]:
        """Translates the tunnel's close into a QuicConnectionClosed for the child."""
        assert self.quic
        # if `_close_event` is not set, the underlying connection has been closed
        # we turn this into a QUIC close event as well
        close_event = self.quic._close_event or quic_events.ConnectionTerminated(
            QuicErrorCode.NO_ERROR, None, "Connection closed."
        )
        yield from self.event_to_child(
            QuicConnectionClosed(
                self.conn,
                close_event.error_code,
                close_event.frame_type,
                close_event.reason_phrase,
            )
        )

    def send_data(self, data: bytes) -> layer.CommandGenerator[None]:
        # non-stream data uses datagram frames
        assert self.quic
        if data:
            self.quic.send_datagram_frame(data)
        yield from self.tls_interact()

    def send_close(
        self, command: commands.CloseConnection
    ) -> layer.CommandGenerator[None]:
        # properly close the QUIC connection
        if self.quic:
            if isinstance(command, CloseQuicConnection):
                self.quic.close(
                    command.error_code, command.frame_type, command.reason_phrase
                )
            else:
                self.quic.close()
            yield from self.tls_interact()
        yield from super().send_close(command)
class ServerQuicLayer(QuicLayer):
    """
    This layer establishes QUIC for a single server connection.
    """
    # True while the server handshake is deferred until the client's
    # ClientHello has been processed.
    wait_for_clienthello: bool = False
    def __init__(
        self,
        context: context.Context,
        conn: connection.Server | None = None,
        time: Callable[[], float] | None = None,
    ):
        super().__init__(context, conn or context.server, time)
    def start_handshake(self) -> layer.CommandGenerator[None]:
        # If the child is a ClientQuicLayer and no explicit OpenConnection is
        # pending, defer the server handshake until the ClientHello arrives.
        wait_for_clienthello = not self.command_to_reply_to and isinstance(
            self.child_layer, ClientQuicLayer
        )
        if wait_for_clienthello:
            self.wait_for_clienthello = True
            self.tunnel_state = tunnel.TunnelState.CLOSED
        else:
            yield from self.start_tls(None)
    def event_to_child(self, event: events.Event) -> layer.CommandGenerator[None]:
        if self.wait_for_clienthello:
            # Swallow the child's OpenConnection for our own connection: it
            # marks the end of the deferral; all other commands pass through.
            for command in super().event_to_child(event):
                if (
                    isinstance(command, commands.OpenConnection)
                    and command.connection == self.conn
                ):
                    self.wait_for_clienthello = False
                else:
                    yield command
        else:
            yield from super().event_to_child(event)
    def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
        yield commands.Log(f"Server QUIC handshake failed. {err}", level=WARNING)
        yield from super().on_handshake_error(err)
class ClientQuicLayer(QuicLayer):
    """
    This layer establishes QUIC on a single client connection.
    """

    server_tls_available: bool
    """Indicates whether the parent layer is a ServerQuicLayer."""

    # Datagrams received while the ClientHello is still incomplete.
    handshake_datagram_buf: list[bytes]

    def __init__(
        self, context: context.Context, time: Callable[[], float] | None = None
    ) -> None:
        # same as ClientTLSLayer, we might be nested in some other transport
        if context.client.tls:
            context.client.alpn = None
            context.client.cipher = None
            context.client.sni = None
            context.client.timestamp_tls_setup = None
            context.client.tls_version = None
            context.client.certificate_list = []
            context.client.mitmcert = None
            context.client.alpn_offers = []
            context.client.cipher_list = []
        super().__init__(context, context.client, time)
        self.server_tls_available = len(self.context.layers) >= 2 and isinstance(
            self.context.layers[-2], ServerQuicLayer
        )
        self.handshake_datagram_buf = []

    def start_handshake(self) -> layer.CommandGenerator[None]:
        # The client speaks first; nothing to do until data arrives.
        yield from ()

    def receive_handshake_data(
        self, data: bytes
    ) -> layer.CommandGenerator[tuple[bool, str | None]]:
        """Processes a client datagram until the QUIC handshake is established.

        Returns (handshake_done, error_message-or-None).
        """
        if not self.context.options.http3:
            # Note: plain string literal, the message has no placeholders.
            yield commands.Log(
                "Swallowing QUIC handshake because HTTP/3 is disabled.", DEBUG
            )
            return False, None
        # if we already had a valid client hello, don't process further packets
        if self.tls:
            return (yield from super().receive_handshake_data(data))
        # fail if the received data is not a QUIC packet
        buffer = QuicBuffer(data=data)
        try:
            header = pull_quic_header(buffer)
        except TypeError:
            return False, f"Cannot parse QUIC header: Malformed head ({data.hex()})"
        except ValueError as e:
            return False, f"Cannot parse QUIC header: {e} ({data.hex()})"
        # negotiate version, support all versions known to aioquic
        if (
            header.version is not None
            and header.version not in SUPPORTED_QUIC_VERSIONS_SERVER
        ):
            yield commands.SendData(
                self.tunnel_connection,
                encode_quic_version_negotiation(
                    source_cid=header.destination_cid,
                    destination_cid=header.source_cid,
                    supported_versions=SUPPORTED_QUIC_VERSIONS_SERVER,
                ),
            )
            return False, None
        # ensure it's (likely) a client handshake packet
        if len(data) < 1200 or header.packet_type != PACKET_TYPE_INITIAL:
            return (
                False,
                f"Invalid handshake received, roaming not supported. ({data.hex()})",
            )
        self.handshake_datagram_buf.append(data)
        # extract the client hello
        try:
            client_hello = quic_parse_client_hello_from_datagrams(
                self.handshake_datagram_buf
            )
        except ValueError as e:
            msgs = b"\n".join(self.handshake_datagram_buf)
            dbg = f"Cannot parse ClientHello: {e} ({msgs.hex()})"
            self.handshake_datagram_buf.clear()
            return False, dbg
        if not client_hello:
            # ClientHello not complete yet; wait for more datagrams.
            return False, None
        # copy the client hello information
        self.conn.sni = client_hello.sni
        self.conn.alpn_offers = client_hello.alpn_protocols
        # check with addons what we shall do
        tls_clienthello = ClientHelloData(self.context, client_hello)
        yield TlsClienthelloHook(tls_clienthello)
        # replace the QUIC layer with an UDP layer if requested
        if tls_clienthello.ignore_connection:
            self.conn = self.tunnel_connection = connection.Client(
                peername=("ignore-conn", 0),
                sockname=("ignore-conn", 0),
                transport_protocol="udp",
                state=connection.ConnectionState.OPEN,
            )
            # we need to replace the server layer as well, if there is one
            parent_layer = self.context.layers[self.context.layers.index(self) - 1]
            if isinstance(parent_layer, ServerQuicLayer):
                parent_layer.conn = parent_layer.tunnel_connection = connection.Server(
                    address=None
                )
            replacement_layer = UDPLayer(self.context, ignore=True)
            parent_layer.handle_event = replacement_layer.handle_event  # type: ignore
            parent_layer._handle_event = replacement_layer._handle_event  # type: ignore
            yield from parent_layer.handle_event(events.Start())
            # replay the buffered handshake datagrams into the new layer
            for dgm in self.handshake_datagram_buf:
                yield from parent_layer.handle_event(
                    events.DataReceived(self.context.client, dgm)
                )
            self.handshake_datagram_buf.clear()
            return True, None
        # start the server QUIC connection if demanded and available
        if (
            tls_clienthello.establish_server_tls_first
            and not self.context.server.tls_established
        ):
            err = yield from self.start_server_tls()
            if err:
                yield commands.Log(
                    f"Unable to establish QUIC connection with server ({err}). "
                    f"Trying to establish QUIC with client anyway. "
                    f"If you plan to redirect requests away from this server, "
                    f"consider setting `connection_strategy` to `lazy` to suppress early connections."
                )
        # start the client QUIC connection
        yield from self.start_tls(header.destination_cid)
        # XXX copied from TLS, we assume that `CloseConnection` in `start_tls` takes effect immediately
        if not self.conn.connected:
            return False, "connection closed early"
        # send the client hello to aioquic
        assert self.quic
        for dgm in self.handshake_datagram_buf:
            self.quic.receive_datagram(dgm, self.conn.peername, now=self._time())
        self.handshake_datagram_buf.clear()
        # handle events emanating from `self.quic`
        return (yield from super().receive_handshake_data(b""))

    def start_server_tls(self) -> layer.CommandGenerator[str | None]:
        """Opens the upstream connection; returns an error string on failure."""
        if not self.server_tls_available:
            # Note: plain string literal, the message has no placeholders.
            return "No server QUIC available."
        err = yield commands.OpenConnection(self.context.server)
        return err

    def on_handshake_error(self, err: str) -> layer.CommandGenerator[None]:
        yield commands.Log(f"Client QUIC handshake failed. {err}", level=WARNING)
        yield from super().on_handshake_error(err)
        # From now on, silently drop all events for this broken connection.
        self.event_to_child = self.errored  # type: ignore

    def errored(self, event: events.Event) -> layer.CommandGenerator[None]:
        if self.debug is not None:
            yield commands.Log(
                f"{self.debug}[quic] Swallowing {event} as handshake failed.", DEBUG
            )
class QuicSecretsLogger:
    """File-like adapter that forwards aioquic's keylog writes to mitmproxy's
    master-secret logger, stripping the trailing newline from each line."""

    logger: tls.MasterSecretLogger

    def __init__(self, logger: tls.MasterSecretLogger) -> None:
        super().__init__()
        self.logger = logger

    def write(self, s: str) -> int:
        # Drop at most one trailing newline before forwarding as ASCII bytes.
        payload = s.removesuffix("\n").encode("ascii")
        self.logger(None, payload)  # type: ignore
        # Report the payload length plus one for the (possibly stripped) newline.
        return len(payload) + 1

    def flush(self) -> None:
        """No-op: every write is forwarded immediately."""
def error_code_to_str(error_code: int) -> str:
    """Returns the corresponding name of the given error code or a string containing its numeric value."""
    # Prefer the HTTP/3 name, then the QUIC transport name, then a hex fallback.
    for known_codes in (H3ErrorCode, QuicErrorCode):
        try:
            return known_codes(error_code).name
        except ValueError:
            continue
    return f"unknown error (0x{error_code:x})"
def is_success_error_code(error_code: int) -> bool:
    """Returns whether the given error code actually indicates no error."""
    no_error_codes = (QuicErrorCode.NO_ERROR, H3ErrorCode.H3_NO_ERROR)
    return error_code in no_error_codes
def tls_settings_to_configuration(
    settings: QuicTlsSettings,
    is_client: bool,
    server_name: str | None = None,
) -> QuicConfiguration:
    """Converts `QuicTlsSettings` to `QuicConfiguration`."""
    # Only wire up keylogging when an SSLKEYLOGFILE target is configured.
    if tls.log_master_secret is not None:
        secrets_log = QuicSecretsLogger(tls.log_master_secret)  # type: ignore
    else:
        secrets_log = None
    return QuicConfiguration(
        alpn_protocols=settings.alpn_protocols,
        is_client=is_client,
        secrets_log_file=secrets_log,
        server_name=server_name,
        cafile=settings.ca_file,
        capath=settings.ca_path,
        certificate=settings.certificate,
        certificate_chain=settings.certificate_chain,
        cipher_suites=settings.cipher_suites,
        private_key=settings.certificate_private_key,
        verify_mode=settings.verify_mode,
        max_datagram_frame_size=65536,
    )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/_commands.py | mitmproxy/proxy/layers/quic/_commands.py | from __future__ import annotations
from mitmproxy import connection
from mitmproxy.proxy import commands
class QuicStreamCommand(commands.ConnectionCommand):
"""Base class for all QUIC stream commands."""
stream_id: int
"""The ID of the stream the command was issued for."""
def __init__(self, connection: connection.Connection, stream_id: int) -> None:
super().__init__(connection)
self.stream_id = stream_id
class SendQuicStreamData(QuicStreamCommand):
"""Command that sends data on a stream."""
data: bytes
"""The data which should be sent."""
end_stream: bool
"""Whether the FIN bit should be set in the STREAM frame."""
def __init__(
self,
connection: connection.Connection,
stream_id: int,
data: bytes,
end_stream: bool = False,
) -> None:
super().__init__(connection, stream_id)
self.data = data
self.end_stream = end_stream
def __repr__(self):
target = repr(self.connection).partition("(")[0].lower()
end_stream = "[end_stream] " if self.end_stream else ""
return f"SendQuicStreamData({target} on {self.stream_id}, {end_stream}{self.data!r})"
class ResetQuicStream(QuicStreamCommand):
"""Abruptly terminate the sending part of a stream."""
error_code: int
"""An error code indicating why the stream is being reset."""
def __init__(
self, connection: connection.Connection, stream_id: int, error_code: int
) -> None:
super().__init__(connection, stream_id)
self.error_code = error_code
class StopSendingQuicStream(QuicStreamCommand):
"""Request termination of the receiving part of a stream."""
error_code: int
"""An error code indicating why the stream is being stopped."""
def __init__(
self, connection: connection.Connection, stream_id: int, error_code: int
) -> None:
super().__init__(connection, stream_id)
self.error_code = error_code
class CloseQuicConnection(commands.CloseConnection):
"""Close a QUIC connection."""
error_code: int
"The error code which was specified when closing the connection."
frame_type: int | None
"The frame type which caused the connection to be closed, or `None`."
reason_phrase: str
"The human-readable reason for which the connection was closed."
# XXX: A bit much boilerplate right now. Should switch to dataclasses.
def __init__(
self,
conn: connection.Connection,
error_code: int,
frame_type: int | None,
reason_phrase: str,
) -> None:
super().__init__(conn)
self.error_code = error_code
self.frame_type = frame_type
self.reason_phrase = reason_phrase
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/_raw_layers.py | mitmproxy/proxy/layers/quic/_raw_layers.py | """
This module contains the proxy layers for raw QUIC proxying.
This is used if we want to speak QUIC, but we do not want to do HTTP.
"""
from __future__ import annotations
import time
from aioquic.quic.connection import QuicErrorCode
from aioquic.quic.connection import stream_is_client_initiated
from aioquic.quic.connection import stream_is_unidirectional
from ._commands import CloseQuicConnection
from ._commands import ResetQuicStream
from ._commands import SendQuicStreamData
from ._commands import StopSendingQuicStream
from ._events import QuicConnectionClosed
from ._events import QuicStreamDataReceived
from ._events import QuicStreamEvent
from ._events import QuicStreamReset
from mitmproxy import connection
from mitmproxy.connection import Connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.layers.tcp import TCPLayer
from mitmproxy.proxy.layers.udp import UDPLayer
class QuicStreamNextLayer(layer.NextLayer):
"""`NextLayer` variant that callbacks `QuicStreamLayer` after layer decision."""
def __init__(
self,
context: context.Context,
stream: QuicStreamLayer,
ask_on_start: bool = False,
) -> None:
super().__init__(context, ask_on_start)
self._stream = stream
self._layer: layer.Layer | None = None
@property # type: ignore
def layer(self) -> layer.Layer | None: # type: ignore
return self._layer
@layer.setter
def layer(self, value: layer.Layer | None) -> None:
self._layer = value
if self._layer:
self._stream.refresh_metadata()
class QuicStreamLayer(layer.Layer):
"""
Layer for QUIC streams.
Serves as a marker for NextLayer and keeps track of the connection states.
"""
client: connection.Client
"""Virtual client connection for this stream. Use this in QuicRawLayer instead of `context.client`."""
server: connection.Server
"""Virtual server connection for this stream. Use this in QuicRawLayer instead of `context.server`."""
child_layer: layer.Layer
"""The stream's child layer."""
def __init__(
self, context: context.Context, force_raw: bool, stream_id: int
) -> None:
# we mustn't reuse the client from the QUIC connection, as the state and protocol differs
self.client = context.client = context.client.copy()
self.client.transport_protocol = "tcp"
self.client.state = connection.ConnectionState.OPEN
# unidirectional client streams are not fully open, set the appropriate state
if stream_is_unidirectional(stream_id):
self.client.state = (
connection.ConnectionState.CAN_READ
if stream_is_client_initiated(stream_id)
else connection.ConnectionState.CAN_WRITE
)
self._client_stream_id = stream_id
# start with a closed server
self.server = context.server = connection.Server(
address=context.server.address,
transport_protocol="tcp",
)
self._server_stream_id: int | None = None
super().__init__(context)
self.child_layer = (
TCPLayer(context) if force_raw else QuicStreamNextLayer(context, self)
)
self.refresh_metadata()
# we don't handle any events, pass everything to the child layer
self.handle_event = self.child_layer.handle_event # type: ignore
self._handle_event = self.child_layer._handle_event # type: ignore
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
raise AssertionError # pragma: no cover
def open_server_stream(self, server_stream_id) -> None:
assert self._server_stream_id is None
self._server_stream_id = server_stream_id
self.server.timestamp_start = time.time()
self.server.state = (
(
connection.ConnectionState.CAN_WRITE
if stream_is_client_initiated(server_stream_id)
else connection.ConnectionState.CAN_READ
)
if stream_is_unidirectional(server_stream_id)
else connection.ConnectionState.OPEN
)
self.refresh_metadata()
def refresh_metadata(self) -> None:
# find the first transport layer
child_layer: layer.Layer | None = self.child_layer
while True:
if isinstance(child_layer, layer.NextLayer):
child_layer = child_layer.layer
elif isinstance(child_layer, tunnel.TunnelLayer):
child_layer = child_layer.child_layer
else:
break # pragma: no cover
if isinstance(child_layer, (UDPLayer, TCPLayer)) and child_layer.flow:
child_layer.flow.metadata["quic_is_unidirectional"] = (
stream_is_unidirectional(self._client_stream_id)
)
child_layer.flow.metadata["quic_initiator"] = (
"client"
if stream_is_client_initiated(self._client_stream_id)
else "server"
)
child_layer.flow.metadata["quic_stream_id_client"] = self._client_stream_id
child_layer.flow.metadata["quic_stream_id_server"] = self._server_stream_id
def stream_id(self, client: bool) -> int | None:
return self._client_stream_id if client else self._server_stream_id
class RawQuicLayer(layer.Layer):
"""
This layer is responsible for de-multiplexing QUIC streams into an individual layer stack per stream.
"""
force_raw: bool
"""Indicates whether traffic should be treated as raw TCP/UDP without further protocol detection."""
datagram_layer: layer.Layer
"""
The layer that is handling datagrams over QUIC. It's like a child_layer, but with a forked context.
Instead of having a datagram-equivalent for all `QuicStream*` classes, we use `SendData` and `DataReceived` instead.
There is also no need for another `NextLayer` marker, as a missing `QuicStreamLayer` implies UDP,
and the connection state is the same as the one of the underlying QUIC connection.
"""
client_stream_ids: dict[int, QuicStreamLayer]
"""Maps stream IDs from the client connection to stream layers."""
server_stream_ids: dict[int, QuicStreamLayer]
"""Maps stream IDs from the server connection to stream layers."""
connections: dict[connection.Connection, layer.Layer]
"""Maps connections to layers."""
command_sources: dict[commands.Command, layer.Layer]
"""Keeps track of blocking commands and wakeup requests."""
next_stream_id: list[int]
"""List containing the next stream ID for all four is_unidirectional/is_client combinations."""
def __init__(self, context: context.Context, force_raw: bool = False) -> None:
super().__init__(context)
self.force_raw = force_raw
self.datagram_layer = (
UDPLayer(self.context.fork())
if force_raw
else layer.NextLayer(self.context.fork())
)
self.client_stream_ids = {}
self.server_stream_ids = {}
self.connections = {
context.client: self.datagram_layer,
context.server: self.datagram_layer,
}
self.command_sources = {}
self.next_stream_id = [0, 1, 2, 3]
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
# we treat the datagram layer as child layer, so forward Start
if isinstance(event, events.Start):
if self.context.server.timestamp_start is None:
err = yield commands.OpenConnection(self.context.server)
if err:
yield commands.CloseConnection(self.context.client)
self._handle_event = self.done # type: ignore
return
yield from self.event_to_child(self.datagram_layer, event)
# properly forward completion events based on their command
elif isinstance(event, events.CommandCompleted):
yield from self.event_to_child(
self.command_sources.pop(event.command), event
)
# route injected messages based on their connections (prefer client, fallback to server)
elif isinstance(event, events.MessageInjected):
if event.flow.client_conn in self.connections:
yield from self.event_to_child(
self.connections[event.flow.client_conn], event
)
elif event.flow.server_conn in self.connections:
yield from self.event_to_child(
self.connections[event.flow.server_conn], event
)
else:
raise AssertionError(f"Flow not associated: {event.flow!r}")
# handle stream events targeting this context
elif isinstance(event, QuicStreamEvent) and (
event.connection is self.context.client
or event.connection is self.context.server
):
from_client = event.connection is self.context.client
# fetch or create the layer
stream_ids = (
self.client_stream_ids if from_client else self.server_stream_ids
)
if event.stream_id in stream_ids:
stream_layer = stream_ids[event.stream_id]
else:
# ensure we haven't just forgotten to register the ID
assert stream_is_client_initiated(event.stream_id) == from_client
# for server-initiated streams we need to open the client as well
if from_client:
client_stream_id = event.stream_id
server_stream_id = None
else:
client_stream_id = self.get_next_available_stream_id(
is_client=False,
is_unidirectional=stream_is_unidirectional(event.stream_id),
)
server_stream_id = event.stream_id
# create, register and start the layer
stream_layer = QuicStreamLayer(
self.context.fork(),
force_raw=self.force_raw,
stream_id=client_stream_id,
)
self.client_stream_ids[client_stream_id] = stream_layer
if server_stream_id is not None:
stream_layer.open_server_stream(server_stream_id)
self.server_stream_ids[server_stream_id] = stream_layer
self.connections[stream_layer.client] = stream_layer
self.connections[stream_layer.server] = stream_layer
yield from self.event_to_child(stream_layer, events.Start())
# forward data and close events
conn: Connection = (
stream_layer.client if from_client else stream_layer.server
)
if isinstance(event, QuicStreamDataReceived):
if event.data:
yield from self.event_to_child(
stream_layer, events.DataReceived(conn, event.data)
)
if event.end_stream:
yield from self.close_stream_layer(stream_layer, from_client)
elif isinstance(event, QuicStreamReset):
# preserve stream resets
for command in self.close_stream_layer(stream_layer, from_client):
if (
isinstance(command, SendQuicStreamData)
and command.stream_id == stream_layer.stream_id(not from_client)
and command.end_stream
and not command.data
):
yield ResetQuicStream(
command.connection, command.stream_id, event.error_code
)
else:
yield command
else:
raise AssertionError(f"Unexpected stream event: {event!r}")
# handle close events that target this context
elif isinstance(event, QuicConnectionClosed) and (
event.connection is self.context.client
or event.connection is self.context.server
):
from_client = event.connection is self.context.client
other_conn = self.context.server if from_client else self.context.client
# be done if both connections are closed
if other_conn.connected:
yield CloseQuicConnection(
other_conn, event.error_code, event.frame_type, event.reason_phrase
)
else:
self._handle_event = self.done # type: ignore
# always forward to the datagram layer and swallow `CloseConnection` commands
for command in self.event_to_child(self.datagram_layer, event):
if (
not isinstance(command, commands.CloseConnection)
or command.connection is not other_conn
):
yield command
# forward to either the client or server connection of stream layers and swallow empty stream end
for conn, child_layer in self.connections.items():
if isinstance(child_layer, QuicStreamLayer) and (
(conn is child_layer.client)
if from_client
else (conn is child_layer.server)
):
conn.state &= ~connection.ConnectionState.CAN_WRITE
for command in self.close_stream_layer(child_layer, from_client):
if not isinstance(command, SendQuicStreamData) or command.data:
yield command
# all other connection events are routed to their corresponding layer
elif isinstance(event, events.ConnectionEvent):
yield from self.event_to_child(self.connections[event.connection], event)
else:
raise AssertionError(f"Unexpected event: {event!r}")
def close_stream_layer(
self, stream_layer: QuicStreamLayer, client: bool
) -> layer.CommandGenerator[None]:
"""Closes the incoming part of a connection."""
conn = stream_layer.client if client else stream_layer.server
conn.state &= ~connection.ConnectionState.CAN_READ
assert conn.timestamp_start is not None
if conn.timestamp_end is None:
conn.timestamp_end = time.time()
yield from self.event_to_child(stream_layer, events.ConnectionClosed(conn))
def event_to_child(
self, child_layer: layer.Layer, event: events.Event
) -> layer.CommandGenerator[None]:
"""Forwards events to child layers and translates commands."""
for command in child_layer.handle_event(event):
# intercept commands for streams connections
if (
isinstance(child_layer, QuicStreamLayer)
and isinstance(command, commands.ConnectionCommand)
and (
command.connection is child_layer.client
or command.connection is child_layer.server
)
):
# get the target connection and stream ID
to_client = command.connection is child_layer.client
quic_conn = self.context.client if to_client else self.context.server
stream_id = child_layer.stream_id(to_client)
# write data and check CloseConnection wasn't called before
if isinstance(command, commands.SendData):
assert stream_id is not None
if command.connection.state & connection.ConnectionState.CAN_WRITE:
yield SendQuicStreamData(quic_conn, stream_id, command.data)
# send a FIN and optionally also a STOP frame
elif isinstance(command, commands.CloseConnection):
assert stream_id is not None
if command.connection.state & connection.ConnectionState.CAN_WRITE:
command.connection.state &= (
~connection.ConnectionState.CAN_WRITE
)
yield SendQuicStreamData(
quic_conn, stream_id, b"", end_stream=True
)
# XXX: Use `command.connection.state & connection.ConnectionState.CAN_READ` instead?
only_close_our_half = (
isinstance(command, commands.CloseTcpConnection)
and command.half_close
)
if not only_close_our_half:
if stream_is_client_initiated(
stream_id
) == to_client or not stream_is_unidirectional(stream_id):
yield StopSendingQuicStream(
quic_conn, stream_id, QuicErrorCode.NO_ERROR
)
yield from self.close_stream_layer(child_layer, to_client)
# open server connections by reserving the next stream ID
elif isinstance(command, commands.OpenConnection):
assert not to_client
assert stream_id is None
client_stream_id = child_layer.stream_id(client=True)
assert client_stream_id is not None
stream_id = self.get_next_available_stream_id(
is_client=True,
is_unidirectional=stream_is_unidirectional(client_stream_id),
)
child_layer.open_server_stream(stream_id)
self.server_stream_ids[stream_id] = child_layer
yield from self.event_to_child(
child_layer, events.OpenConnectionCompleted(command, None)
)
else:
raise AssertionError(
f"Unexpected stream connection command: {command!r}"
)
# remember blocking and wakeup commands
else:
if command.blocking or isinstance(command, commands.RequestWakeup):
self.command_sources[command] = child_layer
if isinstance(command, commands.OpenConnection):
self.connections[command.connection] = child_layer
yield command
def get_next_available_stream_id(
self, is_client: bool, is_unidirectional: bool = False
) -> int:
index = (int(is_unidirectional) << 1) | int(not is_client)
stream_id = self.next_stream_id[index]
self.next_stream_id[index] = stream_id + 4
return stream_id
def done(self, _) -> layer.CommandGenerator[None]: # pragma: no cover
yield from ()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/__init__.py | mitmproxy/proxy/layers/quic/__init__.py | from ._client_hello_parser import quic_parse_client_hello_from_datagrams
from ._commands import CloseQuicConnection
from ._commands import ResetQuicStream
from ._commands import SendQuicStreamData
from ._commands import StopSendingQuicStream
from ._events import QuicConnectionClosed
from ._events import QuicStreamDataReceived
from ._events import QuicStreamEvent
from ._events import QuicStreamReset
from ._events import QuicStreamStopSending
from ._hooks import QuicStartClientHook
from ._hooks import QuicStartServerHook
from ._hooks import QuicTlsData
from ._hooks import QuicTlsSettings
from ._raw_layers import QuicStreamLayer
from ._raw_layers import RawQuicLayer
from ._stream_layers import ClientQuicLayer
from ._stream_layers import error_code_to_str
from ._stream_layers import ServerQuicLayer
__all__ = [
"quic_parse_client_hello_from_datagrams",
"CloseQuicConnection",
"ResetQuicStream",
"SendQuicStreamData",
"StopSendingQuicStream",
"QuicConnectionClosed",
"QuicStreamDataReceived",
"QuicStreamEvent",
"QuicStreamReset",
"QuicStreamStopSending",
"QuicStartClientHook",
"QuicStartServerHook",
"QuicTlsData",
"QuicTlsSettings",
"QuicStreamLayer",
"RawQuicLayer",
"ClientQuicLayer",
"error_code_to_str",
"ServerQuicLayer",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/quic/_client_hello_parser.py | mitmproxy/proxy/layers/quic/_client_hello_parser.py | """
This module contains a very terrible QUIC client hello parser.
Nothing is more permanent than a temporary solution!
"""
from __future__ import annotations
import time
from dataclasses import dataclass
from typing import Optional
from aioquic.buffer import Buffer as QuicBuffer
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import QuicConnection
from aioquic.quic.connection import QuicConnectionError
from aioquic.quic.logger import QuicLogger
from aioquic.quic.packet import PACKET_TYPE_INITIAL
from aioquic.quic.packet import pull_quic_header
from aioquic.tls import HandshakeType
from mitmproxy.tls import ClientHello
@dataclass
class QuicClientHello(Exception):
"""Helper error only used in `quic_parse_client_hello_from_datagrams`."""
data: bytes
def quic_parse_client_hello_from_datagrams(
datagrams: list[bytes],
) -> Optional[ClientHello]:
"""
Check if the supplied bytes contain a full ClientHello message,
and if so, parse it.
Args:
- msgs: list of ClientHello fragments received from client
Returns:
- A ClientHello object on success
- None, if the QUIC record is incomplete
Raises:
- A ValueError, if the passed ClientHello is invalid
"""
# ensure the first packet is indeed the initial one
buffer = QuicBuffer(data=datagrams[0])
header = pull_quic_header(buffer, 8)
if header.packet_type != PACKET_TYPE_INITIAL:
raise ValueError("Packet is not initial one.")
# patch aioquic to intercept the client hello
quic = QuicConnection(
configuration=QuicConfiguration(
is_client=False,
certificate="",
private_key="",
quic_logger=QuicLogger(),
),
original_destination_connection_id=header.destination_cid,
)
_initialize = quic._initialize
def server_handle_hello_replacement(
input_buf: QuicBuffer,
initial_buf: QuicBuffer,
handshake_buf: QuicBuffer,
onertt_buf: QuicBuffer,
) -> None:
assert input_buf.pull_uint8() == HandshakeType.CLIENT_HELLO
length = 0
for b in input_buf.pull_bytes(3):
length = (length << 8) | b
offset = input_buf.tell()
raise QuicClientHello(input_buf.data_slice(offset, offset + length))
def initialize_replacement(peer_cid: bytes) -> None:
try:
return _initialize(peer_cid)
finally:
quic.tls._server_handle_hello = server_handle_hello_replacement # type: ignore
quic._initialize = initialize_replacement # type: ignore
try:
for dgm in datagrams:
quic.receive_datagram(dgm, ("0.0.0.0", 0), now=time.time())
except QuicClientHello as hello:
try:
return ClientHello(hello.data)
except EOFError as e:
raise ValueError("Invalid ClientHello data.") from e
except QuicConnectionError as e:
raise ValueError(e.reason_phrase) from e
quic_logger = quic._configuration.quic_logger
assert isinstance(quic_logger, QuicLogger)
traces = quic_logger.to_dict().get("traces")
assert isinstance(traces, list)
for trace in traces:
quic_events = trace.get("events")
for event in quic_events:
if event["name"] == "transport:packet_dropped":
raise ValueError(
f"Invalid ClientHello packet: {event['data']['trigger']}"
)
return None # pragma: no cover # FIXME: this should have test coverage
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_base.py | mitmproxy/proxy/layers/http/_base.py | import html
import textwrap
from dataclasses import dataclass
from mitmproxy import http
from mitmproxy.connection import Connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.context import Context
StreamId = int
@dataclass
class HttpEvent(events.Event):
# we need stream ids on every event to avoid race conditions
stream_id: StreamId
class HttpConnection(layer.Layer):
conn: Connection
def __init__(self, context: Context, conn: Connection):
super().__init__(context)
self.conn = conn
class HttpCommand(commands.Command):
pass
class ReceiveHttp(HttpCommand):
event: HttpEvent
def __init__(self, event: HttpEvent):
self.event = event
def __repr__(self) -> str:
return f"Receive({self.event})"
def format_error(status_code: int, message: str) -> bytes:
reason = http.status_codes.RESPONSES.get(status_code, "Unknown")
return (
textwrap.dedent(
f"""
<html>
<head>
<title>{status_code} {reason}</title>
</head>
<body>
<h1>{status_code} {reason}</h1>
<p>{html.escape(message)}</p>
</body>
</html>
"""
)
.strip()
.encode("utf8", "replace")
)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_hooks.py | mitmproxy/proxy/layers/http/_hooks.py | from dataclasses import dataclass
from mitmproxy import http
from mitmproxy.proxy import commands
@dataclass
class HttpRequestHeadersHook(commands.StartHook):
"""
HTTP request headers were successfully read. At this point, the body is empty.
"""
name = "requestheaders"
flow: http.HTTPFlow
@dataclass
class HttpRequestHook(commands.StartHook):
"""
The full HTTP request has been read.
Note: If request streaming is active, this event fires after the entire body has been streamed.
HTTP trailers, if present, have not been transmitted to the server yet and can still be modified.
Enabling streaming may cause unexpected event sequences: For example, `response` may now occur
before `request` because the server replied with "413 Payload Too Large" during upload.
"""
name = "request"
flow: http.HTTPFlow
@dataclass
class HttpResponseHeadersHook(commands.StartHook):
"""
HTTP response headers were successfully read. At this point, the body is empty.
"""
name = "responseheaders"
flow: http.HTTPFlow
@dataclass
class HttpResponseHook(commands.StartHook):
"""
The full HTTP response has been read.
Note: If response streaming is active, this event fires after the entire body has been streamed.
HTTP trailers, if present, have not been transmitted to the client yet and can still be modified.
"""
name = "response"
flow: http.HTTPFlow
@dataclass
class HttpErrorHook(commands.StartHook):
"""
An HTTP error has occurred, e.g. invalid server responses, or
interrupted connections. This is distinct from a valid server HTTP
error response, which is simply a response with an HTTP error code.
Every flow will receive either an error or an response event, but not both.
"""
name = "error"
flow: http.HTTPFlow
@dataclass
class HttpConnectHook(commands.StartHook):
"""
An HTTP CONNECT request was received. This event can be ignored for most practical purposes.
This event only occurs in regular and upstream proxy modes
when the client instructs mitmproxy to open a connection to an upstream host.
Setting a non 2xx response on the flow will return the response to the client and abort the connection.
CONNECT requests are HTTP proxy instructions for mitmproxy itself
and not forwarded. They do not generate the usual HTTP handler events,
but all requests going over the newly opened connection will.
"""
flow: http.HTTPFlow
@dataclass
class HttpConnectUpstreamHook(commands.StartHook):
"""
An HTTP CONNECT request is about to be sent to an upstream proxy.
This event can be ignored for most practical purposes.
This event can be used to set custom authentication headers for upstream proxies.
CONNECT requests do not generate the usual HTTP handler events,
but all requests going over the newly opened connection will.
"""
flow: http.HTTPFlow
@dataclass
class HttpConnectedHook(commands.StartHook):
"""
HTTP CONNECT was successful
> [!WARNING]
> This may fire before an upstream connection has been established
> if `connection_strategy` is set to `lazy` (default)
"""
flow: http.HTTPFlow
@dataclass
class HttpConnectErrorHook(commands.StartHook):
"""
HTTP CONNECT has failed.
This can happen when the upstream server is unreachable or proxy authentication is required.
In contrast to the `error` hook, `flow.error` is not guaranteed to be set.
"""
flow: http.HTTPFlow
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_http1.py | mitmproxy/proxy/layers/http/_http1.py | import abc
from collections.abc import Callable
from typing import Union
import h11
from h11._readers import ChunkedReader
from h11._readers import ContentLengthReader
from h11._readers import Http10Reader
from h11._receivebuffer import ReceiveBuffer
from ...context import Context
from ._base import format_error
from ._base import HttpConnection
from ._events import ErrorCode
from ._events import HttpEvent
from ._events import RequestData
from ._events import RequestEndOfMessage
from ._events import RequestHeaders
from ._events import RequestProtocolError
from ._events import ResponseData
from ._events import ResponseEndOfMessage
from ._events import ResponseHeaders
from ._events import ResponseProtocolError
from mitmproxy import http
from mitmproxy import version
from mitmproxy.connection import Connection
from mitmproxy.connection import ConnectionState
from mitmproxy.net.http import http1
from mitmproxy.net.http import status_codes
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.layers.http._base import ReceiveHttp
from mitmproxy.proxy.layers.http._base import StreamId
from mitmproxy.proxy.utils import expect
from mitmproxy.utils import human
TBodyReader = Union[ChunkedReader, Http10Reader, ContentLengthReader]
class Http1Connection(HttpConnection, metaclass=abc.ABCMeta):
stream_id: StreamId | None = None
request: http.Request | None = None
response: http.Response | None = None
request_done: bool = False
response_done: bool = False
# this is a bit of a hack to make both mypy and PyCharm happy.
state: Callable[[events.Event], layer.CommandGenerator[None]] | Callable
body_reader: TBodyReader
buf: ReceiveBuffer
ReceiveProtocolError: type[RequestProtocolError | ResponseProtocolError]
ReceiveData: type[RequestData | ResponseData]
ReceiveEndOfMessage: type[RequestEndOfMessage | ResponseEndOfMessage]
def __init__(self, context: Context, conn: Connection):
super().__init__(context, conn)
self.buf = ReceiveBuffer()
@abc.abstractmethod
def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
@abc.abstractmethod
def read_headers(
self, event: events.ConnectionEvent
) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
if isinstance(event, HttpEvent):
yield from self.send(event)
else:
if (
isinstance(event, events.DataReceived)
and self.state != self.passthrough
):
self.buf += event.data
yield from self.state(event)
@expect(events.Start)
def start(self, _) -> layer.CommandGenerator[None]:
self.state = self.read_headers
yield from ()
state = start
def read_body(self, event: events.Event) -> layer.CommandGenerator[None]:
assert self.stream_id is not None
while True:
try:
if isinstance(event, events.DataReceived):
h11_event = self.body_reader(self.buf)
elif isinstance(event, events.ConnectionClosed):
h11_event = self.body_reader.read_eof()
else:
raise AssertionError(f"Unexpected event: {event}")
except h11.ProtocolError as e:
yield commands.CloseConnection(self.conn)
yield ReceiveHttp(
self.ReceiveProtocolError(
self.stream_id,
f"HTTP/1 protocol error: {e}",
code=self.ReceiveProtocolError.code,
)
)
return
if h11_event is None:
return
elif isinstance(h11_event, h11.Data):
data: bytes = bytes(h11_event.data)
if data:
yield ReceiveHttp(self.ReceiveData(self.stream_id, data))
elif isinstance(h11_event, h11.EndOfMessage):
assert self.request
if h11_event.headers:
raise NotImplementedError(f"HTTP trailers are not implemented yet.")
if self.request.data.method.upper() != b"CONNECT":
yield ReceiveHttp(self.ReceiveEndOfMessage(self.stream_id))
is_request = isinstance(self, Http1Server)
yield from self.mark_done(request=is_request, response=not is_request)
return
def wait(self, event: events.Event) -> layer.CommandGenerator[None]:
"""
We wait for the current flow to be finished before parsing the next message,
as we may want to upgrade to WebSocket or plain TCP before that.
"""
assert self.stream_id
if isinstance(event, events.DataReceived):
return
elif isinstance(event, events.ConnectionClosed):
# for practical purposes, we assume that a peer which sent at least a FIN
# is not interested in any more data from us, see
# see https://github.com/httpwg/http-core/issues/22
if event.connection.state is not ConnectionState.CLOSED:
yield commands.CloseConnection(event.connection)
yield ReceiveHttp(
self.ReceiveProtocolError(
self.stream_id,
f"Client disconnected.",
code=ErrorCode.CLIENT_DISCONNECTED,
)
)
else: # pragma: no cover
raise AssertionError(f"Unexpected event: {event}")
def done(self, event: events.ConnectionEvent) -> layer.CommandGenerator[None]:
yield from () # pragma: no cover
def make_pipe(self) -> layer.CommandGenerator[None]:
    """Switch this connection into raw passthrough mode and replay any
    bytes that are already sitting in the receive buffer."""
    self.state = self.passthrough
    if not self.buf:
        return
    pending = self.buf.maybe_extract_at_most(len(self.buf)) or b""
    # Some clients send superfluous newlines after CONNECT, we want to eat those.
    pending = pending.lstrip(b"\r\n")
    if pending:
        yield from self.state(events.DataReceived(self.conn, pending))
def passthrough(self, event: events.Event) -> layer.CommandGenerator[None]:
    """Forward raw bytes without HTTP framing; emit an end-of-message
    event when the peer closes the connection."""
    assert self.stream_id
    if isinstance(event, events.DataReceived):
        yield ReceiveHttp(self.ReceiveData(self.stream_id, event.data))
    elif isinstance(event, events.ConnectionClosed):
        # Server side ends the request stream, client side the response stream.
        end_of_message = (
            RequestEndOfMessage
            if isinstance(self, Http1Server)
            else ResponseEndOfMessage
        )
        yield ReceiveHttp(end_of_message(self.stream_id))
def mark_done(
    self, *, request: bool = False, response: bool = False
) -> layer.CommandGenerator[None]:
    """
    Record that the request and/or response has been fully transferred.
    Once both sides are done, decide the connection's fate: upgrade to a
    raw pipe, close it, or reset state for the next message.
    """
    if request:
        self.request_done = True
    if response:
        self.response_done = True
    if self.request_done and self.response_done:
        assert self.request
        assert self.response
        if should_make_pipe(self.request, self.response):
            yield from self.make_pipe()
            return
        try:
            # Body "runs until EOF" framing means the connection cannot be reused.
            read_until_eof_semantics = (
                http1.expected_http_body_size(self.request, self.response) == -1
            )
        except ValueError:
            # this may raise only now (and not earlier) because an addon set invalid headers,
            # in which case it's not really clear what we are supposed to do.
            read_until_eof_semantics = False
        connection_done = (
            read_until_eof_semantics
            or http1.connection_close(
                self.request.http_version, self.request.headers
            )
            or http1.connection_close(
                self.response.http_version, self.response.headers
            )
            # If we proxy HTTP/2 to HTTP/1, we only use upstream connections for one request.
            # This simplifies our connection management quite a bit as we can rely on
            # the proxyserver's max-connection-per-server throttling.
            or (
                (self.request.is_http2 or self.request.is_http3)
                and isinstance(self, Http1Client)
            )
        )
        if connection_done:
            yield commands.CloseConnection(self.conn)
            self.state = self.done
            return
        # Keep-alive: reset per-message state for the next exchange.
        self.request_done = self.response_done = False
        self.request = self.response = None
        if isinstance(self, Http1Server):
            # Server-side stream ids start at 1 and advance in steps of two.
            self.stream_id += 2
        else:
            self.stream_id = None
        self.state = self.read_headers
        if self.buf:
            # Data for the next message may already be buffered; kick off parsing.
            yield from self.state(events.DataReceived(self.conn, b""))
class Http1Server(Http1Connection):
    """A simple HTTP/1 server with no pipelining support."""

    # Server side receives requests, so the Receive* aliases are request-flavored.
    ReceiveProtocolError = RequestProtocolError
    ReceiveData = RequestData
    ReceiveEndOfMessage = RequestEndOfMessage
    stream_id: int  # always set on the server side (starts at 1, see mark_done)

    def __init__(self, context: Context):
        super().__init__(context, context.client)
        self.stream_id = 1

    def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
        """Serialize outgoing response events onto the client connection."""
        assert event.stream_id == self.stream_id
        if isinstance(event, ResponseHeaders):
            self.response = response = event.response
            if response.is_http2 or response.is_http3:
                response = response.copy()
                # Convert to an HTTP/1 response.
                response.http_version = "HTTP/1.1"
                # not everyone supports empty reason phrases, so we better make up one.
                response.reason = status_codes.RESPONSES.get(response.status_code, "")
                # Shall we set a Content-Length header here if there is none?
                # For now, let's try to modify as little as possible.
            raw = http1.assemble_response_head(response)
            yield commands.SendData(self.conn, raw)
        elif isinstance(event, ResponseData):
            assert self.response
            if "chunked" in self.response.headers.get("transfer-encoding", "").lower():
                # Wrap each chunk in chunked transfer-encoding framing.
                raw = b"%x\r\n%s\r\n" % (len(event.data), event.data)
            else:
                raw = event.data
            if raw:
                yield commands.SendData(self.conn, raw)
        elif isinstance(event, ResponseEndOfMessage):
            assert self.request
            assert self.response
            if (
                self.request.method.upper() != "HEAD"
                and "chunked"
                in self.response.headers.get("transfer-encoding", "").lower()
            ):
                # Terminating zero-length chunk.
                yield commands.SendData(self.conn, b"0\r\n\r\n")
            yield from self.mark_done(response=True)
        elif isinstance(event, ResponseProtocolError):
            if not (self.conn.state & ConnectionState.CAN_WRITE):
                return
            status = event.code.http_status_code()
            # Only synthesize an error response if we haven't sent headers yet.
            if not self.response and status is not None:
                yield commands.SendData(
                    self.conn, make_error_response(status, event.message)
                )
            yield commands.CloseConnection(self.conn)
        else:
            raise AssertionError(f"Unexpected event: {event}")

    def read_headers(
        self, event: events.ConnectionEvent
    ) -> layer.CommandGenerator[None]:
        """Parse the request line + headers from the receive buffer."""
        if isinstance(event, events.DataReceived):
            request_head = self.buf.maybe_extract_lines()
            if request_head:
                try:
                    self.request = http1.read_request_head(
                        [bytes(x) for x in request_head]
                    )
                    expected_body_size = http1.expected_http_body_size(self.request)
                except ValueError as e:
                    yield commands.SendData(self.conn, make_error_response(400, str(e)))
                    yield commands.CloseConnection(self.conn)
                    # self.request is set iff read_request_head succeeded and only
                    # expected_http_body_size raised.
                    if self.request:
                        # we have headers that we can show in the ui
                        yield ReceiveHttp(
                            RequestHeaders(self.stream_id, self.request, False)
                        )
                        yield ReceiveHttp(
                            RequestProtocolError(
                                self.stream_id, str(e), ErrorCode.GENERIC_CLIENT_ERROR
                            )
                        )
                    else:
                        yield commands.Log(
                            f"{human.format_address(self.conn.peername)}: {e}"
                        )
                    self.state = self.done
                    return
                yield ReceiveHttp(
                    RequestHeaders(
                        self.stream_id, self.request, expected_body_size == 0
                    )
                )
                self.body_reader = make_body_reader(expected_body_size)
                self.state = self.read_body
                yield from self.state(event)
            else:
                pass  # FIXME: protect against header size DoS
        elif isinstance(event, events.ConnectionClosed):
            buf = bytes(self.buf)
            if buf.strip():
                yield commands.Log(
                    f"Client closed connection before completing request headers: {buf!r}"
                )
            yield commands.CloseConnection(self.conn)
        else:
            raise AssertionError(f"Unexpected event: {event}")

    def mark_done(
        self, *, request: bool = False, response: bool = False
    ) -> layer.CommandGenerator[None]:
        yield from super().mark_done(request=request, response=response)
        # Request fully read but response still pending: hold off parsing the
        # next request (no pipelining, possible protocol upgrade).
        if self.request_done and not self.response_done:
            self.state = self.wait
class Http1Client(Http1Connection):
    """A simple HTTP/1 client with no pipelining support."""

    # Client side receives responses, so the Receive* aliases are response-flavored.
    ReceiveProtocolError = ResponseProtocolError
    ReceiveData = ResponseData
    ReceiveEndOfMessage = ResponseEndOfMessage

    def __init__(self, context: Context):
        super().__init__(context, context.server)

    def send(self, event: HttpEvent) -> layer.CommandGenerator[None]:
        """Serialize outgoing request events onto the server connection."""
        if isinstance(event, RequestProtocolError):
            yield commands.CloseConnection(self.conn)
            return
        # The first event for a new exchange binds this connection to its stream.
        if self.stream_id is None:
            assert isinstance(event, RequestHeaders)
            self.stream_id = event.stream_id
            self.request = event.request
        assert self.stream_id == event.stream_id
        if isinstance(event, RequestHeaders):
            request = event.request
            if request.is_http2 or request.is_http3:
                # Convert to an HTTP/1 request.
                request = (
                    request.copy()
                )  # (we could probably be a bit more efficient here.)
                request.http_version = "HTTP/1.1"
                if "Host" not in request.headers and request.authority:
                    request.headers.insert(0, "Host", request.authority)
                request.authority = ""
                cookie_headers = request.headers.get_all("Cookie")
                if len(cookie_headers) > 1:
                    # Only HTTP/2 supports multiple cookie headers, HTTP/1.x does not.
                    # see: https://www.rfc-editor.org/rfc/rfc6265#section-5.4
                    # https://www.rfc-editor.org/rfc/rfc7540#section-8.1.2.5
                    request.headers["Cookie"] = "; ".join(cookie_headers)
            raw = http1.assemble_request_head(request)
            yield commands.SendData(self.conn, raw)
        elif isinstance(event, RequestData):
            assert self.request
            if "chunked" in self.request.headers.get("transfer-encoding", "").lower():
                # Wrap each chunk in chunked transfer-encoding framing.
                raw = b"%x\r\n%s\r\n" % (len(event.data), event.data)
            else:
                raw = event.data
            if raw:
                yield commands.SendData(self.conn, raw)
        elif isinstance(event, RequestEndOfMessage):
            assert self.request
            if "chunked" in self.request.headers.get("transfer-encoding", "").lower():
                yield commands.SendData(self.conn, b"0\r\n\r\n")
            elif http1.expected_http_body_size(self.request, self.response) == -1:
                # Body framed by EOF: signal completion with a half-close.
                yield commands.CloseTcpConnection(self.conn, half_close=True)
            yield from self.mark_done(request=True)
        else:
            raise AssertionError(f"Unexpected event: {event}")

    def read_headers(
        self, event: events.ConnectionEvent
    ) -> layer.CommandGenerator[None]:
        """Parse the status line + headers of the server's response."""
        if isinstance(event, events.DataReceived):
            if not self.request:
                # we just received some data for an unknown request.
                yield commands.Log(f"Unexpected data from server: {bytes(self.buf)!r}")
                yield commands.CloseConnection(self.conn)
                return
            assert self.stream_id is not None
            response_head = self.buf.maybe_extract_lines()
            if response_head:
                try:
                    self.response = http1.read_response_head(
                        [bytes(x) for x in response_head]
                    )
                    expected_size = http1.expected_http_body_size(
                        self.request, self.response
                    )
                except ValueError as e:
                    yield commands.CloseConnection(self.conn)
                    yield ReceiveHttp(
                        ResponseProtocolError(
                            self.stream_id,
                            f"Cannot parse HTTP response: {e}",
                            ErrorCode.GENERIC_SERVER_ERROR,
                        )
                    )
                    return
                yield ReceiveHttp(
                    ResponseHeaders(self.stream_id, self.response, expected_size == 0)
                )
                self.body_reader = make_body_reader(expected_size)
                self.state = self.read_body
                yield from self.state(event)
            else:
                pass  # FIXME: protect against header size DoS
        elif isinstance(event, events.ConnectionClosed):
            if self.conn.state & ConnectionState.CAN_WRITE:
                yield commands.CloseConnection(self.conn)
            if self.stream_id:
                if self.buf:
                    yield ReceiveHttp(
                        ResponseProtocolError(
                            self.stream_id,
                            f"unexpected server response: {bytes(self.buf)!r}",
                            ErrorCode.GENERIC_SERVER_ERROR,
                        )
                    )
                else:
                    # The server has closed the connection to prevent us from continuing.
                    # We need to signal that to the stream.
                    # https://tools.ietf.org/html/rfc7231#section-6.5.11
                    yield ReceiveHttp(
                        ResponseProtocolError(
                            self.stream_id,
                            "server closed connection",
                            ErrorCode.GENERIC_SERVER_ERROR,
                        )
                    )
            else:
                return
        else:
            raise AssertionError(f"Unexpected event: {event}")
def should_make_pipe(request: http.Request, response: http.Response) -> bool:
    """Return True if this exchange switches protocols (101) or successfully
    opens a CONNECT tunnel (200), i.e. the connection becomes a raw pipe."""
    return response.status_code == 101 or (
        request.method.upper() == "CONNECT" and response.status_code == 200
    )
def make_body_reader(expected_size: int | None) -> TBodyReader:
    """Pick the h11 body reader matching the message's framing."""
    if expected_size is None:
        # Transfer-Encoding: chunked.
        return ChunkedReader()
    if expected_size == -1:
        # HTTP/1.0 semantics: body runs until the connection closes.
        return Http10Reader()
    # Fixed Content-Length.
    return ContentLengthReader(expected_size)
def make_error_response(
    status_code: int,
    message: str = "",
) -> bytes:
    """Assemble a complete, ready-to-send HTTP/1 error response."""
    headers = http.Headers(
        Server=version.MITMPROXY,
        Connection="close",
        Content_Type="text/html",
    )
    body = format_error(status_code, message)
    resp = http.Response.make(status_code, body, headers)
    return http1.assemble_response(resp)
# Public API of this module.
__all__ = [
    "Http1Client",
    "Http1Server",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_upstream_proxy.py | mitmproxy/proxy/layers/http/_upstream_proxy.py | import time
from logging import DEBUG
from h11._receivebuffer import ReceiveBuffer
from mitmproxy import connection
from mitmproxy import http
from mitmproxy.net.http import http1
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import layer
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.layers import tls
from mitmproxy.proxy.layers.http._hooks import HttpConnectUpstreamHook
from mitmproxy.utils import human
class HttpUpstreamProxy(tunnel.TunnelLayer):
buf: ReceiveBuffer
send_connect: bool
conn: connection.Server
tunnel_connection: connection.Server
def __init__(
self, ctx: context.Context, tunnel_conn: connection.Server, send_connect: bool
):
super().__init__(ctx, tunnel_connection=tunnel_conn, conn=ctx.server)
self.buf = ReceiveBuffer()
self.send_connect = send_connect
@classmethod
def make(cls, ctx: context.Context, send_connect: bool) -> tunnel.LayerStack:
assert ctx.server.via
scheme, address = ctx.server.via
assert scheme in ("http", "https")
http_proxy = connection.Server(address=address)
stack = tunnel.LayerStack()
if scheme == "https":
http_proxy.alpn_offers = tls.HTTP1_ALPNS
http_proxy.sni = address[0]
stack /= tls.ServerTLSLayer(ctx, http_proxy)
stack /= cls(ctx, http_proxy, send_connect)
return stack
def start_handshake(self) -> layer.CommandGenerator[None]:
if not self.send_connect:
return (yield from super().start_handshake())
assert self.conn.address
flow = http.HTTPFlow(self.context.client, self.tunnel_connection)
authority = (
self.conn.address[0].encode("idna") + f":{self.conn.address[1]}".encode()
)
headers = http.Headers()
if self.context.options.http_connect_send_host_header:
headers.insert(0, b"Host", authority)
flow.request = http.Request(
host=self.conn.address[0],
port=self.conn.address[1],
method=b"CONNECT",
scheme=b"",
authority=authority,
path=b"",
http_version=b"HTTP/1.1",
headers=headers,
content=b"",
trailers=None,
timestamp_start=time.time(),
timestamp_end=time.time(),
)
yield HttpConnectUpstreamHook(flow)
raw = http1.assemble_request(flow.request)
yield commands.SendData(self.tunnel_connection, raw)
def receive_handshake_data(
self, data: bytes
) -> layer.CommandGenerator[tuple[bool, str | None]]:
if not self.send_connect:
return (yield from super().receive_handshake_data(data))
self.buf += data
response_head = self.buf.maybe_extract_lines()
if response_head:
try:
response = http1.read_response_head([bytes(x) for x in response_head])
except ValueError as e:
proxyaddr = human.format_address(self.tunnel_connection.address)
yield commands.Log(f"{proxyaddr}: {e}")
return False, f"Error connecting to {proxyaddr}: {e}"
if 200 <= response.status_code < 300:
if self.buf:
yield from self.receive_data(bytes(self.buf))
del self.buf
return True, None
else:
proxyaddr = human.format_address(self.tunnel_connection.address)
raw_resp = b"\n".join(response_head)
yield commands.Log(f"{proxyaddr}: {raw_resp!r}", DEBUG)
return (
False,
f"Upstream proxy {proxyaddr} refused HTTP CONNECT request: {response.status_code} {response.reason}",
)
else:
return False, None
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_events.py | mitmproxy/proxy/layers/http/_events.py | import enum
import typing
from dataclasses import dataclass
from ._base import HttpEvent
from mitmproxy import http
from mitmproxy.http import HTTPFlow
from mitmproxy.net.http import status_codes
@dataclass
class RequestHeaders(HttpEvent):
    """Request line and headers received; body bytes follow as RequestData events."""

    request: http.Request
    end_stream: bool
    """
    If True, we already know at this point that there is no message body. This is useful for HTTP/2, where it allows
    us to set END_STREAM on headers already (and some servers - Akamai - implicitly expect that).
    In either case, this event will nonetheless be followed by RequestEndOfMessage.
    """
    replay_flow: HTTPFlow | None = None
    """If set, the current request headers belong to a replayed flow, which should be reused."""
@dataclass
class ResponseHeaders(HttpEvent):
    """Status line and headers received; body bytes follow as ResponseData events."""

    response: http.Response
    end_stream: bool = False


# explicit constructors below to facilitate type checking in _http1/_http2
@dataclass
class RequestData(HttpEvent):
    """A chunk of the request body."""

    data: bytes

    def __init__(self, stream_id: int, data: bytes):
        self.stream_id = stream_id
        self.data = data
@dataclass
class ResponseData(HttpEvent):
    """A chunk of the response body."""

    data: bytes

    def __init__(self, stream_id: int, data: bytes):
        self.stream_id = stream_id
        self.data = data
@dataclass
class RequestTrailers(HttpEvent):
    """Trailing headers received after the request body."""

    trailers: http.Headers

    def __init__(self, stream_id: int, trailers: http.Headers):
        self.stream_id = stream_id
        self.trailers = trailers
@dataclass
class ResponseTrailers(HttpEvent):
    """Trailing headers received after the response body."""

    trailers: http.Headers

    def __init__(self, stream_id: int, trailers: http.Headers):
        self.stream_id = stream_id
        self.trailers = trailers
@dataclass
class RequestEndOfMessage(HttpEvent):
    """The request (including its body) has been fully received."""

    def __init__(self, stream_id: int):
        self.stream_id = stream_id
@dataclass
class ResponseEndOfMessage(HttpEvent):
    """The response (including its body) has been fully received."""

    def __init__(self, stream_id: int):
        self.stream_id = stream_id
class ErrorCode(enum.Enum):
    """Protocol-agnostic error codes; translated to HTTP status codes here and
    to h2/h3 error codes in the respective protocol implementations."""

    GENERIC_CLIENT_ERROR = 1
    GENERIC_SERVER_ERROR = 2
    REQUEST_TOO_LARGE = 3
    RESPONSE_TOO_LARGE = 4
    CONNECT_FAILED = 5
    PASSTHROUGH_CLOSE = 6
    KILL = 7
    HTTP_1_1_REQUIRED = 8
    """Client should fall back to HTTP/1.1 to perform request."""
    DESTINATION_UNKNOWN = 9
    """Proxy does not know where to send request to."""
    CLIENT_DISCONNECTED = 10
    """Client disconnected before receiving entire response."""
    CANCEL = 11
    """Client or server cancelled h2/h3 stream."""
    REQUEST_VALIDATION_FAILED = 12
    RESPONSE_VALIDATION_FAILED = 13

    def http_status_code(self) -> int | None:
        """Return the HTTP status code to answer with, or None if no HTTP
        response should be synthesized for this error."""
        match self:
            # Client Errors
            case (
                ErrorCode.GENERIC_CLIENT_ERROR
                | ErrorCode.REQUEST_VALIDATION_FAILED
                | ErrorCode.DESTINATION_UNKNOWN
            ):
                return status_codes.BAD_REQUEST
            case ErrorCode.REQUEST_TOO_LARGE:
                return status_codes.PAYLOAD_TOO_LARGE
            # Server/gateway errors
            case (
                ErrorCode.CONNECT_FAILED
                | ErrorCode.GENERIC_SERVER_ERROR
                | ErrorCode.RESPONSE_VALIDATION_FAILED
                | ErrorCode.RESPONSE_TOO_LARGE
            ):
                return status_codes.BAD_GATEWAY
            # Errors for which no HTTP response is generated.
            case (
                ErrorCode.PASSTHROUGH_CLOSE
                | ErrorCode.KILL
                | ErrorCode.HTTP_1_1_REQUIRED
                | ErrorCode.CLIENT_DISCONNECTED
                | ErrorCode.CANCEL
            ):
                return None
            case other:  # pragma: no cover
                typing.assert_never(other)
@dataclass
class RequestProtocolError(HttpEvent):
    """A protocol error occurred on the request side of the stream."""

    message: str
    code: ErrorCode = ErrorCode.GENERIC_CLIENT_ERROR

    def __init__(self, stream_id: int, message: str, code: ErrorCode):
        assert isinstance(code, ErrorCode)
        self.stream_id = stream_id
        self.message = message
        self.code = code
@dataclass
class ResponseProtocolError(HttpEvent):
    """A protocol error occurred on the response side of the stream."""

    message: str
    code: ErrorCode = ErrorCode.GENERIC_SERVER_ERROR

    def __init__(self, stream_id: int, message: str, code: ErrorCode):
        assert isinstance(code, ErrorCode)
        self.stream_id = stream_id
        self.message = message
        self.code = code
# Public API of this module.
__all__ = [
    "ErrorCode",
    "HttpEvent",
    "RequestHeaders",
    "RequestData",
    "RequestEndOfMessage",
    "ResponseHeaders",
    "ResponseData",
    "RequestTrailers",
    "ResponseTrailers",
    "ResponseEndOfMessage",
    "RequestProtocolError",
    "ResponseProtocolError",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_http3.py | mitmproxy/proxy/layers/http/_http3.py | import time
from abc import abstractmethod
from typing import assert_never
from aioquic.h3.connection import ErrorCode as H3ErrorCode
from aioquic.h3.connection import FrameUnexpected as H3FrameUnexpected
from aioquic.h3.events import DataReceived
from aioquic.h3.events import HeadersReceived
from aioquic.h3.events import PushPromiseReceived
from . import ErrorCode
from . import RequestData
from . import RequestEndOfMessage
from . import RequestHeaders
from . import RequestProtocolError
from . import RequestTrailers
from . import ResponseData
from . import ResponseEndOfMessage
from . import ResponseHeaders
from . import ResponseProtocolError
from . import ResponseTrailers
from ._base import format_error
from ._base import HttpConnection
from ._base import HttpEvent
from ._base import ReceiveHttp
from ._http2 import format_h2_request_headers
from ._http2 import format_h2_response_headers
from ._http2 import parse_h2_request_headers
from ._http2 import parse_h2_response_headers
from ._http_h3 import LayeredH3Connection
from ._http_h3 import StreamClosed
from ._http_h3 import TrailersReceived
from mitmproxy import connection
from mitmproxy import http
from mitmproxy import version
from mitmproxy.proxy import commands
from mitmproxy.proxy import context
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.layers.quic import error_code_to_str
from mitmproxy.proxy.layers.quic import QuicConnectionClosed
from mitmproxy.proxy.layers.quic import QuicStreamEvent
from mitmproxy.proxy.utils import expect
class Http3Connection(HttpConnection):
    """Translates between mitmproxy HTTP events and HTTP/3 frames carried
    over QUIC stream events, in both directions."""

    h3_conn: LayeredH3Connection

    # Concrete subclasses bind these to the request- or response-flavored events.
    ReceiveData: type[RequestData | ResponseData]
    ReceiveEndOfMessage: type[RequestEndOfMessage | ResponseEndOfMessage]
    ReceiveProtocolError: type[RequestProtocolError | ResponseProtocolError]
    ReceiveTrailers: type[RequestTrailers | ResponseTrailers]

    def __init__(self, context: context.Context, conn: connection.Connection):
        super().__init__(context, conn)
        self.h3_conn = LayeredH3Connection(
            self.conn, is_client=self.conn is self.context.server
        )

    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        if isinstance(event, events.Start):
            yield from self.h3_conn.transmit()

        # send mitmproxy HTTP events over the H3 connection
        elif isinstance(event, HttpEvent):
            try:
                if isinstance(event, (RequestData, ResponseData)):
                    self.h3_conn.send_data(event.stream_id, event.data)
                elif isinstance(event, (RequestHeaders, ResponseHeaders)):
                    # Header formatting is shared with the HTTP/2 implementation.
                    headers = yield from (
                        format_h2_request_headers(self.context, event)
                        if isinstance(event, RequestHeaders)
                        else format_h2_response_headers(self.context, event)
                    )
                    self.h3_conn.send_headers(
                        event.stream_id, headers, end_stream=event.end_stream
                    )
                elif isinstance(event, (RequestTrailers, ResponseTrailers)):
                    self.h3_conn.send_trailers(
                        event.stream_id, [*event.trailers.fields]
                    )
                elif isinstance(event, (RequestEndOfMessage, ResponseEndOfMessage)):
                    self.h3_conn.end_stream(event.stream_id)
                elif isinstance(event, (RequestProtocolError, ResponseProtocolError)):
                    status = event.code.http_status_code()
                    # Synthesize an HTML error response if we still can;
                    # otherwise close the stream with a mapped h3 error code.
                    if (
                        isinstance(event, ResponseProtocolError)
                        and not self.h3_conn.has_sent_headers(event.stream_id)
                        and status is not None
                    ):
                        self.h3_conn.send_headers(
                            event.stream_id,
                            [
                                (b":status", b"%d" % status),
                                (b"server", version.MITMPROXY.encode()),
                                (b"content-type", b"text/html"),
                            ],
                        )
                        self.h3_conn.send_data(
                            event.stream_id,
                            format_error(status, event.message),
                            end_stream=True,
                        )
                    else:
                        match event.code:
                            case ErrorCode.CANCEL | ErrorCode.CLIENT_DISCONNECTED:
                                error_code = H3ErrorCode.H3_REQUEST_CANCELLED
                            case ErrorCode.KILL:
                                error_code = H3ErrorCode.H3_INTERNAL_ERROR
                            case ErrorCode.HTTP_1_1_REQUIRED:
                                error_code = H3ErrorCode.H3_VERSION_FALLBACK
                            case ErrorCode.PASSTHROUGH_CLOSE:
                                # FIXME: This probably shouldn't be a protocol error, but an EOM event.
                                error_code = H3ErrorCode.H3_REQUEST_CANCELLED
                            case (
                                ErrorCode.GENERIC_CLIENT_ERROR
                                | ErrorCode.GENERIC_SERVER_ERROR
                                | ErrorCode.REQUEST_TOO_LARGE
                                | ErrorCode.RESPONSE_TOO_LARGE
                                | ErrorCode.CONNECT_FAILED
                                | ErrorCode.DESTINATION_UNKNOWN
                                | ErrorCode.REQUEST_VALIDATION_FAILED
                                | ErrorCode.RESPONSE_VALIDATION_FAILED
                            ):
                                error_code = H3ErrorCode.H3_INTERNAL_ERROR
                            case other:  # pragma: no cover
                                assert_never(other)
                        self.h3_conn.close_stream(event.stream_id, error_code.value)
                else:  # pragma: no cover
                    raise AssertionError(f"Unexpected event: {event!r}")
            except H3FrameUnexpected as e:
                # Http2Connection also ignores HttpEvents that violate the current stream state
                yield commands.Log(f"Received {event!r} unexpectedly: {e}")
            else:
                # transmit buffered data
                yield from self.h3_conn.transmit()

        # forward stream messages from the QUIC layer to the H3 connection
        elif isinstance(event, QuicStreamEvent):
            h3_events = self.h3_conn.handle_stream_event(event)
            for h3_event in h3_events:
                if isinstance(h3_event, StreamClosed):
                    err_str = error_code_to_str(h3_event.error_code)
                    match h3_event.error_code:
                        case H3ErrorCode.H3_REQUEST_CANCELLED:
                            err_code = ErrorCode.CANCEL
                        case H3ErrorCode.H3_VERSION_FALLBACK:
                            err_code = ErrorCode.HTTP_1_1_REQUIRED
                        case _:
                            err_code = self.ReceiveProtocolError.code
                    yield ReceiveHttp(
                        self.ReceiveProtocolError(
                            h3_event.stream_id,
                            f"stream closed by client ({err_str})",
                            code=err_code,
                        )
                    )
                elif isinstance(h3_event, DataReceived):
                    if h3_event.data:
                        yield ReceiveHttp(
                            self.ReceiveData(h3_event.stream_id, h3_event.data)
                        )
                    if h3_event.stream_ended:
                        yield ReceiveHttp(self.ReceiveEndOfMessage(h3_event.stream_id))
                elif isinstance(h3_event, HeadersReceived):
                    try:
                        receive_event = self.parse_headers(h3_event)
                    except ValueError as e:
                        self.h3_conn.close_connection(
                            error_code=H3ErrorCode.H3_GENERAL_PROTOCOL_ERROR,
                            reason_phrase=f"Invalid HTTP/3 request headers: {e}",
                        )
                    else:
                        yield ReceiveHttp(receive_event)
                        if h3_event.stream_ended:
                            yield ReceiveHttp(
                                self.ReceiveEndOfMessage(h3_event.stream_id)
                            )
                elif isinstance(h3_event, TrailersReceived):
                    yield ReceiveHttp(
                        self.ReceiveTrailers(
                            h3_event.stream_id, http.Headers(h3_event.trailers)
                        )
                    )
                    if h3_event.stream_ended:
                        yield ReceiveHttp(self.ReceiveEndOfMessage(h3_event.stream_id))
                elif isinstance(h3_event, PushPromiseReceived):  # pragma: no cover
                    self.h3_conn.close_connection(
                        error_code=H3ErrorCode.H3_GENERAL_PROTOCOL_ERROR,
                        reason_phrase=f"Received HTTP/3 push promise, even though we signalled no support.",
                    )
                else:  # pragma: no cover
                    raise AssertionError(f"Unexpected event: {event!r}")
            yield from self.h3_conn.transmit()

        # report a protocol error for all remaining open streams when a connection is closed
        elif isinstance(event, QuicConnectionClosed):
            self._handle_event = self.done  # type: ignore
            self.h3_conn.handle_connection_closed(event)
            msg = event.reason_phrase or error_code_to_str(event.error_code)
            for stream_id in self.h3_conn.get_open_stream_ids():
                yield ReceiveHttp(
                    self.ReceiveProtocolError(
                        stream_id, msg, self.ReceiveProtocolError.code
                    )
                )

        else:  # pragma: no cover
            raise AssertionError(f"Unexpected event: {event!r}")

    @expect(HttpEvent, QuicStreamEvent, QuicConnectionClosed)
    def done(self, _) -> layer.CommandGenerator[None]:
        # Terminal state after the QUIC connection is closed.
        yield from ()

    @abstractmethod
    def parse_headers(self, event: HeadersReceived) -> RequestHeaders | ResponseHeaders:
        """Parse a HEADERS frame into the appropriate mitmproxy event."""
        pass  # pragma: no cover
class Http3Server(Http3Connection):
    """HTTP/3 connection facing the client: receives requests."""

    ReceiveData = RequestData
    ReceiveEndOfMessage = RequestEndOfMessage
    ReceiveProtocolError = RequestProtocolError
    ReceiveTrailers = RequestTrailers

    def __init__(self, context: context.Context):
        super().__init__(context, context.client)

    def parse_headers(self, event: HeadersReceived) -> RequestHeaders | ResponseHeaders:
        # same as HTTP/2
        (
            host,
            port,
            method,
            scheme,
            authority,
            path,
            headers,
        ) = parse_h2_request_headers(event.headers)
        request = http.Request(
            host=host,
            port=port,
            method=method,
            scheme=scheme,
            authority=authority,
            path=path,
            http_version=b"HTTP/3",
            headers=headers,
            content=None,
            trailers=None,
            timestamp_start=time.time(),
            timestamp_end=None,
        )
        return RequestHeaders(event.stream_id, request, end_stream=event.stream_ended)
class Http3Client(Http3Connection):
    """HTTP/3 connection facing the server: sends requests, receives responses."""

    ReceiveData = ResponseData
    ReceiveEndOfMessage = ResponseEndOfMessage
    ReceiveProtocolError = ResponseProtocolError
    ReceiveTrailers = ResponseTrailers

    # Bidirectional mapping between proxy-internal stream ids and the ids
    # actually used on this HTTP/3 connection.
    our_stream_id: dict[int, int]
    their_stream_id: dict[int, int]

    def __init__(self, context: context.Context):
        super().__init__(context, context.server)
        self.our_stream_id = {}
        self.their_stream_id = {}

    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        # QUIC and HTTP/3 would actually allow for direct stream ID mapping, but since we want
        # to support H2<->H3, we need to translate IDs.
        # NOTE: We always create bidirectional streams, as we can't safely infer unidirectionality.
        if isinstance(event, HttpEvent):
            ours = self.our_stream_id.get(event.stream_id, None)
            if ours is None:
                ours = self.h3_conn.get_next_available_stream_id()
                self.our_stream_id[event.stream_id] = ours
                self.their_stream_id[ours] = event.stream_id
            event.stream_id = ours

        for cmd in super()._handle_event(event):
            if isinstance(cmd, ReceiveHttp):
                # Translate back to the proxy-internal stream id.
                cmd.event.stream_id = self.their_stream_id[cmd.event.stream_id]
            yield cmd

    def parse_headers(self, event: HeadersReceived) -> RequestHeaders | ResponseHeaders:
        # same as HTTP/2
        status_code, headers = parse_h2_response_headers(event.headers)
        response = http.Response(
            http_version=b"HTTP/3",
            status_code=status_code,
            reason=b"",
            headers=headers,
            content=None,
            trailers=None,
            timestamp_start=time.time(),
            timestamp_end=None,
        )
        return ResponseHeaders(event.stream_id, response, event.stream_ended)
# Public API of this module.
__all__ = [
    "Http3Client",
    "Http3Server",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_http2.py | mitmproxy/proxy/layers/http/_http2.py | import collections
import time
from collections.abc import Sequence
from enum import Enum
from logging import DEBUG
from logging import ERROR
from typing import Any
from typing import assert_never
from typing import ClassVar
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.settings
import h2.stream
import h2.utilities
from ...commands import CloseConnection
from ...commands import Log
from ...commands import RequestWakeup
from ...commands import SendData
from ...context import Context
from ...events import ConnectionClosed
from ...events import DataReceived
from ...events import Event
from ...events import Start
from ...events import Wakeup
from ...layer import CommandGenerator
from ...utils import expect
from . import ErrorCode
from . import RequestData
from . import RequestEndOfMessage
from . import RequestHeaders
from . import RequestProtocolError
from . import RequestTrailers
from . import ResponseData
from . import ResponseEndOfMessage
from . import ResponseHeaders
from . import ResponseProtocolError
from . import ResponseTrailers
from ._base import format_error
from ._base import HttpConnection
from ._base import HttpEvent
from ._base import ReceiveHttp
from ._http_h2 import BufferedH2Connection
from ._http_h2 import H2ConnectionLogger
from mitmproxy import http
from mitmproxy import version
from mitmproxy.connection import Connection
from mitmproxy.net.http import status_codes
from mitmproxy.net.http import url
from mitmproxy.utils import human
class StreamState(Enum):
    """Per-stream receive state tracked by the HTTP/2 connection."""

    EXPECTING_HEADERS = 1
    HEADERS_RECEIVED = 2
# NOTE(review): presumably the exception types hyper-h2 may raise internally
# that callers want to treat as protocol errors — the usage site is not visible
# in this chunk; confirm against the rest of the module.
CATCH_HYPER_H2_ERRORS = (ValueError, IndexError)
class Http2Connection(HttpConnection):
    """
    Shared base for HTTP/2 client and server connection handling.

    Wraps a hyper-h2 `BufferedH2Connection` state machine: incoming wire data is
    fed to hyper-h2 and translated into mitmproxy Receive* events; outgoing
    mitmproxy Http* events are translated into hyper-h2 calls and flushed as
    SendData commands. Subclasses (Http2Server/Http2Client) set the Receive*
    event classes and the h2 configuration.
    """

    # class-level h2 configuration, set by subclasses (client_side differs)
    h2_conf: ClassVar[h2.config.H2Configuration]
    h2_conf_defaults: dict[str, Any] = dict(
        header_encoding=False,
        validate_outbound_headers=False,
        # validate_inbound_headers is controlled by the validate_inbound_headers option.
        normalize_inbound_headers=False,  # changing this to True is required to pass h2spec
        normalize_outbound_headers=False,
    )
    h2_conn: BufferedH2Connection
    streams: dict[int, StreamState]
    """keep track of all active stream ids to send protocol errors on teardown"""

    # Event classes used to surface received data; bound by subclasses so the
    # same code paths serve both directions (Request* vs Response* events).
    ReceiveProtocolError: type[RequestProtocolError | ResponseProtocolError]
    ReceiveData: type[RequestData | ResponseData]
    ReceiveTrailers: type[RequestTrailers | ResponseTrailers]
    ReceiveEndOfMessage: type[RequestEndOfMessage | ResponseEndOfMessage]

    def __init__(self, context: Context, conn: Connection):
        """Set up the hyper-h2 connection object; honors the validate_inbound_headers option."""
        super().__init__(context, conn)
        if self.debug:
            self.h2_conf.logger = H2ConnectionLogger(
                self.context.client.peername, self.__class__.__name__
            )
        self.h2_conf.validate_inbound_headers = (
            self.context.options.validate_inbound_headers
        )
        self.h2_conn = BufferedH2Connection(self.h2_conf)
        self.streams = {}

    def is_closed(self, stream_id: int) -> bool:
        """Check if a non-idle stream is closed"""
        stream = self.h2_conn.streams.get(stream_id, None)
        if (
            stream is not None
            and stream.state_machine.state is not h2.stream.StreamState.CLOSED
            and self.h2_conn.state_machine.state
            is not h2.connection.ConnectionState.CLOSED
        ):
            return False
        else:
            return True

    def is_open_for_us(self, stream_id: int) -> bool:
        """Check if we can write to a non-idle stream."""
        stream = self.h2_conn.streams.get(stream_id, None)
        if (
            stream is not None
            and stream.state_machine.state
            is not h2.stream.StreamState.HALF_CLOSED_LOCAL
            and stream.state_machine.state is not h2.stream.StreamState.CLOSED
            and self.h2_conn.state_machine.state
            is not h2.connection.ConnectionState.CLOSED
        ):
            return True
        else:
            return False

    def _handle_event(self, event: Event) -> CommandGenerator[None]:
        """Translate proxy-core events into hyper-h2 calls and vice versa."""
        if isinstance(event, Start):
            self.h2_conn.initiate_connection()
            yield SendData(self.conn, self.h2_conn.data_to_send())
        elif isinstance(event, HttpEvent):
            # Outgoing direction: all sends are guarded by is_open_for_us() /
            # is_closed() because the peer may have reset the stream already.
            if isinstance(event, (RequestData, ResponseData)):
                if self.is_open_for_us(event.stream_id):
                    self.h2_conn.send_data(event.stream_id, event.data)
            elif isinstance(event, (RequestTrailers, ResponseTrailers)):
                if self.is_open_for_us(event.stream_id):
                    trailers = [*event.trailers.fields]
                    self.h2_conn.send_trailers(event.stream_id, trailers)
            elif isinstance(event, (RequestEndOfMessage, ResponseEndOfMessage)):
                if self.is_open_for_us(event.stream_id):
                    self.h2_conn.end_stream(event.stream_id)
            elif isinstance(event, (RequestProtocolError, ResponseProtocolError)):
                if not self.is_closed(event.stream_id):
                    stream: h2.stream.H2Stream = self.h2_conn.streams[event.stream_id]
                    status = event.code.http_status_code()
                    # If we can still send a proper HTTP error response (headers
                    # not sent yet), prefer that over a bare stream reset.
                    if (
                        isinstance(event, ResponseProtocolError)
                        and self.is_open_for_us(event.stream_id)
                        and not stream.state_machine.headers_sent
                        and status is not None
                    ):
                        self.h2_conn.send_headers(
                            event.stream_id,
                            [
                                (b":status", b"%d" % status),
                                (b"server", version.MITMPROXY.encode()),
                                (b"content-type", b"text/html"),
                            ],
                        )
                        self.h2_conn.send_data(
                            event.stream_id,
                            format_error(status, event.message),
                            end_stream=True,
                        )
                    else:
                        # Map mitmproxy error codes to HTTP/2 RST_STREAM codes.
                        match event.code:
                            case ErrorCode.CANCEL | ErrorCode.CLIENT_DISCONNECTED:
                                error_code = h2.errors.ErrorCodes.CANCEL
                            case ErrorCode.KILL:
                                # XXX: Debateable whether this is the best error code.
                                error_code = h2.errors.ErrorCodes.INTERNAL_ERROR
                            case ErrorCode.HTTP_1_1_REQUIRED:
                                error_code = h2.errors.ErrorCodes.HTTP_1_1_REQUIRED
                            case ErrorCode.PASSTHROUGH_CLOSE:
                                # FIXME: This probably shouldn't be a protocol error, but an EOM event.
                                error_code = h2.errors.ErrorCodes.CANCEL
                            case (
                                ErrorCode.GENERIC_CLIENT_ERROR
                                | ErrorCode.GENERIC_SERVER_ERROR
                                | ErrorCode.REQUEST_TOO_LARGE
                                | ErrorCode.RESPONSE_TOO_LARGE
                                | ErrorCode.CONNECT_FAILED
                                | ErrorCode.DESTINATION_UNKNOWN
                                | ErrorCode.REQUEST_VALIDATION_FAILED
                                | ErrorCode.RESPONSE_VALIDATION_FAILED
                            ):
                                error_code = h2.errors.ErrorCodes.INTERNAL_ERROR
                            case other:  # pragma: no cover
                                assert_never(other)
                        self.h2_conn.reset_stream(event.stream_id, error_code.value)
            else:
                raise AssertionError(f"Unexpected event: {event}")
            data_to_send = self.h2_conn.data_to_send()
            if data_to_send:
                yield SendData(self.conn, data_to_send)
        elif isinstance(event, DataReceived):
            try:
                try:
                    events = self.h2_conn.receive_data(event.data)
                except CATCH_HYPER_H2_ERRORS as e:  # pragma: no cover
                    # this should never raise a ValueError, but we triggered one while fuzzing:
                    # https://github.com/python-hyper/hyper-h2/issues/1231
                    # this stays here as defense-in-depth.
                    raise h2.exceptions.ProtocolError(
                        f"uncaught hyper-h2 error: {e}"
                    ) from e
            except h2.exceptions.ProtocolError as e:
                # A ProtocolError is fed through handle_h2_event like a regular
                # h2 event, where it terminates the connection.
                events = [e]
            for h2_event in events:
                if self.debug:
                    yield Log(f"{self.debug}[h2] {h2_event}", DEBUG)
                if (yield from self.handle_h2_event(h2_event)):
                    if self.debug:
                        yield Log(f"{self.debug}[h2] done", DEBUG)
                    return
            data_to_send = self.h2_conn.data_to_send()
            if data_to_send:
                yield SendData(self.conn, data_to_send)
        elif isinstance(event, ConnectionClosed):
            yield from self.close_connection("peer closed connection")
        else:
            raise AssertionError(f"Unexpected event: {event!r}")

    def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:
        """returns true if further processing should be stopped."""
        if isinstance(event, h2.events.DataReceived):
            state = self.streams.get(event.stream_id, None)
            if state is StreamState.HEADERS_RECEIVED:
                # Suppress a zero-length end-of-stream DATA frame; the stream
                # end is surfaced separately via the StreamEnded event.
                is_empty_eos_data_frame = event.stream_ended and not event.data
                if not is_empty_eos_data_frame:
                    yield ReceiveHttp(self.ReceiveData(event.stream_id, event.data))
            elif state is StreamState.EXPECTING_HEADERS:
                yield from self.protocol_error(
                    f"Received HTTP/2 data frame, expected headers."
                )
                return True
            # Always release flow-control window, even for dropped frames.
            self.h2_conn.acknowledge_received_data(
                event.flow_controlled_length, event.stream_id
            )
        elif isinstance(event, h2.events.TrailersReceived):
            trailers = http.Headers(event.headers)
            yield ReceiveHttp(self.ReceiveTrailers(event.stream_id, trailers))
        elif isinstance(event, h2.events.StreamEnded):
            state = self.streams.get(event.stream_id, None)
            if state is StreamState.HEADERS_RECEIVED:
                yield ReceiveHttp(self.ReceiveEndOfMessage(event.stream_id))
            elif state is StreamState.EXPECTING_HEADERS:
                raise AssertionError("unreachable")
            if self.is_closed(event.stream_id):
                self.streams.pop(event.stream_id, None)
        elif isinstance(event, h2.events.StreamReset):
            if event.stream_id in self.streams:
                try:
                    err_str = h2.errors.ErrorCodes(event.error_code).name
                except ValueError:
                    err_str = str(event.error_code)
                match event.error_code:
                    case h2.errors.ErrorCodes.CANCEL:
                        err_code = ErrorCode.CANCEL
                    case h2.errors.ErrorCodes.HTTP_1_1_REQUIRED:
                        err_code = ErrorCode.HTTP_1_1_REQUIRED
                    case _:
                        err_code = self.ReceiveProtocolError.code
                yield ReceiveHttp(
                    self.ReceiveProtocolError(
                        event.stream_id,
                        f"stream reset by client ({err_str})",
                        code=err_code,
                    )
                )
                self.streams.pop(event.stream_id)
            else:
                pass  # We don't track priority frames which could be followed by a stream reset here.
        elif isinstance(event, h2.exceptions.ProtocolError):
            yield from self.protocol_error(f"HTTP/2 protocol error: {event}")
            return True
        elif isinstance(event, h2.events.ConnectionTerminated):
            yield from self.close_connection(f"HTTP/2 connection closed: {event!r}")
            return True
            # The implementation above isn't really ideal, we should probably only terminate streams > last_stream_id?
            # We currently lack a mechanism to signal that connections are still active but cannot be reused.
            # for stream_id in self.streams:
            #     if stream_id > event.last_stream_id:
            #         yield ReceiveHttp(self.ReceiveProtocolError(stream_id, f"HTTP/2 connection closed: {event!r}"))
            #         self.streams.pop(stream_id)
        elif isinstance(event, h2.events.RemoteSettingsChanged):
            pass
        elif isinstance(event, h2.events.SettingsAcknowledged):
            pass
        elif isinstance(event, h2.events.PriorityUpdated):
            pass
        elif isinstance(event, h2.events.PingReceived):
            pass
        elif isinstance(event, h2.events.PingAckReceived):
            pass
        elif isinstance(event, h2.events.PushedStreamReceived):
            yield Log(
                "Received HTTP/2 push promise, even though we signalled no support.",
                ERROR,
            )
        elif isinstance(event, h2.events.UnknownFrameReceived):
            # https://http2.github.io/http2-spec/#rfc.section.4.1
            # Implementations MUST ignore and discard any frame that has a type that is unknown.
            yield Log(f"Ignoring unknown HTTP/2 frame type: {event.frame.type}")
        elif isinstance(event, h2.events.AlternativeServiceAvailable):
            yield Log(
                "Received HTTP/2 Alt-Svc frame, which will not be forwarded.", DEBUG
            )
        else:
            raise AssertionError(f"Unexpected event: {event!r}")
        return False

    def protocol_error(
        self,
        message: str,
        error_code: int = h2.errors.ErrorCodes.PROTOCOL_ERROR,
    ) -> CommandGenerator[None]:
        """Send a GOAWAY frame with the given error code and tear down the connection."""
        yield Log(f"{human.format_address(self.conn.peername)}: {message}")
        self.h2_conn.close_connection(error_code, message.encode())
        yield SendData(self.conn, self.h2_conn.data_to_send())
        yield from self.close_connection(message)

    def close_connection(self, msg: str) -> CommandGenerator[None]:
        """Close the transport and fail all streams that are still active."""
        yield CloseConnection(self.conn)
        for stream_id in self.streams:
            yield ReceiveHttp(
                self.ReceiveProtocolError(
                    stream_id, msg, self.ReceiveProtocolError.code
                )
            )
        self.streams.clear()
        # All further events are ignored via the done() no-op handler.
        self._handle_event = self.done  # type: ignore

    @expect(DataReceived, HttpEvent, ConnectionClosed, Wakeup)
    def done(self, _) -> CommandGenerator[None]:
        """Terminal state: swallow any remaining events."""
        yield from ()
def normalize_h1_headers(
    headers: list[tuple[bytes, bytes]], is_client: bool
) -> list[tuple[bytes, bytes]]:
    """Lowercase HTTP/1-style header names so they are valid for HTTP/2.

    HTTP/1 servers commonly send capitalized headers (Content-Length vs
    content-length), which isn't valid HTTP/2. As such we normalize.
    """
    flags = h2.utilities.HeaderValidationFlags(is_client, False, not is_client, False)
    normalized = h2.utilities.normalize_outbound_headers(headers, flags)
    # Materialize into a list: hyper-h2 silently drops headers when handed a
    # bare iterator instead of a reusable iterable.
    return list(normalized)
def normalize_h2_headers(headers: list[tuple[bytes, bytes]]) -> CommandGenerator[None]:
    """Lowercase header names in-place, yielding a Log command per adjustment."""
    for idx, (name, value) in enumerate(headers):
        if name.islower():
            continue
        yield Log(
            f"Lowercased {repr(name).lstrip('b')} header as uppercase is not allowed with HTTP/2."
        )
        headers[idx] = (name.lower(), value)
def format_h2_request_headers(
    context: Context,
    event: RequestHeaders,
) -> CommandGenerator[list[tuple[bytes, bytes]]]:
    """Build the HTTP/2 header list (pseudo headers first) for an outgoing request.

    May yield Log commands while normalizing header names; the header list is
    the generator's return value (consumed via `yield from`).
    """
    pseudo_headers = [
        (b":method", event.request.data.method),
        (b":scheme", event.request.data.scheme),
        (b":path", event.request.data.path),
    ]
    if event.request.authority:
        pseudo_headers.append((b":authority", event.request.data.authority))
    if event.request.is_http2 or event.request.is_http3:
        hdrs = list(event.request.headers.fields)
        if context.options.normalize_outbound_headers:
            yield from normalize_h2_headers(hdrs)
    else:
        headers = event.request.headers
        # For HTTP/1 requests, translate the Host header into :authority.
        if not event.request.authority and "host" in headers:
            headers = headers.copy()
            pseudo_headers.append((b":authority", headers.pop(b"host")))
        hdrs = normalize_h1_headers(list(headers.fields), True)
    return pseudo_headers + hdrs
def format_h2_response_headers(
    context: Context,
    event: ResponseHeaders,
) -> CommandGenerator[list[tuple[bytes, bytes]]]:
    """Build the HTTP/2 header list (with :status) for an outgoing response.

    May yield Log commands while normalizing header names; the header list is
    the generator's return value (consumed via `yield from`).
    """
    headers = [
        (b":status", b"%d" % event.response.status_code),
        *event.response.headers.fields,
    ]
    if event.response.is_http2 or event.response.is_http3:
        if context.options.normalize_outbound_headers:
            yield from normalize_h2_headers(headers)
    else:
        headers = normalize_h1_headers(headers, False)
    return headers
class Http2Server(Http2Connection):
    """HTTP/2 connection to the proxy's client: receives requests, sends responses."""

    h2_conf = h2.config.H2Configuration(
        **Http2Connection.h2_conf_defaults,
        client_side=False,
    )

    # Inbound events carry request data (we are the server side).
    ReceiveProtocolError = RequestProtocolError
    ReceiveData = RequestData
    ReceiveTrailers = RequestTrailers
    ReceiveEndOfMessage = RequestEndOfMessage

    def __init__(self, context: Context):
        super().__init__(context, context.client)

    def _handle_event(self, event: Event) -> CommandGenerator[None]:
        """Send response headers ourselves; defer everything else to the base class."""
        if isinstance(event, ResponseHeaders):
            if self.is_open_for_us(event.stream_id):
                self.h2_conn.send_headers(
                    event.stream_id,
                    headers=(
                        yield from format_h2_response_headers(self.context, event)
                    ),
                    end_stream=event.end_stream,
                )
                yield SendData(self.conn, self.h2_conn.data_to_send())
        else:
            yield from super()._handle_event(event)

    def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:
        """Translate incoming h2 request headers into a RequestHeaders event."""
        if isinstance(event, h2.events.RequestReceived):
            try:
                (
                    host,
                    port,
                    method,
                    scheme,
                    authority,
                    path,
                    headers,
                ) = parse_h2_request_headers(event.headers)
            except ValueError as e:
                yield from self.protocol_error(f"Invalid HTTP/2 request headers: {e}")
                return True
            request = http.Request(
                host=host,
                port=port,
                method=method,
                scheme=scheme,
                authority=authority,
                path=path,
                http_version=b"HTTP/2.0",
                headers=headers,
                content=None,
                trailers=None,
                timestamp_start=time.time(),
                timestamp_end=None,
            )
            self.streams[event.stream_id] = StreamState.HEADERS_RECEIVED
            yield ReceiveHttp(
                RequestHeaders(
                    event.stream_id, request, end_stream=bool(event.stream_ended)
                )
            )
            return False
        else:
            return (yield from super().handle_h2_event(event))
class Http2Client(Http2Connection):
    """HTTP/2 connection to an upstream server: sends requests, receives responses.

    Also handles stream-id remapping (proxy-side ids vs our outbound ids),
    queuing of streams beyond the server's concurrency limit, and optional
    keep-alive PINGs.
    """

    h2_conf = h2.config.H2Configuration(
        **Http2Connection.h2_conf_defaults,
        client_side=True,
    )

    # Inbound events carry response data (we are the client side).
    ReceiveProtocolError = ResponseProtocolError
    ReceiveData = ResponseData
    ReceiveTrailers = ResponseTrailers
    ReceiveEndOfMessage = ResponseEndOfMessage

    # outside stream id -> our outbound stream id
    our_stream_id: dict[int, int]
    # our outbound stream id -> outside stream id
    their_stream_id: dict[int, int]
    stream_queue: collections.defaultdict[int, list[Event]]
    """Queue of streams that we haven't sent yet because we have reached MAX_CONCURRENT_STREAMS"""
    provisional_max_concurrency: int | None = 10
    """A provisional currency limit before we get the server's first settings frame."""
    last_activity: float
    """Timestamp of when we've last seen network activity on this connection."""

    def __init__(self, context: Context):
        super().__init__(context, context.server)
        # Disable HTTP/2 push for now to keep things simple.
        # don't send here, that is done as part of initiate_connection().
        self.h2_conn.local_settings.enable_push = 0
        # hyper-h2 pitfall: we need to acknowledge here, otherwise its sends out the old settings.
        self.h2_conn.local_settings.acknowledge()
        self.our_stream_id = {}
        self.their_stream_id = {}
        self.stream_queue = collections.defaultdict(list)

    def _handle_event(self, event: Event) -> CommandGenerator[None]:
        """Remap stream ids both ways and manage the concurrency queue."""
        # We can't reuse stream ids from the client because they may arrived reordered here
        # and HTTP/2 forbids opening a stream on a lower id than what was previously sent (see test_stream_concurrency).
        # To mitigate this, we transparently map the outside's stream id to our stream id.
        if isinstance(event, HttpEvent):
            ours = self.our_stream_id.get(event.stream_id, None)
            if ours is None:
                no_free_streams = self.h2_conn.open_outbound_streams >= (
                    self.provisional_max_concurrency
                    or self.h2_conn.remote_settings.max_concurrent_streams
                )
                if no_free_streams:
                    # Park the event until a stream slot frees up.
                    self.stream_queue[event.stream_id].append(event)
                    return
                ours = self.h2_conn.get_next_available_stream_id()
                self.our_stream_id[event.stream_id] = ours
                self.their_stream_id[ours] = event.stream_id
            event.stream_id = ours
        for cmd in self._handle_event2(event):
            if isinstance(cmd, ReceiveHttp):
                # Translate our stream id back to the outside's id.
                cmd.event.stream_id = self.their_stream_id[cmd.event.stream_id]
            yield cmd
        can_resume_queue = self.stream_queue and self.h2_conn.open_outbound_streams < (
            self.provisional_max_concurrency
            or self.h2_conn.remote_settings.max_concurrent_streams
        )
        if can_resume_queue:
            # popitem would be LIFO, but we want FIFO.
            events = self.stream_queue.pop(next(iter(self.stream_queue)))
            for event in events:
                yield from self._handle_event(event)

    def _handle_event2(self, event: Event) -> CommandGenerator[None]:
        """Inner event handling: keep-alive pings, request headers, base behavior."""
        if isinstance(event, Wakeup):
            send_ping_now = (
                # add one second to avoid unnecessary roundtrip, we don't need to be super correct here.
                time.time() - self.last_activity + 1
                > self.context.options.http2_ping_keepalive
            )
            if send_ping_now:
                # PING frames MUST contain 8 octets of opaque data in the payload.
                # A sender can include any value it chooses and use those octets in any fashion.
                self.last_activity = time.time()
                self.h2_conn.ping(b"0" * 8)
                data = self.h2_conn.data_to_send()
                if data is not None:
                    yield Log(
                        f"Send HTTP/2 keep-alive PING to {human.format_address(self.conn.peername)}",
                        DEBUG,
                    )
                    yield SendData(self.conn, data)
            time_until_next_ping = self.context.options.http2_ping_keepalive - (
                time.time() - self.last_activity
            )
            yield RequestWakeup(time_until_next_ping)
            return
        self.last_activity = time.time()
        if isinstance(event, Start):
            if self.context.options.http2_ping_keepalive > 0:
                yield RequestWakeup(self.context.options.http2_ping_keepalive)
            yield from super()._handle_event(event)
        elif isinstance(event, RequestHeaders):
            self.h2_conn.send_headers(
                event.stream_id,
                headers=(yield from format_h2_request_headers(self.context, event)),
                end_stream=event.end_stream,
            )
            self.streams[event.stream_id] = StreamState.EXPECTING_HEADERS
            yield SendData(self.conn, self.h2_conn.data_to_send())
        else:
            yield from super()._handle_event(event)

    def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:
        """Translate incoming h2 response headers into a ResponseHeaders event."""
        if isinstance(event, h2.events.ResponseReceived):
            if (
                self.streams.get(event.stream_id, None)
                is not StreamState.EXPECTING_HEADERS
            ):
                yield from self.protocol_error(f"Received unexpected HTTP/2 response.")
                return True
            try:
                status_code, headers = parse_h2_response_headers(event.headers)
            except ValueError as e:
                yield from self.protocol_error(f"Invalid HTTP/2 response headers: {e}")
                return True
            response = http.Response(
                http_version=b"HTTP/2.0",
                status_code=status_code,
                reason=b"",
                headers=headers,
                content=None,
                trailers=None,
                timestamp_start=time.time(),
                timestamp_end=None,
            )
            self.streams[event.stream_id] = StreamState.HEADERS_RECEIVED
            yield ReceiveHttp(
                ResponseHeaders(event.stream_id, response, bool(event.stream_ended))
            )
            return False
        elif isinstance(event, h2.events.InformationalResponseReceived):
            # We violate the spec here ("A proxy MUST forward 1xx responses", RFC 7231),
            # but that's probably fine:
            # - 100 Continue is sent by mitmproxy to clients (irrespective of what the server does).
            # - 101 Switching Protocols is not allowed for HTTP/2.
            # - 102 Processing is WebDAV only and also ignorable.
            # - 103 Early Hints is not mission-critical.
            headers = http.Headers(event.headers)
            status: str | int = "<unknown status>"
            try:
                status = int(headers[":status"])
                reason = status_codes.RESPONSES.get(status, "")
            except (KeyError, ValueError):
                reason = ""
            yield Log(f"Swallowing HTTP/2 informational response: {status} {reason}")
            return False
        elif isinstance(event, h2.events.RequestReceived):
            yield from self.protocol_error(
                f"HTTP/2 protocol error: received request from server"
            )
            return True
        elif isinstance(event, h2.events.RemoteSettingsChanged):
            # We have received at least one settings from now,
            # which means we can rely on the max concurrency in remote_settings
            self.provisional_max_concurrency = None
            return (yield from super().handle_h2_event(event))
        else:
            return (yield from super().handle_h2_event(event))
def split_pseudo_headers(
    h2_headers: Sequence[tuple[bytes, bytes]],
) -> tuple[dict[bytes, bytes], http.Headers]:
    """Separate the leading HTTP/2 pseudo headers from the regular headers.

    Raises ValueError on duplicate pseudo headers.
    """
    pseudo_headers: dict[bytes, bytes] = {}
    split_at = 0
    for name, value in h2_headers:
        if not name.startswith(b":"):
            # Pseudo-headers must be at the start, we are done here.
            break
        if name in pseudo_headers:
            raise ValueError(f"Duplicate HTTP/2 pseudo header: {name!r}")
        pseudo_headers[name] = value
        split_at += 1
    return pseudo_headers, http.Headers(h2_headers[split_at:])
def parse_h2_request_headers(
    h2_headers: Sequence[tuple[bytes, bytes]],
) -> tuple[str, int, bytes, bytes, bytes, bytes, http.Headers]:
    """Split HTTP/2 pseudo-headers from the actual headers and parse them.

    Returns (host, port, method, scheme, authority, path, headers);
    raises ValueError for missing, unknown, or malformed pseudo headers.
    """
    pseudo_headers, headers = split_pseudo_headers(h2_headers)
    try:
        method: bytes = pseudo_headers.pop(b":method")
        # popping :scheme raises for HTTP/2 CONNECT requests, which omit it
        scheme: bytes = pseudo_headers.pop(b":scheme")
        path: bytes = pseudo_headers.pop(b":path")
        authority: bytes = pseudo_headers.pop(b":authority", b"")
    except KeyError as e:
        raise ValueError(f"Required pseudo header is missing: {e}")
    if pseudo_headers:
        raise ValueError(f"Unknown pseudo headers: {pseudo_headers}")

    host, port = "", 0
    if authority:
        host, port = url.parse_authority(authority, check=True)
        if port is None:
            port = 443 if scheme != b"http" else 80

    return host, port, method, scheme, authority, path, headers
def parse_h2_response_headers(
    h2_headers: Sequence[tuple[bytes, bytes]],
) -> tuple[int, http.Headers]:
    """Split HTTP/2 pseudo-headers from the actual headers and parse them.

    Returns (status_code, headers); raises ValueError for missing or
    unknown pseudo headers.
    """
    pseudo_headers, headers = split_pseudo_headers(h2_headers)
    try:
        raw_status = pseudo_headers.pop(b":status")
    except KeyError as e:
        raise ValueError(f"Required pseudo header is missing: {e}")
    # a non-numeric :status propagates as ValueError, same as above
    status_code: int = int(raw_status)
    if pseudo_headers:
        raise ValueError(f"Unknown pseudo headers: {pseudo_headers}")
    return status_code, headers
# Public interface of this module: header formatting/parsing helpers plus the
# HTTP/2 client and server connection state machines.
__all__ = [
    "format_h2_request_headers",
    "format_h2_response_headers",
    "parse_h2_request_headers",
    "parse_h2_response_headers",
    "Http2Client",
    "Http2Server",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/__init__.py | mitmproxy/proxy/layers/http/__init__.py | import collections
import enum
import time
from dataclasses import dataclass
from functools import cached_property
from logging import DEBUG
from logging import WARNING
import wsproto.handshake
from ...context import Context
from ...mode_specs import ReverseMode
from ...mode_specs import UpstreamMode
from ..quic import QuicStreamEvent
from ._base import HttpCommand
from ._base import HttpConnection
from ._base import ReceiveHttp
from ._base import StreamId
from ._events import ErrorCode
from ._events import HttpEvent
from ._events import RequestData
from ._events import RequestEndOfMessage
from ._events import RequestHeaders
from ._events import RequestProtocolError
from ._events import RequestTrailers
from ._events import ResponseData
from ._events import ResponseEndOfMessage
from ._events import ResponseHeaders
from ._events import ResponseProtocolError
from ._events import ResponseTrailers
from ._hooks import HttpConnectedHook
from ._hooks import HttpConnectErrorHook
from ._hooks import HttpConnectHook
from ._hooks import HttpErrorHook
from ._hooks import HttpRequestHeadersHook
from ._hooks import HttpRequestHook
from ._hooks import HttpResponseHeadersHook
from ._hooks import HttpResponseHook
from ._http1 import Http1Client
from ._http1 import Http1Connection
from ._http1 import Http1Server
from ._http2 import Http2Client
from ._http2 import Http2Server
from ._http3 import Http3Client
from ._http3 import Http3Server
from mitmproxy import flow
from mitmproxy import http
from mitmproxy.connection import Connection
from mitmproxy.connection import Server
from mitmproxy.connection import TransportProtocol
from mitmproxy.net import server_spec
from mitmproxy.net.http import url
from mitmproxy.net.http.http1 import expected_http_body_size
from mitmproxy.net.http.validate import validate_headers
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.layers import quic
from mitmproxy.proxy.layers import tcp
from mitmproxy.proxy.layers import tls
from mitmproxy.proxy.layers import websocket
from mitmproxy.proxy.layers.http import _upstream_proxy
from mitmproxy.proxy.utils import expect
from mitmproxy.proxy.utils import ReceiveBuffer
from mitmproxy.utils import human
from mitmproxy.websocket import WebSocketData
class HTTPMode(enum.Enum):
    """Deployment mode of the HTTP layer (affects CONNECT handling and
    how the request destination is determined)."""

    regular = 1
    transparent = 2
    upstream = 3
def validate_request(
    mode: HTTPMode, request: http.Request, validate_inbound_headers: bool
) -> str | None:
    """Return a human-readable error message if the request is invalid, else None."""
    if request.scheme not in ("http", "https", ""):
        return f"Invalid request scheme: {request.scheme}"
    if request.method == "CONNECT" and mode is HTTPMode.transparent:
        # CONNECT only makes sense when clients know they talk to a proxy.
        return (
            f"mitmproxy received an HTTP CONNECT request even though it is not running in regular/upstream mode. "
            f"This usually indicates a misconfiguration, please see the mitmproxy mode documentation for details."
        )
    if validate_inbound_headers:
        try:
            validate_headers(request)
        except ValueError as e:
            return (
                f"Received {e} from client, refusing to prevent request smuggling attacks. "
                "Disable the validate_inbound_headers option to skip this security check."
            )
    return None
def is_h3_alpn(alpn: bytes | None) -> bool:
return alpn == b"h3" or (alpn is not None and alpn.startswith(b"h3-"))
@dataclass
class GetHttpConnection(HttpCommand):
    """
    Open an HTTP Connection. This may not actually open a connection, but return an existing HTTP connection instead.
    """

    blocking = True
    address: tuple[str, int]
    tls: bool
    via: server_spec.ServerSpec | None
    transport_protocol: TransportProtocol = "tcp"

    def __hash__(self):
        # Identity-based hash: each in-flight command must stay distinct even
        # if two commands happen to have equal field values.
        return id(self)

    def connection_spec_matches(self, connection: Connection) -> bool:
        """Whether an existing server connection satisfies this request and may be reused."""
        return (
            isinstance(connection, Server)
            and self.address == connection.address
            and self.tls == connection.tls
            and self.via == connection.via
            and self.transport_protocol == connection.transport_protocol
        )
@dataclass
class GetHttpConnectionCompleted(events.CommandCompleted):
    """Reply to a GetHttpConnection command."""

    command: GetHttpConnection
    reply: tuple[None, str] | tuple[Connection, None]
    """connection object, error message"""
@dataclass
class RegisterHttpConnection(HttpCommand):
    """
    Register that a HTTP connection attempt has been completed.
    """

    connection: Connection
    err: str | None  # error message if the attempt failed, else None
@dataclass
class SendHttp(HttpCommand):
    """Forward an HTTP event over the given connection."""

    event: HttpEvent
    connection: Connection

    def __repr__(self) -> str:
        return f"Send({self.event})"
@dataclass
class DropStream(HttpCommand):
    """Signal to the HTTP layer that this stream is done processing and can be dropped from memory."""

    stream_id: StreamId
class HttpStream(layer.Layer):
    """
    A single HTTP request/response exchange within an HttpLayer.

    Client-side and server-side processing advance through two separate
    state-machine callbacks (`client_state` / `server_state`).
    """

    request_body_buf: ReceiveBuffer
    response_body_buf: ReceiveBuffer
    flow: http.HTTPFlow
    stream_id: StreamId
    child_layer: layer.Layer | None = None
    @cached_property
    def mode(self) -> HTTPMode:
        """The HTTPMode of the enclosing HttpLayer (looked up once, then cached)."""
        i = self.context.layers.index(self)
        # The layer directly below us in the stack is the owning HttpLayer.
        parent = self.context.layers[i - 1]
        assert isinstance(parent, HttpLayer)
        return parent.mode
    def __init__(self, context: Context, stream_id: int) -> None:
        super().__init__(context)
        self.request_body_buf = ReceiveBuffer()
        self.response_body_buf = ReceiveBuffer()
        # Both state machines start uninitialized; the Start event moves the
        # client state machine to state_wait_for_request_headers.
        self.client_state = self.state_uninitialized
        self.server_state = self.state_uninitialized
        self.stream_id = stream_id
    def __repr__(self):
        # In passthrough mode the state machines are bypassed entirely,
        # so showing their names would be misleading.
        if self._handle_event == self.passthrough:
            return f"HttpStream(id={self.stream_id}, passthrough)"
        else:
            return (
                f"HttpStream("
                f"id={self.stream_id}, "
                f"client_state={self.client_state.__name__}, "
                f"server_state={self.server_state.__name__}"
                f")"
            )
    @expect(events.Start, HttpEvent)
    def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:
        """Dispatch incoming events to the client or server state machine."""
        if isinstance(event, events.Start):
            self.client_state = self.state_wait_for_request_headers
        elif isinstance(event, (RequestProtocolError, ResponseProtocolError)):
            yield from self.handle_protocol_error(event)
        elif isinstance(
            event, (RequestHeaders, RequestData, RequestTrailers, RequestEndOfMessage)
        ):
            # Request-related events drive the client state machine ...
            yield from self.client_state(event)
        else:
            # ... everything else (response events) drives the server one.
            yield from self.server_state(event)
    @expect(RequestHeaders)
    def state_wait_for_request_headers(
        self, event: RequestHeaders
    ) -> layer.CommandGenerator[None]:
        """Initialize the flow, determine the destination, and run the requestheaders hook."""
        if not event.replay_flow:
            self.flow = http.HTTPFlow(self.context.client, self.context.server)
        else:
            # Replayed flows reuse the existing flow object.
            self.flow = event.replay_flow
        self.flow.request = event.request
        self.flow.live = True
        if (yield from self.check_invalid(True)):
            return
        if self.flow.request.method == "CONNECT":
            return (yield from self.handle_connect())
        if self.mode is HTTPMode.transparent:
            # Determine .scheme, .host and .port attributes for transparent requests
            assert self.context.server.address
            self.flow.request.data.host = self.context.server.address[0]
            self.flow.request.data.port = self.context.server.address[1]
            self.flow.request.scheme = "https" if self.context.server.tls else "http"
        elif not self.flow.request.host:
            # We need to extract destination information from the host header.
            try:
                host, port = url.parse_authority(
                    self.flow.request.host_header or "", check=True
                )
            except ValueError:
                yield SendHttp(
                    ResponseProtocolError(
                        self.stream_id,
                        "HTTP request has no host header, destination unknown.",
                        ErrorCode.DESTINATION_UNKNOWN,
                    ),
                    self.context.client,
                )
                self.client_state = self.state_errored
                return
            else:
                if port is None:
                    port = 443 if self.context.client.tls else 80
                self.flow.request.data.host = host
                self.flow.request.data.port = port
                self.flow.request.scheme = (
                    "https" if self.context.client.tls else "http"
                )
        if self.mode is HTTPMode.regular and not (
            self.flow.request.is_http2 or self.flow.request.is_http3
        ):
            # Set the request target to origin-form for HTTP/1, some servers don't support absolute-form requests.
            # see https://github.com/mitmproxy/mitmproxy/issues/1759
            self.flow.request.authority = ""
        # update host header in reverse proxy mode
        if (
            isinstance(self.context.client.proxy_mode, ReverseMode)
            and not self.context.options.keep_host_header
        ):
            assert self.context.server.address
            self.flow.request.host_header = url.hostport(
                "https" if self.context.server.tls else "http",
                self.context.server.address[0],
                self.context.server.address[1],
            )
        if not event.end_stream and (yield from self.check_body_size(True)):
            return
        yield HttpRequestHeadersHook(self.flow)
        if (yield from self.check_killed(True)):
            return
        if self.flow.request.headers.get("expect", "").lower() == "100-continue":
            # Answer the Expect: 100-continue ourselves and strip the header
            # before it reaches the server.
            continue_response = http.Response.make(100)
            continue_response.headers.clear()
            yield SendHttp(
                ResponseHeaders(self.stream_id, continue_response), self.context.client
            )
            self.flow.request.headers.pop("expect")
        if self.flow.request.stream and not event.end_stream:
            yield from self.start_request_stream()
        else:
            self.client_state = self.state_consume_request_body
        self.server_state = self.state_wait_for_response_headers
    def start_request_stream(self) -> layer.CommandGenerator[None]:
        """Connect upstream and begin streaming the request body chunk-by-chunk."""
        if self.flow.response:
            raise NotImplementedError(
                "Can't set a response and enable streaming at the same time."
            )
        ok = yield from self.make_server_connection()
        if not ok:
            self.client_state = self.state_errored
            return
        yield SendHttp(
            RequestHeaders(self.stream_id, self.flow.request, end_stream=False),
            self.context.server,
        )
        yield commands.Log(f"Streaming request to {self.flow.request.host}.")
        self.client_state = self.state_stream_request_body
    @expect(RequestData, RequestTrailers, RequestEndOfMessage)
    def state_stream_request_body(
        self, event: RequestData | RequestEndOfMessage
    ) -> layer.CommandGenerator[None]:
        """Forward request body chunks upstream, optionally transformed by flow.request.stream."""
        if isinstance(event, RequestData):
            if callable(self.flow.request.stream):
                # A user-supplied callable may transform, split, or drop chunks.
                chunks = self.flow.request.stream(event.data)
                if isinstance(chunks, bytes):
                    chunks = [chunks]
            else:
                chunks = [event.data]
            for chunk in chunks:
                if self.context.options.store_streamed_bodies:
                    self.request_body_buf += chunk
                yield SendHttp(RequestData(self.stream_id, chunk), self.context.server)
        elif isinstance(event, RequestTrailers):
            # we don't do anything further here, we wait for RequestEndOfMessage first to trigger the request hook.
            self.flow.request.trailers = event.trailers
        elif isinstance(event, RequestEndOfMessage):
            if callable(self.flow.request.stream):
                # Flush the stream callable with an empty chunk so it can emit
                # any buffered trailing data.
                chunks = self.flow.request.stream(b"")
                if chunks == b"":
                    chunks = []
                elif isinstance(chunks, bytes):
                    chunks = [chunks]
                for chunk in chunks:
                    if self.context.options.store_streamed_bodies:
                        self.request_body_buf += chunk
                    yield SendHttp(
                        RequestData(self.stream_id, chunk), self.context.server
                    )
            if self.context.options.store_streamed_bodies:
                self.flow.request.data.content = bytes(self.request_body_buf)
                self.request_body_buf.clear()
            self.flow.request.timestamp_end = time.time()
            yield HttpRequestHook(self.flow)
            self.client_state = self.state_done
            if self.flow.request.trailers:
                # we've delayed sending trailers until after `request` has been triggered.
                yield SendHttp(
                    RequestTrailers(self.stream_id, self.flow.request.trailers),
                    self.context.server,
                )
            yield SendHttp(event, self.context.server)
            if self.server_state == self.state_done:
                yield from self.flow_done()
    @expect(RequestData, RequestTrailers, RequestEndOfMessage)
    def state_consume_request_body(
        self, event: events.Event
    ) -> layer.CommandGenerator[None]:
        """Buffer the full request body, then run the request hook and forward upstream."""
        if isinstance(event, RequestData):
            self.request_body_buf += event.data
            yield from self.check_body_size(True)
        elif isinstance(event, RequestTrailers):
            assert self.flow.request
            self.flow.request.trailers = event.trailers
        elif isinstance(event, RequestEndOfMessage):
            self.flow.request.timestamp_end = time.time()
            self.flow.request.data.content = bytes(self.request_body_buf)
            self.request_body_buf.clear()
            self.client_state = self.state_done
            yield HttpRequestHook(self.flow)
            if (yield from self.check_killed(True)):
                return
            elif self.flow.response:
                # response was set by an inline script.
                # we now need to emulate the responseheaders hook.
                self.flow.response.timestamp_start = time.time()
                yield HttpResponseHeadersHook(self.flow)
                if (yield from self.check_killed(True)):
                    return
                yield from self.send_response()
            else:
                ok = yield from self.make_server_connection()
                if not ok:
                    return
                content = self.flow.request.raw_content
                # end_stream can be set on the headers frame if there is
                # neither a body nor trailers to send afterwards.
                done_after_headers = not (content or self.flow.request.trailers)
                yield SendHttp(
                    RequestHeaders(
                        self.stream_id, self.flow.request, done_after_headers
                    ),
                    self.context.server,
                )
                if content:
                    yield SendHttp(
                        RequestData(self.stream_id, content), self.context.server
                    )
                if self.flow.request.trailers:
                    yield SendHttp(
                        RequestTrailers(self.stream_id, self.flow.request.trailers),
                        self.context.server,
                    )
                yield SendHttp(RequestEndOfMessage(self.stream_id), self.context.server)
    @expect(ResponseHeaders)
    def state_wait_for_response_headers(
        self, event: ResponseHeaders
    ) -> layer.CommandGenerator[None]:
        """Attach the response to the flow and run the responseheaders hook."""
        self.flow.response = event.response
        if not event.end_stream and (yield from self.check_body_size(False)):
            return
        if (yield from self.check_invalid(False)):
            return
        yield HttpResponseHeadersHook(self.flow)
        if (yield from self.check_killed(True)):
            return
        elif self.flow.response.stream and not event.end_stream:
            yield from self.start_response_stream()
        else:
            self.server_state = self.state_consume_response_body
    def start_response_stream(self) -> layer.CommandGenerator[None]:
        """Forward response headers to the client now and switch to streaming mode."""
        assert self.flow.response
        yield SendHttp(
            ResponseHeaders(self.stream_id, self.flow.response, end_stream=False),
            self.context.client,
        )
        yield commands.Log(f"Streaming response from {self.flow.request.host}.")
        self.server_state = self.state_stream_response_body
    @expect(ResponseData, ResponseTrailers, ResponseEndOfMessage)
    def state_stream_response_body(
        self, event: events.Event
    ) -> layer.CommandGenerator[None]:
        """Pass response body chunks straight through to the client.

        If `flow.response.stream` is a callable, each chunk is run through it
        first; it is additionally invoked with b"" at end-of-message so it can
        flush any data it withheld.
        """
        assert self.flow.response
        if isinstance(event, ResponseData):
            if callable(self.flow.response.stream):
                chunks = self.flow.response.stream(event.data)
                if isinstance(chunks, bytes):
                    chunks = [chunks]
            else:
                chunks = [event.data]
            for chunk in chunks:
                if self.context.options.store_streamed_bodies:
                    self.response_body_buf += chunk
                yield SendHttp(ResponseData(self.stream_id, chunk), self.context.client)
        elif isinstance(event, ResponseTrailers):
            self.flow.response.trailers = event.trailers
            # will be sent in send_response() after the response hook.
        elif isinstance(event, ResponseEndOfMessage):
            if callable(self.flow.response.stream):
                # final call with b"" lets the stream callable emit leftovers.
                chunks = self.flow.response.stream(b"")
                if chunks == b"":
                    chunks = []
                elif isinstance(chunks, bytes):
                    chunks = [chunks]
                for chunk in chunks:
                    if self.context.options.store_streamed_bodies:
                        self.response_body_buf += chunk
                    yield SendHttp(
                        ResponseData(self.stream_id, chunk), self.context.client
                    )
            if self.context.options.store_streamed_bodies:
                self.flow.response.data.content = bytes(self.response_body_buf)
                self.response_body_buf.clear()
            yield from self.send_response(already_streamed=True)
    @expect(ResponseData, ResponseTrailers, ResponseEndOfMessage)
    def state_consume_response_body(
        self, event: events.Event
    ) -> layer.CommandGenerator[None]:
        """Buffer the response body in full; on end-of-message, send the response."""
        if isinstance(event, ResponseData):
            self.response_body_buf += event.data
            # may switch us into streaming mode or kill the flow if too large.
            yield from self.check_body_size(False)
        elif isinstance(event, ResponseTrailers):
            assert self.flow.response
            self.flow.response.trailers = event.trailers
        elif isinstance(event, ResponseEndOfMessage):
            assert self.flow.response
            self.flow.response.data.content = bytes(self.response_body_buf)
            self.response_body_buf.clear()
            yield from self.send_response()
    def send_response(self, already_streamed: bool = False) -> layer.CommandGenerator[None]:
        """We have either consumed the entire response from the server or the response was set by an addon."""
        assert self.flow.response
        self.flow.response.timestamp_end = time.time()
        # Detect a completed WebSocket handshake before the response hook runs.
        is_websocket = (
            self.flow.response.status_code == 101
            and self.flow.response.headers.get("upgrade", "").lower() == "websocket"
            and self.flow.request.headers.get("Sec-WebSocket-Version", "").encode()
            == wsproto.handshake.WEBSOCKET_VERSION
            and self.context.options.websocket
        )
        if is_websocket:
            # We need to set this before calling the response hook
            # so that addons can determine if a WebSocket connection is following up.
            self.flow.websocket = WebSocketData()
        yield HttpResponseHook(self.flow)
        self.server_state = self.state_done
        if (yield from self.check_killed(False)):
            return
        if not already_streamed:
            content = self.flow.response.raw_content
            done_after_headers = not (content or self.flow.response.trailers)
            yield SendHttp(
                ResponseHeaders(self.stream_id, self.flow.response, done_after_headers),
                self.context.client,
            )
            if content:
                yield SendHttp(
                    ResponseData(self.stream_id, content), self.context.client
                )
        # trailers are sent here even in streaming mode (see state_stream_response_body).
        if self.flow.response.trailers:
            yield SendHttp(
                ResponseTrailers(self.stream_id, self.flow.response.trailers),
                self.context.client,
            )
        if self.client_state == self.state_done:
            yield from self.flow_done()
def flow_done(self) -> layer.CommandGenerator[None]:
if not self.flow.websocket:
self.flow.live = False
assert self.flow.response
if self.flow.response.status_code == 101:
if self.flow.websocket:
self.child_layer = websocket.WebsocketLayer(self.context, self.flow)
elif self.context.options.rawtcp:
self.child_layer = tcp.TCPLayer(self.context)
else:
yield commands.Log(
f"Sent HTTP 101 response, but no protocol is enabled to upgrade to.",
WARNING,
)
yield commands.CloseConnection(self.context.client)
self.client_state = self.server_state = self.state_errored
return
if self.debug:
yield commands.Log(
f"{self.debug}[http] upgrading to {self.child_layer}", DEBUG
)
self._handle_event = self.passthrough
yield from self.child_layer.handle_event(events.Start())
else:
yield DropStream(self.stream_id)
# delay sending EOM until the child layer is set up,
# we may get data immediately and need to be prepared to handle it.
yield SendHttp(ResponseEndOfMessage(self.stream_id), self.context.client)
    def check_body_size(self, request: bool) -> layer.CommandGenerator[bool]:
        """
        Check if the body size exceeds limits imposed by stream_large_bodies or body_size_limit.
        Returns `True` if the body size exceeds body_size_limit and further processing should be stopped.
        """
        if not (
            self.context.options.stream_large_bodies
            or self.context.options.body_size_limit
        ):
            return False
        # Step 1: Determine the expected body size. This can either come from a known content-length header,
        # or from the amount of currently buffered bytes (e.g. for chunked encoding).
        response = not request
        expected_size: int | None
        # the 'late' case: we already started consuming the body
        if request and self.request_body_buf:
            expected_size = len(self.request_body_buf)
        elif response and self.response_body_buf:
            expected_size = len(self.response_body_buf)
        else:
            # the 'early' case: we have not started consuming the body
            try:
                expected_size = expected_http_body_size(
                    self.flow.request, self.flow.response if response else None
                )
            except ValueError:  # pragma: no cover
                # we just don't stream/kill malformed content-length headers.
                expected_size = None
        if expected_size is None or expected_size <= 0:
            return False
        # Step 2: Do we need to abort this?
        max_total_size = human.parse_size(self.context.options.body_size_limit)
        if max_total_size is not None and expected_size > max_total_size:
            # In the 'early' case no hook has fired for this message yet, so run
            # the headers hook first to make sure addons see the flow at all.
            if request and not self.request_body_buf:
                yield HttpRequestHeadersHook(self.flow)
            if response and not self.response_body_buf:
                yield HttpResponseHeadersHook(self.flow)
            err_msg = f"{'Request' if request else 'Response'} body exceeds mitmproxy's body_size_limit."
            err_code = (
                ErrorCode.REQUEST_TOO_LARGE if request else ErrorCode.RESPONSE_TOO_LARGE
            )
            self.flow.error = flow.Error(err_msg)
            yield HttpErrorHook(self.flow)
            yield SendHttp(
                ResponseProtocolError(self.stream_id, err_msg, err_code),
                self.context.client,
            )
            self.client_state = self.state_errored
            if response:
                yield SendHttp(
                    RequestProtocolError(self.stream_id, err_msg, err_code),
                    self.context.server,
                )
                self.server_state = self.state_errored
            self.flow.live = False
            return True
        # Step 3: Do we need to stream this?
        max_stream_size = human.parse_size(self.context.options.stream_large_bodies)
        if max_stream_size is not None and expected_size > max_stream_size:
            if request:
                self.flow.request.stream = True
                if self.request_body_buf:
                    # clear buffer and then fake a DataReceived event with everything we had in the buffer so far.
                    body_buf = bytes(self.request_body_buf)
                    self.request_body_buf.clear()
                    yield from self.start_request_stream()
                    yield from self.handle_event(RequestData(self.stream_id, body_buf))
            if response:
                assert self.flow.response
                self.flow.response.stream = True
                if self.response_body_buf:
                    # same trick as above, but for the response side.
                    body_buf = bytes(self.response_body_buf)
                    self.response_body_buf.clear()
                    yield from self.start_response_stream()
                    yield from self.handle_event(ResponseData(self.stream_id, body_buf))
        return False
    def check_invalid(self, request: bool) -> layer.CommandGenerator[bool]:
        """Validate the request or response message.

        Returns `True` (after erroring out the stream) if validation failed
        and processing must stop, `False` otherwise.
        """
        err: str | None = None
        if request:
            err = validate_request(
                self.mode,
                self.flow.request,
                self.context.options.validate_inbound_headers,
            )
        elif self.context.options.validate_inbound_headers:
            assert self.flow.response is not None
            try:
                validate_headers(self.flow.response)
            except ValueError as e:
                err = (
                    f"Received {e} from server, refusing to prevent request smuggling attacks. "
                    "Disable the validate_inbound_headers option to skip this security check."
                )
        if err:
            self.flow.error = flow.Error(err)
            if request:
                # flow has not been seen yet, register it.
                yield HttpRequestHeadersHook(self.flow)
            else:
                # immediately kill server connection
                yield commands.CloseConnection(self.flow.server_conn)
            yield HttpErrorHook(self.flow)
            yield SendHttp(
                ResponseProtocolError(
                    self.stream_id,
                    err,
                    ErrorCode.REQUEST_VALIDATION_FAILED
                    if request
                    else ErrorCode.RESPONSE_VALIDATION_FAILED,
                ),
                self.context.client,
            )
            self.flow.live = False
            self.client_state = self.server_state = self.state_errored
            return True
        else:
            return False
    def check_killed(self, emit_error_hook: bool) -> layer.CommandGenerator[bool]:
        """Check if the flow was killed by an addon or aborted by the client mid-hook.

        Returns `True` (after erroring out the stream) if the flow is dead
        and processing must stop, `False` otherwise.
        """
        killed_by_us = (
            self.flow.error and self.flow.error.msg == flow.Error.KILLED_MESSAGE
        )
        # The client may have closed the connection while we were waiting for the hook to complete.
        # We peek into the event queue to see if that is the case.
        killed_by_remote = None
        for evt in self._paused_event_queue:
            if isinstance(evt, RequestProtocolError):
                killed_by_remote = evt.message
                break
        if killed_by_remote:
            if not self.flow.error:
                self.flow.error = flow.Error(killed_by_remote)
        if killed_by_us or killed_by_remote:
            if emit_error_hook:
                yield HttpErrorHook(self.flow)
            yield SendHttp(
                ResponseProtocolError(self.stream_id, "killed", ErrorCode.KILL),
                self.context.client,
            )
            self.flow.live = False
            self.client_state = self.server_state = self.state_errored
            return True
        return False
    def handle_protocol_error(
        self, event: RequestProtocolError | ResponseProtocolError
    ) -> layer.CommandGenerator[None]:
        """Tear down the stream after a protocol error on either side."""
        is_client_error_but_we_already_talk_upstream = (
            isinstance(event, RequestProtocolError)
            and self.client_state in (self.state_stream_request_body, self.state_done)
            and self.server_state not in (self.state_done, self.state_errored)
        )
        need_error_hook = not (
            self.client_state == self.state_errored
            or self.server_state in (self.state_done, self.state_errored)
        )
        if is_client_error_but_we_already_talk_upstream:
            # the server still expects data for this stream, relay the abort.
            yield SendHttp(event, self.context.server)
            self.client_state = self.state_errored
        if need_error_hook:
            # We don't want to trigger both a response hook and an error hook,
            # so we need to check if the response is done yet or not.
            self.flow.error = flow.Error(event.message)
            yield HttpErrorHook(self.flow)
            if (yield from self.check_killed(False)):
                return
        if isinstance(event, ResponseProtocolError):
            if self.client_state != self.state_errored:
                yield SendHttp(event, self.context.client)
            self.server_state = self.state_errored
        self.flow.live = False
        yield DropStream(self.stream_id)
def make_server_connection(self) -> layer.CommandGenerator[bool]:
connection, err = yield GetHttpConnection(
(self.flow.request.host, self.flow.request.port),
self.flow.request.scheme == "https",
self.flow.server_conn.via,
self.flow.server_conn.transport_protocol,
)
if err:
yield from self.handle_protocol_error(
ResponseProtocolError(self.stream_id, err, ErrorCode.CONNECT_FAILED)
)
return False
else:
self.context.server = self.flow.server_conn = connection
return True
    def handle_connect(self) -> layer.CommandGenerator[None]:
        """Handle an HTTP CONNECT request in regular or upstream proxy mode."""
        self.client_state = self.state_done
        yield HttpConnectHook(self.flow)
        if (yield from self.check_killed(False)):
            return
        self.context.server.address = (self.flow.request.host, self.flow.request.port)
        if self.mode == HTTPMode.regular:
            yield from self.handle_connect_regular()
        else:
            yield from self.handle_connect_upstream()
    def handle_connect_regular(self) -> layer.CommandGenerator[None]:
        """CONNECT in regular mode: optionally connect eagerly, then hand off to the next layer."""
        if (
            not self.flow.response
            and self.context.options.connection_strategy == "eager"
        ):
            err = yield commands.OpenConnection(self.context.server)
            if err:
                self.flow.response = http.Response.make(
                    502,
                    f"Cannot connect to {human.format_address(self.context.server.address)}: {err} "
                    f"If you plan to redirect requests away from this server, "
                    f"consider setting `connection_strategy` to `lazy` to suppress early connections.",
                )
        self.child_layer = layer.NextLayer(self.context)
        yield from self.handle_connect_finish()
def handle_connect_upstream(self):
self.child_layer = _upstream_proxy.HttpUpstreamProxy.make(self.context, True)[0]
yield from self.handle_connect_finish()
def handle_connect_finish(self):
if not self.flow.response:
# Do not send any response headers as it breaks proxying non-80 ports on
# Android emulators using the -http-proxy option.
self.flow.response = http.Response(
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | true |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_http_h2.py | mitmproxy/proxy/layers/http/_http_h2.py | import collections
import logging
from typing import NamedTuple
import h2.config
import h2.connection
import h2.events
import h2.exceptions
import h2.settings
import h2.stream
logger = logging.getLogger(__name__)
class H2ConnectionLogger(h2.config.DummyLogger):
    """Forward hyper-h2's debug/trace output to the mitmproxy logger.

    Messages are prefixed with a connection-type label and tagged with the
    client's peername via the logging `extra` dict.
    """

    def __init__(self, peername: tuple, conn_type: str):
        super().__init__()
        self.peername = peername
        self.conn_type = conn_type

    def debug(self, fmtstr, *args):
        logger.debug(
            f"{self.conn_type} {fmtstr}", *args, extra={"client": self.peername}
        )

    def trace(self, fmtstr, *args):
        # h2's trace output is very verbose; map it just below DEBUG.
        logger.log(
            logging.DEBUG - 1,
            f"{self.conn_type} {fmtstr}",
            *args,
            extra={"client": self.peername},
        )
class SendH2Data(NamedTuple):
    """A buffered chunk of HTTP/2 DATA waiting for flow-control window."""

    data: bytes
    end_stream: bool
class BufferedH2Connection(h2.connection.H2Connection):
    """
    This class wraps hyper-h2's H2Connection and adds internal send buffers.

    To simplify implementation, padding is unsupported.
    """

    stream_buffers: collections.defaultdict[int, collections.deque[SendH2Data]]
    stream_trailers: dict[int, list[tuple[bytes, bytes]]]

    def __init__(self, config: h2.config.H2Configuration):
        super().__init__(config)
        self.local_settings.initial_window_size = 2**31 - 1
        self.local_settings.max_frame_size = 2**17
        self.max_inbound_frame_size = 2**17
        # hyper-h2 pitfall: we need to acknowledge here, otherwise it sends out the old settings.
        self.local_settings.acknowledge()
        self.stream_buffers = collections.defaultdict(collections.deque)
        self.stream_trailers = {}

    def initiate_connection(self):
        super().initiate_connection()
        # We increase the flow-control window for new streams with a setting,
        # but we need to increase the overall connection flow-control window as well.
        self.increment_flow_control_window(
            2**31 - 1 - self.inbound_flow_control_window
        )  # maximum - default

    def send_data(
        self,
        stream_id: int,
        data: bytes,
        end_stream: bool = False,
        pad_length: None = None,
    ) -> None:
        """
        Send data on a given stream.

        In contrast to plain hyper-h2, this method will not raise if the data cannot be sent immediately.
        Data is split up and buffered internally.
        """
        frame_size = len(data)
        assert pad_length is None
        if frame_size > self.max_outbound_frame_size:
            # Split into frame-sized chunks. Fix: only the *last* chunk may
            # carry the caller's end_stream flag (it was previously dropped).
            for start in range(0, frame_size, self.max_outbound_frame_size):
                chunk = data[start : start + self.max_outbound_frame_size]
                is_last = start + self.max_outbound_frame_size >= frame_size
                self.send_data(stream_id, chunk, end_stream=end_stream and is_last)
            return

        if self.stream_buffers.get(stream_id, None):
            # We already have some data buffered, let's append.
            self.stream_buffers[stream_id].append(SendH2Data(data, end_stream))
        else:
            available_window = self.local_flow_control_window(stream_id)
            if frame_size <= available_window:
                super().send_data(stream_id, data, end_stream)
            else:
                if available_window:
                    # Send what fits into the window now, buffer the rest.
                    can_send_now = data[:available_window]
                    super().send_data(stream_id, can_send_now, end_stream=False)
                    data = data[available_window:]
                # We can't send right now, so we buffer.
                self.stream_buffers[stream_id].append(SendH2Data(data, end_stream))

    def send_trailers(self, stream_id: int, trailers: list[tuple[bytes, bytes]]):
        if self.stream_buffers.get(stream_id, None):
            # Though trailers are not subject to flow control, we need to queue them and send strictly after data frames
            self.stream_trailers[stream_id] = trailers
        else:
            self.send_headers(stream_id, trailers, end_stream=True)

    def end_stream(self, stream_id: int) -> None:
        if stream_id in self.stream_trailers:
            return  # we already have trailers queued up that will end the stream.
        self.send_data(stream_id, b"", end_stream=True)

    def reset_stream(self, stream_id: int, error_code: int = 0) -> None:
        self.stream_buffers.pop(stream_id, None)
        # Fix: also drop queued trailers, they'd otherwise leak for the
        # lifetime of the connection.
        self.stream_trailers.pop(stream_id, None)
        super().reset_stream(stream_id, error_code)

    def receive_data(self, data: bytes):
        events = super().receive_data(data)
        ret = []
        for event in events:
            if isinstance(event, h2.events.WindowUpdated):
                if event.stream_id == 0:
                    self.connection_window_updated()
                else:
                    self.stream_window_updated(event.stream_id)
                continue
            elif isinstance(event, h2.events.RemoteSettingsChanged):
                if (
                    h2.settings.SettingCodes.INITIAL_WINDOW_SIZE
                    in event.changed_settings
                ):
                    self.connection_window_updated()
            elif isinstance(event, h2.events.StreamReset):
                self.stream_buffers.pop(event.stream_id, None)
                # Fix: drop queued trailers for the reset stream as well.
                self.stream_trailers.pop(event.stream_id, None)
            elif isinstance(event, h2.events.ConnectionTerminated):
                self.stream_buffers.clear()
                self.stream_trailers.clear()
            ret.append(event)
        return ret

    def stream_window_updated(self, stream_id: int) -> bool:
        """
        The window for a specific stream has updated. Send as much buffered data as possible.
        """
        # If the stream has been reset in the meantime, we just clear the buffer.
        try:
            stream: h2.stream.H2Stream = self.streams[stream_id]
        except KeyError:
            stream_was_reset = True
        else:
            stream_was_reset = stream.state_machine.state not in (
                h2.stream.StreamState.OPEN,
                h2.stream.StreamState.HALF_CLOSED_REMOTE,
            )
        if stream_was_reset:
            self.stream_buffers.pop(stream_id, None)
            self.stream_trailers.pop(stream_id, None)
            return False

        available_window = self.local_flow_control_window(stream_id)
        sent_any_data = False
        while available_window > 0 and stream_id in self.stream_buffers:
            chunk: SendH2Data = self.stream_buffers[stream_id].popleft()
            if len(chunk.data) > available_window:
                # We can't send the entire chunk, so we have to put some bytes back into the buffer.
                self.stream_buffers[stream_id].appendleft(
                    SendH2Data(
                        data=chunk.data[available_window:],
                        end_stream=chunk.end_stream,
                    )
                )
                chunk = SendH2Data(
                    data=chunk.data[:available_window],
                    end_stream=False,
                )
            super().send_data(stream_id, data=chunk.data, end_stream=chunk.end_stream)
            available_window -= len(chunk.data)
            if not self.stream_buffers[stream_id]:
                del self.stream_buffers[stream_id]
                if stream_id in self.stream_trailers:
                    # buffered data has drained, trailers may now follow.
                    self.send_headers(
                        stream_id, self.stream_trailers.pop(stream_id), end_stream=True
                    )
            sent_any_data = True
        return sent_any_data

    def connection_window_updated(self) -> None:
        """
        The connection window has updated. Send data from buffers in a round-robin fashion.
        """
        sent_any_data = True
        while sent_any_data:
            sent_any_data = False
            for stream_id in list(self.stream_buffers):
                self.stream_buffers[stream_id] = self.stream_buffers.pop(
                    stream_id
                )  # move to end of dict
                if self.stream_window_updated(stream_id):
                    sent_any_data = True
                    if self.outbound_flow_control_window == 0:
                        return
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/proxy/layers/http/_http_h3.py | mitmproxy/proxy/layers/http/_http_h3.py | from collections.abc import Iterable
from dataclasses import dataclass
from aioquic.h3.connection import FrameUnexpected
from aioquic.h3.connection import H3Connection
from aioquic.h3.connection import H3Event
from aioquic.h3.connection import H3Stream
from aioquic.h3.connection import Headers
from aioquic.h3.connection import HeadersState
from aioquic.h3.events import HeadersReceived
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.events import StreamDataReceived
from aioquic.quic.packet import QuicErrorCode
from mitmproxy import connection
from mitmproxy.proxy import commands
from mitmproxy.proxy import layer
from mitmproxy.proxy.layers.quic import CloseQuicConnection
from mitmproxy.proxy.layers.quic import QuicConnectionClosed
from mitmproxy.proxy.layers.quic import QuicStreamDataReceived
from mitmproxy.proxy.layers.quic import QuicStreamEvent
from mitmproxy.proxy.layers.quic import QuicStreamReset
from mitmproxy.proxy.layers.quic import QuicStreamStopSending
from mitmproxy.proxy.layers.quic import ResetQuicStream
from mitmproxy.proxy.layers.quic import SendQuicStreamData
from mitmproxy.proxy.layers.quic import StopSendingQuicStream
@dataclass
class TrailersReceived(H3Event):
    """
    The TrailersReceived event is fired whenever trailers are received.
    """

    trailers: Headers
    "The trailers."

    stream_id: int
    "The ID of the stream the trailers were received for."

    stream_ended: bool
    "Whether the STREAM frame had the FIN bit set."
@dataclass
class StreamClosed(H3Event):
    """
    The StreamClosed event is fired when the peer sends a RESET_STREAM
    or a STOP_SENDING frame. For HTTP/3, we don't differentiate between the two.
    """

    stream_id: int
    "The ID of the stream that was reset."

    error_code: int
    """The error code indicating why the stream was closed."""
class MockQuic:
    """
    aioquic intermingles QUIC and HTTP/3. This is something we don't want to do because that makes testing much harder.
    Instead, we mock our QUIC connection object here and then take out the wire data to be sent.
    """

    def __init__(self, conn: connection.Connection, is_client: bool) -> None:
        self.conn = conn
        # commands queued here are drained via LayeredH3Connection.transmit()
        self.pending_commands: list[commands.Command] = []
        # next stream id per (unidirectional, peer-initiated) combination,
        # see get_next_available_stream_id below.
        self._next_stream_id: list[int] = [0, 1, 2, 3]
        self._is_client = is_client
        # the following fields are accessed by H3Connection
        self.configuration = QuicConfiguration(is_client=is_client)
        self._quic_logger = None
        self._remote_max_datagram_frame_size = 0

    def close(
        self,
        error_code: int = QuicErrorCode.NO_ERROR,
        frame_type: int | None = None,
        reason_phrase: str = "",
    ) -> None:
        # we'll get closed if a protocol error occurs in `H3Connection.handle_event`
        # we note the error on the connection and yield a CloseConnection
        # this will then call `QuicConnection.close` with the proper values
        # once the `Http3Connection` receives `ConnectionClosed`, it will send out `ProtocolError`
        self.pending_commands.append(
            CloseQuicConnection(self.conn, error_code, frame_type, reason_phrase)
        )

    def get_next_available_stream_id(self, is_unidirectional: bool = False) -> int:
        # since we always reserve the ID, we have to "find" the next ID like `QuicConnection` does
        index = (int(is_unidirectional) << 1) | int(not self._is_client)
        stream_id = self._next_stream_id[index]
        self._next_stream_id[index] = stream_id + 4
        return stream_id

    def reset_stream(self, stream_id: int, error_code: int) -> None:
        self.pending_commands.append(ResetQuicStream(self.conn, stream_id, error_code))

    def stop_send(self, stream_id: int, error_code: int) -> None:
        self.pending_commands.append(
            StopSendingQuicStream(self.conn, stream_id, error_code)
        )

    def send_stream_data(
        self, stream_id: int, data: bytes, end_stream: bool = False
    ) -> None:
        self.pending_commands.append(
            SendQuicStreamData(self.conn, stream_id, data, end_stream)
        )
class LayeredH3Connection(H3Connection):
    """
    Creates a H3 connection using a fake QUIC connection, which allows layer separation.
    Also ensures that headers, data and trailers are sent in that order.
    """

    def __init__(
        self,
        conn: connection.Connection,
        is_client: bool,
        enable_webtransport: bool = False,
    ) -> None:
        self._mock = MockQuic(conn, is_client)
        self._closed_streams: set[int] = set()
        """
        We keep track of all stream IDs for which we have requested
        STOP_SENDING to silently discard incoming data.
        """
        super().__init__(self._mock, enable_webtransport)  # type: ignore

    # aioquic's constructor sets and then uses _max_push_id.
    # This is a hack to forcibly disable it.
    @property
    def _max_push_id(self) -> int | None:
        return None

    @_max_push_id.setter
    def _max_push_id(self, value):
        pass

    def _after_send(self, stream_id: int, end_stream: bool) -> None:
        # if the stream ended, `QuicConnection` has an assert that no further data is being sent
        # to catch this more early on, we set the header state on the `H3Stream`
        if end_stream:
            self._stream[stream_id].headers_send_state = HeadersState.AFTER_TRAILERS

    def _handle_request_or_push_frame(
        self,
        frame_type: int,
        frame_data: bytes | None,
        stream: H3Stream,
        stream_ended: bool,
    ) -> list[H3Event]:
        # turn HeadersReceived into TrailersReceived for trailers
        events = super()._handle_request_or_push_frame(
            frame_type, frame_data, stream, stream_ended
        )
        for index, event in enumerate(events):
            if (
                isinstance(event, HeadersReceived)
                and self._stream[event.stream_id].headers_recv_state
                == HeadersState.AFTER_TRAILERS
            ):
                events[index] = TrailersReceived(
                    event.headers, event.stream_id, event.stream_ended
                )
        return events

    def close_connection(
        self,
        error_code: int = QuicErrorCode.NO_ERROR,
        frame_type: int | None = None,
        reason_phrase: str = "",
    ) -> None:
        """Closes the underlying QUIC connection and ignores any incoming events."""
        self._is_done = True
        self._quic.close(error_code, frame_type, reason_phrase)

    def end_stream(self, stream_id: int) -> None:
        """Ends the given stream if not already done so."""
        stream = self._get_or_create_stream(stream_id)
        if stream.headers_send_state != HeadersState.AFTER_TRAILERS:
            super().send_data(stream_id, b"", end_stream=True)
            stream.headers_send_state = HeadersState.AFTER_TRAILERS

    def get_next_available_stream_id(self, is_unidirectional: bool = False):
        """Reserves and returns the next available stream ID."""
        return self._quic.get_next_available_stream_id(is_unidirectional)

    def get_open_stream_ids(self) -> Iterable[int]:
        """Iterates over all non-special open streams"""
        return (
            stream.stream_id
            for stream in self._stream.values()
            if (
                stream.stream_type is None
                and not (
                    stream.headers_recv_state == HeadersState.AFTER_TRAILERS
                    and stream.headers_send_state == HeadersState.AFTER_TRAILERS
                )
            )
        )

    def handle_connection_closed(self, event: QuicConnectionClosed) -> None:
        """Marks the connection as done; all further events are ignored."""
        self._is_done = True

    def handle_stream_event(self, event: QuicStreamEvent) -> list[H3Event]:
        """Translates a QUIC stream event from the proxy layer into H3 events."""
        # don't do anything if we're done
        if self._is_done:
            return []
        elif isinstance(event, (QuicStreamReset, QuicStreamStopSending)):
            self.close_stream(
                event.stream_id,
                event.error_code,
                stop_send=isinstance(event, QuicStreamStopSending),
            )
            stream = self._get_or_create_stream(event.stream_id)
            stream.ended = True
            stream.headers_recv_state = HeadersState.AFTER_TRAILERS
            return [StreamClosed(event.stream_id, event.error_code)]
        # convert data events from the QUIC layer back to aioquic events
        elif isinstance(event, QuicStreamDataReceived):
            # Discard contents if we have already sent STOP_SENDING on this stream.
            if event.stream_id in self._closed_streams:
                return []
            elif self._get_or_create_stream(event.stream_id).ended:
                # aioquic will not send us any data events once a stream has ended.
                # Instead, it will close the connection. We simulate this here for H3 tests.
                self.close_connection(
                    error_code=QuicErrorCode.PROTOCOL_VIOLATION,
                    reason_phrase="stream already ended",
                )
                return []
            else:
                return self.handle_event(
                    StreamDataReceived(event.data, event.end_stream, event.stream_id)
                )
        # should never happen
        else:  # pragma: no cover
            raise AssertionError(f"Unexpected event: {event!r}")

    def has_sent_headers(self, stream_id: int) -> bool:
        """Indicates whether headers have been sent over the given stream."""
        try:
            return self._stream[stream_id].headers_send_state != HeadersState.INITIAL
        except KeyError:
            return False

    def close_stream(
        self, stream_id: int, error_code: int, stop_send: bool = True
    ) -> None:
        """Close a stream that hasn't been closed locally yet."""
        if stream_id not in self._closed_streams:
            self._closed_streams.add(stream_id)
            stream = self._get_or_create_stream(stream_id)
            stream.headers_send_state = HeadersState.AFTER_TRAILERS
            # https://www.rfc-editor.org/rfc/rfc9000.html#section-3.5-8
            # An endpoint that wishes to terminate both directions of
            # a bidirectional stream can terminate one direction by
            # sending a RESET_STREAM frame, and it can encourage prompt
            # termination in the opposite direction by sending a
            # STOP_SENDING frame.
            self._mock.reset_stream(stream_id=stream_id, error_code=error_code)
            if stop_send:
                self._mock.stop_send(stream_id=stream_id, error_code=error_code)

    def send_data(self, stream_id: int, data: bytes, end_stream: bool = False) -> None:
        """Sends data over the given stream."""
        super().send_data(stream_id, data, end_stream)
        self._after_send(stream_id, end_stream)

    def send_datagram(self, flow_id: int, data: bytes) -> None:
        # supporting datagrams would require additional information from the underlying QUIC connection
        raise NotImplementedError()  # pragma: no cover

    def send_headers(
        self, stream_id: int, headers: Headers, end_stream: bool = False
    ) -> None:
        """Sends headers over the given stream."""
        # ensure we haven't sent something before
        stream = self._get_or_create_stream(stream_id)
        if stream.headers_send_state != HeadersState.INITIAL:
            raise FrameUnexpected("initial HEADERS frame is not allowed in this state")
        super().send_headers(stream_id, headers, end_stream)
        self._after_send(stream_id, end_stream)

    def send_trailers(self, stream_id: int, trailers: Headers) -> None:
        """Sends trailers over the given stream and ends it."""
        # ensure we got some headers first
        stream = self._get_or_create_stream(stream_id)
        if stream.headers_send_state != HeadersState.AFTER_HEADERS:
            raise FrameUnexpected("trailing HEADERS frame is not allowed in this state")
        super().send_headers(stream_id, trailers, end_stream=True)
        self._after_send(stream_id, end_stream=True)

    def transmit(self) -> layer.CommandGenerator[None]:
        """Yields all pending commands for the upper QUIC layer."""
        while self._mock.pending_commands:
            yield self._mock.pending_commands.pop(0)
__all__ = [
"LayeredH3Connection",
"StreamClosed",
"TrailersReceived",
]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/__init__.py | mitmproxy/contrib/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/imghdr.py | mitmproxy/contrib/imghdr.py | # A vendored copy of Python's imghdr module, which is slated for removal in Python 3.13.
#
# Source: https://github.com/python/cpython/blob/3.12/Lib/imghdr.py
# SPDX-License-Identifier: PSF-2.0
"""Recognize image file formats based on their first few bytes."""
from os import PathLike
import warnings
__all__ = ["what"]
# warnings._deprecated(__name__, remove=(3, 13))
#-------------------------#
# Recognize image headers #
#-------------------------#
def what(file, h=None):
    """Return the type of image contained in a file or byte stream."""
    f = None
    try:
        if h is None:
            if isinstance(file, (str, PathLike)):
                f = open(file, 'rb')
                h = f.read(32)
            else:
                # file-like object: sniff 32 bytes, then restore the position.
                location = file.tell()
                h = file.read(32)
                file.seek(location)
        for tf in tests:
            res = tf(h, f)
            if res:
                return res
    finally:
        if f: f.close()
    return None
#---------------------------------#
# Subroutines per image file type #
#---------------------------------#
tests = []
def test_jpeg(h, f):
"""Test for JPEG data with JFIF or Exif markers; and raw JPEG."""
if h[6:10] in (b'JFIF', b'Exif'):
return 'jpeg'
elif h[:4] == b'\xff\xd8\xff\xdb':
return 'jpeg'
tests.append(test_jpeg)
def test_png(h, f):
"""Verify if the image is a PNG."""
if h.startswith(b'\211PNG\r\n\032\n'):
return 'png'
tests.append(test_png)
def test_gif(h, f):
"""Verify if the image is a GIF ('87 or '89 variants)."""
if h[:6] in (b'GIF87a', b'GIF89a'):
return 'gif'
tests.append(test_gif)
def test_tiff(h, f):
"""Verify if the image is a TIFF (can be in Motorola or Intel byte order)."""
if h[:2] in (b'MM', b'II'):
return 'tiff'
tests.append(test_tiff)
def test_rgb(h, f):
"""test for the SGI image library."""
if h.startswith(b'\001\332'):
return 'rgb'
tests.append(test_rgb)
def test_pbm(h, f):
"""Verify if the image is a PBM (portable bitmap)."""
if len(h) >= 3 and \
h[0] == ord(b'P') and h[1] in b'14' and h[2] in b' \t\n\r':
return 'pbm'
tests.append(test_pbm)
def test_pgm(h, f):
"""Verify if the image is a PGM (portable graymap)."""
if len(h) >= 3 and \
h[0] == ord(b'P') and h[1] in b'25' and h[2] in b' \t\n\r':
return 'pgm'
tests.append(test_pgm)
def test_ppm(h, f):
"""Verify if the image is a PPM (portable pixmap)."""
if len(h) >= 3 and \
h[0] == ord(b'P') and h[1] in b'36' and h[2] in b' \t\n\r':
return 'ppm'
tests.append(test_ppm)
def test_rast(h, f):
"""test for the Sun raster file."""
if h.startswith(b'\x59\xA6\x6A\x95'):
return 'rast'
tests.append(test_rast)
def test_xbm(h, f):
"""Verify if the image is a X bitmap (X10 or X11)."""
if h.startswith(b'#define '):
return 'xbm'
tests.append(test_xbm)
def test_bmp(h, f):
"""Verify if the image is a BMP file."""
if h.startswith(b'BM'):
return 'bmp'
tests.append(test_bmp)
def test_webp(h, f):
"""Verify if the image is a WebP."""
if h.startswith(b'RIFF') and h[8:12] == b'WEBP':
return 'webp'
tests.append(test_webp)
def test_exr(h, f):
"""verify is the image ia a OpenEXR fileOpenEXR."""
if h.startswith(b'\x76\x2f\x31\x01'):
return 'exr'
tests.append(test_exr)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/gif.py | mitmproxy/contrib/kaitaistruct/gif.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Gif(KaitaiStruct):
"""GIF (Graphics Interchange Format) is an image file format, developed
in 1987. It became popular in 1990s as one of the main image formats
used in World Wide Web.
GIF format allows encoding of palette-based images up to 256 colors
(each of the colors can be chosen from a 24-bit RGB
colorspace). Image data stream uses LZW (Lempel-Ziv-Welch) lossless
compression.
Over the years, several version of the format were published and
several extensions to it were made, namely, a popular Netscape
extension that allows to store several images in one file, switching
between them, which produces crude form of animation.
Structurally, format consists of several mandatory headers and then
a stream of blocks follows. Blocks can carry additional
metainformation or image data.
"""
class BlockType(Enum):
extension = 33
local_image_descriptor = 44
end_of_file = 59
class ExtensionLabel(Enum):
graphic_control = 249
comment = 254
application = 255
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.hdr = Gif.Header(self._io, self, self._root)
self.logical_screen_descriptor = Gif.LogicalScreenDescriptorStruct(self._io, self, self._root)
if self.logical_screen_descriptor.has_color_table:
self._raw_global_color_table = self._io.read_bytes((self.logical_screen_descriptor.color_table_size * 3))
_io__raw_global_color_table = KaitaiStream(BytesIO(self._raw_global_color_table))
self.global_color_table = Gif.ColorTable(_io__raw_global_color_table, self, self._root)
self.blocks = []
i = 0
while True:
_ = Gif.Block(self._io, self, self._root)
self.blocks.append(_)
if ((self._io.is_eof()) or (_.block_type == Gif.BlockType.end_of_file)) :
break
i += 1
class ImageData(KaitaiStruct):
"""
.. seealso::
- section 22 - https://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.lzw_min_code_size = self._io.read_u1()
self.subblocks = Gif.Subblocks(self._io, self, self._root)
class ColorTableEntry(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.red = self._io.read_u1()
self.green = self._io.read_u1()
self.blue = self._io.read_u1()
class LogicalScreenDescriptorStruct(KaitaiStruct):
"""
.. seealso::
- section 18 - https://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.screen_width = self._io.read_u2le()
self.screen_height = self._io.read_u2le()
self.flags = self._io.read_u1()
self.bg_color_index = self._io.read_u1()
self.pixel_aspect_ratio = self._io.read_u1()
@property
def has_color_table(self):
if hasattr(self, '_m_has_color_table'):
return self._m_has_color_table
self._m_has_color_table = (self.flags & 128) != 0
return getattr(self, '_m_has_color_table', None)
@property
def color_table_size(self):
if hasattr(self, '_m_color_table_size'):
return self._m_color_table_size
self._m_color_table_size = (2 << (self.flags & 7))
return getattr(self, '_m_color_table_size', None)
class LocalImageDescriptor(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.left = self._io.read_u2le()
self.top = self._io.read_u2le()
self.width = self._io.read_u2le()
self.height = self._io.read_u2le()
self.flags = self._io.read_u1()
if self.has_color_table:
self._raw_local_color_table = self._io.read_bytes((self.color_table_size * 3))
_io__raw_local_color_table = KaitaiStream(BytesIO(self._raw_local_color_table))
self.local_color_table = Gif.ColorTable(_io__raw_local_color_table, self, self._root)
self.image_data = Gif.ImageData(self._io, self, self._root)
@property
def has_color_table(self):
if hasattr(self, '_m_has_color_table'):
return self._m_has_color_table
self._m_has_color_table = (self.flags & 128) != 0
return getattr(self, '_m_has_color_table', None)
@property
def has_interlace(self):
if hasattr(self, '_m_has_interlace'):
return self._m_has_interlace
self._m_has_interlace = (self.flags & 64) != 0
return getattr(self, '_m_has_interlace', None)
@property
def has_sorted_color_table(self):
if hasattr(self, '_m_has_sorted_color_table'):
return self._m_has_sorted_color_table
self._m_has_sorted_color_table = (self.flags & 32) != 0
return getattr(self, '_m_has_sorted_color_table', None)
@property
def color_table_size(self):
if hasattr(self, '_m_color_table_size'):
return self._m_color_table_size
self._m_color_table_size = (2 << (self.flags & 7))
return getattr(self, '_m_color_table_size', None)
class Block(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.block_type = KaitaiStream.resolve_enum(Gif.BlockType, self._io.read_u1())
_on = self.block_type
if _on == Gif.BlockType.extension:
self.body = Gif.Extension(self._io, self, self._root)
elif _on == Gif.BlockType.local_image_descriptor:
self.body = Gif.LocalImageDescriptor(self._io, self, self._root)
class ColorTable(KaitaiStruct):
"""
.. seealso::
- section 19 - https://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Gif.ColorTableEntry(self._io, self, self._root))
i += 1
class Header(KaitaiStruct):
"""
.. seealso::
- section 17 - https://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = self._io.read_bytes(3)
if not self.magic == b"\x47\x49\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x47\x49\x46", self.magic, self._io, u"/types/header/seq/0")
self.version = (self._io.read_bytes(3)).decode(u"ASCII")
class ExtGraphicControl(KaitaiStruct):
"""
.. seealso::
- section 23 - https://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.block_size = self._io.read_bytes(1)
if not self.block_size == b"\x04":
raise kaitaistruct.ValidationNotEqualError(b"\x04", self.block_size, self._io, u"/types/ext_graphic_control/seq/0")
self.flags = self._io.read_u1()
self.delay_time = self._io.read_u2le()
self.transparent_idx = self._io.read_u1()
self.terminator = self._io.read_bytes(1)
if not self.terminator == b"\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x00", self.terminator, self._io, u"/types/ext_graphic_control/seq/4")
@property
def transparent_color_flag(self):
if hasattr(self, '_m_transparent_color_flag'):
return self._m_transparent_color_flag
self._m_transparent_color_flag = (self.flags & 1) != 0
return getattr(self, '_m_transparent_color_flag', None)
@property
def user_input_flag(self):
if hasattr(self, '_m_user_input_flag'):
return self._m_user_input_flag
self._m_user_input_flag = (self.flags & 2) != 0
return getattr(self, '_m_user_input_flag', None)
class Subblock(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_bytes = self._io.read_u1()
self.bytes = self._io.read_bytes(self.len_bytes)
class ApplicationId(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_bytes = self._io.read_u1()
if not self.len_bytes == 11:
raise kaitaistruct.ValidationNotEqualError(11, self.len_bytes, self._io, u"/types/application_id/seq/0")
self.application_identifier = (self._io.read_bytes(8)).decode(u"ASCII")
self.application_auth_code = self._io.read_bytes(3)
class ExtApplication(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.application_id = Gif.ApplicationId(self._io, self, self._root)
self.subblocks = []
i = 0
while True:
_ = Gif.Subblock(self._io, self, self._root)
self.subblocks.append(_)
if _.len_bytes == 0:
break
i += 1
class Subblocks(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.entries = []
i = 0
while True:
_ = Gif.Subblock(self._io, self, self._root)
self.entries.append(_)
if _.len_bytes == 0:
break
i += 1
class Extension(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.label = KaitaiStream.resolve_enum(Gif.ExtensionLabel, self._io.read_u1())
_on = self.label
if _on == Gif.ExtensionLabel.application:
self.body = Gif.ExtApplication(self._io, self, self._root)
elif _on == Gif.ExtensionLabel.comment:
self.body = Gif.Subblocks(self._io, self, self._root)
elif _on == Gif.ExtensionLabel.graphic_control:
self.body = Gif.ExtGraphicControl(self._io, self, self._root)
else:
self.body = Gif.Subblocks(self._io, self, self._root)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/tls_client_hello.py | mitmproxy/contrib/kaitaistruct/tls_client_hello.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class TlsClientHello(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.version = TlsClientHello.Version(self._io, self, self._root)
self.random = TlsClientHello.Random(self._io, self, self._root)
self.session_id = TlsClientHello.SessionId(self._io, self, self._root)
self.cipher_suites = TlsClientHello.CipherSuites(self._io, self, self._root)
self.compression_methods = TlsClientHello.CompressionMethods(self._io, self, self._root)
if self._io.is_eof() == False:
self.extensions = TlsClientHello.Extensions(self._io, self, self._root)
class ServerName(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.name_type = self._io.read_u1()
self.length = self._io.read_u2be()
self.host_name = self._io.read_bytes(self.length)
class Random(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.gmt_unix_time = self._io.read_u4be()
self.random = self._io.read_bytes(28)
class SessionId(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u1()
self.sid = self._io.read_bytes(self.len)
class Sni(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.list_length = self._io.read_u2be()
self.server_names = []
i = 0
while not self._io.is_eof():
self.server_names.append(TlsClientHello.ServerName(self._io, self, self._root))
i += 1
class CipherSuites(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u2be()
self.cipher_suites = []
for i in range(self.len // 2):
self.cipher_suites.append(self._io.read_u2be())
class CompressionMethods(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u1()
self.compression_methods = self._io.read_bytes(self.len)
class Alpn(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ext_len = self._io.read_u2be()
self.alpn_protocols = []
i = 0
while not self._io.is_eof():
self.alpn_protocols.append(TlsClientHello.Protocol(self._io, self, self._root))
i += 1
class Extensions(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u2be()
self.extensions = []
i = 0
while not self._io.is_eof():
self.extensions.append(TlsClientHello.Extension(self._io, self, self._root))
i += 1
class Version(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.major = self._io.read_u1()
self.minor = self._io.read_u1()
class Protocol(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.strlen = self._io.read_u1()
self.name = self._io.read_bytes(self.strlen)
class Extension(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.type = self._io.read_u2be()
self.len = self._io.read_u2be()
_on = self.type
if _on == 0:
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = TlsClientHello.Sni(_io__raw_body, self, self._root)
elif _on == 16:
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = TlsClientHello.Alpn(_io__raw_body, self, self._root)
else:
self.body = self._io.read_bytes(self.len)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/vlq_base128_le.py | mitmproxy/contrib/kaitaistruct/vlq_base128_le.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class VlqBase128Le(KaitaiStruct):
"""A variable-length unsigned/signed integer using base128 encoding. 1-byte groups
consist of 1-bit flag of continuation and 7-bit value chunk, and are ordered
"least significant group first", i.e. in "little-endian" manner.
This particular encoding is specified and used in:
* DWARF debug file format, where it's dubbed "unsigned LEB128" or "ULEB128".
http://dwarfstd.org/doc/dwarf-2.0.0.pdf - page 139
* Google Protocol Buffers, where it's called "Base 128 Varints".
https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints
* Apache Lucene, where it's called "VInt"
https://lucene.apache.org/core/3_5_0/fileformats.html#VInt
* Apache Avro uses this as a basis for integer encoding, adding ZigZag on
top of it for signed ints
https://avro.apache.org/docs/current/spec.html#binary_encode_primitive
More information on this encoding is available at https://en.wikipedia.org/wiki/LEB128
This particular implementation supports serialized values to up 8 bytes long.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.groups = []
i = 0
while True:
_ = VlqBase128Le.Group(self._io, self, self._root)
self.groups.append(_)
if not (_.has_next):
break
i += 1
class Group(KaitaiStruct):
"""One byte group, clearly divided into 7-bit "value" chunk and 1-bit "continuation" flag.
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.b = self._io.read_u1()
@property
def has_next(self):
"""If true, then we have more bytes to read."""
if hasattr(self, '_m_has_next'):
return self._m_has_next
self._m_has_next = (self.b & 128) != 0
return getattr(self, '_m_has_next', None)
@property
def value(self):
"""The 7-bit (base128) numeric value chunk of this group."""
if hasattr(self, '_m_value'):
return self._m_value
self._m_value = (self.b & 127)
return getattr(self, '_m_value', None)
@property
def len(self):
if hasattr(self, '_m_len'):
return self._m_len
self._m_len = len(self.groups)
return getattr(self, '_m_len', None)
@property
def value(self):
"""Resulting unsigned value as normal integer."""
if hasattr(self, '_m_value'):
return self._m_value
self._m_value = (((((((self.groups[0].value + ((self.groups[1].value << 7) if self.len >= 2 else 0)) + ((self.groups[2].value << 14) if self.len >= 3 else 0)) + ((self.groups[3].value << 21) if self.len >= 4 else 0)) + ((self.groups[4].value << 28) if self.len >= 5 else 0)) + ((self.groups[5].value << 35) if self.len >= 6 else 0)) + ((self.groups[6].value << 42) if self.len >= 7 else 0)) + ((self.groups[7].value << 49) if self.len >= 8 else 0))
return getattr(self, '_m_value', None)
@property
def sign_bit(self):
if hasattr(self, '_m_sign_bit'):
return self._m_sign_bit
self._m_sign_bit = (1 << ((7 * self.len) - 1))
return getattr(self, '_m_sign_bit', None)
@property
def value_signed(self):
"""
.. seealso::
Source - https://graphics.stanford.edu/~seander/bithacks.html#VariableSignExtend
"""
if hasattr(self, '_m_value_signed'):
return self._m_value_signed
self._m_value_signed = ((self.value ^ self.sign_bit) - self.sign_bit)
return getattr(self, '_m_value_signed', None)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/dtls_client_hello.py | mitmproxy/contrib/kaitaistruct/dtls_client_hello.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class DtlsClientHello(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.version = DtlsClientHello.Version(self._io, self, self._root)
self.random = DtlsClientHello.Random(self._io, self, self._root)
self.session_id = DtlsClientHello.SessionId(self._io, self, self._root)
self.cookie = DtlsClientHello.Cookie(self._io, self, self._root)
self.cipher_suites = DtlsClientHello.CipherSuites(self._io, self, self._root)
self.compression_methods = DtlsClientHello.CompressionMethods(self._io, self, self._root)
if self._io.is_eof() == False:
self.extensions = DtlsClientHello.Extensions(self._io, self, self._root)
class ServerName(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.name_type = self._io.read_u1()
self.length = self._io.read_u2be()
self.host_name = self._io.read_bytes(self.length)
class Random(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.gmt_unix_time = self._io.read_u4be()
self.random = self._io.read_bytes(28)
class SessionId(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u1()
self.sid = self._io.read_bytes(self.len)
class Sni(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.list_length = self._io.read_u2be()
self.server_names = []
i = 0
while not self._io.is_eof():
self.server_names.append(DtlsClientHello.ServerName(self._io, self, self._root))
i += 1
class CipherSuites(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u2be()
self.cipher_suites = []
for i in range(self.len // 2):
self.cipher_suites.append(self._io.read_u2be())
class CompressionMethods(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u1()
self.compression_methods = self._io.read_bytes(self.len)
class Alpn(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.ext_len = self._io.read_u2be()
self.alpn_protocols = []
i = 0
while not self._io.is_eof():
self.alpn_protocols.append(DtlsClientHello.Protocol(self._io, self, self._root))
i += 1
class Extensions(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u2be()
self.extensions = []
i = 0
while not self._io.is_eof():
self.extensions.append(DtlsClientHello.Extension(self._io, self, self._root))
i += 1
class Version(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.major = self._io.read_u1()
self.minor = self._io.read_u1()
class Cookie(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u1()
self.cookie = self._io.read_bytes(self.len)
class Protocol(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.strlen = self._io.read_u1()
self.name = self._io.read_bytes(self.strlen)
class Extension(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.type = self._io.read_u2be()
self.len = self._io.read_u2be()
_on = self.type
if _on == 0:
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = DtlsClientHello.Sni(_io__raw_body, self, self._root)
elif _on == 16:
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = DtlsClientHello.Alpn(_io__raw_body, self, self._root)
else:
self.body = self._io.read_bytes(self.len)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/ico.py | mitmproxy/contrib/kaitaistruct/ico.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Ico(KaitaiStruct):
"""Microsoft Windows uses specific file format to store applications
icons - ICO. This is a container that contains one or more image
files (effectively, DIB parts of BMP files or full PNG files are
contained inside).
.. seealso::
Source - https://docs.microsoft.com/en-us/previous-versions/ms997538(v=msdn.10)
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = self._io.read_bytes(4)
if not self.magic == b"\x00\x00\x01\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x00\x00\x01\x00", self.magic, self._io, u"/seq/0")
self.num_images = self._io.read_u2le()
self.images = []
for i in range(self.num_images):
self.images.append(Ico.IconDirEntry(self._io, self, self._root))
class IconDirEntry(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.width = self._io.read_u1()
self.height = self._io.read_u1()
self.num_colors = self._io.read_u1()
self.reserved = self._io.read_bytes(1)
if not self.reserved == b"\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x00", self.reserved, self._io, u"/types/icon_dir_entry/seq/3")
self.num_planes = self._io.read_u2le()
self.bpp = self._io.read_u2le()
self.len_img = self._io.read_u4le()
self.ofs_img = self._io.read_u4le()
@property
def img(self):
"""Raw image data. Use `is_png` to determine whether this is an
embedded PNG file (true) or a DIB bitmap (false) and call a
relevant parser, if needed to parse image data further.
"""
if hasattr(self, '_m_img'):
return self._m_img
_pos = self._io.pos()
self._io.seek(self.ofs_img)
self._m_img = self._io.read_bytes(self.len_img)
self._io.seek(_pos)
return getattr(self, '_m_img', None)
@property
def png_header(self):
"""Pre-reads first 8 bytes of the image to determine if it's an
embedded PNG file.
"""
if hasattr(self, '_m_png_header'):
return self._m_png_header
_pos = self._io.pos()
self._io.seek(self.ofs_img)
self._m_png_header = self._io.read_bytes(8)
self._io.seek(_pos)
return getattr(self, '_m_png_header', None)
@property
def is_png(self):
"""True if this image is in PNG format."""
if hasattr(self, '_m_is_png'):
return self._m_is_png
self._m_is_png = self.png_header == b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"
return getattr(self, '_m_is_png', None)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/__init__.py | mitmproxy/contrib/kaitaistruct/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/jpeg.py | mitmproxy/contrib/kaitaistruct/jpeg.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from . import exif
class Jpeg(KaitaiStruct):
"""JPEG File Interchange Format, or JFIF, or, more colloquially known
as just "JPEG" or "JPG", is a popular 2D bitmap image file format,
offering lossy compression which works reasonably well with
photographic images.
Format is organized as a container format, serving multiple
"segments", each starting with a magic and a marker. JFIF standard
dictates order and mandatory apperance of segments:
* SOI
* APP0 (with JFIF magic)
* APP0 (with JFXX magic, optional)
* everything else
* SOS
* JPEG-compressed stream
* EOI
"""
class ComponentId(Enum):
y = 1
cb = 2
cr = 3
i = 4
q = 5
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.segments = []
i = 0
while not self._io.is_eof():
self.segments.append(Jpeg.Segment(self._io, self, self._root))
i += 1
class Segment(KaitaiStruct):
class MarkerEnum(Enum):
tem = 1
sof0 = 192
sof1 = 193
sof2 = 194
sof3 = 195
dht = 196
sof5 = 197
sof6 = 198
sof7 = 199
soi = 216
eoi = 217
sos = 218
dqt = 219
dnl = 220
dri = 221
dhp = 222
app0 = 224
app1 = 225
app2 = 226
app3 = 227
app4 = 228
app5 = 229
app6 = 230
app7 = 231
app8 = 232
app9 = 233
app10 = 234
app11 = 235
app12 = 236
app13 = 237
app14 = 238
app15 = 239
com = 254
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = self._io.read_bytes(1)
if not self.magic == b"\xFF":
raise kaitaistruct.ValidationNotEqualError(b"\xFF", self.magic, self._io, u"/types/segment/seq/0")
self.marker = KaitaiStream.resolve_enum(Jpeg.Segment.MarkerEnum, self._io.read_u1())
if ((self.marker != Jpeg.Segment.MarkerEnum.soi) and (self.marker != Jpeg.Segment.MarkerEnum.eoi)) :
self.length = self._io.read_u2be()
if ((self.marker != Jpeg.Segment.MarkerEnum.soi) and (self.marker != Jpeg.Segment.MarkerEnum.eoi)) :
_on = self.marker
if _on == Jpeg.Segment.MarkerEnum.app1:
self._raw_data = self._io.read_bytes((self.length - 2))
_io__raw_data = KaitaiStream(BytesIO(self._raw_data))
self.data = Jpeg.SegmentApp1(_io__raw_data, self, self._root)
elif _on == Jpeg.Segment.MarkerEnum.app0:
self._raw_data = self._io.read_bytes((self.length - 2))
_io__raw_data = KaitaiStream(BytesIO(self._raw_data))
self.data = Jpeg.SegmentApp0(_io__raw_data, self, self._root)
elif _on == Jpeg.Segment.MarkerEnum.sof0:
self._raw_data = self._io.read_bytes((self.length - 2))
_io__raw_data = KaitaiStream(BytesIO(self._raw_data))
self.data = Jpeg.SegmentSof0(_io__raw_data, self, self._root)
elif _on == Jpeg.Segment.MarkerEnum.sos:
self._raw_data = self._io.read_bytes((self.length - 2))
_io__raw_data = KaitaiStream(BytesIO(self._raw_data))
self.data = Jpeg.SegmentSos(_io__raw_data, self, self._root)
else:
self.data = self._io.read_bytes((self.length - 2))
if self.marker == Jpeg.Segment.MarkerEnum.sos:
self.image_data = self._io.read_bytes_full()
class SegmentSos(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.num_components = self._io.read_u1()
self.components = []
for i in range(self.num_components):
self.components.append(Jpeg.SegmentSos.Component(self._io, self, self._root))
self.start_spectral_selection = self._io.read_u1()
self.end_spectral = self._io.read_u1()
self.appr_bit_pos = self._io.read_u1()
class Component(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.id = KaitaiStream.resolve_enum(Jpeg.ComponentId, self._io.read_u1())
self.huffman_table = self._io.read_u1()
class SegmentApp1(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
_on = self.magic
if _on == u"Exif":
self.body = Jpeg.ExifInJpeg(self._io, self, self._root)
class SegmentSof0(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.bits_per_sample = self._io.read_u1()
self.image_height = self._io.read_u2be()
self.image_width = self._io.read_u2be()
self.num_components = self._io.read_u1()
self.components = []
for i in range(self.num_components):
self.components.append(Jpeg.SegmentSof0.Component(self._io, self, self._root))
class Component(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.id = KaitaiStream.resolve_enum(Jpeg.ComponentId, self._io.read_u1())
self.sampling_factors = self._io.read_u1()
self.quantization_table_id = self._io.read_u1()
@property
def sampling_x(self):
if hasattr(self, '_m_sampling_x'):
return self._m_sampling_x
self._m_sampling_x = ((self.sampling_factors & 240) >> 4)
return getattr(self, '_m_sampling_x', None)
@property
def sampling_y(self):
if hasattr(self, '_m_sampling_y'):
return self._m_sampling_y
self._m_sampling_y = (self.sampling_factors & 15)
return getattr(self, '_m_sampling_y', None)
class ExifInJpeg(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.extra_zero = self._io.read_bytes(1)
if not self.extra_zero == b"\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x00", self.extra_zero, self._io, u"/types/exif_in_jpeg/seq/0")
self._raw_data = self._io.read_bytes_full()
_io__raw_data = KaitaiStream(BytesIO(self._raw_data))
self.data = exif.Exif(_io__raw_data)
class SegmentApp0(KaitaiStruct):
class DensityUnit(Enum):
no_units = 0
pixels_per_inch = 1
pixels_per_cm = 2
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = (self._io.read_bytes(5)).decode(u"ASCII")
self.version_major = self._io.read_u1()
self.version_minor = self._io.read_u1()
self.density_units = KaitaiStream.resolve_enum(Jpeg.SegmentApp0.DensityUnit, self._io.read_u1())
self.density_x = self._io.read_u2be()
self.density_y = self._io.read_u2be()
self.thumbnail_x = self._io.read_u1()
self.thumbnail_y = self._io.read_u1()
self.thumbnail = self._io.read_bytes(((self.thumbnail_x * self.thumbnail_y) * 3))
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/google_protobuf.py | mitmproxy/contrib/kaitaistruct/google_protobuf.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStream, KaitaiStruct
from enum import Enum
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from . import vlq_base128_le
class GoogleProtobuf(KaitaiStruct):
"""Google Protocol Buffers (AKA protobuf) is a popular data
serialization scheme used for communication protocols, data storage,
etc. There are implementations are available for almost every
popular language. The focus points of this scheme are brevity (data
is encoded in a very size-efficient manner) and extensibility (one
can add keys to the structure, while keeping it readable in previous
version of software).
Protobuf uses semi-self-describing encoding scheme for its
messages. It means that it is possible to parse overall structure of
the message (skipping over fields one can't understand), but to
fully understand the message, one needs a protocol definition file
(`.proto`). To be specific:
* "Keys" in key-value pairs provided in the message are identified
only with an integer "field tag". `.proto` file provides info on
which symbolic field names these field tags map to.
* "Keys" also provide something called "wire type". It's not a data
type in its common sense (i.e. you can't, for example, distinguish
`sint32` vs `uint32` vs some enum, or `string` from `bytes`), but
it's enough information to determine how many bytes to
parse. Interpretation of the value should be done according to the
type specified in `.proto` file.
* There's no direct information on which fields are optional /
required, which fields may be repeated or constitute a map, what
restrictions are placed on fields usage in a single message, what
are the fields' default values, etc, etc.
.. seealso::
Source - https://developers.google.com/protocol-buffers/docs/encoding
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.pairs = []
i = 0
while not self._io.is_eof():
self.pairs.append(GoogleProtobuf.Pair(self._io, self, self._root))
i += 1
class Pair(KaitaiStruct):
"""Key-value pair."""
class WireTypes(Enum):
varint = 0
bit_64 = 1
len_delimited = 2
group_start = 3
group_end = 4
bit_32 = 5
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.key = vlq_base128_le.VlqBase128Le(self._io)
_on = self.wire_type
if _on == GoogleProtobuf.Pair.WireTypes.varint:
self.value = vlq_base128_le.VlqBase128Le(self._io)
elif _on == GoogleProtobuf.Pair.WireTypes.len_delimited:
self.value = GoogleProtobuf.DelimitedBytes(self._io, self, self._root)
elif _on == GoogleProtobuf.Pair.WireTypes.bit_64:
self.value = self._io.read_u8le()
elif _on == GoogleProtobuf.Pair.WireTypes.bit_32:
self.value = self._io.read_u4le()
@property
def wire_type(self):
""""Wire type" is a part of the "key" that carries enough
information to parse value from the wire, i.e. read correct
amount of bytes, but there's not enough informaton to
interprete in unambiguously. For example, one can't clearly
distinguish 64-bit fixed-sized integers from 64-bit floats,
signed zigzag-encoded varints from regular unsigned varints,
arbitrary bytes from UTF-8 encoded strings, etc.
"""
if hasattr(self, '_m_wire_type'):
return self._m_wire_type
self._m_wire_type = KaitaiStream.resolve_enum(GoogleProtobuf.Pair.WireTypes, (self.key.value & 7))
return getattr(self, '_m_wire_type', None)
@property
def field_tag(self):
"""Identifies a field of protocol. One can look up symbolic
field name in a `.proto` file by this field tag.
"""
if hasattr(self, '_m_field_tag'):
return self._m_field_tag
self._m_field_tag = (self.key.value >> 3)
return getattr(self, '_m_field_tag', None)
class DelimitedBytes(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = vlq_base128_le.VlqBase128Le(self._io)
self.body = self._io.read_bytes(self.len.value)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/png.py | mitmproxy/contrib/kaitaistruct/png.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import zlib
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Png(KaitaiStruct):
"""Test files for APNG can be found at the following locations:
* <https://philip.html5.org/tests/apng/tests.html>
* <http://littlesvr.ca/apng/>
"""
class PhysUnit(Enum):
unknown = 0
meter = 1
class BlendOpValues(Enum):
source = 0
over = 1
class CompressionMethods(Enum):
zlib = 0
class DisposeOpValues(Enum):
none = 0
background = 1
previous = 2
class ColorType(Enum):
greyscale = 0
truecolor = 2
indexed = 3
greyscale_alpha = 4
truecolor_alpha = 6
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.magic = self._io.read_bytes(8)
if not self.magic == b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A":
raise kaitaistruct.ValidationNotEqualError(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A", self.magic, self._io, u"/seq/0")
self.ihdr_len = self._io.read_u4be()
if not self.ihdr_len == 13:
raise kaitaistruct.ValidationNotEqualError(13, self.ihdr_len, self._io, u"/seq/1")
self.ihdr_type = self._io.read_bytes(4)
if not self.ihdr_type == b"\x49\x48\x44\x52":
raise kaitaistruct.ValidationNotEqualError(b"\x49\x48\x44\x52", self.ihdr_type, self._io, u"/seq/2")
self.ihdr = Png.IhdrChunk(self._io, self, self._root)
self.ihdr_crc = self._io.read_bytes(4)
self.chunks = []
i = 0
while True:
_ = Png.Chunk(self._io, self, self._root)
self.chunks.append(_)
if ((_.type == u"IEND") or (self._io.is_eof())) :
break
i += 1
class Rgb(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.r = self._io.read_u1()
self.g = self._io.read_u1()
self.b = self._io.read_u1()
class Chunk(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len = self._io.read_u4be()
self.type = (self._io.read_bytes(4)).decode(u"UTF-8")
_on = self.type
if _on == u"iTXt":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.InternationalTextChunk(_io__raw_body, self, self._root)
elif _on == u"gAMA":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.GamaChunk(_io__raw_body, self, self._root)
elif _on == u"tIME":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.TimeChunk(_io__raw_body, self, self._root)
elif _on == u"PLTE":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.PlteChunk(_io__raw_body, self, self._root)
elif _on == u"bKGD":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.BkgdChunk(_io__raw_body, self, self._root)
elif _on == u"pHYs":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.PhysChunk(_io__raw_body, self, self._root)
elif _on == u"fdAT":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.FrameDataChunk(_io__raw_body, self, self._root)
elif _on == u"tEXt":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.TextChunk(_io__raw_body, self, self._root)
elif _on == u"cHRM":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.ChrmChunk(_io__raw_body, self, self._root)
elif _on == u"acTL":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.AnimationControlChunk(_io__raw_body, self, self._root)
elif _on == u"sRGB":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.SrgbChunk(_io__raw_body, self, self._root)
elif _on == u"zTXt":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.CompressedTextChunk(_io__raw_body, self, self._root)
elif _on == u"fcTL":
self._raw_body = self._io.read_bytes(self.len)
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = Png.FrameControlChunk(_io__raw_body, self, self._root)
else:
self.body = self._io.read_bytes(self.len)
self.crc = self._io.read_bytes(4)
class BkgdIndexed(KaitaiStruct):
"""Background chunk for images with indexed palette."""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.palette_index = self._io.read_u1()
class Point(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.x_int = self._io.read_u4be()
self.y_int = self._io.read_u4be()
@property
def x(self):
if hasattr(self, '_m_x'):
return self._m_x
self._m_x = (self.x_int / 100000.0)
return getattr(self, '_m_x', None)
@property
def y(self):
if hasattr(self, '_m_y'):
return self._m_y
self._m_y = (self.y_int / 100000.0)
return getattr(self, '_m_y', None)
class BkgdGreyscale(KaitaiStruct):
"""Background chunk for greyscale images."""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.value = self._io.read_u2be()
class ChrmChunk(KaitaiStruct):
"""
.. seealso::
Source - https://www.w3.org/TR/PNG/#11cHRM
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.white_point = Png.Point(self._io, self, self._root)
self.red = Png.Point(self._io, self, self._root)
self.green = Png.Point(self._io, self, self._root)
self.blue = Png.Point(self._io, self, self._root)
class IhdrChunk(KaitaiStruct):
"""
.. seealso::
Source - https://www.w3.org/TR/PNG/#11IHDR
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.width = self._io.read_u4be()
self.height = self._io.read_u4be()
self.bit_depth = self._io.read_u1()
self.color_type = KaitaiStream.resolve_enum(Png.ColorType, self._io.read_u1())
self.compression_method = self._io.read_u1()
self.filter_method = self._io.read_u1()
self.interlace_method = self._io.read_u1()
class PlteChunk(KaitaiStruct):
"""
.. seealso::
Source - https://www.w3.org/TR/PNG/#11PLTE
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.entries = []
i = 0
while not self._io.is_eof():
self.entries.append(Png.Rgb(self._io, self, self._root))
i += 1
class SrgbChunk(KaitaiStruct):
"""
.. seealso::
Source - https://www.w3.org/TR/PNG/#11sRGB
"""
class Intent(Enum):
perceptual = 0
relative_colorimetric = 1
saturation = 2
absolute_colorimetric = 3
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.render_intent = KaitaiStream.resolve_enum(Png.SrgbChunk.Intent, self._io.read_u1())
class CompressedTextChunk(KaitaiStruct):
"""Compressed text chunk effectively allows to store key-value
string pairs in PNG container, compressing "value" part (which
can be quite lengthy) with zlib compression.
.. seealso::
Source - https://www.w3.org/TR/PNG/#11zTXt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.keyword = (self._io.read_bytes_term(0, False, True, True)).decode(u"UTF-8")
self.compression_method = KaitaiStream.resolve_enum(Png.CompressionMethods, self._io.read_u1())
self._raw_text_datastream = self._io.read_bytes_full()
self.text_datastream = zlib.decompress(self._raw_text_datastream)
class FrameDataChunk(KaitaiStruct):
"""
.. seealso::
Source - https://wiki.mozilla.org/APNG_Specification#.60fdAT.60:_The_Frame_Data_Chunk
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sequence_number = self._io.read_u4be()
self.frame_data = self._io.read_bytes_full()
class BkgdTruecolor(KaitaiStruct):
"""Background chunk for truecolor images."""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.red = self._io.read_u2be()
self.green = self._io.read_u2be()
self.blue = self._io.read_u2be()
class GamaChunk(KaitaiStruct):
"""
.. seealso::
Source - https://www.w3.org/TR/PNG/#11gAMA
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.gamma_int = self._io.read_u4be()
@property
def gamma_ratio(self):
if hasattr(self, '_m_gamma_ratio'):
return self._m_gamma_ratio
self._m_gamma_ratio = (100000.0 / self.gamma_int)
return getattr(self, '_m_gamma_ratio', None)
class BkgdChunk(KaitaiStruct):
"""Background chunk stores default background color to display this
image against. Contents depend on `color_type` of the image.
.. seealso::
Source - https://www.w3.org/TR/PNG/#11bKGD
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
_on = self._root.ihdr.color_type
if _on == Png.ColorType.indexed:
self.bkgd = Png.BkgdIndexed(self._io, self, self._root)
elif _on == Png.ColorType.truecolor_alpha:
self.bkgd = Png.BkgdTruecolor(self._io, self, self._root)
elif _on == Png.ColorType.greyscale_alpha:
self.bkgd = Png.BkgdGreyscale(self._io, self, self._root)
elif _on == Png.ColorType.truecolor:
self.bkgd = Png.BkgdTruecolor(self._io, self, self._root)
elif _on == Png.ColorType.greyscale:
self.bkgd = Png.BkgdGreyscale(self._io, self, self._root)
class PhysChunk(KaitaiStruct):
""""Physical size" chunk stores data that allows to translate
logical pixels into physical units (meters, etc) and vice-versa.
.. seealso::
Source - https://www.w3.org/TR/PNG/#11pHYs
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.pixels_per_unit_x = self._io.read_u4be()
self.pixels_per_unit_y = self._io.read_u4be()
self.unit = KaitaiStream.resolve_enum(Png.PhysUnit, self._io.read_u1())
class FrameControlChunk(KaitaiStruct):
"""
.. seealso::
Source - https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.sequence_number = self._io.read_u4be()
self.width = self._io.read_u4be()
if not self.width >= 1:
raise kaitaistruct.ValidationLessThanError(1, self.width, self._io, u"/types/frame_control_chunk/seq/1")
if not self.width <= self._root.ihdr.width:
raise kaitaistruct.ValidationGreaterThanError(self._root.ihdr.width, self.width, self._io, u"/types/frame_control_chunk/seq/1")
self.height = self._io.read_u4be()
if not self.height >= 1:
raise kaitaistruct.ValidationLessThanError(1, self.height, self._io, u"/types/frame_control_chunk/seq/2")
if not self.height <= self._root.ihdr.height:
raise kaitaistruct.ValidationGreaterThanError(self._root.ihdr.height, self.height, self._io, u"/types/frame_control_chunk/seq/2")
self.x_offset = self._io.read_u4be()
if not self.x_offset <= (self._root.ihdr.width - self.width):
raise kaitaistruct.ValidationGreaterThanError((self._root.ihdr.width - self.width), self.x_offset, self._io, u"/types/frame_control_chunk/seq/3")
self.y_offset = self._io.read_u4be()
if not self.y_offset <= (self._root.ihdr.height - self.height):
raise kaitaistruct.ValidationGreaterThanError((self._root.ihdr.height - self.height), self.y_offset, self._io, u"/types/frame_control_chunk/seq/4")
self.delay_num = self._io.read_u2be()
self.delay_den = self._io.read_u2be()
self.dispose_op = KaitaiStream.resolve_enum(Png.DisposeOpValues, self._io.read_u1())
self.blend_op = KaitaiStream.resolve_enum(Png.BlendOpValues, self._io.read_u1())
@property
def delay(self):
"""Time to display this frame, in seconds."""
if hasattr(self, '_m_delay'):
return self._m_delay
self._m_delay = (self.delay_num / (100.0 if self.delay_den == 0 else self.delay_den))
return getattr(self, '_m_delay', None)
class InternationalTextChunk(KaitaiStruct):
"""International text chunk effectively allows to store key-value string pairs in
PNG container. Both "key" (keyword) and "value" (text) parts are
given in pre-defined subset of iso8859-1 without control
characters.
.. seealso::
Source - https://www.w3.org/TR/PNG/#11iTXt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.keyword = (self._io.read_bytes_term(0, False, True, True)).decode(u"UTF-8")
self.compression_flag = self._io.read_u1()
self.compression_method = KaitaiStream.resolve_enum(Png.CompressionMethods, self._io.read_u1())
self.language_tag = (self._io.read_bytes_term(0, False, True, True)).decode(u"ASCII")
self.translated_keyword = (self._io.read_bytes_term(0, False, True, True)).decode(u"UTF-8")
self.text = (self._io.read_bytes_full()).decode(u"UTF-8")
class TextChunk(KaitaiStruct):
"""Text chunk effectively allows to store key-value string pairs in
PNG container. Both "key" (keyword) and "value" (text) parts are
given in pre-defined subset of iso8859-1 without control
characters.
.. seealso::
Source - https://www.w3.org/TR/PNG/#11tEXt
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.keyword = (self._io.read_bytes_term(0, False, True, True)).decode(u"iso8859-1")
self.text = (self._io.read_bytes_full()).decode(u"iso8859-1")
class AnimationControlChunk(KaitaiStruct):
"""
.. seealso::
Source - https://wiki.mozilla.org/APNG_Specification#.60acTL.60:_The_Animation_Control_Chunk
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.num_frames = self._io.read_u4be()
self.num_plays = self._io.read_u4be()
class TimeChunk(KaitaiStruct):
"""Time chunk stores time stamp of last modification of this image,
up to 1 second precision in UTC timezone.
.. seealso::
Source - https://www.w3.org/TR/PNG/#11tIME
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.year = self._io.read_u2be()
self.month = self._io.read_u1()
self.day = self._io.read_u1()
self.hour = self._io.read_u1()
self.minute = self._io.read_u1()
self.second = self._io.read_u1()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/kaitaistruct/exif.py | mitmproxy/contrib/kaitaistruct/exif.py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
import kaitaistruct
from kaitaistruct import KaitaiStream, KaitaiStruct
from enum import Enum
if getattr(kaitaistruct, 'API_VERSION', (0, 9)) < (0, 9):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Exif(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.endianness = self._io.read_u2le()
self.body = Exif.ExifBody(self._io, self, self._root)
class ExifBody(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
_on = self._root.endianness
if _on == 18761:
self._is_le = True
elif _on == 19789:
self._is_le = False
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/exif_body")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.version = self._io.read_u2le()
self.ifd0_ofs = self._io.read_u4le()
def _read_be(self):
self.version = self._io.read_u2be()
self.ifd0_ofs = self._io.read_u4be()
class Ifd(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None, _is_le=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._is_le = _is_le
self._read()
def _read(self):
if not hasattr(self, '_is_le'):
raise kaitaistruct.UndecidedEndiannessError("/types/exif_body/types/ifd")
elif self._is_le == True:
self._read_le()
elif self._is_le == False:
self._read_be()
def _read_le(self):
self.num_fields = self._io.read_u2le()
self.fields = []
for i in range(self.num_fields):
self.fields.append(Exif.ExifBody.IfdField(self._io, self, self._root, self._is_le))
self.next_ifd_ofs = self._io.read_u4le()
def _read_be(self):
self.num_fields = self._io.read_u2be()
self.fields = []
for i in range(self.num_fields):
self.fields.append(Exif.ExifBody.IfdField(self._io, self, self._root, self._is_le))
self.next_ifd_ofs = self._io.read_u4be()
@property
def next_ifd(self):
if hasattr(self, '_m_next_ifd'):
return self._m_next_ifd
if self.next_ifd_ofs != 0:
_pos = self._io.pos()
self._io.seek(self.next_ifd_ofs)
if self._is_le:
self._m_next_ifd = Exif.ExifBody.Ifd(self._io, self, self._root, self._is_le)
else:
self._m_next_ifd = Exif.ExifBody.Ifd(self._io, self, self._root, self._is_le)
self._io.seek(_pos)
return getattr(self, '_m_next_ifd', None)
class IfdField(KaitaiStruct):
    """A single IFD entry: tag id, element type, element count, and either
    immediate data or an offset to the data (TIFF 6.0 directory entry).
    Generated Kaitai Struct code."""
    class FieldTypeEnum(Enum):
        # TIFF 6.0 / EXIF field data type codes.
        byte = 1
        ascii_string = 2
        word = 3
        dword = 4
        rational = 5
        undefined = 7
        slong = 9
        srational = 10
    class TagEnum(Enum):
        # EXIF / TIFF / TIFF-EP / DNG tag ids, as registered in the
        # respective specifications (numeric values are the on-disk tag ids).
        image_width = 256
        image_height = 257
        bits_per_sample = 258
        compression = 259
        photometric_interpretation = 262
        thresholding = 263
        cell_width = 264
        cell_length = 265
        fill_order = 266
        document_name = 269
        image_description = 270
        make = 271
        model = 272
        strip_offsets = 273
        orientation = 274
        samples_per_pixel = 277
        rows_per_strip = 278
        strip_byte_counts = 279
        min_sample_value = 280
        max_sample_value = 281
        x_resolution = 282
        y_resolution = 283
        planar_configuration = 284
        page_name = 285
        x_position = 286
        y_position = 287
        free_offsets = 288
        free_byte_counts = 289
        gray_response_unit = 290
        gray_response_curve = 291
        t4_options = 292
        t6_options = 293
        resolution_unit = 296
        page_number = 297
        color_response_unit = 300
        transfer_function = 301
        software = 305
        modify_date = 306
        artist = 315
        host_computer = 316
        predictor = 317
        white_point = 318
        primary_chromaticities = 319
        color_map = 320
        halftone_hints = 321
        tile_width = 322
        tile_length = 323
        tile_offsets = 324
        tile_byte_counts = 325
        bad_fax_lines = 326
        clean_fax_data = 327
        consecutive_bad_fax_lines = 328
        sub_ifd = 330
        ink_set = 332
        ink_names = 333
        numberof_inks = 334
        dot_range = 336
        target_printer = 337
        extra_samples = 338
        sample_format = 339
        s_min_sample_value = 340
        s_max_sample_value = 341
        transfer_range = 342
        clip_path = 343
        x_clip_path_units = 344
        y_clip_path_units = 345
        indexed = 346
        jpeg_tables = 347
        opi_proxy = 351
        global_parameters_ifd = 400
        profile_type = 401
        fax_profile = 402
        coding_methods = 403
        version_year = 404
        mode_number = 405
        decode = 433
        default_image_color = 434
        t82_options = 435
        jpeg_tables2 = 437
        jpeg_proc = 512
        thumbnail_offset = 513
        thumbnail_length = 514
        jpeg_restart_interval = 515
        jpeg_lossless_predictors = 517
        jpeg_point_transforms = 518
        jpegq_tables = 519
        jpegdc_tables = 520
        jpegac_tables = 521
        y_cb_cr_coefficients = 529
        y_cb_cr_sub_sampling = 530
        y_cb_cr_positioning = 531
        reference_black_white = 532
        strip_row_counts = 559
        application_notes = 700
        uspto_miscellaneous = 999
        related_image_file_format = 4096
        related_image_width = 4097
        related_image_height = 4098
        rating = 18246
        xp_dip_xml = 18247
        stitch_info = 18248
        rating_percent = 18249
        sony_raw_file_type = 28672
        light_falloff_params = 28722
        chromatic_aberration_corr_params = 28725
        distortion_corr_params = 28727
        image_id = 32781
        wang_tag1 = 32931
        wang_annotation = 32932
        wang_tag3 = 32933
        wang_tag4 = 32934
        image_reference_points = 32953
        region_xform_tack_point = 32954
        warp_quadrilateral = 32955
        affine_transform_mat = 32956
        matteing = 32995
        data_type = 32996
        image_depth = 32997
        tile_depth = 32998
        image_full_width = 33300
        image_full_height = 33301
        texture_format = 33302
        wrap_modes = 33303
        fov_cot = 33304
        matrix_world_to_screen = 33305
        matrix_world_to_camera = 33306
        model2 = 33405
        cfa_repeat_pattern_dim = 33421
        cfa_pattern2 = 33422
        battery_level = 33423
        kodak_ifd = 33424
        copyright = 33432
        exposure_time = 33434
        f_number = 33437
        md_file_tag = 33445
        md_scale_pixel = 33446
        md_color_table = 33447
        md_lab_name = 33448
        md_sample_info = 33449
        md_prep_date = 33450
        md_prep_time = 33451
        md_file_units = 33452
        pixel_scale = 33550
        advent_scale = 33589
        advent_revision = 33590
        uic1_tag = 33628
        uic2_tag = 33629
        uic3_tag = 33630
        uic4_tag = 33631
        iptc_naa = 33723
        intergraph_packet_data = 33918
        intergraph_flag_registers = 33919
        intergraph_matrix = 33920
        ingr_reserved = 33921
        model_tie_point = 33922
        site = 34016
        color_sequence = 34017
        it8_header = 34018
        raster_padding = 34019
        bits_per_run_length = 34020
        bits_per_extended_run_length = 34021
        color_table = 34022
        image_color_indicator = 34023
        background_color_indicator = 34024
        image_color_value = 34025
        background_color_value = 34026
        pixel_intensity_range = 34027
        transparency_indicator = 34028
        color_characterization = 34029
        hc_usage = 34030
        trap_indicator = 34031
        cmyk_equivalent = 34032
        sem_info = 34118
        afcp_iptc = 34152
        pixel_magic_jbig_options = 34232
        jpl_carto_ifd = 34263
        model_transform = 34264
        wb_grgb_levels = 34306
        leaf_data = 34310
        photoshop_settings = 34377
        exif_offset = 34665
        icc_profile = 34675
        tiff_fx_extensions = 34687
        multi_profiles = 34688
        shared_data = 34689
        t88_options = 34690
        image_layer = 34732
        geo_tiff_directory = 34735
        geo_tiff_double_params = 34736
        geo_tiff_ascii_params = 34737
        jbig_options = 34750
        exposure_program = 34850
        spectral_sensitivity = 34852
        gps_info = 34853
        iso = 34855
        opto_electric_conv_factor = 34856
        interlace = 34857
        time_zone_offset = 34858
        self_timer_mode = 34859
        sensitivity_type = 34864
        standard_output_sensitivity = 34865
        recommended_exposure_index = 34866
        iso_speed = 34867
        iso_speed_latitudeyyy = 34868
        iso_speed_latitudezzz = 34869
        fax_recv_params = 34908
        fax_sub_address = 34909
        fax_recv_time = 34910
        fedex_edr = 34929
        leaf_sub_ifd = 34954
        exif_version = 36864
        date_time_original = 36867
        create_date = 36868
        google_plus_upload_code = 36873
        offset_time = 36880
        offset_time_original = 36881
        offset_time_digitized = 36882
        components_configuration = 37121
        compressed_bits_per_pixel = 37122
        shutter_speed_value = 37377
        aperture_value = 37378
        brightness_value = 37379
        exposure_compensation = 37380
        max_aperture_value = 37381
        subject_distance = 37382
        metering_mode = 37383
        light_source = 37384
        flash = 37385
        focal_length = 37386
        flash_energy = 37387
        spatial_frequency_response = 37388
        noise = 37389
        focal_plane_x_resolution = 37390
        focal_plane_y_resolution = 37391
        focal_plane_resolution_unit = 37392
        image_number = 37393
        security_classification = 37394
        image_history = 37395
        subject_area = 37396
        exposure_index = 37397
        tiff_ep_standard_id = 37398
        sensing_method = 37399
        cip3_data_file = 37434
        cip3_sheet = 37435
        cip3_side = 37436
        sto_nits = 37439
        maker_note = 37500
        user_comment = 37510
        sub_sec_time = 37520
        sub_sec_time_original = 37521
        sub_sec_time_digitized = 37522
        ms_document_text = 37679
        ms_property_set_storage = 37680
        ms_document_text_position = 37681
        image_source_data = 37724
        ambient_temperature = 37888
        humidity = 37889
        pressure = 37890
        water_depth = 37891
        acceleration = 37892
        camera_elevation_angle = 37893
        xp_title = 40091
        xp_comment = 40092
        xp_author = 40093
        xp_keywords = 40094
        xp_subject = 40095
        flashpix_version = 40960
        color_space = 40961
        exif_image_width = 40962
        exif_image_height = 40963
        related_sound_file = 40964
        interop_offset = 40965
        samsung_raw_pointers_offset = 40976
        samsung_raw_pointers_length = 40977
        samsung_raw_byte_order = 41217
        samsung_raw_unknown = 41218
        flash_energy2 = 41483
        spatial_frequency_response2 = 41484
        noise2 = 41485
        focal_plane_x_resolution2 = 41486
        focal_plane_y_resolution2 = 41487
        focal_plane_resolution_unit2 = 41488
        image_number2 = 41489
        security_classification2 = 41490
        image_history2 = 41491
        subject_location = 41492
        exposure_index2 = 41493
        tiff_ep_standard_id2 = 41494
        sensing_method2 = 41495
        file_source = 41728
        scene_type = 41729
        cfa_pattern = 41730
        custom_rendered = 41985
        exposure_mode = 41986
        white_balance = 41987
        digital_zoom_ratio = 41988
        focal_length_in35mm_format = 41989
        scene_capture_type = 41990
        gain_control = 41991
        contrast = 41992
        saturation = 41993
        sharpness = 41994
        device_setting_description = 41995
        subject_distance_range = 41996
        image_unique_id = 42016
        owner_name = 42032
        serial_number = 42033
        lens_info = 42034
        lens_make = 42035
        lens_model = 42036
        lens_serial_number = 42037
        gdal_metadata = 42112
        gdal_no_data = 42113
        gamma = 42240
        expand_software = 44992
        expand_lens = 44993
        expand_film = 44994
        expand_filter_lens = 44995
        expand_scanner = 44996
        expand_flash_lamp = 44997
        pixel_format = 48129
        transformation = 48130
        uncompressed = 48131
        image_type = 48132
        image_width2 = 48256
        image_height2 = 48257
        width_resolution = 48258
        height_resolution = 48259
        image_offset = 48320
        image_byte_count = 48321
        alpha_offset = 48322
        alpha_byte_count = 48323
        image_data_discard = 48324
        alpha_data_discard = 48325
        oce_scanjob_desc = 50215
        oce_application_selector = 50216
        oce_id_number = 50217
        oce_image_logic = 50218
        annotations = 50255
        print_im = 50341
        original_file_name = 50547
        uspto_original_content_type = 50560
        dng_version = 50706
        dng_backward_version = 50707
        unique_camera_model = 50708
        localized_camera_model = 50709
        cfa_plane_color = 50710
        cfa_layout = 50711
        linearization_table = 50712
        black_level_repeat_dim = 50713
        black_level = 50714
        black_level_delta_h = 50715
        black_level_delta_v = 50716
        white_level = 50717
        default_scale = 50718
        default_crop_origin = 50719
        default_crop_size = 50720
        color_matrix1 = 50721
        color_matrix2 = 50722
        camera_calibration1 = 50723
        camera_calibration2 = 50724
        reduction_matrix1 = 50725
        reduction_matrix2 = 50726
        analog_balance = 50727
        as_shot_neutral = 50728
        as_shot_white_xy = 50729
        baseline_exposure = 50730
        baseline_noise = 50731
        baseline_sharpness = 50732
        bayer_green_split = 50733
        linear_response_limit = 50734
        camera_serial_number = 50735
        dng_lens_info = 50736
        chroma_blur_radius = 50737
        anti_alias_strength = 50738
        shadow_scale = 50739
        sr2_private = 50740
        maker_note_safety = 50741
        raw_image_segmentation = 50752
        calibration_illuminant1 = 50778
        calibration_illuminant2 = 50779
        best_quality_scale = 50780
        raw_data_unique_id = 50781
        alias_layer_metadata = 50784
        original_raw_file_name = 50827
        original_raw_file_data = 50828
        active_area = 50829
        masked_areas = 50830
        as_shot_icc_profile = 50831
        as_shot_pre_profile_matrix = 50832
        current_icc_profile = 50833
        current_pre_profile_matrix = 50834
        colorimetric_reference = 50879
        s_raw_type = 50885
        panasonic_title = 50898
        panasonic_title2 = 50899
        camera_calibration_sig = 50931
        profile_calibration_sig = 50932
        profile_ifd = 50933
        as_shot_profile_name = 50934
        noise_reduction_applied = 50935
        profile_name = 50936
        profile_hue_sat_map_dims = 50937
        profile_hue_sat_map_data1 = 50938
        profile_hue_sat_map_data2 = 50939
        profile_tone_curve = 50940
        profile_embed_policy = 50941
        profile_copyright = 50942
        forward_matrix1 = 50964
        forward_matrix2 = 50965
        preview_application_name = 50966
        preview_application_version = 50967
        preview_settings_name = 50968
        preview_settings_digest = 50969
        preview_color_space = 50970
        preview_date_time = 50971
        raw_image_digest = 50972
        original_raw_file_digest = 50973
        sub_tile_block_size = 50974
        row_interleave_factor = 50975
        profile_look_table_dims = 50981
        profile_look_table_data = 50982
        opcode_list1 = 51008
        opcode_list2 = 51009
        opcode_list3 = 51022
        noise_profile = 51041
        time_codes = 51043
        frame_rate = 51044
        t_stop = 51058
        reel_name = 51081
        original_default_final_size = 51089
        original_best_quality_size = 51090
        original_default_crop_size = 51091
        camera_label = 51105
        profile_hue_sat_map_encoding = 51107
        profile_look_table_encoding = 51108
        baseline_exposure_offset = 51109
        default_black_render = 51110
        new_raw_image_digest = 51111
        raw_to_preview_gain = 51112
        default_user_crop = 51125
        padding = 59932
        offset_schema = 59933
        owner_name2 = 65000
        serial_number2 = 65001
        lens = 65002
        kdc_ifd = 65024
        raw_file = 65100
        converter = 65101
        white_balance2 = 65102
        exposure = 65105
        shadows = 65106
        brightness = 65107
        contrast2 = 65108
        saturation2 = 65109
        sharpness2 = 65110
        smoothness = 65111
        moire_filter = 65112
    def __init__(self, _io, _parent=None, _root=None, _is_le=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._is_le = _is_le
        self._read()
    def _read(self):
        # _is_le is inherited from the root; if it was never decided there,
        # parsing cannot proceed.
        if not hasattr(self, '_is_le'):
            raise kaitaistruct.UndecidedEndiannessError("/types/exif_body/types/ifd_field")
        elif self._is_le == True:
            self._read_le()
        elif self._is_le == False:
            self._read_be()
    def _read_le(self):
        """Read tag, type, element count and value/offset as little-endian."""
        self.tag = KaitaiStream.resolve_enum(Exif.ExifBody.IfdField.TagEnum, self._io.read_u2le())
        self.field_type = KaitaiStream.resolve_enum(Exif.ExifBody.IfdField.FieldTypeEnum, self._io.read_u2le())
        self.length = self._io.read_u4le()
        self.ofs_or_data = self._io.read_u4le()
    def _read_be(self):
        """Read tag, type, element count and value/offset as big-endian."""
        self.tag = KaitaiStream.resolve_enum(Exif.ExifBody.IfdField.TagEnum, self._io.read_u2be())
        self.field_type = KaitaiStream.resolve_enum(Exif.ExifBody.IfdField.FieldTypeEnum, self._io.read_u2be())
        self.length = self._io.read_u4be()
        self.ofs_or_data = self._io.read_u4be()
    @property
    def type_byte_length(self):
        """Per-element size in bytes: 2 for word, 4 for dword, else 1.

        NOTE(review): other multi-byte types (e.g. rational, which is 8
        bytes per TIFF 6.0, or slong at 4 bytes) also fall into the 1-byte
        fallback; this mirrors the upstream generated spec and is not
        corrected here.
        """
        if hasattr(self, '_m_type_byte_length'):
            return self._m_type_byte_length
        self._m_type_byte_length = (2 if self.field_type == Exif.ExifBody.IfdField.FieldTypeEnum.word else (4 if self.field_type == Exif.ExifBody.IfdField.FieldTypeEnum.dword else 1))
        return getattr(self, '_m_type_byte_length', None)
    @property
    def byte_length(self):
        """Total payload size in bytes (element count * per-element size)."""
        if hasattr(self, '_m_byte_length'):
            return self._m_byte_length
        self._m_byte_length = (self.length * self.type_byte_length)
        return getattr(self, '_m_byte_length', None)
    @property
    def is_immediate_data(self):
        """True when the payload fits into the 4-byte ofs_or_data slot itself."""
        if hasattr(self, '_m_is_immediate_data'):
            return self._m_is_immediate_data
        self._m_is_immediate_data = self.byte_length <= 4
        return getattr(self, '_m_is_immediate_data', None)
    @property
    def data(self):
        """Lazily read the out-of-line payload; None when the data is immediate."""
        if hasattr(self, '_m_data'):
            return self._m_data
        if not (self.is_immediate_data):
            # Payload lives elsewhere in the root stream: seek, read, restore.
            io = self._root._io
            _pos = io.pos()
            io.seek(self.ofs_or_data)
            if self._is_le:
                self._m_data = io.read_bytes(self.byte_length)
            else:
                # Generated artifact: both branches are identical.
                self._m_data = io.read_bytes(self.byte_length)
            io.seek(_pos)
        return getattr(self, '_m_data', None)
@property
def ifd0(self):
    """Lazily parse and memoize the first IFD, located at ifd0_ofs."""
    if hasattr(self, '_m_ifd0'):
        return self._m_ifd0
    # Seek to IFD0, parse it, then restore the stream position.
    _pos = self._io.pos()
    self._io.seek(self.ifd0_ofs)
    if self._is_le:
        self._m_ifd0 = Exif.ExifBody.Ifd(self._io, self, self._root, self._is_le)
    else:
        # Generated artifact: both branches are identical.
        self._m_ifd0 = Exif.ExifBody.Ifd(self._io, self, self._root, self._is_le)
    self._io.seek(_pos)
    return getattr(self, '_m_ifd0', None)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/ASCommandResponse.py | mitmproxy/contrib/wbxml/ASCommandResponse.py | #!/usr/bin/env python3
'''
@author: David Shaw, shawd@vmware.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: ASCommandResponse.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from .ASWBXML import ASWBXML
import logging
class ASCommandResponse:
    """An ActiveSync WBXML response body together with its decoded XML form.

    Raises ValueError from the constructor when the body is empty or when
    WBXML decoding fails (the underlying error is wrapped).
    """

    def __init__(self, response):
        self.wbxmlBody = response
        try:
            if len(response) > 0:
                self.xmlString = self.decodeWBXML(self.wbxmlBody)
            else:
                raise ValueError("Empty WBXML body passed")
        except Exception as e:
            # Decoding failed: record the failure, then surface it as ValueError.
            self.xmlString = None
            raise ValueError("Error: {0}".format(e))

    def getWBXMLBytes(self):
        """Return the raw WBXML bytes this response was constructed from."""
        # Bug fix: the original returned self.wbxmlBytes, an attribute that is
        # never assigned anywhere in this class, so the method always raised
        # AttributeError.  The raw body is stored in self.wbxmlBody.
        return self.wbxmlBody

    def getXMLString(self):
        """Return the decoded XML string (None if decoding failed)."""
        return self.xmlString

    def decodeWBXML(self, body):
        """Decode *body* (WBXML bytes) into an XML string via ASWBXML."""
        self.instance = ASWBXML()
        self.instance.loadBytes(body)
        return self.instance.getXml()
if __name__ == "__main__":
    # Ad-hoc smoke test: decode every WBXML sample file found in ../Samples
    # and log the resulting XML.  Not exercised when imported as a module.
    import os
    logging.basicConfig(level=logging.INFO)
    projectDir = os.path.dirname(os.path.realpath("."))
    samplesDir = os.path.join(projectDir, "Samples/")
    listOfSamples = os.listdir(samplesDir)
    for filename in listOfSamples:
        with open(samplesDir + os.sep + filename, "rb") as f:
            byteWBXML = f.read()
        logging.info("-"*100)
        logging.info(filename)
        logging.info("-"*100)
        instance = ASCommandResponse(byteWBXML)
        logging.info(instance.xmlString)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/ASWBXMLCodePage.py | mitmproxy/contrib/wbxml/ASWBXMLCodePage.py | #!/usr/bin/env python3
'''
@author: David Shaw, shawd@vmware.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: ASWBXMLCodePage.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class ASWBXMLCodePage:
    """Bidirectional token <-> tag mapping for a single WBXML code page."""

    def __init__(self):
        self.namespace = ""
        self.xmlns = ""
        self.tokenLookup = {}
        self.tagLookup = {}

    def addToken(self, token, tag):
        """Register a token/tag pair in both lookup directions."""
        self.tokenLookup[token] = tag
        self.tagLookup[tag] = token

    def getToken(self, tag):
        """Return the numeric token for *tag*, or 0xFF when unknown."""
        return self.tagLookup.get(tag, 0xFF)

    def getTag(self, token):
        """Return the tag name for *token*, or None when unknown."""
        return self.tokenLookup.get(token)

    def __repr__(self):
        return str(self.tokenLookup)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/ASWBXMLByteQueue.py | mitmproxy/contrib/wbxml/ASWBXMLByteQueue.py | #!/usr/bin/env python3
'''
@author: David Shaw, shawd@vmware.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: ASWBXMLByteQueue.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from queue import Queue
import logging
class ASWBXMLByteQueue(Queue):
    """FIFO of WBXML bytes with helpers for WBXML primitives.

    Wraps queue.Queue so bytes can be consumed one at a time while keeping
    simple enqueue/dequeue counters for debugging.
    """

    def __init__(self, wbxmlBytes):
        self.bytesDequeued = 0
        self.bytesEnqueued = 0
        Queue.__init__(self)
        for byte in wbxmlBytes:
            self.put(byte)
            self.bytesEnqueued += 1
        logging.debug("Array byte count: %d, enqueued: %d" % (self.qsize(), self.bytesEnqueued))

    def dequeueAndLog(self):
        """Pop one byte, count it, and log it (debugging aid)."""
        singleByte = self.get()
        self.bytesDequeued += 1
        logging.debug("Dequeued byte 0x{0:X} ({1} total)".format(singleByte, self.bytesDequeued))
        return singleByte

    def checkContinuationBit(self, byteval):
        """Return True if the WBXML continuation bit (0x80) is set in *byteval*."""
        continuationBitmask = 0x80
        return (continuationBitmask & byteval) != 0

    def dequeueMultibyteInt(self):
        """Decode a WBXML multi-byte integer (7 data bits per byte, MSB = continue).

        Returns None when the queue runs dry before a terminating byte is
        seen (the original implicitly fell off the end of the loop; made
        explicit here, same behaviour).
        """
        iReturn = 0
        while True:
            iReturn <<= 7
            if self.qsize() == 0:
                return None  # truncated input
            singleByte = self.dequeueAndLog()
            iReturn += singleByte & 0x7F
            if not self.checkContinuationBit(singleByte):
                return iReturn

    def dequeueString(self, length=None):
        """Read a string: exactly *length* bytes if given, else up to a 0x00 terminator.

        NOTE: bytes are decoded one-per-character (US-ASCII); multi-byte
        UTF-8 sequences are not handled correctly (pre-existing limitation).
        """
        # Idiom fixes vs. the original: `length is not None` instead of
        # `!= None`, and characters are collected in a list and joined once
        # instead of quadratic `+=` concatenation.
        chars = []
        if length is not None:
            for _ in range(length):
                if self.qsize() == 0:
                    break
                chars.append(chr(self.dequeueAndLog()))
        else:
            while True:
                currentByte = self.dequeueAndLog()
                if currentByte == 0x00:
                    break
                chars.append(chr(currentByte))
        return "".join(chars)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/InvalidDataException.py | mitmproxy/contrib/wbxml/InvalidDataException.py | #!/usr/bin/env python3
'''
@author: David Shaw, shawd@vmware.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: InvalidDataException.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class InvalidDataException(Exception):
    """Raised when WBXML input data is malformed or unparseable."""
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/__init__.py | mitmproxy/contrib/wbxml/__init__.py | python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false | |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/GlobalTokens.py | mitmproxy/contrib/wbxml/GlobalTokens.py | #!/usr/bin/env python3
'''
@author: David Shaw, shawd@vmware.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: GlobalTokens.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
class GlobalTokens:
    """WBXML global token codes (WAP Binary XML spec, section 7.1).

    These tokens are valid in every code page and control page switching,
    element termination, and the various string/extension/opaque encodings.
    """
    SWITCH_PAGE = 0x00  # change the current code page
    END = 0x01          # end of current element / attribute list
    ENTITY = 0x02       # character entity follows
    STR_I = 0x03        # inline null-terminated string follows
    LITERAL = 0x04      # unknown tag, name from the string table
    EXT_I_0 = 0x40      # inline-string document-specific extension
    EXT_I_1 = 0x41
    EXT_I_2 = 0x42
    PI = 0x43           # processing instruction
    LITERAL_C = 0x44    # unknown tag with content
    EXT_T_0 = 0x80      # string-table-reference extension
    EXT_T_1 = 0x81
    EXT_T_2 = 0x82
    STR_T = 0x83        # string-table reference follows
    LITERAL_A = 0x84    # unknown tag with attributes
    EXT_0 = 0xC0        # single-byte document-specific extension
    EXT_1 = 0xC1
    EXT_2 = 0xC2
    OPAQUE = 0xC3       # opaque (length-prefixed) binary data follows
    LITERAL_AC = 0xC4   # unknown tag with attributes and content
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/wbxml/ASWBXML.py | mitmproxy/contrib/wbxml/ASWBXML.py | #!/usr/bin/env python3
'''
@author: David Shaw, shawd@vmware.com
Inspired by EAS Inspector for Fiddler
https://easinspectorforfiddler.codeplex.com
----- The MIT License (MIT) -----
Filename: ASWBXML.py
Copyright (c) 2014, David P. Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import xml.dom.minidom
import logging
from .ASWBXMLCodePage import ASWBXMLCodePage
from .ASWBXMLByteQueue import ASWBXMLByteQueue
from .GlobalTokens import GlobalTokens
from .InvalidDataException import InvalidDataException
class ASWBXML:
versionByte = 0x03
publicIdentifierByte = 0x01
characterSetByte = 0x6A
stringTableLengthByte = 0x00
def __init__(self):
# empty on init
self.xmlDoc = xml.dom.minidom.Document()
self.currentCodePage = 0
self.defaultCodePage = -1
# Load up code pages
# Currently there are 25 code pages as per MS-ASWBXML
self.codePages = []
# region Code Page Initialization
# Code Page 0: AirSync
# region AirSync Code Page
page = ASWBXMLCodePage()
page.namespace = "AirSync:"
page.xmlns = "airsync"
page.addToken(0x05, "Sync")
page.addToken(0x06, "Responses")
page.addToken(0x07, "Add")
page.addToken(0x08, "Change")
page.addToken(0x09, "Delete")
page.addToken(0x0A, "Fetch")
page.addToken(0x0B, "SyncKey")
page.addToken(0x0C, "ClientId")
page.addToken(0x0D, "ServerId")
page.addToken(0x0E, "Status")
page.addToken(0x0F, "Collection")
page.addToken(0x10, "Class")
page.addToken(0x12, "CollectionId")
page.addToken(0x13, "GetChanges")
page.addToken(0x14, "MoreAvailable")
page.addToken(0x15, "WindowSize")
page.addToken(0x16, "Commands")
page.addToken(0x17, "Options")
page.addToken(0x18, "FilterType")
page.addToken(0x1B, "Conflict")
page.addToken(0x1C, "Collections")
page.addToken(0x1D, "ApplicationData")
page.addToken(0x1E, "DeletesAsMoves")
page.addToken(0x20, "Supported")
page.addToken(0x21, "SoftDelete")
page.addToken(0x22, "MIMESupport")
page.addToken(0x23, "MIMETruncation")
page.addToken(0x24, "Wait")
page.addToken(0x25, "Limit")
page.addToken(0x26, "Partial")
page.addToken(0x27, "ConversationMode")
page.addToken(0x28, "MaxItems")
page.addToken(0x29, "HeartbeatInterval")
self.codePages.append(page)
# endregion
# Code Page 1: Contacts
# region Contacts Code Page
page = ASWBXMLCodePage()
page.namespace = "Contacts:"
page.xmlns = "contacts"
page.addToken(0x05, "Anniversary")
page.addToken(0x06, "AssistantName")
page.addToken(0x07, "AssistantTelephoneNumber")
page.addToken(0x08, "Birthday")
page.addToken(0x0C, "Business2PhoneNumber")
page.addToken(0x0D, "BusinessCity")
page.addToken(0x0E, "BusinessCountry")
page.addToken(0x0F, "BusinessPostalCode")
page.addToken(0x10, "BusinessState")
page.addToken(0x11, "BusinessStreet")
page.addToken(0x12, "BusinessFaxNumber")
page.addToken(0x13, "BusinessPhoneNumber")
page.addToken(0x14, "CarPhoneNumber")
page.addToken(0x15, "Categories")
page.addToken(0x16, "Category")
page.addToken(0x17, "Children")
page.addToken(0x18, "Child")
page.addToken(0x19, "CompanyName")
page.addToken(0x1A, "Department")
page.addToken(0x1B, "Email1Address")
page.addToken(0x1C, "Email2Address")
page.addToken(0x1D, "Email3Address")
page.addToken(0x1E, "FileAs")
page.addToken(0x1F, "FirstName")
page.addToken(0x20, "Home2PhoneNumber")
page.addToken(0x21, "HomeCity")
page.addToken(0x22, "HomeCountry")
page.addToken(0x23, "HomePostalCode")
page.addToken(0x24, "HomeState")
page.addToken(0x25, "HomeStreet")
page.addToken(0x26, "HomeFaxNumber")
page.addToken(0x27, "HomePhoneNumber")
page.addToken(0x28, "JobTitle")
page.addToken(0x29, "LastName")
page.addToken(0x2A, "MiddleName")
page.addToken(0x2B, "MobilePhoneNumber")
page.addToken(0x2C, "OfficeLocation")
page.addToken(0x2D, "OtherCity")
page.addToken(0x2E, "OtherCountry")
page.addToken(0x2F, "OtherPostalCode")
page.addToken(0x30, "OtherState")
page.addToken(0x31, "OtherStreet")
page.addToken(0x32, "PagerNumber")
page.addToken(0x33, "RadioPhoneNumber")
page.addToken(0x34, "Spouse")
page.addToken(0x35, "Suffix")
page.addToken(0x36, "Title")
page.addToken(0x37, "Webpage")
page.addToken(0x38, "YomiCompanyName")
page.addToken(0x39, "YomiFirstName")
page.addToken(0x3A, "YomiLastName")
page.addToken(0x3C, "Picture")
page.addToken(0x3D, "Alias")
page.addToken(0x3E, "WeightedRank")
self.codePages.append(page)
# endregion
# Code Page 2: Email
# region Email Code Page
page = ASWBXMLCodePage()
page.namespace = "Email:"
page.xmlns = "email"
page.addToken(0x0F, "DateReceived")
page.addToken(0x11, "DisplayTo")
page.addToken(0x12, "Importance")
page.addToken(0x13, "MessageClass")
page.addToken(0x14, "Subject")
page.addToken(0x15, "Read")
page.addToken(0x16, "To")
page.addToken(0x17, "CC")
page.addToken(0x18, "From")
page.addToken(0x19, "ReplyTo")
page.addToken(0x1A, "AllDayEvent")
page.addToken(0x1B, "Categories")
page.addToken(0x1C, "Category")
page.addToken(0x1D, "DTStamp")
page.addToken(0x1E, "EndTime")
page.addToken(0x1F, "InstanceType")
page.addToken(0x20, "BusyStatus")
page.addToken(0x21, "Location")
page.addToken(0x22, "MeetingRequest")
page.addToken(0x23, "Organizer")
page.addToken(0x24, "RecurrenceId")
page.addToken(0x25, "Reminder")
page.addToken(0x26, "ResponseRequested")
page.addToken(0x27, "Recurrences")
page.addToken(0x28, "Recurrence")
page.addToken(0x29, "Recurrence_Type")
page.addToken(0x2A, "Recurrence_Until")
page.addToken(0x2B, "Recurrence_Occurrences")
page.addToken(0x2C, "Recurrence_Interval")
page.addToken(0x2D, "Recurrence_DayOfWeek")
page.addToken(0x2E, "Recurrence_DayOfMonth")
page.addToken(0x2F, "Recurrence_WeekOfMonth")
page.addToken(0x30, "Recurrence_MonthOfYear")
page.addToken(0x31, "StartTime")
page.addToken(0x32, "Sensitivity")
page.addToken(0x33, "TimeZone")
page.addToken(0x34, "GlobalObjId")
page.addToken(0x35, "ThreadTopic")
page.addToken(0x39, "InternetCPID")
page.addToken(0x3A, "Flag")
page.addToken(0x3B, "FlagStatus")
page.addToken(0x3C, "ContentClass")
page.addToken(0x3D, "FlagType")
page.addToken(0x3E, "CompleteTime")
page.addToken(0x3F, "DisallowNewTimeProposal")
self.codePages.append(page)
# endregion
# Code Page 3: AirNotify - retired
# region AirNotify Code Page
page = ASWBXMLCodePage()
page.namespace = ""
page.xmlns = ""
self.codePages.append(page)
# endregion
# Code Page 4: Calendar
# region Calendar Code Page
page = ASWBXMLCodePage()
page.namespace = "Calendar:"
page.xmlns = "calendar"
page.addToken(0x05, "TimeZone")
page.addToken(0x06, "AllDayEvent")
page.addToken(0x07, "Attendees")
page.addToken(0x08, "Attendee")
page.addToken(0x09, "Attendee_Email")
page.addToken(0x0A, "Attendee_Name")
page.addToken(0x0D, "BusyStatus")
page.addToken(0x0E, "Categories")
page.addToken(0x0F, "Category")
page.addToken(0x11, "DTStamp")
page.addToken(0x12, "EndTime")
page.addToken(0x13, "Exception")
page.addToken(0x14, "Exceptions")
page.addToken(0x15, "Exception_Deleted")
page.addToken(0x16, "Exception_StartTime")
page.addToken(0x17, "Location")
page.addToken(0x18, "MeetingStatus")
page.addToken(0x19, "Organizer_Email")
page.addToken(0x1A, "Organizer_Name")
page.addToken(0x1B, "Recurrence")
page.addToken(0x1C, "Recurrence_Type")
page.addToken(0x1D, "Recurrence_Until")
page.addToken(0x1E, "Recurrence_Occurrences")
page.addToken(0x1F, "Recurrence_Interval")
page.addToken(0x20, "Recurrence_DayOfWeek")
page.addToken(0x21, "Recurrence_DayOfMonth")
page.addToken(0x22, "Recurrence_WeekOfMonth")
page.addToken(0x23, "Recurrence_MonthOfYear")
page.addToken(0x24, "Reminder")
page.addToken(0x25, "Sensitivity")
page.addToken(0x26, "Subject")
page.addToken(0x27, "StartTime")
page.addToken(0x28, "UID")
page.addToken(0x29, "Attendee_Status")
page.addToken(0x2A, "Attendee_Type")
page.addToken(0x33, "DisallowNewTimeProposal")
page.addToken(0x34, "ResponseRequested")
page.addToken(0x35, "AppointmentReplyTime")
page.addToken(0x36, "ResponseType")
page.addToken(0x37, "CalendarType")
page.addToken(0x38, "IsLeapMonth")
page.addToken(0x39, "FirstDayOfWeek")
page.addToken(0x3A, "OnlineMeetingConfLink")
page.addToken(0x3B, "OnlineMeetingExternalLink")
self.codePages.append(page)
# endregion
# Code Page 5: Move
# region Move Code Page
page = ASWBXMLCodePage()
page.namespace = "Move:"
page.xmlns = "move"
page.addToken(0x05, "MoveItems")
page.addToken(0x06, "Move")
page.addToken(0x07, "SrcMsgId")
page.addToken(0x08, "SrcFldId")
page.addToken(0x09, "DstFldId")
page.addToken(0x0A, "Response")
page.addToken(0x0B, "Status")
page.addToken(0x0C, "DstMsgId")
self.codePages.append(page)
# endregion
# Code Page 6: ItemEstimate
# region ItemEstimate Code Page
page = ASWBXMLCodePage()
page.namespace = "GetItemEstimate:"
page.xmlns = "getitemestimate"
page.addToken(0x05, "GetItemEstimate")
page.addToken(0x06, "Version")
page.addToken(0x07, "Collections")
page.addToken(0x08, "Collection")
page.addToken(0x09, "Class")
page.addToken(0x0A, "CollectionId")
page.addToken(0x0B, "DateTime")
page.addToken(0x0C, "Estimate")
page.addToken(0x0D, "Response")
page.addToken(0x0E, "Status")
self.codePages.append(page)
# endregion
# Code Page 7: FolderHierarchy
# region FolderHierarchy Code Page
page = ASWBXMLCodePage()
page.namespace = "FolderHierarchy:"
page.xmlns = "folderhierarchy"
page.addToken(0x07, "DisplayName")
page.addToken(0x08, "ServerId")
page.addToken(0x09, "ParentId")
page.addToken(0x0A, "Type")
page.addToken(0x0C, "Status")
page.addToken(0x0E, "Changes")
page.addToken(0x0F, "Add")
page.addToken(0x10, "Delete")
page.addToken(0x11, "Update")
page.addToken(0x12, "SyncKey")
page.addToken(0x13, "FolderCreate")
page.addToken(0x14, "FolderDelete")
page.addToken(0x15, "FolderUpdate")
page.addToken(0x16, "FolderSync")
page.addToken(0x17, "Count")
self.codePages.append(page)
# endregion
# Code Page 8: MeetingResponse
# region MeetingResponse Code Page
page = ASWBXMLCodePage()
page.namespace = "MeetingResponse:"
page.xmlns = "meetingresponse"
page.addToken(0x05, "CalendarId")
page.addToken(0x06, "CollectionId")
page.addToken(0x07, "MeetingResponse")
page.addToken(0x08, "RequestId")
page.addToken(0x09, "Request")
page.addToken(0x0A, "Result")
page.addToken(0x0B, "Status")
page.addToken(0x0C, "UserResponse")
page.addToken(0x0E, "InstanceId")
self.codePages.append(page)
# endregion
# Code Page 9: Tasks
# region Tasks Code Page
page = ASWBXMLCodePage()
page.namespace = "Tasks:"
page.xmlns = "tasks"
page.addToken(0x08, "Categories")
page.addToken(0x09, "Category")
page.addToken(0x0A, "Complete")
page.addToken(0x0B, "DateCompleted")
page.addToken(0x0C, "DueDate")
page.addToken(0x0D, "UTCDueDate")
page.addToken(0x0E, "Importance")
page.addToken(0x0F, "Recurrence")
page.addToken(0x10, "Recurrence_Type")
page.addToken(0x11, "Recurrence_Start")
page.addToken(0x12, "Recurrence_Until")
page.addToken(0x13, "Recurrence_Occurrences")
page.addToken(0x14, "Recurrence_Interval")
page.addToken(0x15, "Recurrence_DayOfMonth")
page.addToken(0x16, "Recurrence_DayOfWeek")
page.addToken(0x17, "Recurrence_WeekOfMonth")
page.addToken(0x18, "Recurrence_MonthOfYear")
page.addToken(0x19, "Recurrence_Regenerate")
page.addToken(0x1A, "Recurrence_DeadOccur")
page.addToken(0x1B, "ReminderSet")
page.addToken(0x1C, "ReminderTime")
page.addToken(0x1D, "Sensitivity")
page.addToken(0x1E, "StartDate")
page.addToken(0x1F, "UTCStartDate")
page.addToken(0x20, "Subject")
page.addToken(0x22, "OrdinalDate")
page.addToken(0x23, "SubOrdinalDate")
page.addToken(0x24, "CalendarType")
page.addToken(0x25, "IsLeapMonth")
page.addToken(0x26, "FirstDayOfWeek")
self.codePages.append(page)
# endregion
# Code Page 10: ResolveRecipients
# region ResolveRecipients Code Page
page = ASWBXMLCodePage()
page.namespace = "ResolveRecipients:"
page.xmlns = "resolverecipients"
page.addToken(0x05, "ResolveRecipients")
page.addToken(0x06, "Response")
page.addToken(0x07, "Status")
page.addToken(0x08, "Type")
page.addToken(0x09, "Recipient")
page.addToken(0x0A, "DisplayName")
page.addToken(0x0B, "EmailAddress")
page.addToken(0x0C, "Certificates")
page.addToken(0x0D, "Certificate")
page.addToken(0x0E, "MiniCertificate")
page.addToken(0x0F, "Options")
page.addToken(0x10, "To")
page.addToken(0x11, "CertificateRetrieval")
page.addToken(0x12, "RecipientCount")
page.addToken(0x13, "MaxCertificates")
page.addToken(0x14, "MaxAmbiguousRecipients")
page.addToken(0x15, "CertificateCount")
page.addToken(0x16, "Availability")
page.addToken(0x17, "StartTime")
page.addToken(0x18, "EndTime")
page.addToken(0x19, "MergedFreeBusy")
page.addToken(0x1A, "Picture")
page.addToken(0x1B, "MaxSize")
page.addToken(0x1C, "Data")
page.addToken(0x1D, "MaxPictures")
self.codePages.append(page)
# endregion
# Code Page 11: ValidateCert
# region ValidateCert Code Page
page = ASWBXMLCodePage()
page.namespace = "ValidateCert:"
page.xmlns = "validatecert"
page.addToken(0x05, "ValidateCert")
page.addToken(0x06, "Certificates")
page.addToken(0x07, "Certificate")
page.addToken(0x08, "CertificateChain")
page.addToken(0x09, "CheckCRL")
page.addToken(0x0A, "Status")
self.codePages.append(page)
# endregion
# Code Page 12: Contacts2
# region Contacts2 Code Page
page = ASWBXMLCodePage()
page.namespace = "Contacts2:"
page.xmlns = "contacts2"
page.addToken(0x05, "CustomerId")
page.addToken(0x06, "GovernmentId")
page.addToken(0x07, "IMAddress")
page.addToken(0x08, "IMAddress2")
page.addToken(0x09, "IMAddress3")
page.addToken(0x0A, "ManagerName")
page.addToken(0x0B, "CompanyMainPhone")
page.addToken(0x0C, "AccountName")
page.addToken(0x0D, "NickName")
page.addToken(0x0E, "MMS")
self.codePages.append(page)
# endregion
# Code Page 13: Ping
# region Ping Code Page
page = ASWBXMLCodePage()
page.namespace = "Ping:"
page.xmlns = "ping"
page.addToken(0x05, "Ping")
page.addToken(0x06, "AutdState") # Per MS-ASWBXML, this tag is not used by protocol
page.addToken(0x07, "Status")
page.addToken(0x08, "HeartbeatInterval")
page.addToken(0x09, "Folders")
page.addToken(0x0A, "Folder")
page.addToken(0x0B, "Id")
page.addToken(0x0C, "Class")
page.addToken(0x0D, "MaxFolders")
self.codePages.append(page)
# endregion
# Code Page 14: Provision
# region Provision Code Page
page = ASWBXMLCodePage()
page.namespace = "Provision:"
page.xmlns = "provision"
page.addToken(0x05, "Provision")
page.addToken(0x06, "Policies")
page.addToken(0x07, "Policy")
page.addToken(0x08, "PolicyType")
page.addToken(0x09, "PolicyKey")
page.addToken(0x0A, "Data")
page.addToken(0x0B, "Status")
page.addToken(0x0C, "RemoteWipe")
page.addToken(0x0D, "EASProvisionDoc")
page.addToken(0x0E, "DevicePasswordEnabled")
page.addToken(0x0F, "AlphanumericDevicePasswordRequired")
page.addToken(0x10, "RequireStorageCardEncryption")
page.addToken(0x11, "PasswordRecoveryEnabled")
page.addToken(0x13, "AttachmentsEnabled")
page.addToken(0x14, "MinDevicePasswordLength")
page.addToken(0x15, "MaxInactivityTimeDeviceLock")
page.addToken(0x16, "MaxDevicePasswordFailedAttempts")
page.addToken(0x17, "MaxAttachmentSize")
page.addToken(0x18, "AllowSimpleDevicePassword")
page.addToken(0x19, "DevicePasswordExpiration")
page.addToken(0x1A, "DevicePasswordHistory")
page.addToken(0x1B, "AllowStorageCard")
page.addToken(0x1C, "AllowCamera")
page.addToken(0x1D, "RequireDeviceEncryption")
page.addToken(0x1E, "AllowUnsignedApplications")
page.addToken(0x1F, "AllowUnsignedInstallationPackages")
page.addToken(0x20, "MinDevicePasswordComplexCharacters")
page.addToken(0x21, "AllowWiFi")
page.addToken(0x22, "AllowTextMessaging")
page.addToken(0x23, "AllowPOPIMAPEmail")
page.addToken(0x24, "AllowBluetooth")
page.addToken(0x25, "AllowIrDA")
page.addToken(0x26, "RequireManualSyncWhenRoaming")
page.addToken(0x27, "AllowDesktopSync")
page.addToken(0x28, "MaxCalendarAgeFilter")
page.addToken(0x29, "AllowHTMLEmail")
page.addToken(0x2A, "MaxEmailAgeFilter")
page.addToken(0x2B, "MaxEmailBodyTruncationSize")
page.addToken(0x2C, "MaxEmailHTMLBodyTruncationSize")
page.addToken(0x2D, "RequireSignedSMIMEMessages")
page.addToken(0x2E, "RequireEncryptedSMIMEMessages")
page.addToken(0x2F, "RequireSignedSMIMEAlgorithm")
page.addToken(0x30, "RequireEncryptionSMIMEAlgorithm")
page.addToken(0x31, "AllowSMIMEEncryptionAlgorithmNegotiation")
page.addToken(0x32, "AllowSMIMESoftCerts")
page.addToken(0x33, "AllowBrowser")
page.addToken(0x34, "AllowConsumerEmail")
page.addToken(0x35, "AllowRemoteDesktop")
page.addToken(0x36, "AllowInternetSharing")
page.addToken(0x37, "UnapprovedInROMApplicationList")
page.addToken(0x38, "ApplicationName")
page.addToken(0x39, "ApprovedApplicationList")
page.addToken(0x3A, "Hash")
self.codePages.append(page)
# endregion
# Code Page 15: Search
# region Search Code Page
page = ASWBXMLCodePage()
page.namespace = "Search:"
page.xmlns = "search"
page.addToken(0x05, "Search")
page.addToken(0x07, "Store")
page.addToken(0x08, "Name")
page.addToken(0x09, "Query")
page.addToken(0x0A, "Options")
page.addToken(0x0B, "Range")
page.addToken(0x0C, "Status")
page.addToken(0x0D, "Response")
page.addToken(0x0E, "Result")
page.addToken(0x0F, "Properties")
page.addToken(0x10, "Total")
page.addToken(0x11, "EqualTo")
page.addToken(0x12, "Value")
page.addToken(0x13, "And")
page.addToken(0x14, "Or")
page.addToken(0x15, "FreeText")
page.addToken(0x17, "DeepTraversal")
page.addToken(0x18, "LongId")
page.addToken(0x19, "RebuildResults")
page.addToken(0x1A, "LessThan")
page.addToken(0x1B, "GreaterThan")
page.addToken(0x1E, "UserName")
page.addToken(0x1F, "Password")
page.addToken(0x20, "ConversationId")
page.addToken(0x21, "Picture")
page.addToken(0x22, "MaxSize")
page.addToken(0x23, "MaxPictures")
self.codePages.append(page)
# endregion
# Code Page 16: GAL
# region GAL Code Page
page = ASWBXMLCodePage()
page.namespace = "GAL:"
page.xmlns = "gal"
page.addToken(0x05, "DisplayName")
page.addToken(0x06, "Phone")
page.addToken(0x07, "Office")
page.addToken(0x08, "Title")
page.addToken(0x09, "Company")
page.addToken(0x0A, "Alias")
page.addToken(0x0B, "FirstName")
page.addToken(0x0C, "LastName")
page.addToken(0x0D, "HomePhone")
page.addToken(0x0E, "MobilePhone")
page.addToken(0x0F, "EmailAddress")
page.addToken(0x10, "Picture")
page.addToken(0x11, "Status")
page.addToken(0x12, "Data")
self.codePages.append(page)
# endregion
# Code Page 17: AirSyncBase
# region AirSyncBase Code Page
page = ASWBXMLCodePage()
page.namespace = "AirSyncBase:"
page.xmlns = "airsyncbase"
page.addToken(0x05, "BodyPreference")
page.addToken(0x06, "Type")
page.addToken(0x07, "TruncationSize")
page.addToken(0x08, "AllOrNone")
page.addToken(0x0A, "Body")
page.addToken(0x0B, "Data")
page.addToken(0x0C, "EstimatedDataSize")
page.addToken(0x0D, "Truncated")
page.addToken(0x0E, "Attachments")
page.addToken(0x0F, "Attachment")
page.addToken(0x10, "DisplayName")
page.addToken(0x11, "FileReference")
page.addToken(0x12, "Method")
page.addToken(0x13, "ContentId")
page.addToken(0x14, "ContentLocation")
page.addToken(0x15, "IsInline")
page.addToken(0x16, "NativeBodyType")
page.addToken(0x17, "ContentType")
page.addToken(0x18, "Preview")
page.addToken(0x19, "BodyPartPreference")
page.addToken(0x1A, "BodyPart")
page.addToken(0x1B, "Status")
self.codePages.append(page)
# endregion
# Code Page 18: Settings
# region Settings Code Page
page = ASWBXMLCodePage()
page.namespace = "Settings:"
page.xmlns = "settings"
page.addToken(0x05, "Settings")
page.addToken(0x06, "Status")
page.addToken(0x07, "Get")
page.addToken(0x08, "Set")
page.addToken(0x09, "Oof")
page.addToken(0x0A, "OofState")
page.addToken(0x0B, "StartTime")
page.addToken(0x0C, "EndTime")
page.addToken(0x0D, "OofMessage")
page.addToken(0x0E, "AppliesToInternal")
page.addToken(0x0F, "AppliesToExternalKnown")
page.addToken(0x10, "AppliesToExternalUnknown")
page.addToken(0x11, "Enabled")
page.addToken(0x12, "ReplyMessage")
page.addToken(0x13, "BodyType")
page.addToken(0x14, "DevicePassword")
page.addToken(0x15, "Password")
page.addToken(0x16, "DeviceInformation")
page.addToken(0x17, "Model")
page.addToken(0x18, "IMEI")
page.addToken(0x19, "FriendlyName")
page.addToken(0x1A, "OS")
page.addToken(0x1B, "OSLanguage")
page.addToken(0x1C, "PhoneNumber")
page.addToken(0x1D, "UserInformation")
page.addToken(0x1E, "EmailAddresses")
page.addToken(0x1F, "SmtpAddress")
page.addToken(0x20, "UserAgent")
page.addToken(0x21, "EnableOutboundSMS")
page.addToken(0x22, "MobileOperator")
page.addToken(0x23, "PrimarySmtpAddress")
page.addToken(0x24, "Accounts")
page.addToken(0x25, "Account")
page.addToken(0x26, "AccountId")
page.addToken(0x27, "AccountName")
page.addToken(0x28, "UserDisplayName")
page.addToken(0x29, "SendDisabled")
page.addToken(0x2B, "RightsManagementInformation")
self.codePages.append(page)
# endregion
# Code Page 19: DocumentLibrary
# region DocumentLibrary Code Page
page = ASWBXMLCodePage()
page.namespace = "DocumentLibrary:"
page.xmlns = "documentlibrary"
page.addToken(0x05, "LinkId")
page.addToken(0x06, "DisplayName")
page.addToken(0x07, "IsFolder")
page.addToken(0x08, "CreationDate")
page.addToken(0x09, "LastModifiedDate")
page.addToken(0x0A, "IsHidden")
page.addToken(0x0B, "ContentLength")
page.addToken(0x0C, "ContentType")
self.codePages.append(page)
# endregion
# Code Page 20: ItemOperations
# region ItemOperations Code Page
page = ASWBXMLCodePage()
page.namespace = "ItemOperations:"
page.xmlns = "itemoperations"
page.addToken(0x05, "ItemOperations")
page.addToken(0x06, "Fetch")
page.addToken(0x07, "Store")
page.addToken(0x08, "Options")
page.addToken(0x09, "Range")
page.addToken(0x0A, "Total")
page.addToken(0x0B, "Properties")
page.addToken(0x0C, "Data")
page.addToken(0x0D, "Status")
page.addToken(0x0E, "Response")
page.addToken(0x0F, "Version")
page.addToken(0x10, "Schema")
page.addToken(0x11, "Part")
page.addToken(0x12, "EmptyFolderContents")
page.addToken(0x13, "DeleteSubFolders")
page.addToken(0x14, "UserName")
page.addToken(0x15, "Password")
page.addToken(0x16, "Move")
page.addToken(0x17, "DstFldId")
page.addToken(0x18, "ConversationId")
page.addToken(0x19, "MoveAlways")
self.codePages.append(page)
# endregion
# Code Page 21: ComposeMail
# region ComposeMail Code Page
page = ASWBXMLCodePage()
page.namespace = "ComposeMail:"
page.xmlns = "composemail"
page.addToken(0x05, "SendMail")
page.addToken(0x06, "SmartForward")
page.addToken(0x07, "SmartReply")
page.addToken(0x08, "SaveInSentItems")
page.addToken(0x09, "ReplaceMime")
page.addToken(0x0B, "Source")
page.addToken(0x0C, "FolderId")
page.addToken(0x0D, "ItemId")
page.addToken(0x0E, "LongId")
page.addToken(0x0F, "InstanceId")
page.addToken(0x10, "MIME")
page.addToken(0x11, "ClientId")
page.addToken(0x12, "Status")
page.addToken(0x13, "AccountId")
self.codePages.append(page)
# endregion
# Code Page 22: Email2
# region Email2 Code Page
page = ASWBXMLCodePage()
page.namespace = "Email2:"
page.xmlns = "email2"
page.addToken(0x05, "UmCallerID")
page.addToken(0x06, "UmUserNotes")
page.addToken(0x07, "UmAttDuration")
page.addToken(0x08, "UmAttOrder")
page.addToken(0x09, "ConversationId")
page.addToken(0x0A, "ConversationIndex")
page.addToken(0x0B, "LastVerbExecuted")
page.addToken(0x0C, "LastVerbExecutionTime")
page.addToken(0x0D, "ReceivedAsBcc")
page.addToken(0x0E, "Sender")
page.addToken(0x0F, "CalendarType")
page.addToken(0x10, "IsLeapMonth")
page.addToken(0x11, "AccountId")
page.addToken(0x12, "FirstDayOfWeek")
page.addToken(0x13, "MeetingMessageType")
self.codePages.append(page)
# endregion
# Code Page 23: Notes
# region Notes Code Page
page = ASWBXMLCodePage()
page.namespace = "Notes:"
page.xmlns = "notes"
page.addToken(0x05, "Subject")
page.addToken(0x06, "MessageClass")
page.addToken(0x07, "LastModifiedDate")
page.addToken(0x08, "Categories")
page.addToken(0x09, "Category")
self.codePages.append(page)
# endregion
# Code Page 24: RightsManagement
# region RightsManagement Code Page
page = ASWBXMLCodePage()
page.namespace = "RightsManagement:"
page.xmlns = "rightsmanagement"
page.addToken(0x05, "RightsManagementSupport")
page.addToken(0x06, "RightsManagementTemplates")
page.addToken(0x07, "RightsManagementTemplate")
page.addToken(0x08, "RightsManagementLicense")
page.addToken(0x09, "EditAllowed")
page.addToken(0x0A, "ReplyAllowed")
page.addToken(0x0B, "ReplyAllAllowed")
page.addToken(0x0C, "ForwardAllowed")
page.addToken(0x0D, "ModifyRecipientsAllowed")
page.addToken(0x0E, "ExtractAllowed")
page.addToken(0x0F, "PrintAllowed")
page.addToken(0x10, "ExportAllowed")
page.addToken(0x11, "ProgrammaticAccessAllowed")
page.addToken(0x12, "RMOwner")
page.addToken(0x13, "ContentExpiryDate")
page.addToken(0x14, "TemplateID")
page.addToken(0x15, "TemplateName")
page.addToken(0x16, "TemplateDescription")
page.addToken(0x17, "ContentOwner")
page.addToken(0x18, "RemoveRightsManagementDistribution")
self.codePages.append(page)
# endregion
# endregion
def loadXml(self, strXML):
# note xmlDoc has .childNodes and .parentNode
self.xmlDoc = xml.dom.minidom.parseString(strXML)
def getXml(self):
if (self.xmlDoc != None):
try:
return self.xmlDoc.toprettyxml(indent=" ", newl="\n")
except:
return self.xmlDoc.toxml()
def loadBytes(self, byteWBXML):
currentNode = self.xmlDoc
wbXMLBytes = ASWBXMLByteQueue(byteWBXML)
# Version is ignored
version = wbXMLBytes.dequeueAndLog()
# Public Identifier is ignored
publicId = wbXMLBytes.dequeueMultibyteInt()
logging.debug("Version: %d, Public Identifier: %d" % (version, publicId))
# Character set
# Currently only UTF-8 is supported, throw if something else
charset = wbXMLBytes.dequeueMultibyteInt()
if (charset != 0x6A):
raise InvalidDataException("ASWBXML only supports UTF-8 encoded XML.")
# String table length
# This should be 0, MS-ASWBXML does not use string tables
stringTableLength = wbXMLBytes.dequeueMultibyteInt()
if (stringTableLength != 0):
raise InvalidDataException("WBXML data contains a string table.")
# Now we should be at the body of the data.
# Add the declaration
unusedArray = [GlobalTokens.ENTITY, GlobalTokens.EXT_0, GlobalTokens.EXT_1, GlobalTokens.EXT_2, GlobalTokens.EXT_I_0, GlobalTokens.EXT_I_1, GlobalTokens.EXT_I_2, GlobalTokens.EXT_T_0, GlobalTokens.EXT_T_1, GlobalTokens.EXT_T_2, GlobalTokens.LITERAL, GlobalTokens.LITERAL_A, GlobalTokens.LITERAL_AC, GlobalTokens.LITERAL_C, GlobalTokens.PI, GlobalTokens.STR_T]
while ( wbXMLBytes.qsize() > 0):
currentByte = wbXMLBytes.dequeueAndLog()
if ( currentByte == GlobalTokens.SWITCH_PAGE ):
newCodePage = wbXMLBytes.dequeueAndLog()
if (newCodePage >= 0 and newCodePage < 25):
self.currentCodePage = newCodePage
else:
raise InvalidDataException("Unknown code page ID 0x{0:X} encountered in WBXML".format(currentByte))
elif ( currentByte == GlobalTokens.END ):
if (currentNode != None and currentNode.parentNode != None):
currentNode = currentNode.parentNode
else:
raise InvalidDataException("END global token encountered out of sequence")
break
elif ( currentByte == GlobalTokens.OPAQUE ):
CDATALength = wbXMLBytes.dequeueMultibyteInt()
newOpaqueNode = self.xmlDoc.createCDATASection(wbXMLBytes.dequeueString(CDATALength))
currentNode.appendChild(newOpaqueNode)
elif ( currentByte == GlobalTokens.STR_I ):
newTextNode = self.xmlDoc.createTextNode(wbXMLBytes.dequeueString())
currentNode.appendChild(newTextNode)
elif ( currentByte in unusedArray):
raise InvalidDataException("Encountered unknown global token 0x{0:X}.".format(currentByte))
else:
hasAttributes = (currentByte & 0x80) > 0
hasContent = (currentByte & 0x40) > 0
token = currentByte & 0x3F
if (hasAttributes):
raise InvalidDataException("Token 0x{0:X} has attributes.".format(token))
strTag = self.codePages[self.currentCodePage].getTag(token)
if (strTag == None):
strTag = "UNKNOWN_TAG_{0,2:X}".format(token)
newNode = self.xmlDoc.createElement(strTag)
# not sure if this should be set on every node or not
#newNode.setAttribute("xmlns", self.codePages[self.currentCodePage].xmlns)
currentNode.appendChild(newNode)
if (hasContent):
currentNode = newNode
logging.debug("Total bytes dequeued: %d" % wbXMLBytes.bytesDequeued)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/contrib/click/__init__.py | mitmproxy/contrib/click/__init__.py | """
SPDX-License-Identifier: BSD-3-Clause
A vendored copy of click.style() @ 4f7b255
"""
import typing as t
_ansi_colors = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
"reset": 39,
"bright_black": 90,
"bright_red": 91,
"bright_green": 92,
"bright_yellow": 93,
"bright_blue": 94,
"bright_magenta": 95,
"bright_cyan": 96,
"bright_white": 97,
}
_ansi_reset_all = "\033[0m"
def _interpret_color(
color: t.Union[int, t.Tuple[int, int, int], str], offset: int = 0
) -> str:
if isinstance(color, int):
return f"{38 + offset};5;{color:d}"
if isinstance(color, (tuple, list)):
r, g, b = color
return f"{38 + offset};2;{r:d};{g:d};{b:d}"
return str(_ansi_colors[color] + offset)
def style(
text: t.Any,
fg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
bg: t.Optional[t.Union[int, t.Tuple[int, int, int], str]] = None,
bold: t.Optional[bool] = None,
dim: t.Optional[bool] = None,
underline: t.Optional[bool] = None,
overline: t.Optional[bool] = None,
italic: t.Optional[bool] = None,
blink: t.Optional[bool] = None,
reverse: t.Optional[bool] = None,
strikethrough: t.Optional[bool] = None,
reset: bool = True,
) -> str:
"""Styles a text with ANSI styles and returns the new string. By
default the styling is self contained which means that at the end
of the string a reset code is issued. This can be prevented by
passing ``reset=False``.
Examples::
click.echo(click.style('Hello World!', fg='green'))
click.echo(click.style('ATTENTION!', blink=True))
click.echo(click.style('Some things', reverse=True, fg='cyan'))
click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))
Supported color names:
* ``black`` (might be a gray)
* ``red``
* ``green``
* ``yellow`` (might be an orange)
* ``blue``
* ``magenta``
* ``cyan``
* ``white`` (might be light gray)
* ``bright_black``
* ``bright_red``
* ``bright_green``
* ``bright_yellow``
* ``bright_blue``
* ``bright_magenta``
* ``bright_cyan``
* ``bright_white``
* ``reset`` (reset the color code only)
If the terminal supports it, color may also be specified as:
- An integer in the interval [0, 255]. The terminal must support
8-bit/256-color mode.
- An RGB tuple of three integers in [0, 255]. The terminal must
support 24-bit/true-color mode.
See https://en.wikipedia.org/wiki/ANSI_color and
https://gist.github.com/XVilka/8346728 for more information.
:param text: the string to style with ansi codes.
:param fg: if provided this will become the foreground color.
:param bg: if provided this will become the background color.
:param bold: if provided this will enable or disable bold mode.
:param dim: if provided this will enable or disable dim mode. This is
badly supported.
:param underline: if provided this will enable or disable underline.
:param overline: if provided this will enable or disable overline.
:param italic: if provided this will enable or disable italic.
:param blink: if provided this will enable or disable blinking.
:param reverse: if provided this will enable or disable inverse
rendering (foreground becomes background and the
other way round).
:param strikethrough: if provided this will enable or disable
striking through text.
:param reset: by default a reset-all code is added at the end of the
string which means that styles do not carry over. This
can be disabled to compose styles.
.. versionchanged:: 8.0
A non-string ``message`` is converted to a string.
.. versionchanged:: 8.0
Added support for 256 and RGB color codes.
.. versionchanged:: 8.0
Added the ``strikethrough``, ``italic``, and ``overline``
parameters.
.. versionchanged:: 7.0
Added support for bright colors.
.. versionadded:: 2.0
"""
if not isinstance(text, str):
text = str(text)
bits = []
if fg:
try:
bits.append(f"\033[{_interpret_color(fg)}m")
except KeyError:
raise TypeError(f"Unknown color {fg!r}") from None
if bg:
try:
bits.append(f"\033[{_interpret_color(bg, 10)}m")
except KeyError:
raise TypeError(f"Unknown color {bg!r}") from None
if bold is not None:
bits.append(f"\033[{1 if bold else 22}m")
if dim is not None:
bits.append(f"\033[{2 if dim else 22}m")
if underline is not None:
bits.append(f"\033[{4 if underline else 24}m")
if overline is not None:
bits.append(f"\033[{53 if overline else 55}m")
if italic is not None:
bits.append(f"\033[{3 if italic else 23}m")
if blink is not None:
bits.append(f"\033[{5 if blink else 25}m")
if reverse is not None:
bits.append(f"\033[{7 if reverse else 27}m")
if strikethrough is not None:
bits.append(f"\033[{9 if strikethrough else 29}m")
bits.append(text)
if reset:
bits.append(_ansi_reset_all)
return "".join(bits)
__all__ = ["style"]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/block.py | mitmproxy/addons/block.py | import ipaddress
import logging
from mitmproxy import ctx
from mitmproxy.proxy import mode_specs
class Block:
    """Addon that rejects client connections based on their source IP address."""

    def load(self, loader):
        """Register the blocking options."""
        loader.add_option(
            "block_global",
            bool,
            True,
            """
            Block connections from public IP addresses.
            """,
        )
        loader.add_option(
            "block_private",
            bool,
            False,
            """
            Block connections from local (private) IP addresses.
            This option does not affect loopback addresses (connections from the local machine),
            which are always permitted.
            """,
        )

    def client_connected(self, client):
        """Mark the connection as errored if the peer address is blocked."""
        # Drop a possible IPv6 zone identifier ("%eth0") before parsing.
        host = client.peername[0].rsplit("%", 1)[0]
        addr = ipaddress.ip_address(host)
        if isinstance(addr, ipaddress.IPv6Address):
            # Treat IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) as plain IPv4.
            addr = addr.ipv4_mapped or addr
        # Loopback clients and local-capture mode are always allowed.
        if addr.is_loopback or isinstance(client.proxy_mode, mode_specs.LocalMode):
            return
        if ctx.options.block_private and addr.is_private:
            logging.warning(
                f"Client connection from {client.peername[0]} killed by block_private option."
            )
            client.error = "Connection killed by block_private."
        if ctx.options.block_global and addr.is_global:
            logging.warning(
                f"Client connection from {client.peername[0]} killed by block_global option."
            )
            client.error = "Connection killed by block_global."
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/errorcheck.py | mitmproxy/addons/errorcheck.py | import asyncio
import logging
import sys
from mitmproxy import log
from mitmproxy.contrib import click as miniclick
from mitmproxy.utils import vt_codes
class ErrorCheck:
    """Monitor startup for error log entries, and terminate immediately if there are some."""

    repeat_errors_on_stderr: bool
    """
    Repeat all errors on stderr before exiting.
    This is useful for the console UI, which otherwise swallows all output.
    """

    def __init__(self, repeat_errors_on_stderr: bool = False) -> None:
        self.repeat_errors_on_stderr = repeat_errors_on_stderr
        # Start capturing ERROR records right away.
        self.logger = ErrorCheckHandler()
        self.logger.install()

    def finish(self):
        """Stop monitoring; startup is considered complete."""
        self.logger.uninstall()

    async def shutdown_if_errored(self):
        """Exit the process if any errors were logged during startup."""
        # Don't run immediately: yield once so pending logging tasks finish first.
        await asyncio.sleep(0)
        errors = self.logger.has_errored
        if not errors:
            return
        plural = "s" if len(errors) > 1 else ""
        if self.repeat_errors_on_stderr:
            message = f"Error{plural} logged during startup:"
            if vt_codes.ensure_supported(sys.stderr):  # pragma: no cover
                message = miniclick.style(message, fg="red")
            details = "\n".join(self.logger.format(r) for r in errors)
            print(f"{message}\n{details}", file=sys.stderr)
        else:
            print(f"Error{plural} logged during startup, exiting...", file=sys.stderr)
        sys.exit(1)
class ErrorCheckHandler(log.MitmLogHandler):
    """Log handler that records every ERROR-level record for later inspection."""

    def __init__(self) -> None:
        # Only capture records at ERROR severity or above.
        super().__init__(logging.ERROR)
        # All error records seen so far; read by ErrorCheck.shutdown_if_errored.
        self.has_errored: list[logging.LogRecord] = []

    def emit(self, record: logging.LogRecord) -> None:
        # Just remember the record; reporting happens elsewhere.
        self.has_errored.append(record)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/dns_resolver.py | mitmproxy/addons/dns_resolver.py | from __future__ import annotations
import asyncio
import ipaddress
import logging
import socket
from collections.abc import Sequence
from functools import cache
from typing import Protocol
import mitmproxy_rs
from mitmproxy import ctx
from mitmproxy import dns
from mitmproxy.flow import Error
from mitmproxy.proxy import mode_specs
logger = logging.getLogger(__name__)
class DnsResolver:
    """Addon that answers DNS queries for flows in regular DNS/WireGuard proxy modes."""

    def load(self, loader):
        loader.add_option(
            "dns_use_hosts_file",
            bool,
            True,
            "Use the hosts file for DNS lookups in regular DNS mode/wireguard mode.",
        )
        loader.add_option(
            "dns_name_servers",
            Sequence[str],
            [],
            "Name servers to use for lookups in regular DNS mode/wireguard mode. Default: operating system's name servers",
        )

    def configure(self, updated):
        # Both cached properties below depend on these options, so the caches
        # must be invalidated whenever either option changes.
        if "dns_use_hosts_file" in updated or "dns_name_servers" in updated:
            self.resolver.cache_clear()
            self.name_servers.cache_clear()

    @cache
    def name_servers(self) -> list[str]:
        """
        Returns the operating system's name servers unless custom name servers are set.
        On error, an empty list is returned.
        """
        try:
            return (
                ctx.options.dns_name_servers
                or mitmproxy_rs.dns.get_system_dns_servers()
            )
        except RuntimeError as e:
            logger.warning(
                f"Failed to get system dns servers: {e}\n"
                f"The dns_name_servers option needs to be set manually."
            )
            return []

    @cache
    def resolver(self) -> Resolver:
        """
        Returns:
            The DNS resolver to use.
        Raises:
            MissingNameServers, if name servers are unknown and `dns_use_hosts_file` is disabled.
        """
        if ns := self.name_servers():
            # We always want to use our own resolver if name server info is available.
            return mitmproxy_rs.dns.DnsResolver(
                name_servers=ns,
                use_hosts_file=ctx.options.dns_use_hosts_file,
            )
        elif ctx.options.dns_use_hosts_file:
            # Fallback to getaddrinfo as hickory's resolver isn't as reliable
            # as we would like it to be (https://github.com/mitmproxy/mitmproxy/issues/7064).
            return GetaddrinfoFallbackResolver()
        else:
            raise MissingNameServers()

    async def dns_request(self, flow: dns.DNSFlow) -> None:
        # Hook: answer (or forward) DNS queries for flows we are responsible for.
        if self._should_resolve(flow):
            # Plain A/AAAA IN queries can be answered with a local IP lookup.
            all_ip_lookups = (
                flow.request.query
                and flow.request.op_code == dns.op_codes.QUERY
                and flow.request.question
                and flow.request.question.class_ == dns.classes.IN
                and flow.request.question.type in (dns.types.A, dns.types.AAAA)
            )
            if all_ip_lookups:
                try:
                    flow.response = await self.resolve(flow.request)
                except MissingNameServers:
                    flow.error = Error("Cannot resolve, dns_name_servers unknown.")
            elif name_servers := self.name_servers():
                # For other records, the best we can do is to forward the query
                # to an upstream server.
                flow.server_conn.address = (name_servers[0], 53)
            else:
                flow.error = Error("Cannot resolve, dns_name_servers unknown.")

    @staticmethod
    def _should_resolve(flow: dns.DNSFlow) -> bool:
        # Only resolve live, not-yet-answered flows, and only in plain DNS mode
        # or for WireGuard-mode queries addressed to the built-in 10.0.0.53 resolver.
        return (
            (
                isinstance(flow.client_conn.proxy_mode, mode_specs.DnsMode)
                or (
                    isinstance(flow.client_conn.proxy_mode, mode_specs.WireGuardMode)
                    and flow.server_conn.address == ("10.0.0.53", 53)
                )
            )
            and flow.live
            and not flow.response
            and not flow.error
        )

    async def resolve(
        self,
        message: dns.DNSMessage,
    ) -> dns.DNSMessage:
        """Answer an A/AAAA query, mapping getaddrinfo errors to DNS response codes."""
        q = message.question
        assert q
        try:
            if q.type == dns.types.A:
                ip_addrs = await self.resolver().lookup_ipv4(q.name)
            else:
                ip_addrs = await self.resolver().lookup_ipv6(q.name)
        except socket.gaierror as e:
            match e.args[0]:
                case socket.EAI_NONAME:
                    # The name does not exist at all.
                    return message.fail(dns.response_codes.NXDOMAIN)
                case socket.EAI_NODATA:
                    # The name exists, but has no records of this type: empty answer.
                    ip_addrs = []
                case _:
                    return message.fail(dns.response_codes.SERVFAIL)
        return message.succeed(
            [
                dns.ResourceRecord(
                    name=q.name,
                    type=q.type,
                    class_=q.class_,
                    ttl=dns.ResourceRecord.DEFAULT_TTL,
                    data=ipaddress.ip_address(ip).packed,
                )
                for ip in ip_addrs
            ]
        )
class Resolver(Protocol):
    """Structural interface a DNS resolver implementation must provide."""

    async def lookup_ip(self, domain: str) -> list[str]:  # pragma: no cover
        ...

    async def lookup_ipv4(self, domain: str) -> list[str]:  # pragma: no cover
        ...

    async def lookup_ipv6(self, domain: str) -> list[str]:  # pragma: no cover
        ...
class GetaddrinfoFallbackResolver(Resolver):
    """Resolver backed by the operating system's getaddrinfo.

    Used as a fallback so that hosts-file entries are still honored when no
    name servers are available.
    """

    async def lookup_ip(self, domain: str) -> list[str]:
        return await self._lookup(domain, socket.AF_UNSPEC)

    async def lookup_ipv4(self, domain: str) -> list[str]:
        return await self._lookup(domain, socket.AF_INET)

    async def lookup_ipv6(self, domain: str) -> list[str]:
        return await self._lookup(domain, socket.AF_INET6)

    async def _lookup(self, domain: str, family: socket.AddressFamily) -> list[str]:
        loop = asyncio.get_running_loop()
        infos = await loop.getaddrinfo(
            host=domain,
            port=None,
            family=family,
            type=socket.SOCK_STREAM,
        )
        # Each entry is (family, type, proto, canonname, sockaddr); the IP
        # address string is the first element of sockaddr.
        return [sockaddr[0] for *_rest, sockaddr in infos]
class MissingNameServers(RuntimeError):
    """Raised when resolution is impossible because no name servers are known."""

    pass
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/serverplayback.py | mitmproxy/addons/serverplayback.py | import hashlib
import logging
import urllib
from collections.abc import Hashable
from collections.abc import Sequence
from typing import Any
import mitmproxy.types
from mitmproxy import command
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import hooks
from mitmproxy import http
from mitmproxy import io
logger = logging.getLogger(__name__)
HASH_OPTIONS = [
"server_replay_ignore_content",
"server_replay_ignore_host",
"server_replay_ignore_params",
"server_replay_ignore_payload_params",
"server_replay_ignore_port",
"server_replay_use_headers",
]
class ServerPlayback:
    """Addon that replays recorded server responses for matching requests."""

    # Replayable flows, keyed by the loose request hash computed in `_hash`.
    flowmap: dict[Hashable, list[http.HTTPFlow]]
    # Whether the `server_replay` startup option has already been consumed.
    configured: bool

    def __init__(self):
        self.flowmap = {}
        self.configured = False

    def load(self, loader):
        loader.add_option(
            "server_replay_kill_extra",
            bool,
            False,
            "Kill extra requests during replay (for which no replayable response was found)."
            "[Deprecated, prefer to use server_replay_extra='kill']",
        )
        loader.add_option(
            "server_replay_extra",
            str,
            "forward",
            "Behaviour for extra requests during replay for which no replayable response was found. "
            "Setting a numeric string value will return an empty HTTP response with the respective status code.",
            choices=["forward", "kill", "204", "400", "404", "500"],
        )
        loader.add_option(
            "server_replay_reuse",
            bool,
            False,
            """
            Don't remove flows from server replay state after use. This makes it
            possible to replay same response multiple times.
            """,
        )
        loader.add_option(
            "server_replay_nopop",
            bool,
            False,
            """
            Deprecated alias for `server_replay_reuse`.
            """,
        )
        loader.add_option(
            "server_replay_refresh",
            bool,
            True,
            """
            Refresh server replay responses by adjusting date, expires and
            last-modified headers, as well as adjusting cookie expiration.
            """,
        )
        loader.add_option(
            "server_replay_use_headers",
            Sequence[str],
            [],
            """
            Request headers that need to match while searching for a saved flow
            to replay.
            """,
        )
        loader.add_option(
            "server_replay",
            Sequence[str],
            [],
            "Replay server responses from a saved file.",
        )
        loader.add_option(
            "server_replay_ignore_content",
            bool,
            False,
            "Ignore request content while searching for a saved flow to replay.",
        )
        loader.add_option(
            "server_replay_ignore_params",
            Sequence[str],
            [],
            """
            Request parameters to be ignored while searching for a saved flow
            to replay.
            """,
        )
        loader.add_option(
            "server_replay_ignore_payload_params",
            Sequence[str],
            [],
            """
            Request payload parameters (application/x-www-form-urlencoded or
            multipart/form-data) to be ignored while searching for a saved flow
            to replay.
            """,
        )
        loader.add_option(
            "server_replay_ignore_host",
            bool,
            False,
            """
            Ignore request destination host while searching for a saved flow
            to replay.
            """,
        )
        loader.add_option(
            "server_replay_ignore_port",
            bool,
            False,
            """
            Ignore request destination port while searching for a saved flow
            to replay.
            """,
        )

    @command.command("replay.server")
    def load_flows(self, flows: Sequence[flow.Flow]) -> None:
        """
        Replay server responses from flows.
        """
        # Replaces any previously loaded replay state.
        self.flowmap = {}
        self.add_flows(flows)

    @command.command("replay.server.add")
    def add_flows(self, flows: Sequence[flow.Flow]) -> None:
        """
        Add responses from flows to server replay list.
        """
        # Non-HTTP flows are silently ignored.
        for f in flows:
            if isinstance(f, http.HTTPFlow):
                lst = self.flowmap.setdefault(self._hash(f), [])
                lst.append(f)
        ctx.master.addons.trigger(hooks.UpdateHook([]))

    @command.command("replay.server.file")
    def load_file(self, path: mitmproxy.types.Path) -> None:
        """Load replay flows from a file on disk."""
        try:
            flows = io.read_flows_from_paths([path])
        except exceptions.FlowReadException as e:
            raise exceptions.CommandError(str(e))
        self.load_flows(flows)

    @command.command("replay.server.stop")
    def clear(self) -> None:
        """
        Stop server replay.
        """
        self.flowmap = {}
        ctx.master.addons.trigger(hooks.UpdateHook([]))

    @command.command("replay.server.count")
    def count(self) -> int:
        """Return the number of replayable flows currently loaded."""
        return sum(len(i) for i in self.flowmap.values())

    def _hash(self, flow: http.HTTPFlow) -> Hashable:
        """
        Calculates a loose hash of the flow request.

        Which request attributes participate is controlled by the
        server_replay_ignore_*/server_replay_use_headers options.
        """
        r = flow.request
        _, _, path, _, query, _ = urllib.parse.urlparse(r.url)
        queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
        # Scheme, method and path always participate in the hash.
        key: list[Any] = [str(r.scheme), str(r.method), str(path)]
        if not ctx.options.server_replay_ignore_content:
            if ctx.options.server_replay_ignore_payload_params and r.multipart_form:
                # Multipart keys are bytes; decode for the ignore-list comparison.
                key.extend(
                    (k, v)
                    for k, v in r.multipart_form.items(multi=True)
                    if k.decode(errors="replace")
                    not in ctx.options.server_replay_ignore_payload_params
                )
            elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:
                key.extend(
                    (k, v)
                    for k, v in r.urlencoded_form.items(multi=True)
                    if k not in ctx.options.server_replay_ignore_payload_params
                )
            else:
                # No ignorable form params: hash the raw body as-is.
                key.append(str(r.raw_content))
        if not ctx.options.server_replay_ignore_host:
            key.append(r.pretty_host)
        if not ctx.options.server_replay_ignore_port:
            key.append(r.port)
        # Query parameters, minus the explicitly ignored ones.
        filtered = []
        ignore_params = ctx.options.server_replay_ignore_params or []
        for p in queriesArray:
            if p[0] not in ignore_params:
                filtered.append(p)
        for p in filtered:
            key.append(p[0])
            key.append(p[1])
        if ctx.options.server_replay_use_headers:
            headers = []
            for i in ctx.options.server_replay_use_headers:
                v = r.headers.get(i)
                headers.append((i, v))
            key.append(headers)
        return hashlib.sha256(repr(key).encode("utf8", "surrogateescape")).digest()

    def next_flow(self, flow: http.HTTPFlow) -> http.HTTPFlow | None:
        """
        Returns the next flow object, or None if no matching flow was
        found.
        """
        hash = self._hash(flow)
        if hash in self.flowmap:
            if ctx.options.server_replay_reuse or ctx.options.server_replay_nopop:
                # Reuse mode: return the first flow with a response, keep it stored.
                return next(
                    (flow for flow in self.flowmap[hash] if flow.response), None
                )
            else:
                # Consume mode: pop entries until one with a response is found.
                ret = self.flowmap[hash].pop(0)
                while not ret.response:
                    if self.flowmap[hash]:
                        ret = self.flowmap[hash].pop(0)
                    else:
                        # Bucket exhausted without a usable response.
                        del self.flowmap[hash]
                        return None
                # Drop the bucket once it is empty.
                if not self.flowmap[hash]:
                    del self.flowmap[hash]
                return ret
        else:
            return None

    def configure(self, updated):
        if ctx.options.server_replay_kill_extra:
            logger.warning(
                "server_replay_kill_extra has been deprecated, "
                "please update your config to use server_replay_extra='kill'."
            )
        if ctx.options.server_replay_nopop:  # pragma: no cover
            logger.error(
                "server_replay_nopop has been renamed to server_replay_reuse, please update your config."
            )
        # Load the startup replay file(s) exactly once.
        if not self.configured and ctx.options.server_replay:
            self.configured = True
            try:
                flows = io.read_flows_from_paths(ctx.options.server_replay)
            except exceptions.FlowReadException as e:
                raise exceptions.OptionsError(str(e))
            self.load_flows(flows)
        # Any option that feeds into `_hash` invalidates the current flowmap keys.
        if any(option in updated for option in HASH_OPTIONS):
            self.recompute_hashes()

    def recompute_hashes(self) -> None:
        """
        Rebuild flowmap if the hashing method has changed during execution,
        see https://github.com/mitmproxy/mitmproxy/issues/4506
        """
        flows = [flow for lst in self.flowmap.values() for flow in lst]
        self.load_flows(flows)

    def request(self, f: http.HTTPFlow) -> None:
        # Hook: short-circuit the request with a recorded response when possible.
        if self.flowmap:
            rflow = self.next_flow(f)
            if rflow:
                assert rflow.response
                response = rflow.response.copy()
                if ctx.options.server_replay_refresh:
                    response.refresh()
                f.response = response
                f.is_replay = "response"
            elif (
                ctx.options.server_replay_kill_extra
                or ctx.options.server_replay_extra == "kill"
            ):
                logging.warning(
                    "server_playback: killed non-replay request {}".format(
                        f.request.url
                    )
                )
                f.kill()
            elif ctx.options.server_replay_extra != "forward":
                # Numeric option value: answer with an empty response of that status.
                logging.warning(
                    "server_playback: returned {} non-replay request {}".format(
                        ctx.options.server_replay_extra, f.request.url
                    )
                )
                f.response = http.Response.make(int(ctx.options.server_replay_extra))
                f.is_replay = "response"
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/tlsconfig.py | mitmproxy/addons/tlsconfig.py | import ipaddress
import logging
import os
import ssl
import urllib.parse
from pathlib import Path
from typing import Any
from typing import Literal
from typing import TypedDict
from aioquic.h3.connection import H3_ALPN
from aioquic.tls import CipherSuite
from cryptography import x509
from OpenSSL import SSL
from mitmproxy import certs
from mitmproxy import connection
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import tls
from mitmproxy.net import tls as net_tls
from mitmproxy.options import CONF_BASENAME
from mitmproxy.proxy import context
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.layers import quic
from mitmproxy.proxy.layers import tls as proxy_tls
logger = logging.getLogger(__name__)
# We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default.
# https://ssl-config.mozilla.org/#config=old
_DEFAULT_CIPHERS = (
"ECDHE-ECDSA-AES128-GCM-SHA256",
"ECDHE-RSA-AES128-GCM-SHA256",
"ECDHE-ECDSA-AES256-GCM-SHA384",
"ECDHE-RSA-AES256-GCM-SHA384",
"ECDHE-ECDSA-CHACHA20-POLY1305",
"ECDHE-RSA-CHACHA20-POLY1305",
"DHE-RSA-AES128-GCM-SHA256",
"DHE-RSA-AES256-GCM-SHA384",
"DHE-RSA-CHACHA20-POLY1305",
"ECDHE-ECDSA-AES128-SHA256",
"ECDHE-RSA-AES128-SHA256",
"ECDHE-ECDSA-AES128-SHA",
"ECDHE-RSA-AES128-SHA",
"ECDHE-ECDSA-AES256-SHA384",
"ECDHE-RSA-AES256-SHA384",
"ECDHE-ECDSA-AES256-SHA",
"ECDHE-RSA-AES256-SHA",
"DHE-RSA-AES128-SHA256",
"DHE-RSA-AES256-SHA256",
"AES128-GCM-SHA256",
"AES256-GCM-SHA384",
"AES128-SHA256",
"AES256-SHA256",
"AES128-SHA",
"AES256-SHA",
"DES-CBC3-SHA",
)
_DEFAULT_CIPHERS_WITH_SECLEVEL_0 = ("@SECLEVEL=0", *_DEFAULT_CIPHERS)
def _default_ciphers(
    min_tls_version: net_tls.Version,
) -> tuple[str, ...]:
    """Return the default cipher list for the given minimum TLS version.

    @SECLEVEL=0 is necessary for TLS 1.1 and below to work,
    see https://github.com/pyca/cryptography/issues/9523
    """
    insecure = min_tls_version in net_tls.INSECURE_TLS_MIN_VERSIONS
    return _DEFAULT_CIPHERS_WITH_SECLEVEL_0 if insecure else _DEFAULT_CIPHERS
# 2022/05: X509_CHECK_FLAG_NEVER_CHECK_SUBJECT is not available in LibreSSL, ignore gracefully as it's not critical.
DEFAULT_HOSTFLAGS = (
SSL._lib.X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS # type: ignore
| getattr(SSL._lib, "X509_CHECK_FLAG_NEVER_CHECK_SUBJECT", 0) # type: ignore
)
class AppData(TypedDict):
    """Per-connection data stored on the SSL object, read by alpn_select_callback."""

    # ALPN to force for the client connection, or None if not forced.
    client_alpn: bytes | None
    # ALPN negotiated with the server; b"" means the server refused to negotiate.
    server_alpn: bytes | None
    # Whether HTTP/2 ALPNs may be selected (mirrors the http2 option).
    http2: bool
def alpn_select_callback(conn: SSL.Connection, options: list[bytes]) -> Any:
    """Pick the ALPN protocol for a client connection.

    Preference order: a forced client ALPN, the protocol already negotiated
    with the server, then the first acceptable HTTP protocol the client offers.
    """
    app_data: AppData = conn.get_app_data()
    client_alpn = app_data["client_alpn"]
    server_alpn = app_data["server_alpn"]

    # A forced client ALPN either wins outright or vetoes negotiation.
    if client_alpn is not None:
        return client_alpn if client_alpn in options else SSL.NO_OVERLAPPING_PROTOCOLS

    if server_alpn:
        if server_alpn in options:
            return server_alpn
    elif server_alpn == b"":
        # We do have a server connection, but the remote server refused to negotiate a protocol:
        # We need to mirror this on the client connection.
        return SSL.NO_OVERLAPPING_PROTOCOLS

    allowed = proxy_tls.HTTP_ALPNS if app_data["http2"] else proxy_tls.HTTP1_ALPNS
    # The client sends protocols in order of preference, so we are nice and respect that.
    return next(
        (alpn for alpn in options if alpn in allowed),
        SSL.NO_OVERLAPPING_PROTOCOLS,
    )
class TlsConfig:
"""
This addon supplies the proxy core with the desired OpenSSL connection objects to negotiate TLS.
"""
certstore: certs.CertStore = None # type: ignore
# TODO: We should support configuring TLS 1.3 cipher suites (https://github.com/mitmproxy/mitmproxy/issues/4260)
# TODO: We should re-use SSL.Context options here, if only for TLS session resumption.
# This may require patches to pyOpenSSL, as some functionality is only exposed on contexts.
# TODO: This addon should manage the following options itself, which are current defined in mitmproxy/options.py:
# - upstream_cert
# - add_upstream_certs_to_client_chain
# - key_size
# - certs
# - cert_passphrase
# - ssl_verify_upstream_trusted_ca
# - ssl_verify_upstream_trusted_confdir
    def load(self, loader):
        """Register all TLS-related options."""
        # Human-readable list of insecure minimum versions, used in option help.
        insecure_tls_min_versions = (
            ", ".join(x.name for x in net_tls.INSECURE_TLS_MIN_VERSIONS[:-1])
            + f" and {net_tls.INSECURE_TLS_MIN_VERSIONS[-1].name}"
        )
        loader.add_option(
            name="tls_version_client_min",
            typespec=str,
            default=net_tls.DEFAULT_MIN_VERSION.name,
            choices=[x.name for x in net_tls.Version],
            help=f"Set the minimum TLS version for client connections. "
            f"{insecure_tls_min_versions} are insecure.",
        )
        loader.add_option(
            name="tls_version_client_max",
            typespec=str,
            default=net_tls.DEFAULT_MAX_VERSION.name,
            choices=[x.name for x in net_tls.Version],
            help=f"Set the maximum TLS version for client connections.",
        )
        loader.add_option(
            name="tls_version_server_min",
            typespec=str,
            default=net_tls.DEFAULT_MIN_VERSION.name,
            choices=[x.name for x in net_tls.Version],
            help=f"Set the minimum TLS version for server connections. "
            f"{insecure_tls_min_versions} are insecure.",
        )
        loader.add_option(
            name="tls_version_server_max",
            typespec=str,
            default=net_tls.DEFAULT_MAX_VERSION.name,
            choices=[x.name for x in net_tls.Version],
            help=f"Set the maximum TLS version for server connections.",
        )
        loader.add_option(
            name="tls_ecdh_curve_client",
            typespec=str | None,
            default=None,
            help="Use a specific elliptic curve for ECDHE key exchange on client connections. "
            'OpenSSL syntax, for example "prime256v1" (see `openssl ecparam -list_curves`).',
        )
        loader.add_option(
            name="tls_ecdh_curve_server",
            typespec=str | None,
            default=None,
            help="Use a specific elliptic curve for ECDHE key exchange on server connections. "
            'OpenSSL syntax, for example "prime256v1" (see `openssl ecparam -list_curves`).',
        )
        loader.add_option(
            name="request_client_cert",
            typespec=bool,
            default=False,
            help=f"Requests a client certificate (TLS message 'CertificateRequest') to establish a mutual TLS connection between client and mitmproxy (combined with 'client_certs' option for mitmproxy and upstream).",
        )
        loader.add_option(
            "ciphers_client",
            str | None,
            None,
            "Set supported ciphers for client <-> mitmproxy connections using OpenSSL syntax.",
        )
        loader.add_option(
            "ciphers_server",
            str | None,
            None,
            "Set supported ciphers for mitmproxy <-> server connections using OpenSSL syntax.",
        )
    def tls_clienthello(self, tls_clienthello: tls.ClientHelloData):
        """Decide whether upstream TLS must be established before answering the client."""
        conn_context = tls_clienthello.context
        # Connect upstream first only when the server connection uses TLS and
        # the connection strategy is eager.
        tls_clienthello.establish_server_tls_first = (
            conn_context.server.tls and ctx.options.connection_strategy == "eager"
        )
    def tls_start_client(self, tls_start: tls.TlsData) -> None:
        """Establish TLS or DTLS between client and proxy."""
        if tls_start.ssl_conn is not None:
            return  # a user addon has already provided the pyOpenSSL context.
        assert isinstance(tls_start.conn, connection.Client)
        client: connection.Client = tls_start.conn
        server: connection.Server = tls_start.context.server
        # Certificate (and key/chain) to present to this client.
        entry = self.get_cert(tls_start.context)
        if not client.cipher_list and ctx.options.ciphers_client:
            client.cipher_list = ctx.options.ciphers_client.split(":")
        # don't assign to client.cipher_list, doesn't need to be stored.
        cipher_list = client.cipher_list or _default_ciphers(
            net_tls.Version[ctx.options.tls_version_client_min]
        )
        if ctx.options.add_upstream_certs_to_client_chain:  # pragma: no cover
            # exempted from coverage until https://bugs.python.org/issue18233 is fixed.
            extra_chain_certs = server.certificate_list
        else:
            extra_chain_certs = []
        ssl_ctx = net_tls.create_client_proxy_context(
            method=net_tls.Method.DTLS_SERVER_METHOD
            if tls_start.is_dtls
            else net_tls.Method.TLS_SERVER_METHOD,
            min_version=net_tls.Version[ctx.options.tls_version_client_min],
            max_version=net_tls.Version[ctx.options.tls_version_client_max],
            cipher_list=tuple(cipher_list),
            ecdh_curve=net_tls.get_curve(ctx.options.tls_ecdh_curve_client),
            chain_file=entry.chain_file,
            request_client_cert=ctx.options.request_client_cert,
            alpn_select_callback=alpn_select_callback,
            extra_chain_certs=tuple(extra_chain_certs),
            dhparams=self.certstore.dhparams,
        )
        tls_start.ssl_conn = SSL.Connection(ssl_ctx)
        tls_start.ssl_conn.use_certificate(entry.cert.to_cryptography())
        tls_start.ssl_conn.use_privatekey(entry.privatekey)
        # Force HTTP/1 for secure web proxies, we currently don't support CONNECT over HTTP/2.
        # There is a proof-of-concept branch at https://github.com/mhils/mitmproxy/tree/http2-proxy,
        # but the complexity outweighs the benefits for now.
        if len(tls_start.context.layers) == 2 and isinstance(
            tls_start.context.layers[0], modes.HttpProxy
        ):
            client_alpn: bytes | None = b"http/1.1"
        else:
            client_alpn = client.alpn
        # Inputs for alpn_select_callback, attached to the connection object.
        tls_start.ssl_conn.set_app_data(
            AppData(
                client_alpn=client_alpn,
                server_alpn=server.alpn,
                http2=ctx.options.http2,
            )
        )
        tls_start.ssl_conn.set_accept_state()
    def tls_start_server(self, tls_start: tls.TlsData) -> None:
        """Establish TLS or DTLS between proxy and server."""
        if tls_start.ssl_conn is not None:
            return  # a user addon has already provided the pyOpenSSL context.
        assert isinstance(tls_start.conn, connection.Server)
        client: connection.Client = tls_start.context.client
        # tls_start.conn may be different from tls_start.context.server, e.g. an upstream HTTPS proxy.
        server: connection.Server = tls_start.conn
        assert server.address
        if ctx.options.ssl_insecure:
            verify = net_tls.Verify.VERIFY_NONE
        else:
            verify = net_tls.Verify.VERIFY_PEER
        # Default the upstream SNI to the client's SNI, or the target host.
        if server.sni is None:
            server.sni = client.sni or server.address[0]
        if not server.alpn_offers:
            if client.alpn_offers:
                if ctx.options.http2:
                    # We would perfectly support HTTP/1 -> HTTP/2, but we want to keep things on the same protocol
                    # version. There are some edge cases where we want to mirror the regular server's behavior
                    # accurately, for example header capitalization.
                    server.alpn_offers = tuple(client.alpn_offers)
                else:
                    server.alpn_offers = tuple(
                        x for x in client.alpn_offers if x != b"h2"
                    )
            else:
                # We either have no client TLS or a client without ALPN.
                # - If the client does use TLS but did not send an ALPN extension, we want to mirror that upstream.
                # - If the client does not use TLS, there's no clear-cut answer. As a pragmatic approach, we also do
                #   not send any ALPN extension in this case, which defaults to whatever protocol we are speaking
                #   or falls back to HTTP.
                server.alpn_offers = []
        if not server.cipher_list and ctx.options.ciphers_server:
            server.cipher_list = ctx.options.ciphers_server.split(":")
        # don't assign to server.cipher_list, doesn't need to be stored.
        cipher_list = server.cipher_list or _default_ciphers(
            net_tls.Version[ctx.options.tls_version_server_min]
        )
        # Select a client certificate for mutual TLS upstream: either a single
        # PEM file, or a per-host "<sni>.pem" file inside a directory.
        client_cert: str | None = None
        if ctx.options.client_certs:
            client_certs = os.path.expanduser(ctx.options.client_certs)
            if os.path.isfile(client_certs):
                client_cert = client_certs
            else:
                server_name: str = server.sni or server.address[0]
                p = os.path.join(client_certs, f"{server_name}.pem")
                if os.path.isfile(p):
                    client_cert = p
        ssl_ctx = net_tls.create_proxy_server_context(
            method=net_tls.Method.DTLS_CLIENT_METHOD
            if tls_start.is_dtls
            else net_tls.Method.TLS_CLIENT_METHOD,
            min_version=net_tls.Version[ctx.options.tls_version_server_min],
            max_version=net_tls.Version[ctx.options.tls_version_server_max],
            cipher_list=tuple(cipher_list),
            ecdh_curve=net_tls.get_curve(ctx.options.tls_ecdh_curve_server),
            verify=verify,
            ca_path=ctx.options.ssl_verify_upstream_trusted_confdir,
            ca_pemfile=ctx.options.ssl_verify_upstream_trusted_ca,
            client_cert=client_cert,
            legacy_server_connect=ctx.options.ssl_insecure,
        )
        tls_start.ssl_conn = SSL.Connection(ssl_ctx)
        if server.sni:
            # We need to set SNI + enable hostname verification.
            assert isinstance(server.sni, str)
            # Manually enable hostname verification on the context object.
            # https://wiki.openssl.org/index.php/Hostname_validation
            param = SSL._lib.SSL_get0_param(tls_start.ssl_conn._ssl)  # type: ignore
            # Matching on the CN is disabled in both Chrome and Firefox, so we disable it, too.
            # https://www.chromestatus.com/feature/4981025180483584
            SSL._lib.X509_VERIFY_PARAM_set_hostflags(param, DEFAULT_HOSTFLAGS)  # type: ignore
            try:
                ip: bytes = ipaddress.ip_address(server.sni).packed
            except ValueError:
                # SNI is a hostname: send it and verify against it.
                host_name = server.sni.encode("idna")
                tls_start.ssl_conn.set_tlsext_host_name(host_name)
                ok = SSL._lib.X509_VERIFY_PARAM_set1_host(  # type: ignore
                    param, host_name, len(host_name)
                )  # type: ignore
                SSL._openssl_assert(ok == 1)  # type: ignore
            else:
                # RFC 6066: Literal IPv4 and IPv6 addresses are not permitted in "HostName",
                # so we don't call set_tlsext_host_name.
                ok = SSL._lib.X509_VERIFY_PARAM_set1_ip(param, ip, len(ip))  # type: ignore
                SSL._openssl_assert(ok == 1)  # type: ignore
        elif verify is not net_tls.Verify.VERIFY_NONE:
            raise ValueError("Cannot validate certificate hostname without SNI")
        if server.alpn_offers:
            tls_start.ssl_conn.set_alpn_protos(list(server.alpn_offers))
        tls_start.ssl_conn.set_connect_state()
    def quic_start_client(self, tls_start: quic.QuicTlsData) -> None:
        """Establish QUIC between client and proxy."""
        if tls_start.settings is not None:
            return  # a user addon has already provided the settings.
        tls_start.settings = quic.QuicTlsSettings()
        # keep the following part in sync with `tls_start_client`
        assert isinstance(tls_start.conn, connection.Client)
        client: connection.Client = tls_start.conn
        server: connection.Server = tls_start.context.server
        # Certificate (and key/chain) to present to this client.
        entry = self.get_cert(tls_start.context)
        if not client.cipher_list and ctx.options.ciphers_client:
            client.cipher_list = ctx.options.ciphers_client.split(":")
        if ctx.options.add_upstream_certs_to_client_chain:  # pragma: no cover
            extra_chain_certs = server.certificate_list
        else:
            extra_chain_certs = []
        # set context parameters
        if client.cipher_list:
            tls_start.settings.cipher_suites = [
                CipherSuite[cipher] for cipher in client.cipher_list
            ]
        # if we don't have upstream ALPN, we allow all offered by the client
        tls_start.settings.alpn_protocols = [
            alpn.decode("ascii")
            for alpn in [alpn for alpn in (client.alpn, server.alpn) if alpn]
            or client.alpn_offers
        ]
        # set the certificates
        tls_start.settings.certificate = entry.cert._cert
        tls_start.settings.certificate_private_key = entry.privatekey
        tls_start.settings.certificate_chain = [
            cert._cert for cert in (*entry.chain_certs, *extra_chain_certs)
        ]
    def quic_start_server(self, tls_start: quic.QuicTlsData) -> None:
        """Establish QUIC between proxy and server."""
        if tls_start.settings is not None:
            return  # a user addon has already provided the settings.
        tls_start.settings = quic.QuicTlsSettings()
        # keep the following part in sync with `tls_start_server`
        assert isinstance(tls_start.conn, connection.Server)
        client: connection.Client = tls_start.context.client
        server: connection.Server = tls_start.conn
        assert server.address
        if ctx.options.ssl_insecure:
            tls_start.settings.verify_mode = ssl.CERT_NONE
        else:
            tls_start.settings.verify_mode = ssl.CERT_REQUIRED
        # Default the upstream SNI to the client's SNI, or the target host.
        if server.sni is None:
            server.sni = client.sni or server.address[0]
        if not server.alpn_offers:
            if client.alpn_offers:
                server.alpn_offers = tuple(client.alpn_offers)
            else:
                # aioquic fails if no ALPN is offered, so use H3
                server.alpn_offers = tuple(alpn.encode("ascii") for alpn in H3_ALPN)
        if not server.cipher_list and ctx.options.ciphers_server:
            server.cipher_list = ctx.options.ciphers_server.split(":")
        # set context parameters
        if server.cipher_list:
            tls_start.settings.cipher_suites = [
                CipherSuite[cipher] for cipher in server.cipher_list
            ]
        if server.alpn_offers:
            tls_start.settings.alpn_protocols = [
                alpn.decode("ascii") for alpn in server.alpn_offers
            ]
        # set the certificates
        # NOTE client certificates are not supported
        tls_start.settings.ca_path = ctx.options.ssl_verify_upstream_trusted_confdir
        tls_start.settings.ca_file = ctx.options.ssl_verify_upstream_trusted_ca
    def running(self):
        """Ensure the certificate store is initialized once the proxy is running."""
        # FIXME: We have a weird bug where the contract for configure is not followed and it is never called with
        # confdir or command_history as updated.
        self.configure("confdir")  # pragma: no cover
def configure(self, updated):
if (
"certs" in updated
or "confdir" in updated
or "key_size" in updated
or "cert_passphrase" in updated
):
certstore_path = os.path.expanduser(ctx.options.confdir)
self.certstore = certs.CertStore.from_store(
path=certstore_path,
basename=CONF_BASENAME,
key_size=ctx.options.key_size,
passphrase=ctx.options.cert_passphrase.encode("utf8")
if ctx.options.cert_passphrase
else None,
)
if self.certstore.default_ca.has_expired():
logger.warning(
"The mitmproxy certificate authority has expired!\n"
"Please delete all CA-related files in your ~/.mitmproxy folder.\n"
"The CA will be regenerated automatically after restarting mitmproxy.\n"
"See https://docs.mitmproxy.org/stable/concepts-certificates/ for additional help.",
)
for certspec in ctx.options.certs:
parts = certspec.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = Path(parts[1]).expanduser()
if not cert.exists():
raise exceptions.OptionsError(
f"Certificate file does not exist: {cert}"
)
try:
self.certstore.add_cert_file(
parts[0],
cert,
passphrase=ctx.options.cert_passphrase.encode("utf8")
if ctx.options.cert_passphrase
else None,
)
except ValueError as e:
raise exceptions.OptionsError(
f"Invalid certificate format for {cert}: {e}"
) from e
if "tls_ecdh_curve_client" in updated or "tls_ecdh_curve_server" in updated:
for ecdh_curve in [
ctx.options.tls_ecdh_curve_client,
ctx.options.tls_ecdh_curve_server,
]:
if ecdh_curve is not None and ecdh_curve not in net_tls.EC_CURVES:
raise exceptions.OptionsError(
f"Invalid ECDH curve: {ecdh_curve!r}. Valid curves are: {', '.join(net_tls.EC_CURVES)}"
)
if "tls_version_client_min" in updated:
self._warn_unsupported_version("tls_version_client_min", True)
if "tls_version_client_max" in updated:
self._warn_unsupported_version("tls_version_client_max", False)
if "tls_version_server_min" in updated:
self._warn_unsupported_version("tls_version_server_min", True)
if "tls_version_server_max" in updated:
self._warn_unsupported_version("tls_version_server_max", False)
if "tls_version_client_min" in updated or "ciphers_client" in updated:
self._warn_seclevel_missing("client")
if "tls_version_server_min" in updated or "ciphers_server" in updated:
self._warn_seclevel_missing("server")
def _warn_unsupported_version(self, attribute: str, warn_unbound: bool):
val = net_tls.Version[getattr(ctx.options, attribute)]
supported_versions = [
v for v in net_tls.Version if net_tls.is_supported_version(v)
]
supported_versions_str = ", ".join(v.name for v in supported_versions)
if val is net_tls.Version.UNBOUNDED:
if warn_unbound:
logger.info(
f"{attribute} has been set to {val.name}. Note that your "
f"OpenSSL build only supports the following TLS versions: {supported_versions_str}"
)
elif val not in supported_versions:
logger.warning(
f"{attribute} has been set to {val.name}, which is not supported by the current OpenSSL build. "
f"The current build only supports the following versions: {supported_versions_str}"
)
def _warn_seclevel_missing(self, side: Literal["client", "server"]) -> None:
"""
OpenSSL cipher spec need to specify @SECLEVEL for old TLS versions to work,
see https://github.com/pyca/cryptography/issues/9523.
"""
if side == "client":
custom_ciphers = ctx.options.ciphers_client
min_tls_version = ctx.options.tls_version_client_min
else:
custom_ciphers = ctx.options.ciphers_server
min_tls_version = ctx.options.tls_version_server_min
if (
custom_ciphers
and net_tls.Version[min_tls_version] in net_tls.INSECURE_TLS_MIN_VERSIONS
and "@SECLEVEL=0" not in custom_ciphers
):
logger.warning(
f'With tls_version_{side}_min set to {min_tls_version}, ciphers_{side} must include "@SECLEVEL=0" '
f"for insecure TLS versions to work."
)
    def crl_path(self) -> str:
        """Magic URL path under which the proxy serves its CRL, keyed by the
        CA certificate's serial number."""
        return f"/mitmproxy-{self.certstore.default_ca.serial}.crl"
    def get_cert(self, conn_context: context.Context) -> certs.CertStoreEntry:
        """
        This function determines the Common Name (CN), Subject Alternative Names (SANs) and Organization Name
        our certificate should have and then fetches a matching cert from the certstore.
        """
        altnames: list[x509.GeneralName] = []
        organization: str | None = None
        crl_distribution_point: str | None = None
        # Use upstream certificate if available.
        if ctx.options.upstream_cert and conn_context.server.certificate_list:
            upstream_cert: certs.Cert = conn_context.server.certificate_list[0]
            if upstream_cert.cn:
                altnames.append(_ip_or_dns_name(upstream_cert.cn))
            altnames.extend(upstream_cert.altnames)
            if upstream_cert.organization:
                organization = upstream_cert.organization
            # Replace original URL path with the CA cert serial number, which acts as a magic token
            if crls := upstream_cert.crl_distribution_points:
                try:
                    scheme, netloc, *_ = urllib.parse.urlsplit(crls[0])
                except ValueError:
                    logger.info(f"Failed to parse CRL URL: {crls[0]!r}")
                else:
                    # noinspection PyTypeChecker
                    crl_distribution_point = urllib.parse.urlunsplit(
                        (scheme, netloc, self.crl_path(), None, None)
                    )
        # Add SNI or our local IP address.
        if conn_context.client.sni:
            altnames.append(_ip_or_dns_name(conn_context.client.sni))
        else:
            altnames.append(_ip_or_dns_name(conn_context.client.sockname[0]))
        # If we already know of a server address, include that in the SANs as well.
        if conn_context.server.address:
            altnames.append(_ip_or_dns_name(conn_context.server.address[0]))
        # only keep first occurrence of each hostname
        altnames = list(dict.fromkeys(altnames))
        # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity.
        # In other words, the Common Name is irrelevant then.
        cn = next((str(x.value) for x in altnames), None)
        return self.certstore.get_cert(
            cn, altnames, organization, crl_distribution_point
        )
def request(self, flow: http.HTTPFlow):
if not flow.live or flow.error or flow.response:
return
# Check if a request has a magic CRL token at the end
if flow.request.path.endswith(self.crl_path()):
flow.response = http.Response.make(
200,
self.certstore.default_crl,
{"Content-Type": "application/pkix-crl"},
)
def _ip_or_dns_name(val: str) -> x509.GeneralName:
    """Convert a string into either an x509.IPAddress or x509.DNSName object."""
    try:
        parsed = ipaddress.ip_address(val)
    except ValueError:
        pass
    else:
        return x509.IPAddress(parsed)
    # Not an IP literal: treat it as a hostname (IDNA-encoded, then re-decoded).
    return x509.DNSName(val.encode("idna").decode())
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/view.py | mitmproxy/addons/view.py | """
The View:
- Keeps track of a store of flows
- Maintains a filtered, ordered view onto that list of flows
- Exposes a number of signals so the view can be monitored
- Tracks focus within the view
- Exposes a settings store for flows that automatically expires if the flow is
removed from the store.
"""
import collections
import logging
import re
from collections.abc import Iterator
from collections.abc import MutableMapping
from collections.abc import Sequence
from typing import Any
from typing import Optional
import sortedcontainers
import mitmproxy.flow
from mitmproxy import command
from mitmproxy import connection
from mitmproxy import ctx
from mitmproxy import dns
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import hooks
from mitmproxy import http
from mitmproxy import io
from mitmproxy import tcp
from mitmproxy import udp
from mitmproxy.log import ALERT
from mitmproxy.utils import human
from mitmproxy.utils import signals
# The underlying sorted list implementation expects the sort key to be stable
# for the lifetime of the object. However, if we sort by size, for instance,
# the sort order changes as the flow progresses through its lifecycle. We
# address this through two means:
#
# - Let order keys cache the sort value by flow ID.
#
# - Add a facility to refresh items in the list by removing and re-adding them
# when they are updated.
class _OrderKey:
    """Base class for view sort keys.

    Caches the generated sort value per flow (in the view's settings store),
    so the key stays stable while the flow sits in the sorted list.
    """
    def __init__(self, view):
        self.view = view
    def generate(self, f: mitmproxy.flow.Flow) -> Any:  # pragma: no cover
        pass
    def refresh(self, f):
        # Re-sort a single flow whose cached key value may have gone stale:
        # remove it, update the cached value, then re-insert it.
        k = self._key()
        old = self.view.settings[f][k]
        new = self.generate(f)
        if old != new:
            self.view._view.remove(f)
            self.view.settings[f][k] = new
            self.view._view.add(f)
            self.view.sig_view_refresh.send()
    def _key(self):
        # Unique settings-store key for this order-key instance.
        return "_order_%s" % id(self)
    def __call__(self, f):
        # Sort key used by SortedListWithKey: return the cached value for
        # flows in the store, otherwise compute it on the fly.
        if f.id in self.view._store:
            k = self._key()
            s = self.view.settings[f]
            if k in s:
                return s[k]
            val = self.generate(f)
            s[k] = val
            return val
        else:
            return self.generate(f)
class OrderRequestStart(_OrderKey):
    """Order flows by creation time."""
    def generate(self, f: mitmproxy.flow.Flow) -> float:
        return f.timestamp_created
class OrderRequestMethod(_OrderKey):
    """Order flows by HTTP method (or a protocol label for non-HTTP flows)."""
    def generate(self, f: mitmproxy.flow.Flow) -> str:
        if isinstance(f, http.HTTPFlow):
            return f.request.method
        if isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):
            return f.type.upper()
        if isinstance(f, dns.DNSFlow):
            return dns.op_codes.to_str(f.request.op_code)
        raise NotImplementedError()
class OrderRequestURL(_OrderKey):
    """Order flows by request URL (or best-effort address/name otherwise)."""
    def generate(self, f: mitmproxy.flow.Flow) -> str:
        if isinstance(f, http.HTTPFlow):
            return f.request.url
        if isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):
            return human.format_address(f.server_conn.address)
        if isinstance(f, dns.DNSFlow):
            if f.request.questions:
                return f.request.questions[0].name
            return ""
        raise NotImplementedError()
class OrderKeySize(_OrderKey):
    """Order flows by total payload size (request plus response bodies)."""
    def generate(self, f: mitmproxy.flow.Flow) -> int:
        if isinstance(f, http.HTTPFlow):
            total = 0
            if f.request.raw_content:
                total += len(f.request.raw_content)
            if f.response and f.response.raw_content:
                total += len(f.response.raw_content)
            return total
        if isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):
            return sum(len(message.content) for message in f.messages)
        if isinstance(f, dns.DNSFlow):
            return f.response.size if f.response else 0
        raise NotImplementedError()
# (keyboard shortcut, order name) pairs offered as view_order choices.
orders = [
    ("t", "time"),
    ("m", "method"),
    ("u", "url"),
    ("z", "size"),
]
# Signature prototypes for the SyncSignals declared on View below.
def _signal_with_flow(flow: mitmproxy.flow.Flow) -> None: ...
def _sig_view_remove(flow: mitmproxy.flow.Flow, index: int) -> None: ...
class View(collections.abc.Sequence):
    """An ordered, filtered view onto the underlying store of flows."""
    def __init__(self) -> None:
        super().__init__()
        # Master store of all flows, keyed by flow id (insertion-ordered).
        self._store: collections.OrderedDict[str, mitmproxy.flow.Flow] = (
            collections.OrderedDict()
        )
        self.filter = flowfilter.match_all
        # Should we show only marked flows?
        self.show_marked = False
        self.default_order = OrderRequestStart(self)
        self.orders = dict(
            time=OrderRequestStart(self),
            method=OrderRequestMethod(self),
            url=OrderRequestURL(self),
            size=OrderKeySize(self),
        )
        self.order_key: _OrderKey = self.default_order
        self.order_reversed = False
        self.focus_follow = False
        # Sorted subset of the store that currently passes the filter.
        self._view = sortedcontainers.SortedListWithKey(key=self.order_key)
        # The sig_view* signals broadcast events that affect the view. That is,
        # an update to a flow in the store but not in the view does not trigger
        # a signal. All signals are called after the view has been updated.
        self.sig_view_update = signals.SyncSignal(_signal_with_flow)
        self.sig_view_add = signals.SyncSignal(_signal_with_flow)
        self.sig_view_remove = signals.SyncSignal(_sig_view_remove)
        # Signals that the view should be refreshed completely
        self.sig_view_refresh = signals.SyncSignal(lambda: None)
        # The sig_store* signals broadcast events that affect the underlying
        # store. If a flow is removed from just the view, sig_view_remove is
        # triggered. If it is removed from the store while it is also in the
        # view, both sig_store_remove and sig_view_remove are triggered.
        self.sig_store_remove = signals.SyncSignal(_signal_with_flow)
        # Signals that the store should be refreshed completely
        self.sig_store_refresh = signals.SyncSignal(lambda: None)
        self.focus = Focus(self)
        self.settings = Settings(self)
    def load(self, loader):
        """Register the view-related options."""
        loader.add_option(
            "view_filter", Optional[str], None, "Limit the view to matching flows."
        )
        loader.add_option(
            "view_order",
            str,
            "time",
            "Flow sort order.",
            choices=list(map(lambda c: c[1], orders)),
        )
        loader.add_option(
            "view_order_reversed", bool, False, "Reverse the sorting order."
        )
        loader.add_option(
            "console_focus_follow", bool, False, "Focus follows new flows."
        )
    def store_count(self):
        """Number of flows in the underlying store (not only in the view)."""
        return len(self._store)
    def _rev(self, idx: int) -> int:
        """
        Reverses an index, if needed
        """
        if self.order_reversed:
            if idx < 0:
                # Mirror negative indices around the end: -1 -> 0, -2 -> 1, ...
                idx = -idx - 1
            else:
                idx = len(self._view) - idx - 1
                if idx < 0:
                    raise IndexError
        return idx
    def __len__(self):
        return len(self._view)
    def __getitem__(self, offset) -> Any:
        # Index into the view, honoring the current sort direction.
        return self._view[self._rev(offset)]
    # Reflect some methods to the efficient underlying implementation
    def _bisect(self, f: mitmproxy.flow.Flow) -> int:
        # Insertion point for f, adjusted for the current sort direction.
        v = self._view.bisect_right(f)
        return self._rev(v - 1) + 1
    def index(
        self, f: mitmproxy.flow.Flow, start: int = 0, stop: int | None = None
    ) -> int:
        return self._rev(self._view.index(f, start, stop))
    def __contains__(self, f: Any) -> bool:
        return self._view.__contains__(f)
    def _order_key_name(self):
        # Must stay in sync with _OrderKey._key() for the active order key.
        return "_order_%s" % id(self.order_key)
    def _base_add(self, f):
        # Cache the sort value first so the key stays stable while in the list.
        self.settings[f][self._order_key_name()] = self.order_key(f)
        self._view.add(f)
    def _refilter(self):
        # Rebuild the view from the store, honoring marked-only mode and filter.
        self._view.clear()
        for i in self._store.values():
            if self.show_marked and not i.marked:
                continue
            if self.filter(i):
                self._base_add(i)
        self.sig_view_refresh.send()
""" View API """
# Focus
@command.command("view.focus.go")
def go(self, offset: int) -> None:
"""
Go to a specified offset. Positive offests are from the beginning of
the view, negative from the end of the view, so that 0 is the first
flow, -1 is the last flow.
"""
if len(self) == 0:
return
if offset < 0:
offset = len(self) + offset
if offset < 0:
offset = 0
if offset > len(self) - 1:
offset = len(self) - 1
self.focus.flow = self[offset]
@command.command("view.focus.next")
def focus_next(self) -> None:
"""
Set focus to the next flow.
"""
if self.focus.index is not None:
idx = self.focus.index + 1
if self.inbounds(idx):
self.focus.flow = self[idx]
else:
pass
@command.command("view.focus.prev")
def focus_prev(self) -> None:
"""
Set focus to the previous flow.
"""
if self.focus.index is not None:
idx = self.focus.index - 1
if self.inbounds(idx):
self.focus.flow = self[idx]
else:
pass
# Order
@command.command("view.order.options")
def order_options(self) -> Sequence[str]:
"""
Choices supported by the view_order option.
"""
return list(sorted(self.orders.keys()))
@command.command("view.order.reverse")
def set_reversed(self, boolean: bool) -> None:
self.order_reversed = boolean
self.sig_view_refresh.send()
@command.command("view.order.set")
def set_order(self, order_key: str) -> None:
"""
Sets the current view order.
"""
if order_key not in self.orders:
raise exceptions.CommandError("Unknown flow order: %s" % order_key)
key = self.orders[order_key]
self.order_key = key
newview = sortedcontainers.SortedListWithKey(key=key)
newview.update(self._view)
self._view = newview
@command.command("view.order")
def get_order(self) -> str:
"""
Returns the current view order.
"""
order = ""
for k in self.orders.keys():
if self.order_key == self.orders[k]:
order = k
return order
# Filter
@command.command("view.filter.set")
def set_filter_cmd(self, filter_expr: str) -> None:
"""
Sets the current view filter.
"""
filt = None
if filter_expr:
try:
filt = flowfilter.parse(filter_expr)
except ValueError as e:
raise exceptions.CommandError(str(e)) from e
self.set_filter(filt)
def set_filter(self, flt: flowfilter.TFilter | None):
self.filter = flt or flowfilter.match_all
self._refilter()
# View Updates
@command.command("view.clear")
def clear(self) -> None:
"""
Clears both the store and view.
"""
self._store.clear()
self._view.clear()
self.sig_view_refresh.send()
self.sig_store_refresh.send()
@command.command("view.clear_unmarked")
def clear_not_marked(self) -> None:
"""
Clears only the unmarked flows.
"""
for flow in self._store.copy().values():
if not flow.marked:
self._store.pop(flow.id)
self._refilter()
self.sig_store_refresh.send()
# View Settings
@command.command("view.settings.getval")
def getvalue(self, flow: mitmproxy.flow.Flow, key: str, default: str) -> str:
"""
Get a value from the settings store for the specified flow.
"""
return self.settings[flow].get(key, default)
@command.command("view.settings.setval.toggle")
def setvalue_toggle(self, flows: Sequence[mitmproxy.flow.Flow], key: str) -> None:
"""
Toggle a boolean value in the settings store, setting the value to
the string "true" or "false".
"""
updated = []
for f in flows:
current = self.settings[f].get("key", "false")
self.settings[f][key] = "false" if current == "true" else "true"
updated.append(f)
ctx.master.addons.trigger(hooks.UpdateHook(updated))
@command.command("view.settings.setval")
def setvalue(
self, flows: Sequence[mitmproxy.flow.Flow], key: str, value: str
) -> None:
"""
Set a value in the settings store for the specified flows.
"""
updated = []
for f in flows:
self.settings[f][key] = value
updated.append(f)
ctx.master.addons.trigger(hooks.UpdateHook(updated))
# Flows
@command.command("view.flows.duplicate")
def duplicate(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
"""
Duplicates the specified flows, and sets the focus to the first
duplicate.
"""
dups = [f.copy() for f in flows]
if dups:
self.add(dups)
self.focus.flow = dups[0]
logging.log(ALERT, "Duplicated %s flows" % len(dups))
@command.command("view.flows.remove")
def remove(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
"""
Removes the flow from the underlying store and the view.
"""
for f in flows:
if f.id in self._store:
if f.killable:
f.kill()
if f in self._view:
# We manually pass the index here because multiple flows may have the same
# sorting key, and we cannot reconstruct the index from that.
idx = self._view.index(f)
self._view.remove(f)
self.sig_view_remove.send(flow=f, index=idx)
del self._store[f.id]
self.sig_store_remove.send(flow=f)
if len(flows) > 1:
logging.log(ALERT, "Removed %s flows" % len(flows))
@command.command("view.flows.resolve")
def resolve(self, flow_spec: str) -> Sequence[mitmproxy.flow.Flow]:
"""
Resolve a flow list specification to an actual list of flows.
"""
if flow_spec == "@all":
return [i for i in self._store.values()]
if flow_spec == "@focus":
return [self.focus.flow] if self.focus.flow else []
elif flow_spec == "@shown":
return [i for i in self]
elif flow_spec == "@hidden":
return [i for i in self._store.values() if i not in self._view]
elif flow_spec == "@marked":
return [i for i in self._store.values() if i.marked]
elif flow_spec == "@unmarked":
return [i for i in self._store.values() if not i.marked]
elif re.match(r"@[0-9a-f\-,]{36,}", flow_spec):
ids = flow_spec[1:].split(",")
return [i for i in self._store.values() if i.id in ids]
else:
try:
filt = flowfilter.parse(flow_spec)
except ValueError as e:
raise exceptions.CommandError(str(e)) from e
return [i for i in self._store.values() if filt(i)]
@command.command("view.flows.create")
def create(self, method: str, url: str) -> None:
try:
req = http.Request.make(method.upper(), url)
except ValueError as e:
raise exceptions.CommandError("Invalid URL: %s" % e)
c = connection.Client(
peername=("", 0),
sockname=("", 0),
timestamp_start=req.timestamp_start - 0.0001,
)
s = connection.Server(address=(req.host, req.port))
f = http.HTTPFlow(c, s)
f.request = req
f.request.headers["Host"] = req.host
self.add([f])
@command.command("view.flows.load")
def load_file(self, path: mitmproxy.types.Path) -> None:
"""
Load flows into the view, without processing them with addons.
"""
try:
with open(path, "rb") as f:
for i in io.FlowReader(f).stream():
# Do this to get a new ID, so we can load the same file N times and
# get new flows each time. It would be more efficient to just have a
# .newid() method or something.
self.add([i.copy()])
except OSError as e:
logging.error(e.strerror)
except exceptions.FlowReadException as e:
logging.error(str(e))
def add(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
"""
Adds a flow to the state. If the flow already exists, it is
ignored.
"""
for f in flows:
if f.id not in self._store:
self._store[f.id] = f
if self.filter(f):
self._base_add(f)
if self.focus_follow:
self.focus.flow = f
self.sig_view_add.send(flow=f)
def get_by_id(self, flow_id: str) -> mitmproxy.flow.Flow | None:
"""
Get flow with the given id from the store.
Returns None if the flow is not found.
"""
return self._store.get(flow_id)
    # View Properties
    @command.command("view.properties.length")
    def get_length(self) -> int:
        """
        Returns view length.
        """
        return len(self)
    @command.command("view.properties.marked")
    def get_marked(self) -> bool:
        """
        Returns true if view is in marked mode.
        """
        return self.show_marked
    @command.command("view.properties.marked.toggle")
    def toggle_marked(self) -> None:
        """
        Toggle whether to show marked views only.
        """
        self.show_marked = not self.show_marked
        # Rebuild the view so the marked-only mode takes effect immediately.
        self._refilter()
    @command.command("view.properties.inbounds")
    def inbounds(self, index: int) -> bool:
        """
        Is this 0 <= index < len(self)?
        """
        return 0 <= index < len(self)
    # Event handlers
    def configure(self, updated):
        """Apply option changes to the filter, ordering, and focus-follow mode."""
        if "view_filter" in updated:
            filt = None
            if ctx.options.view_filter:
                try:
                    filt = flowfilter.parse(ctx.options.view_filter)
                except ValueError as e:
                    raise exceptions.OptionsError(str(e)) from e
            self.set_filter(filt)
        if "view_order" in updated:
            if ctx.options.view_order not in self.orders:
                raise exceptions.OptionsError(
                    "Unknown flow order: %s" % ctx.options.view_order
                )
            self.set_order(ctx.options.view_order)
        if "view_order_reversed" in updated:
            self.set_reversed(ctx.options.view_order_reversed)
        if "console_focus_follow" in updated:
            self.focus_follow = ctx.options.console_focus_follow
    # Per-protocol lifecycle hooks: flows are added to the view when they
    # first appear and updated on every subsequent event.
    def requestheaders(self, f):
        self.add([f])
    def error(self, f):
        self.update([f])
    def response(self, f):
        self.update([f])
    def intercept(self, f):
        self.update([f])
    def resume(self, f):
        self.update([f])
    def kill(self, f):
        self.update([f])
    def tcp_start(self, f):
        self.add([f])
    def tcp_message(self, f):
        self.update([f])
    def tcp_error(self, f):
        self.update([f])
    def tcp_end(self, f):
        self.update([f])
    def udp_start(self, f):
        self.add([f])
    def udp_message(self, f):
        self.update([f])
    def udp_error(self, f):
        self.update([f])
    def udp_end(self, f):
        self.update([f])
    def dns_request(self, f):
        self.add([f])
    def dns_response(self, f):
        self.update([f])
    def dns_error(self, f):
        self.update([f])
    def update(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
        """
        Updates a list of flows. If flow is not in the state, it's ignored.
        """
        for f in flows:
            if f.id in self._store:
                if self.filter(f):
                    if f not in self._view:
                        # Flow newly matches the filter: insert it into the view.
                        self._base_add(f)
                        if self.focus_follow:
                            self.focus.flow = f
                        self.sig_view_add.send(flow=f)
                    else:
                        # This is a tad complicated. The sortedcontainers
                        # implementation assumes that the order key is stable. If
                        # it changes mid-way Very Bad Things happen. We detect when
                        # this happens, and re-fresh the item.
                        self.order_key.refresh(f)
                        self.sig_view_update.send(flow=f)
                else:
                    # Flow no longer matches the filter: drop it from the view.
                    try:
                        idx = self._view.index(f)
                    except ValueError:
                        pass  # The value was not in the view
                    else:
                        self._view.remove(f)
                        self.sig_view_remove.send(flow=f, index=idx)
class Focus:
    """
    Tracks a focus element within a View.
    """
    def __init__(self, v: View) -> None:
        self.view = v
        self._flow: mitmproxy.flow.Flow | None = None
        # Fired whenever the focused flow changes.
        self.sig_change = signals.SyncSignal(lambda: None)
        if len(self.view):
            self.flow = self.view[0]
        v.sig_view_add.connect(self._sig_view_add)
        v.sig_view_remove.connect(self._sig_view_remove)
        v.sig_view_refresh.connect(self._sig_view_refresh)
    @property
    def flow(self) -> mitmproxy.flow.Flow | None:
        return self._flow
    @flow.setter
    def flow(self, f: mitmproxy.flow.Flow | None):
        if f is not None and f not in self.view:
            raise ValueError("Attempt to set focus to flow not in view")
        self._flow = f
        self.sig_change.send()
    @property
    def index(self) -> int | None:
        # Position of the focused flow in the view, or None if nothing focused.
        if self.flow:
            return self.view.index(self.flow)
        return None
    @index.setter
    def index(self, idx):
        if idx < 0 or idx > len(self.view) - 1:
            raise ValueError("Index out of view bounds")
        self.flow = self.view[idx]
    def _nearest(self, f, v):
        # Closest valid index to where f would be inserted in view v.
        return min(v._bisect(f), len(v) - 1)
    def _sig_view_remove(self, flow, index):
        # If the focused flow was removed, move focus to the nearest neighbor.
        if len(self.view) == 0:
            self.flow = None
        elif flow is self.flow:
            self.index = min(index, len(self.view) - 1)
    def _sig_view_refresh(self):
        # After a full refresh: keep focus if still visible, else re-anchor it.
        if len(self.view) == 0:
            self.flow = None
        elif self.flow is None:
            self.flow = self.view[0]
        elif self.flow not in self.view:
            self.flow = self.view[self._nearest(self.flow, self.view)]
    def _sig_view_add(self, flow):
        # We only have to act if we don't have a focus element
        if not self.flow:
            self.flow = flow
class Settings(collections.abc.Mapping):
    """Per-flow settings store; entries expire automatically when a flow is
    removed from the view's underlying store."""
    def __init__(self, view: View) -> None:
        self.view = view
        # Maps flow id -> settings dict for that flow.
        self._values: MutableMapping[str, dict] = {}
        view.sig_store_remove.connect(self._sig_store_remove)
        view.sig_store_refresh.connect(self._sig_store_refresh)
    def __iter__(self) -> Iterator:
        return iter(self._values)
    def __len__(self) -> int:
        return len(self._values)
    def __getitem__(self, f: mitmproxy.flow.Flow) -> dict:
        # Only flows currently in the store may carry settings.
        if f.id not in self.view._store:
            raise KeyError
        return self._values.setdefault(f.id, {})
    def _sig_store_remove(self, flow):
        if flow.id in self._values:
            del self._values[flow.id]
    def _sig_store_refresh(self):
        # Drop settings for flows that no longer exist in the store.
        for fid in list(self._values.keys()):
            if fid not in self.view._store:
                del self._values[fid]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/modifybody.py | mitmproxy/addons/modifybody.py | import logging
import re
from collections.abc import Sequence
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy.addons.modifyheaders import ModifySpec
from mitmproxy.addons.modifyheaders import parse_modify_spec
from mitmproxy.log import ALERT
logger = logging.getLogger(__name__)
class ModifyBody:
    """Addon: rewrite request/response bodies according to modify_body specs."""
    def __init__(self) -> None:
        # Parsed replacement specs, refreshed whenever modify_body changes.
        self.replacements: list[ModifySpec] = []
    def load(self, loader):
        loader.add_option(
            "modify_body",
            Sequence[str],
            [],
            """
            Replacement pattern of the form "[/flow-filter]/regex/[@]replacement", where
            the separator can be any character. The @ allows to provide a file path that
            is used to read the replacement string.
            """,
        )
    def configure(self, updated):
        """Re-parse the modify_body option and warn about conflicting options."""
        if "modify_body" in updated:
            self.replacements = []
            for option in ctx.options.modify_body:
                try:
                    spec = parse_modify_spec(option, True)
                except ValueError as e:
                    raise exceptions.OptionsError(
                        f"Cannot parse modify_body option {option}: {e}"
                    ) from e
                self.replacements.append(spec)
        # Streamed bodies bypass body modification entirely; alert the user.
        stream_and_modify_conflict = (
            ctx.options.modify_body
            and ctx.options.stream_large_bodies
            and ("modify_body" in updated or "stream_large_bodies" in updated)
        )
        if stream_and_modify_conflict:
            logger.log(
                ALERT,
                "Both modify_body and stream_large_bodies are active. "
                "Streamed bodies will not be modified.",
            )
    def request(self, flow):
        # Only modify live requests that have not been answered or errored yet.
        if flow.response or flow.error or not flow.live:
            return
        self.run(flow)
    def response(self, flow):
        if flow.error or not flow.live:
            return
        self.run(flow)
    def run(self, flow):
        """Apply every matching replacement spec to the flow's body."""
        for spec in self.replacements:
            if spec.matches(flow):
                try:
                    replacement = spec.read_replacement()
                except OSError as e:
                    # Consistency fix: use the module-level `logger` (defined at
                    # the top of this file and used elsewhere in this class)
                    # instead of the root logger via logging.warning().
                    logger.warning(f"Could not read replacement file: {e}")
                    continue
                if flow.response:
                    flow.response.content = re.sub(
                        spec.subject,
                        replacement,
                        flow.response.content,
                        flags=re.DOTALL,
                    )
                else:
                    flow.request.content = re.sub(
                        spec.subject, replacement, flow.request.content, flags=re.DOTALL
                    )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/anticache.py | mitmproxy/addons/anticache.py | from mitmproxy import ctx
class AntiCache:
    """Addon: strip conditional-request headers so servers send full responses."""
    def load(self, loader):
        loader.add_option(
            "anticache",
            bool,
            False,
            """
            Strip out request headers that might cause the server to return
            304-not-modified.
            """,
        )
    def request(self, flow):
        if ctx.options.anticache:
            flow.request.anticache()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/clientplayback.py | mitmproxy/addons/clientplayback.py | from __future__ import annotations
import asyncio
import logging
import time
from collections.abc import Sequence
from types import TracebackType
from typing import cast
from typing import Literal
import mitmproxy.types
from mitmproxy import command
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import io
from mitmproxy.connection import ConnectionState
from mitmproxy.connection import Server
from mitmproxy.hooks import UpdateHook
from mitmproxy.log import ALERT
from mitmproxy.options import Options
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layers
from mitmproxy.proxy import server
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layer import CommandGenerator
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.mode_specs import UpstreamMode
from mitmproxy.utils import asyncio_utils
logger = logging.getLogger(__name__)
class MockServer(layers.http.HttpConnection):
    """
    A mock HTTP "server" that just pretends it received a full HTTP request,
    which is then processed by the proxy core.
    """
    flow: http.HTTPFlow
    def __init__(self, flow: http.HTTPFlow, context: Context):
        super().__init__(context, context.client)
        self.flow = flow
    def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
        if isinstance(event, events.Start):
            # Synthesize the event sequence a real client request would produce.
            content = self.flow.request.raw_content
            self.flow.request.timestamp_start = self.flow.request.timestamp_end = (
                time.time()
            )
            yield layers.http.ReceiveHttp(
                layers.http.RequestHeaders(
                    1,
                    self.flow.request,
                    end_stream=not (content or self.flow.request.trailers),
                    replay_flow=self.flow,
                )
            )
            if content:
                yield layers.http.ReceiveHttp(layers.http.RequestData(1, content))
            if self.flow.request.trailers:  # pragma: no cover
                # TODO: Cover this once we support HTTP/1 trailers.
                yield layers.http.ReceiveHttp(
                    layers.http.RequestTrailers(1, self.flow.request.trailers)
                )
            yield layers.http.ReceiveHttp(layers.http.RequestEndOfMessage(1))
        elif isinstance(
            event,
            (
                layers.http.ResponseHeaders,
                layers.http.ResponseData,
                layers.http.ResponseTrailers,
                layers.http.ResponseEndOfMessage,
                layers.http.ResponseProtocolError,
            ),
        ):
            # Response events are consumed silently: the replay core delivers
            # the response to addons via hooks, not through this mock server.
            pass
        else:  # pragma: no cover
            logger.warning(f"Unexpected event during replay: {event}")
class ReplayHandler(server.ConnectionHandler):
    """Drives a single flow replay through the proxy core, using a MockServer
    in place of a real client connection."""
    layer: layers.HttpLayer
    def __init__(self, flow: http.HTTPFlow, options: Options) -> None:
        client = flow.client_conn.copy()
        client.state = ConnectionState.OPEN
        context = Context(client, options)
        context.server = Server(address=(flow.request.host, flow.request.port))
        if flow.request.scheme == "https":
            context.server.tls = True
            context.server.sni = flow.request.pretty_host
        # In upstream mode, route the replayed request via the upstream proxy.
        if options.mode and options.mode[0].startswith("upstream:"):
            mode = UpstreamMode.parse(options.mode[0])
            assert isinstance(mode, UpstreamMode)  # remove once mypy supports Self.
            context.server.via = flow.server_conn.via = (mode.scheme, mode.address)
        super().__init__(context)
        if options.mode and options.mode[0].startswith("upstream:"):
            self.layer = layers.HttpLayer(context, HTTPMode.upstream)
        else:
            self.layer = layers.HttpLayer(context, HTTPMode.transparent)
        self.layer.connections[client] = MockServer(flow, context.fork())
        self.flow = flow
        self.done = asyncio.Event()
    async def replay(self) -> None:
        # Kick off the layer machinery and block until the replay completed.
        await self.server_event(events.Start())
        await self.done.wait()
    def log(
        self,
        message: str,
        level: int = logging.INFO,
        exc_info: Literal[True]
        | tuple[type[BaseException] | None, BaseException | None, TracebackType | None]
        | None = None,
    ) -> None:
        assert isinstance(level, int)
        logger.log(level=level, msg=f"[replay] {message}")
    async def handle_hook(self, hook: commands.StartHook) -> None:
        (data,) = hook.args()
        await ctx.master.addons.handle_lifecycle(hook)
        if isinstance(data, flow.Flow):
            await data.wait_for_resume()
        # A response or error hook means the replay has finished.
        if isinstance(hook, (layers.http.HttpResponseHook, layers.http.HttpErrorHook)):
            if self.transports:
                # close server connections
                for x in self.transports.values():
                    if x.handler:
                        x.handler.cancel()
                await asyncio.wait(
                    [x.handler for x in self.transports.values() if x.handler]
                )
            # signal completion
            self.done.set()
class ClientPlayback:
    """
    Addon: replay recorded client requests against their servers.

    Flows are queued via the `replay.client` commands and consumed by a
    background task, one at a time (or concurrently when
    `client_replay_concurrency` is -1).
    """

    playback_task: asyncio.Task | None = None
    inflight: http.HTTPFlow | None
    queue: asyncio.Queue
    options: Options
    replay_tasks: set[asyncio.Task]

    def __init__(self):
        self.queue = asyncio.Queue()
        self.inflight = None
        # Bugfix: this previously assigned a stray `self.task = None` that
        # nothing reads; the attribute used everywhere else is `playback_task`.
        self.playback_task = None
        self.replay_tasks = set()

    def running(self):
        """Hook: start the queue consumer once the event loop is running."""
        self.options = ctx.options
        self.playback_task = asyncio_utils.create_task(
            self.playback(),
            name="client playback",
            keep_ref=False,
        )

    async def done(self):
        """Hook: cancel the consumer task on shutdown and wait for it."""
        if self.playback_task:
            self.playback_task.cancel()
            try:
                await self.playback_task
            except asyncio.CancelledError:
                pass

    async def playback(self):
        """Consume queued flows forever, replaying each via a ReplayHandler."""
        while True:
            self.inflight = await self.queue.get()
            try:
                assert self.inflight
                h = ReplayHandler(self.inflight, self.options)
                if ctx.options.client_replay_concurrency == -1:
                    t = asyncio_utils.create_task(
                        h.replay(),
                        name="client playback awaiting response",
                        keep_ref=False,
                    )
                    # keep a reference so this is not garbage collected
                    self.replay_tasks.add(t)
                    t.add_done_callback(self.replay_tasks.remove)
                else:
                    await h.replay()
            except Exception:
                # Bugfix: was an f-string without placeholders (lint F541).
                logger.exception("Client replay has crashed!")
            self.queue.task_done()
            self.inflight = None

    def check(self, f: flow.Flow) -> str | None:
        """Return a human-readable reason why *f* cannot be replayed, or None."""
        if f.live or f == self.inflight:
            return "Can't replay live flow."
        if f.intercepted:
            return "Can't replay intercepted flow."
        if isinstance(f, http.HTTPFlow):
            if not f.request:
                return "Can't replay flow with missing request."
            if f.request.raw_content is None:
                return "Can't replay flow with missing content."
            if f.websocket is not None:
                return "Can't replay WebSocket flows."
        else:
            return "Can only replay HTTP flows."
        return None

    def load(self, loader):
        loader.add_option(
            "client_replay",
            Sequence[str],
            [],
            "Replay client requests from a saved file.",
        )
        loader.add_option(
            "client_replay_concurrency",
            int,
            1,
            "Concurrency limit on in-flight client replay requests. Currently the only valid values are 1 and -1 (no limit).",
        )

    def configure(self, updated):
        if "client_replay" in updated and ctx.options.client_replay:
            try:
                flows = io.read_flows_from_paths(ctx.options.client_replay)
            except exceptions.FlowReadException as e:
                raise exceptions.OptionsError(str(e))
            self.start_replay(flows)

        if "client_replay_concurrency" in updated:
            if ctx.options.client_replay_concurrency not in [-1, 1]:
                raise exceptions.OptionsError(
                    "Currently the only valid client_replay_concurrency values are -1 and 1."
                )

    @command.command("replay.client.count")
    def count(self) -> int:
        """
        Approximate number of flows queued for replay.
        """
        return self.queue.qsize() + int(bool(self.inflight))

    @command.command("replay.client.stop")
    def stop_replay(self) -> None:
        """
        Clear the replay queue.
        """
        updated = []
        while True:
            try:
                f = self.queue.get_nowait()
            except asyncio.QueueEmpty:
                break
            else:
                self.queue.task_done()
                # Undo the replay preparation done in start_replay().
                f.revert()
                updated.append(f)

        ctx.master.addons.trigger(UpdateHook(updated))
        logger.log(ALERT, "Client replay queue cleared.")

    @command.command("replay.client")
    def start_replay(self, flows: Sequence[flow.Flow]) -> None:
        """
        Add flows to the replay queue, skipping flows that can't be replayed.
        """
        updated: list[http.HTTPFlow] = []
        for f in flows:
            err = self.check(f)
            if err:
                logger.warning(err)
                continue

            http_flow = cast(http.HTTPFlow, f)
            # Prepare the flow for replay
            http_flow.backup()
            http_flow.is_replay = "request"
            http_flow.response = None
            http_flow.error = None
            self.queue.put_nowait(http_flow)
            updated.append(http_flow)
        ctx.master.addons.trigger(UpdateHook(updated))

    @command.command("replay.client.file")
    def load_file(self, path: mitmproxy.types.Path) -> None:
        """
        Load flows from file, and add them to the replay queue.
        """
        try:
            flows = io.read_flows_from_paths([path])
        except exceptions.FlowReadException as e:
            raise exceptions.CommandError(str(e))
        self.start_replay(flows)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/export.py | mitmproxy/addons/export.py | import logging
import shlex
from collections.abc import Callable
from collections.abc import Sequence
import pyperclip
import mitmproxy.types
from mitmproxy import command
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy.net.http.http1 import assemble
from mitmproxy.utils import strutils
def cleanup_request(f: flow.Flow) -> http.Request:
    """Return a decoded copy of the flow's request, ready for export."""
    request = getattr(f, "request", None)
    if not request:
        raise exceptions.CommandError("Can't export flow with no request.")
    assert isinstance(f, http.HTTPFlow)
    copied = request.copy()
    copied.decode(strict=False)
    return copied
def pop_headers(request: "http.Request") -> None:
    """Remove some headers that are redundant for curl/httpie export."""
    # The export tools compute the body length themselves.
    request.headers.pop("content-length", None)
    # Host-style headers are redundant when they merely restate the target host.
    for name in ("host", ":authority"):
        if request.headers.get(name, "") == request.host:
            request.headers.pop(name)
def cleanup_response(f: flow.Flow) -> http.Response:
    """Return a decoded copy of the flow's response, ready for export."""
    response = getattr(f, "response", None)
    if not response:
        raise exceptions.CommandError("Can't export flow with no response.")
    assert isinstance(f, http.HTTPFlow)
    copied = response.copy()
    copied.decode(strict=False)
    return copied
def request_content_for_console(request: http.Request) -> str:
try:
text = request.get_text(strict=True)
assert text
except ValueError:
# shlex.quote doesn't support a bytes object
# see https://github.com/python/cpython/pull/10871
raise exceptions.CommandError("Request content must be valid unicode")
escape_control_chars = {chr(i): f"\\x{i:02x}" for i in range(32)}
escaped_text = "".join(escape_control_chars.get(x, x) for x in text)
if any(char in escape_control_chars for char in text):
# Escaped chars need to be unescaped by the shell to be properly inperpreted by curl and httpie
return f'"$(printf {shlex.quote(escaped_text)})"'
return shlex.quote(escaped_text)
def curl_command(f: flow.Flow) -> str:
    """Render the request of *f* as a `curl` command-line string."""
    request = cleanup_request(f)
    pop_headers(request)
    args = ["curl"]

    # Optionally pin the hostname to the IP actually used in this flow via
    # --resolve, so reproduction connects to the same server.
    server_addr = f.server_conn.peername[0] if f.server_conn.peername else None
    if (
        ctx.options.export_preserve_original_ip
        and server_addr
        and request.pretty_host != server_addr
    ):
        resolve = f"{request.pretty_host}:{request.port}:[{server_addr}]"
        args.append("--resolve")
        args.append(resolve)

    for k, v in request.headers.items(multi=True):
        if k.lower() == "accept-encoding":
            # Let curl negotiate encodings and decompress transparently.
            args.append("--compressed")
        else:
            args += ["-H", f"{k}: {v}"]

    if request.method != "GET":
        if not request.content:
            # curl will not calculate content-length if there is no content
            # some server/verb combinations require content-length headers
            # (ex. nginx and POST)
            args += ["-H", "content-length: 0"]
        args += ["-X", request.method]
    args.append(request.pretty_url)
    command = " ".join(shlex.quote(arg) for arg in args)
    if request.content:
        command += f" -d {request_content_for_console(request)}"
    return command
def httpie_command(f: flow.Flow) -> str:
    """Render the request of *f* as an `httpie` command-line string."""
    request = cleanup_request(f)
    pop_headers(request)
    # TODO: Once https://github.com/httpie/httpie/issues/414 is implemented, we
    # should ensure we always connect to the IP address specified in the flow,
    # similar to how it's done in curl_command.
    parts = ["http", request.method, request.pretty_url]
    parts.extend(f"{k}: {v}" for k, v in request.headers.items(multi=True))
    command = " ".join(shlex.quote(part) for part in parts)
    if request.content:
        command += " <<< " + request_content_for_console(request)
    return command
def raw_request(f: flow.Flow) -> bytes:
    """Serialize the request of *f* to raw HTTP/1 wire bytes."""
    req = cleanup_request(f)
    if req.raw_content is None:
        raise exceptions.CommandError("Request content missing.")
    return assemble.assemble_request(req)
def raw_response(f: flow.Flow) -> bytes:
    """Serialize the response of *f* to raw HTTP/1 wire bytes."""
    resp = cleanup_response(f)
    if resp.raw_content is None:
        raise exceptions.CommandError("Response content missing.")
    return assemble.assemble_response(resp)
def raw(f: flow.Flow, separator=b"\r\n\r\n") -> bytes:
    """Return either the request or response if only one exists, otherwise return both"""
    is_http = isinstance(f, http.HTTPFlow)
    has_request = is_http and f.request and f.request.raw_content is not None
    has_response = is_http and f.response and f.response.raw_content is not None
    if has_request and has_response:
        parts = [raw_request(f), raw_response(f)]
        # Append decoded WebSocket messages, if any, after the HTTP exchange.
        if is_http and f.websocket:
            parts.append(f.websocket._get_formatted_messages())
        return separator.join(parts)
    if has_request:
        return raw_request(f)
    if has_response:
        return raw_response(f)
    raise exceptions.CommandError("Can't export flow with no request or response.")
# Registry mapping export format names (as exposed by the `export` commands)
# to the function that renders a flow in that format.
formats: dict[str, Callable[[flow.Flow], str | bytes]] = dict(
    curl=curl_command,
    httpie=httpie_command,
    raw=raw,
    raw_request=raw_request,
    raw_response=raw_response,
)
class Export:
    """Addon exposing the `export`, `export.file` and `export.clip` commands."""

    def load(self, loader):
        loader.add_option(
            "export_preserve_original_ip",
            bool,
            False,
            """
            When exporting a request as an external command, make an effort to
            connect to the same IP as in the original request. This helps with
            reproducibility in cases where the behaviour depends on the
            particular host we are connecting to. Currently this only affects
            curl exports.
            """,
        )

    @command.command("export.formats")
    def formats(self) -> Sequence[str]:
        """
        Return a list of the supported export formats.
        """
        return list(sorted(formats.keys()))

    @command.command("export.file")
    def file(self, format: str, flow: flow.Flow, path: mitmproxy.types.Path) -> None:
        """
        Export a flow to path.
        """
        if format not in formats:
            raise exceptions.CommandError("No such export format: %s" % format)
        v = formats[format](flow)
        try:
            with open(path, "wb") as fp:
                # Formatters may return str or bytes; bytes are written verbatim.
                if isinstance(v, bytes):
                    fp.write(v)
                else:
                    fp.write(v.encode("utf-8", "surrogateescape"))
        except OSError as e:
            logging.error(str(e))

    @command.command("export.clip")
    def clip(self, format: str, f: flow.Flow) -> None:
        """
        Export a flow to the system clipboard.
        """
        content = self.export_str(format, f)
        try:
            pyperclip.copy(content)
        except pyperclip.PyperclipException as e:
            logging.error(str(e))

    @command.command("export")
    def export_str(self, format: str, f: flow.Flow) -> str:
        """
        Export a flow and return the result.
        """
        if format not in formats:
            raise exceptions.CommandError("No such export format: %s" % format)
        content = formats[format](f)
        # The individual formatters may return surrogate-escaped UTF-8, but that may blow up in later steps.
        # For example, pyperclip on macOS does not like surrogates.
        # To fix this, We first surrogate-encode and then backslash-decode.
        content = strutils.always_bytes(content, "utf8", "surrogateescape")
        content = strutils.always_str(content, "utf8", "backslashreplace")
        return content
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/anticomp.py | mitmproxy/addons/anticomp.py | from mitmproxy import ctx
class AntiComp:
    """Addon: when the `anticomp` option is set, rewrite outgoing requests so
    servers respond with uncompressed data."""

    def load(self, loader):
        loader.add_option(
            "anticomp",
            bool,
            False,
            "Try to convince servers to send us un-compressed data.",
        )

    def request(self, flow):
        if not ctx.options.anticomp:
            return
        flow.request.anticomp()
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/dumper.py | mitmproxy/addons/dumper.py | from __future__ import annotations
import shutil
import sys
from typing import IO
from typing import Optional
from wsproto.frame_protocol import CloseReason
import mitmproxy_rs
from mitmproxy import contentviews
from mitmproxy import ctx
from mitmproxy import dns
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy.contrib import click as miniclick
from mitmproxy.net.dns import response_codes
from mitmproxy.options import CONTENT_VIEW_LINES_CUTOFF
from mitmproxy.tcp import TCPFlow
from mitmproxy.tcp import TCPMessage
from mitmproxy.udp import UDPFlow
from mitmproxy.udp import UDPMessage
from mitmproxy.utils import human
from mitmproxy.utils import strutils
from mitmproxy.utils import vt_codes
from mitmproxy.websocket import WebSocketData
from mitmproxy.websocket import WebSocketMessage
def indent(n: int, text: str) -> str:
    """Strip outer whitespace from *text* and indent every line by *n* spaces."""
    prefix = " " * n
    return "\n".join(prefix + line for line in str(text).strip().splitlines())
# Terminal style kwargs (passed through to miniclick.style) for each
# syntax-highlight token tag produced when pretty-printing message bodies.
CONTENTVIEW_STYLES: dict[str, dict[str, str | bool]] = {
    "name": dict(fg="yellow"),
    "string": dict(fg="green"),
    "number": dict(fg="blue"),
    "boolean": dict(fg="magenta"),
    "comment": dict(dim=True),
    "error": dict(fg="red"),
}
class Dumper:
def __init__(self, outfile: IO[str] | None = None):
self.filter: flowfilter.TFilter | None = None
self.outfp: IO[str] = outfile or sys.stdout
self.out_has_vt_codes = vt_codes.ensure_supported(self.outfp)
def load(self, loader):
loader.add_option(
"flow_detail",
int,
1,
f"""
The display detail level for flows in mitmdump: 0 (quiet) to 4 (very verbose).
0: no output
1: shortened request URL with response status code
2: full request URL with response status code and HTTP headers
3: 2 + truncated response content, content of WebSocket and TCP messages (content_view_lines_cutoff: {CONTENT_VIEW_LINES_CUTOFF})
4: 3 + nothing is truncated
""",
)
loader.add_option(
"dumper_default_contentview",
str,
"auto",
"The default content view mode.",
choices=contentviews.registry.available_views(),
)
loader.add_option(
"dumper_filter", Optional[str], None, "Limit which flows are dumped."
)
def configure(self, updated):
if "dumper_filter" in updated:
if ctx.options.dumper_filter:
try:
self.filter = flowfilter.parse(ctx.options.dumper_filter)
except ValueError as e:
raise exceptions.OptionsError(str(e)) from e
else:
self.filter = None
def style(self, text: str, **style) -> str:
if style and self.out_has_vt_codes:
text = miniclick.style(text, **style)
return text
def echo(self, text: str, ident=None, **style):
if ident:
text = indent(ident, text)
text = self.style(text, **style)
print(text, file=self.outfp)
def _echo_headers(self, headers: http.Headers):
for k, v in headers.fields:
ks = strutils.bytes_to_escaped_str(k)
ks = self.style(ks, fg="blue")
vs = strutils.bytes_to_escaped_str(v)
self.echo(f"{ks}: {vs}", ident=4)
def _echo_trailers(self, trailers: http.Headers | None):
if not trailers:
return
self.echo("--- HTTP Trailers", fg="magenta", ident=4)
self._echo_headers(trailers)
def _echo_message(
self,
message: http.Message | TCPMessage | UDPMessage | WebSocketMessage,
flow: http.HTTPFlow | TCPFlow | UDPFlow,
):
pretty = contentviews.prettify_message(
message,
flow,
ctx.options.dumper_default_contentview,
)
if ctx.options.flow_detail == 3:
content_to_echo = strutils.cut_after_n_lines(
pretty.text, ctx.options.content_view_lines_cutoff
)
else:
content_to_echo = pretty.text
if content_to_echo:
highlighted = mitmproxy_rs.syntax_highlight.highlight(
pretty.text, pretty.syntax_highlight
)
self.echo("")
self.echo(
"".join(
self.style(chunk, **CONTENTVIEW_STYLES.get(tag, {}))
for tag, chunk in highlighted
),
ident=4,
)
if len(content_to_echo) < len(pretty.text):
self.echo("(cut off)", ident=4, dim=True)
if ctx.options.flow_detail >= 2:
self.echo("")
def _fmt_client(self, flow: flow.Flow) -> str:
if flow.is_replay == "request":
return self.style("[replay]", fg="yellow", bold=True)
elif flow.client_conn.peername:
return self.style(
strutils.escape_control_characters(
human.format_address(flow.client_conn.peername)
)
)
else: # pragma: no cover
# this should not happen, but we're defensive here.
return ""
def _echo_request_line(self, flow: http.HTTPFlow) -> None:
client = self._fmt_client(flow)
pushed = " PUSH_PROMISE" if "h2-pushed-stream" in flow.metadata else ""
method = flow.request.method + pushed
method_color = dict(GET="green", DELETE="red").get(method.upper(), "magenta")
method = self.style(
strutils.escape_control_characters(method), fg=method_color, bold=True
)
if ctx.options.showhost:
url = flow.request.pretty_url
else:
url = flow.request.url
if ctx.options.flow_detail == 1:
# We need to truncate before applying styles, so we just focus on the URL.
terminal_width_limit = max(shutil.get_terminal_size()[0] - 25, 50)
if len(url) > terminal_width_limit:
url = url[:terminal_width_limit] + "…"
url = self.style(strutils.escape_control_characters(url), bold=True)
http_version = ""
if not (
flow.request.is_http10 or flow.request.is_http11
) or flow.request.http_version != getattr(
flow.response, "http_version", "HTTP/1.1"
):
# Hide version for h1 <-> h1 connections.
http_version = " " + flow.request.http_version
self.echo(f"{client}: {method} {url}{http_version}")
def _echo_response_line(self, flow: http.HTTPFlow) -> None:
if flow.is_replay == "response":
replay_str = "[replay]"
replay = self.style(replay_str, fg="yellow", bold=True)
else:
replay_str = ""
replay = ""
assert flow.response
code_int = flow.response.status_code
code_color = None
if 200 <= code_int < 300:
code_color = "green"
elif 300 <= code_int < 400:
code_color = "magenta"
elif 400 <= code_int < 600:
code_color = "red"
code = self.style(
str(code_int),
fg=code_color,
bold=True,
blink=(code_int == 418),
)
if not (flow.response.is_http2 or flow.response.is_http3):
reason = flow.response.reason
else:
reason = http.status_codes.RESPONSES.get(flow.response.status_code, "")
reason = self.style(
strutils.escape_control_characters(reason), fg=code_color, bold=True
)
if flow.response.raw_content is None:
size = "(content missing)"
else:
size = human.pretty_size(len(flow.response.raw_content))
size = self.style(size, bold=True)
http_version = ""
if (
not (flow.response.is_http10 or flow.response.is_http11)
or flow.request.http_version != flow.response.http_version
):
# Hide version for h1 <-> h1 connections.
http_version = f"{flow.response.http_version} "
arrows = self.style(" <<", bold=True)
if ctx.options.flow_detail == 1:
# This aligns the HTTP response code with the HTTP request method:
# 127.0.0.1:59519: GET http://example.com/
# << 304 Not Modified 0b
pad = max(
0,
len(human.format_address(flow.client_conn.peername))
- (2 + len(http_version) + len(replay_str)),
)
arrows = " " * pad + arrows
self.echo(f"{replay}{arrows} {http_version}{code} {reason} {size}")
def echo_flow(self, f: http.HTTPFlow) -> None:
if f.request:
self._echo_request_line(f)
if ctx.options.flow_detail >= 2:
self._echo_headers(f.request.headers)
if ctx.options.flow_detail >= 3:
self._echo_message(f.request, f)
if ctx.options.flow_detail >= 2:
self._echo_trailers(f.request.trailers)
if f.response:
self._echo_response_line(f)
if ctx.options.flow_detail >= 2:
self._echo_headers(f.response.headers)
if ctx.options.flow_detail >= 3:
self._echo_message(f.response, f)
if ctx.options.flow_detail >= 2:
self._echo_trailers(f.response.trailers)
if f.error:
msg = strutils.escape_control_characters(f.error.msg)
self.echo(f" << {msg}", bold=True, fg="red")
self.outfp.flush()
def match(self, f):
if ctx.options.flow_detail == 0:
return False
if not self.filter:
return True
elif flowfilter.match(self.filter, f):
return True
return False
def response(self, f):
if self.match(f):
self.echo_flow(f)
def error(self, f):
if self.match(f):
self.echo_flow(f)
def websocket_message(self, f: http.HTTPFlow):
assert f.websocket is not None # satisfy type checker
if self.match(f):
message = f.websocket.messages[-1]
direction = "->" if message.from_client else "<-"
self.echo(
f"{human.format_address(f.client_conn.peername)} "
f"{direction} WebSocket {message.type.name.lower()} message "
f"{direction} {human.format_address(f.server_conn.address)}{f.request.path}"
)
if ctx.options.flow_detail >= 3:
self._echo_message(message, f)
def websocket_end(self, f: http.HTTPFlow):
assert f.websocket is not None # satisfy type checker
if self.match(f):
if f.websocket.close_code in {1000, 1001, 1005}:
c = "client" if f.websocket.closed_by_client else "server"
self.echo(
f"WebSocket connection closed by {c}: {f.websocket.close_code} {f.websocket.close_reason}"
)
else:
error = flow.Error(
f"WebSocket Error: {self.format_websocket_error(f.websocket)}"
)
self.echo(
f"Error in WebSocket connection to {human.format_address(f.server_conn.address)}: {error}",
fg="red",
)
def format_websocket_error(self, websocket: WebSocketData) -> str:
try:
ret = CloseReason(websocket.close_code).name # type: ignore
except ValueError:
ret = f"UNKNOWN_ERROR={websocket.close_code}"
if websocket.close_reason:
ret += f" (reason: {websocket.close_reason})"
return ret
def _proto_error(self, f):
if self.match(f):
self.echo(
f"Error in {f.type.upper()} connection to {human.format_address(f.server_conn.address)}: {f.error}",
fg="red",
)
def tcp_error(self, f):
self._proto_error(f)
def udp_error(self, f):
self._proto_error(f)
def _proto_message(self, f: TCPFlow | UDPFlow) -> None:
if self.match(f):
message = f.messages[-1]
direction = "->" if message.from_client else "<-"
if f.client_conn.tls_version == "QUICv1":
if f.type == "tcp":
quic_type = "stream"
else:
quic_type = "dgrams"
# TODO: This should not be metadata, this should be typed attributes.
flow_type = (
f"quic {quic_type} {f.metadata.get('quic_stream_id_client', '')} "
f"{direction} mitmproxy {direction} "
f"quic {quic_type} {f.metadata.get('quic_stream_id_server', '')}"
)
else:
flow_type = f.type
self.echo(
"{client} {direction} {type} {direction} {server}".format(
client=human.format_address(f.client_conn.peername),
server=human.format_address(f.server_conn.address),
direction=direction,
type=flow_type,
)
)
if ctx.options.flow_detail >= 3:
self._echo_message(message, f)
def tcp_message(self, f):
self._proto_message(f)
def udp_message(self, f):
self._proto_message(f)
def _echo_dns_query(self, f: dns.DNSFlow) -> None:
client = self._fmt_client(f)
opcode = dns.op_codes.to_str(f.request.op_code)
type = dns.types.to_str(f.request.questions[0].type)
desc = f"DNS {opcode} ({type})"
desc_color = {
"A": "green",
"AAAA": "magenta",
}.get(type, "red")
desc = self.style(desc, fg=desc_color)
name = self.style(f.request.questions[0].name, bold=True)
self.echo(f"{client}: {desc} {name}")
def dns_response(self, f: dns.DNSFlow):
assert f.response
if self.match(f):
self._echo_dns_query(f)
arrows = self.style(" <<", bold=True)
if f.response.answers:
answers = ", ".join(
self.style(str(x), fg="bright_blue") for x in f.response.answers
)
else:
answers = self.style(
response_codes.to_str(
f.response.response_code,
),
fg="red",
)
self.echo(f"{arrows} {answers}")
def dns_error(self, f: dns.DNSFlow):
assert f.error
if self.match(f):
self._echo_dns_query(f)
msg = strutils.escape_control_characters(f.error.msg)
self.echo(f" << {msg}", bold=True, fg="red")
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/stickycookie.py | mitmproxy/addons/stickycookie.py | import collections
from http import cookiejar
from typing import Optional
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy.net.http import cookies
TOrigin = tuple[str, int, str]
def ckey(attrs: "dict[str, str]", f: "http.HTTPFlow") -> "TOrigin":
    """
    Returns a (domain, port, path) tuple.

    Domain and path default to the request host and "/" when the Set-Cookie
    attributes don't specify them.
    """
    domain = attrs.get("domain", f.request.host)
    path = attrs.get("path", "/")
    return (domain, f.request.port, path)
def domain_match(a: str, b: str) -> bool:
    """Return True if host *a* domain-matches cookie domain *b*,
    also accepting *b* with its surrounding dots stripped."""
    return bool(
        cookiejar.domain_match(a, b)  # type: ignore
        or cookiejar.domain_match(a, b.strip("."))  # type: ignore
    )
class StickyCookie:
    """Addon: remember cookies set by matching servers and re-attach them to
    later requests to the same (domain, port, path) origin."""

    def __init__(self) -> None:
        # Cookie jar: (domain, port, path) origin -> {cookie name: value}.
        self.jar: collections.defaultdict[TOrigin, dict[str, str]] = (
            collections.defaultdict(dict)
        )
        # Parsed `stickycookie` filter; None disables the addon.
        self.flt: flowfilter.TFilter | None = None

    def load(self, loader):
        loader.add_option(
            "stickycookie",
            Optional[str],
            None,
            "Set sticky cookie filter. Matched against requests.",
        )

    def configure(self, updated):
        if "stickycookie" in updated:
            if ctx.options.stickycookie:
                try:
                    self.flt = flowfilter.parse(ctx.options.stickycookie)
                except ValueError as e:
                    raise exceptions.OptionsError(str(e)) from e
            else:
                self.flt = None

    def response(self, flow: http.HTTPFlow):
        """Hook: harvest Set-Cookie values into the jar, evicting expired ones."""
        assert flow.response
        if self.flt:
            for name, (value, attrs) in flow.response.cookies.items(multi=True):
                # FIXME: We now know that Cookie.py screws up some cookies with
                # valid RFC 822/1123 datetime specifications for expiry. Sigh.
                dom_port_path = ckey(attrs, flow)

                if domain_match(flow.request.host, dom_port_path[0]):
                    if cookies.is_expired(attrs):
                        # Remove the cookie from jar
                        self.jar[dom_port_path].pop(name, None)

                        # If all cookies of a dom_port_path have been removed
                        # then remove it from the jar itself
                        if not self.jar[dom_port_path]:
                            self.jar.pop(dom_port_path, None)
                    else:
                        self.jar[dom_port_path][name] = value

    def request(self, flow: http.HTTPFlow):
        """Hook: inject jar cookies whose origin matches the outgoing request."""
        if self.flt:
            cookie_list: list[tuple[str, str]] = []
            if flowfilter.match(self.flt, flow):
                for (domain, port, path), c in self.jar.items():
                    match = [
                        domain_match(flow.request.host, domain),
                        flow.request.port == port,
                        flow.request.path.startswith(path),
                    ]
                    if all(match):
                        cookie_list.extend(c.items())
            if cookie_list:
                # FIXME: we need to formalise this...
                flow.metadata["stickycookie"] = True
                flow.request.headers["cookie"] = cookies.format_cookie_header(
                    cookie_list
                )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/mapremote.py | mitmproxy/addons/mapremote.py | import re
from collections.abc import Sequence
from typing import NamedTuple
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy.utils.spec import parse_spec
class MapRemoteSpec(NamedTuple):
    """One parsed `map_remote` option: flow filter, URL regex, replacement."""

    # Flow filter deciding whether this mapping applies to a given flow.
    matches: flowfilter.TFilter
    # Regular expression matched against the request URL.
    subject: str
    # Replacement string passed to re.sub (may contain backreferences).
    replacement: str
def parse_map_remote_spec(option: str) -> MapRemoteSpec:
    """Split a "[/flow-filter]/url-regex/replacement" option string and
    validate that the regex part compiles."""
    parsed = MapRemoteSpec(*parse_spec(option))
    try:
        re.compile(parsed.subject)
    except re.error as e:
        raise ValueError(f"Invalid regular expression {parsed.subject!r} ({e})")
    return parsed
class MapRemote:
    """Addon: rewrite outgoing request URLs according to `map_remote` patterns."""

    def __init__(self) -> None:
        # Parsed specs, rebuilt whenever the option changes.
        self.replacements: list[MapRemoteSpec] = []

    def load(self, loader):
        loader.add_option(
            "map_remote",
            Sequence[str],
            [],
            """
            Map remote resources to another remote URL using a pattern of the form
            "[/flow-filter]/url-regex/replacement", where the separator can
            be any character.
            """,
        )

    def configure(self, updated):
        if "map_remote" in updated:
            self.replacements = []
            for option in ctx.options.map_remote:
                try:
                    spec = parse_map_remote_spec(option)
                except ValueError as e:
                    raise exceptions.OptionsError(
                        f"Cannot parse map_remote option {option}: {e}"
                    ) from e

                self.replacements.append(spec)

    def request(self, flow: http.HTTPFlow) -> None:
        # Only rewrite flows that are still live and not yet answered/errored.
        if flow.response or flow.error or not flow.live:
            return
        for spec in self.replacements:
            if spec.matches(flow):
                url = flow.request.pretty_url
                new_url = re.sub(spec.subject, spec.replacement, url)
                # this is a bit messy: setting .url also updates the host header,
                # so we really only do that if the replacement affected the URL.
                if url != new_url:
                    flow.request.url = new_url  # type: ignore
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/disable_h2c.py | mitmproxy/addons/disable_h2c.py | import logging
class DisableH2C:
    """
    Reject cleartext HTTP/2.

    HTTP/2 is only supported over TLS here: h2c upgrade headers are stripped
    so neither endpoint switches protocols, and prior-knowledge connection
    prefaces are killed outright.
    """

    def process_flow(self, f):
        headers = f.request.headers
        if headers.get("upgrade", "") == "h2c":
            logging.warning(
                "HTTP/2 cleartext connections (h2c upgrade requests) are currently not supported."
            )
            # Drop the whole upgrade negotiation so the exchange stays HTTP/1.
            for name in ("upgrade", "connection", "http2-settings"):
                if name in headers:
                    del headers[name]

        is_preface = (
            f.request.method == "PRI"
            and f.request.path == "*"
            and f.request.http_version == "HTTP/2.0"
        )
        if is_preface:
            if f.killable:
                f.kill()
            logging.warning(
                "Initiating HTTP/2 connections with prior knowledge are currently not supported."
            )

    # Handlers
    def request(self, f):
        self.process_flow(f)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/termlog.py | mitmproxy/addons/termlog.py | from __future__ import annotations
import asyncio
import logging
import sys
from typing import IO
from mitmproxy import ctx
from mitmproxy import log
from mitmproxy.utils import vt_codes
class TermLog:
    """Addon: print mitmproxy log records to the terminal."""

    _teardown_task: asyncio.Task | None = None

    def __init__(self, out: IO[str] | None = None):
        # Install the handler immediately so early log output is captured.
        self.logger = TermLogHandler(out)
        self.logger.install()

    def load(self, loader):
        loader.add_option(
            "termlog_verbosity", str, "info", "Log verbosity.", choices=log.LogLevels
        )
        self.logger.setLevel(logging.INFO)

    def configure(self, updated):
        if "termlog_verbosity" in updated:
            self.logger.setLevel(ctx.options.termlog_verbosity.upper())

    def uninstall(self) -> None:
        # uninstall the log dumper.
        # This happens at the very very end after done() is completed,
        # because we don't want to uninstall while other addons are still logging.
        self.logger.uninstall()
class TermLogHandler(log.MitmLogHandler):
    """Log handler that prints formatted records to a stream (stdout by default)."""

    def __init__(self, out: IO[str] | None = None):
        super().__init__()
        self.file: IO[str] = out or sys.stdout
        # Only emit color/VT escape codes if the stream supports them.
        self.has_vt_codes = vt_codes.ensure_supported(self.file)
        self.formatter = log.MitmFormatter(self.has_vt_codes)

    def emit(self, record: logging.LogRecord) -> None:
        try:
            print(self.format(record), file=self.file)
        except OSError:
            # We cannot print, exit immediately.
            # See https://github.com/mitmproxy/mitmproxy/issues/4669
            sys.exit(1)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/core.py | mitmproxy/addons/core.py | import logging
import os
from collections.abc import Sequence
import mitmproxy.types
from mitmproxy import command
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import hooks
from mitmproxy import optmanager
from mitmproxy.log import ALERT
from mitmproxy.net.http import status_codes
from mitmproxy.utils import emoji
logger = logging.getLogger(__name__)
CONF_DIR = "~/.mitmproxy"
LISTEN_PORT = 8080
class Core:
def configure(self, updated):
opts = ctx.options
if opts.add_upstream_certs_to_client_chain and not opts.upstream_cert:
raise exceptions.OptionsError(
"add_upstream_certs_to_client_chain requires the upstream_cert option to be enabled."
)
if "client_certs" in updated:
if opts.client_certs:
client_certs = os.path.expanduser(opts.client_certs)
if not os.path.exists(client_certs):
raise exceptions.OptionsError(
f"Client certificate path does not exist: {opts.client_certs}"
)
@command.command("set")
def set(self, option: str, *value: str) -> None:
    """
    Set an option. When the value is omitted, booleans are set to true,
    strings and integers are set to None (if permitted), and sequences
    are emptied. Boolean values can be true, false or toggle.
    Multiple values are concatenated with a single space.
    """
    specs = [f"{option}={v}" for v in value] if value else [option]
    try:
        ctx.options.set(*specs)
    except exceptions.OptionsError as e:
        raise exceptions.CommandError(e) from e
@command.command("flow.resume")
def resume(self, flows: Sequence[flow.Flow]) -> None:
    """
    Resume flows if they are intercepted.
    """
    # Collect the intercepted subset first, then resume each of them.
    resumed = [f for f in flows if f.intercepted]
    for f in resumed:
        f.resume()
    ctx.master.addons.trigger(hooks.UpdateHook(resumed))
# FIXME: this will become view.mark later
@command.command("flow.mark")
def mark(self, flows: Sequence[flow.Flow], marker: mitmproxy.types.Marker) -> None:
    """
    Mark flows with the given marker ("" clears the marker).

    Raises:
        CommandError: if the marker is not "" or a known emoji name.
    """
    if marker != "" and marker not in emoji.emoji:
        # BUG FIX: the original raised f"invalid marker value" — an f-string
        # without a placeholder — so the offending value was never shown.
        raise exceptions.CommandError(f"invalid marker value: {marker}")
    updated = []
    for f in flows:
        f.marked = marker
        updated.append(f)
    ctx.master.addons.trigger(hooks.UpdateHook(updated))
# FIXME: this will become view.mark.toggle later
@command.command("flow.mark.toggle")
def mark_toggle(self, flows: Sequence[flow.Flow]) -> None:
    """
    Toggle mark for flows.
    """
    for f in flows:
        # Marked flows are cleared; unmarked flows get the default marker.
        f.marked = "" if f.marked else ":default:"
    ctx.master.addons.trigger(hooks.UpdateHook(flows))
@command.command("flow.kill")
def kill(self, flows: Sequence[flow.Flow]) -> None:
    """
    Kill running flows.
    """
    killed = [f for f in flows if f.killable]
    for f in killed:
        f.kill()
    logger.log(ALERT, "Killed %s flows." % len(killed))
    ctx.master.addons.trigger(hooks.UpdateHook(killed))
# FIXME: this will become view.revert later
@command.command("flow.revert")
def revert(self, flows: Sequence[flow.Flow]) -> None:
    """
    Revert flow changes.
    """
    reverted = []
    for f in flows:
        if not f.modified():
            continue
        f.revert()
        reverted.append(f)
    logger.log(ALERT, "Reverted %s flows." % len(reverted))
    ctx.master.addons.trigger(hooks.UpdateHook(reverted))
@command.command("flow.set.options")
def flow_set_options(self) -> Sequence[str]:
    """Return the flow attributes that `flow.set` can modify."""
    return "host status_code method path url reason".split()
@command.command("flow.set")
@command.argument("attr", type=mitmproxy.types.Choice("flow.set.options"))
def flow_set(self, flows: Sequence[flow.Flow], attr: str, value: str) -> None:
    """
    Quickly set a number of common values on flows.

    Raises:
        CommandError: if a status code is not an integer, or a URL is invalid.
    """
    val: int | str = value
    if attr == "status_code":
        try:
            val = int(val)  # type: ignore
        except ValueError as v:
            raise exceptions.CommandError(
                "Status code is not an integer: %s" % val
            ) from v
    updated = []
    for f in flows:
        req = getattr(f, "request", None)
        rupdate = True
        if req:
            if attr == "method":
                req.method = val
            elif attr == "host":
                req.host = val
            elif attr == "path":
                req.path = val
            elif attr == "url":
                try:
                    req.url = val
                except ValueError as e:
                    raise exceptions.CommandError(
                        f"URL {val!r} is invalid: {e}"
                    ) from e
            else:
                # BUG FIX: this was `self.rupdate = False`, which left the
                # local flag True (miscounting updated flows) and leaked a
                # stray `rupdate` attribute onto the addon instance.
                rupdate = False
        resp = getattr(f, "response", None)
        supdate = True
        if resp:
            if attr == "status_code":
                resp.status_code = val
                if val in status_codes.RESPONSES:
                    # Keep the reason phrase consistent with the new code.
                    resp.reason = status_codes.RESPONSES[val]  # type: ignore
            elif attr == "reason":
                resp.reason = val
            else:
                supdate = False
        if rupdate or supdate:
            updated.append(f)
    ctx.master.addons.trigger(hooks.UpdateHook(updated))
    logger.log(ALERT, f"Set {attr} on {len(updated)} flows.")
@command.command("flow.decode")
def decode(self, flows: Sequence[flow.Flow], part: str) -> None:
    """
    Decode flows.
    """
    touched = []
    for f in flows:
        message = getattr(f, part, None)
        if not message:
            continue
        f.backup()
        message.decode()
        touched.append(f)
    ctx.master.addons.trigger(hooks.UpdateHook(touched))
    logger.log(ALERT, "Decoded %s flows." % len(touched))
@command.command("flow.encode.toggle")
def encode_toggle(self, flows: Sequence[flow.Flow], part: str) -> None:
    """
    Toggle flow encoding on and off, using deflate for encoding.
    """
    touched = []
    for f in flows:
        message = getattr(f, part, None)
        if not message:
            continue
        f.backup()
        if message.headers.get("content-encoding", "identity") == "identity":
            message.encode("deflate")
        else:
            message.decode()
        touched.append(f)
    ctx.master.addons.trigger(hooks.UpdateHook(touched))
    logger.log(ALERT, "Toggled encoding on %s flows." % len(touched))
@command.command("flow.encode")
@command.argument("encoding", type=mitmproxy.types.Choice("flow.encode.options"))
def encode(
    self,
    flows: Sequence[flow.Flow],
    part: str,
    encoding: str,
) -> None:
    """
    Encode flows with a specified encoding.
    """
    touched = []
    for f in flows:
        message = getattr(f, part, None)
        if not message:
            continue
        # Only encode messages that aren't already encoded.
        if message.headers.get("content-encoding", "identity") != "identity":
            continue
        f.backup()
        message.encode(encoding)
        touched.append(f)
    ctx.master.addons.trigger(hooks.UpdateHook(touched))
    logger.log(ALERT, "Encoded %s flows." % len(touched))
@command.command("flow.encode.options")
def encode_options(self) -> Sequence[str]:
    """
    The possible values for an encoding specification.
    """
    return "gzip deflate br zstd".split()
@command.command("options.load")
def options_load(self, path: mitmproxy.types.Path) -> None:
    """
    Load options from a file.

    Raises:
        CommandError: if the file cannot be read or contains invalid options.
    """
    try:
        optmanager.load_paths(ctx.options, path)
    except (OSError, exceptions.OptionsError) as e:
        raise exceptions.CommandError("Could not load options - %s" % e) from e
@command.command("options.save")
def options_save(self, path: mitmproxy.types.Path) -> None:
    """
    Save options to a file.

    Raises:
        CommandError: if the file cannot be written.
    """
    try:
        optmanager.save(ctx.options, path)
    except OSError as e:
        raise exceptions.CommandError("Could not save options - %s" % e) from e
@command.command("options.reset")
def options_reset(self) -> None:
    """
    Reset all options to defaults.
    """
    ctx.options.reset()
@command.command("options.reset.one")
def options_reset_one(self, name: str) -> None:
    """
    Reset one option to its default value.
    """
    if name not in ctx.options:
        raise exceptions.CommandError("No such option: %s" % name)
    default = ctx.options.default(name)
    setattr(ctx.options, name, default)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/server_side_events.py | mitmproxy/addons/server_side_events.py | import logging
from mitmproxy import http
class ServerSideEvents:
    """
    Server-Side Events are currently swallowed if there's no streaming,
    see https://github.com/mitmproxy/mitmproxy/issues/4469.
    Until this bug is fixed, this addon warns the user about this.
    """

    def response(self, flow: http.HTTPFlow):
        assert flow.response
        content_type = flow.response.headers.get("content-type", "")
        if not content_type.startswith("text/event-stream"):
            return
        if flow.response.stream:
            return
        logging.warning(
            "mitmproxy currently does not support server side events. As a workaround, you can enable response "
            "streaming for such flows: https://github.com/mitmproxy/mitmproxy/issues/4469"
        )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/next_layer.py | mitmproxy/addons/next_layer.py | """
This addon determines the next protocol layer in our proxy stack.
Whenever a protocol layer in the proxy wants to pass a connection to a child layer and isn't sure which protocol comes
next, it calls the `next_layer` hook, which ends up here.
For example, if mitmproxy runs as a regular proxy, we first need to determine if
new clients start with a TLS handshake right away (Secure Web Proxy) or send a plaintext HTTP CONNECT request.
This addon here peeks at the incoming bytes and then makes a decision based on proxy mode, mitmproxy options, etc.
For a typical HTTPS request, this addon is called a couple of times: First to determine that we start with an HTTP layer
which processes the `CONNECT` request, a second time to determine that the client then starts negotiating TLS, and a
third time when we check if the protocol within that TLS stream is actually HTTP or something else.
Sometimes it's useful to hardcode specific logic in next_layer when one wants to do fancy things.
In that case it's not necessary to modify mitmproxy's source, adding a custom addon with a next_layer event hook
that sets nextlayer.layer works just as well.
"""
from __future__ import annotations
import logging
import re
import sys
from collections.abc import Iterable
from collections.abc import Sequence
from typing import Any
from typing import cast
from mitmproxy import ctx
from mitmproxy.connection import Address
from mitmproxy.net.tls import starts_like_dtls_record
from mitmproxy.net.tls import starts_like_tls_record
from mitmproxy.proxy import layer
from mitmproxy.proxy import layers
from mitmproxy.proxy import mode_specs
from mitmproxy.proxy import tunnel
from mitmproxy.proxy.context import Context
from mitmproxy.proxy.layer import Layer
from mitmproxy.proxy.layers import ClientQuicLayer
from mitmproxy.proxy.layers import ClientTLSLayer
from mitmproxy.proxy.layers import DNSLayer
from mitmproxy.proxy.layers import HttpLayer
from mitmproxy.proxy.layers import modes
from mitmproxy.proxy.layers import RawQuicLayer
from mitmproxy.proxy.layers import ServerQuicLayer
from mitmproxy.proxy.layers import ServerTLSLayer
from mitmproxy.proxy.layers import TCPLayer
from mitmproxy.proxy.layers import UDPLayer
from mitmproxy.proxy.layers.http import HTTPMode
from mitmproxy.proxy.layers.quic import quic_parse_client_hello_from_datagrams
from mitmproxy.proxy.layers.tls import dtls_parse_client_hello
from mitmproxy.proxy.layers.tls import HTTP_ALPNS
from mitmproxy.proxy.layers.tls import parse_client_hello
from mitmproxy.tls import ClientHello
if sys.version_info < (3, 11):
from typing_extensions import assert_never
else:
from typing import assert_never
logger = logging.getLogger(__name__)
def stack_match(
    context: Context, layers: Sequence[type[Layer] | tuple[type[Layer], ...]]
) -> bool:
    """Return True if the context's layer stack matches the given pattern.

    Each pattern entry is a layer type (or tuple of types) that the layer at
    the same position must be an instance of; `Any` matches any layer.
    """
    if len(layers) != len(context.layers):
        return False
    for actual, expected in zip(context.layers, layers):
        if expected is not Any and not isinstance(actual, expected):
            return False
    return True
class NeedsMoreData(Exception):
    """Signal that the decision on which layer to put next needs to be deferred within the NextLayer addon."""
class NextLayer:
ignore_hosts: Sequence[re.Pattern] = ()
allow_hosts: Sequence[re.Pattern] = ()
tcp_hosts: Sequence[re.Pattern] = ()
udp_hosts: Sequence[re.Pattern] = ()
def configure(self, updated):
    """Recompile the host-matching regexes whenever the relevant options change."""

    def compile_all(patterns):
        # All host patterns are matched case-insensitively.
        return [re.compile(p, re.IGNORECASE) for p in patterns]

    if "tcp_hosts" in updated:
        self.tcp_hosts = compile_all(ctx.options.tcp_hosts)
    if "udp_hosts" in updated:
        self.udp_hosts = compile_all(ctx.options.udp_hosts)
    if "allow_hosts" in updated or "ignore_hosts" in updated:
        self.ignore_hosts = compile_all(ctx.options.ignore_hosts)
        self.allow_hosts = compile_all(ctx.options.allow_hosts)
def next_layer(self, nextlayer: layer.NextLayer):
    """next_layer hook: decide which protocol layer comes next, if possible."""
    if nextlayer.layer:
        return  # do not override something another addon has set.
    try:
        nextlayer.layer = self._next_layer(
            nextlayer.context,
            nextlayer.data_client(),
            nextlayer.data_server(),
        )
    except NeedsMoreData:
        # Leave nextlayer.layer unset; we'll be called again with more data.
        logger.debug(
            f"Deferring layer decision, not enough data: {nextlayer.data_client().hex()!r}"
        )
def _next_layer(
    self, context: Context, data_client: bytes, data_server: bytes
) -> Layer | None:
    """Core decision procedure: pick the next layer for this connection.

    Checks run in priority order: ignore/allow lists, proxy-mode-specific
    setups, TLS/DTLS/QUIC detection, tcp/udp host lists, and finally
    application-protocol heuristics.

    Raises:
        NeedsMoreData (propagated from helpers): defer until more client data.
    """
    assert context.layers

    def s(*layers):
        # Shorthand: does the current layer stack match this pattern?
        return stack_match(context, layers)

    tcp_based = context.client.transport_protocol == "tcp"
    udp_based = context.client.transport_protocol == "udp"

    # 1) check for --ignore/--allow
    if self._ignore_connection(context, data_client, data_server):
        return (
            layers.TCPLayer(context, ignore=not ctx.options.show_ignored_hosts)
            if tcp_based
            else layers.UDPLayer(context, ignore=not ctx.options.show_ignored_hosts)
        )

    # 2) Handle proxy modes with well-defined next protocol
    # 2a) Reverse proxy: derive from spec
    if s(modes.ReverseProxy):
        return self._setup_reverse_proxy(context, data_client)
    # 2b) Explicit HTTP proxies
    if s((modes.HttpProxy, modes.HttpUpstreamProxy)):
        return self._setup_explicit_http_proxy(context, data_client)

    # 3) Handle security protocols
    # 3a) TLS/DTLS
    is_tls_or_dtls = (
        tcp_based
        and starts_like_tls_record(data_client)
        or udp_based
        and starts_like_dtls_record(data_client)
    )
    if is_tls_or_dtls:
        server_tls = ServerTLSLayer(context)
        server_tls.child_layer = ClientTLSLayer(context)
        return server_tls
    # 3b) QUIC
    if udp_based and _starts_like_quic(data_client, context.server.address):
        server_quic = ServerQuicLayer(context)
        server_quic.child_layer = ClientQuicLayer(context)
        return server_quic

    # 4) Check for --tcp/--udp
    if tcp_based and self._is_destination_in_hosts(context, self.tcp_hosts):
        return layers.TCPLayer(context)
    if udp_based and self._is_destination_in_hosts(context, self.udp_hosts):
        return layers.UDPLayer(context)

    # 5) Handle application protocol
    # 5a) Do we have a known ALPN negotiation?
    if context.client.alpn:
        if context.client.alpn in HTTP_ALPNS:
            return layers.HttpLayer(context, HTTPMode.transparent)
        elif context.client.tls_version == "QUICv1":
            # TODO: Once we support more QUIC-based protocols, relax force_raw here.
            return layers.RawQuicLayer(context, force_raw=True)
    # 5b) Is it DNS?
    if context.server.address and context.server.address[1] in (53, 5353):
        return layers.DNSLayer(context)
    # 5c) We have no other specialized layers for UDP, so we fall back to raw forwarding.
    if udp_based:
        return layers.UDPLayer(context)
    # 5d) Check for raw tcp mode.
    probably_no_http = (
        # the first three bytes should be the HTTP verb, so A-Za-z is expected.
        len(data_client) < 3
        # HTTP would require whitespace...
        or b" " not in data_client
        # ...and that whitespace needs to be in the first line.
        or (data_client.find(b" ") > data_client.find(b"\n"))
        or not data_client[:3].isalpha()
        # a server greeting would be uncharacteristic.
        or data_server
        or data_client.startswith(b"SSH")
    )
    if ctx.options.rawtcp and probably_no_http:
        return layers.TCPLayer(context)
    # 5e) Assume HTTP by default.
    return layers.HttpLayer(context, HTTPMode.transparent)
def _ignore_connection(
    self,
    context: Context,
    data_client: bytes,
    data_server: bytes,
) -> bool | None:
    """
    Decide whether this connection matches --ignore-hosts / --allow-hosts.

    Returns:
        True, if the connection should be ignored.
        False, if it should not be ignored.

    Raises:
        NeedsMoreData, if we need to wait for more input data.
    """
    if not ctx.options.ignore_hosts and not ctx.options.allow_hosts:
        return False
    # Special handling for wireguard mode: if the hostname is "10.0.0.53", do not ignore the connection
    if isinstance(
        context.client.proxy_mode, mode_specs.WireGuardMode
    ) and context.server.address == ("10.0.0.53", 53):
        return False
    # Candidate "host:port" strings to match the option patterns against.
    hostnames: list[str] = []
    if context.server.peername:
        host, port, *_ = context.server.peername
        hostnames.append(f"{host}:{port}")
    if context.server.address:
        host, port, *_ = context.server.address
        hostnames.append(f"{host}:{port}")
    # We also want to check for TLS SNI and HTTP host headers, but in order to ignore connections based on that
    # they must have a destination address. If they don't, we don't know how to establish an upstream connection
    # if we ignore.
    # NOTE(review): the branches below reuse `port` from the blocks above; if neither
    # peername nor address is set, `port` would be unbound here. Presumably a
    # destination is always known once a host header / SNI exists — worth confirming.
    if host_header := self._get_host_header(context, data_client, data_server):
        if not re.search(r":\d+$", host_header):
            # Normalize to host:port so patterns with ports can match.
            host_header = f"{host_header}:{port}"
        hostnames.append(host_header)
    if (
        client_hello := self._get_client_hello(context, data_client)
    ) and client_hello.sni:
        hostnames.append(f"{client_hello.sni}:{port}")
    if context.client.sni:
        # Hostname may be allowed, TLS is already established, and we have another next layer decision.
        hostnames.append(f"{context.client.sni}:{port}")
    if not hostnames:
        return False
    if ctx.options.allow_hosts:
        not_allowed = not any(
            re.search(rex, host, re.IGNORECASE)
            for host in hostnames
            for rex in ctx.options.allow_hosts
        )
        if not_allowed:
            return True
    if ctx.options.ignore_hosts:
        ignored = any(
            re.search(rex, host, re.IGNORECASE)
            for host in hostnames
            for rex in ctx.options.ignore_hosts
        )
        if ignored:
            return True
    return False
@staticmethod
def _get_host_header(
    context: Context,
    data_client: bytes,
    data_server: bytes,
) -> str | None:
    """
    Try to read a host header from data_client.

    Returns:
        The host header value, or None, if no host header was found.

    Raises:
        NeedsMoreData, if the HTTP request is incomplete.
    """
    if context.client.transport_protocol != "tcp" or data_server:
        return None
    # A plausible request line ("GET /... HTTP/") means headers should follow.
    host_header_expected = re.match(
        rb"[A-Z]{3,}.+HTTP/", data_client, re.IGNORECASE
    )
    if host_header_expected:
        if m := re.search(
            rb"\r\n(?:Host:\s+(.+?)\s*)?\r\n", data_client, re.IGNORECASE
        ):
            if host := m.group(1):
                return host.decode("utf-8", "surrogateescape")
            else:
                return None  # \r\n\r\n - header end came first.
        else:
            # Request line seen but headers not yet complete.
            raise NeedsMoreData
    else:
        return None
@staticmethod
def _get_client_hello(context: Context, data_client: bytes) -> ClientHello | None:
    """
    Try to read a TLS/DTLS/QUIC ClientHello from data_client.

    Returns:
        A complete ClientHello, or None, if no ClientHello was found.

    Raises:
        NeedsMoreData, if the ClientHello is incomplete.
    """
    match context.client.transport_protocol:
        case "tcp":
            if starts_like_tls_record(data_client):
                try:
                    ch = parse_client_hello(data_client)
                except ValueError:
                    pass
                else:
                    # parse_client_hello returns None for a valid-but-partial hello.
                    if ch is None:
                        raise NeedsMoreData
                    return ch
            return None
        case "udp":
            # Try QUIC first, then fall back to DTLS.
            try:
                return quic_parse_client_hello_from_datagrams([data_client])
            except ValueError:
                pass
            try:
                ch = dtls_parse_client_hello(data_client)
            except ValueError:
                pass
            else:
                if ch is None:
                    raise NeedsMoreData
                return ch
            return None
        case _:  # pragma: no cover
            assert_never(context.client.transport_protocol)
@staticmethod
def _setup_reverse_proxy(context: Context, data_client: bytes) -> Layer:
    """Build the layer stack for reverse proxy mode, derived from the spec scheme."""
    spec = cast(mode_specs.ReverseMode, context.client.proxy_mode)
    stack = tunnel.LayerStack()
    match spec.scheme:
        case "http":
            # Plaintext target; the client may still opportunistically speak TLS to us.
            if starts_like_tls_record(data_client):
                stack /= ClientTLSLayer(context)
            stack /= HttpLayer(context, HTTPMode.transparent)
        case "https":
            if context.client.transport_protocol == "udp":
                stack /= ServerQuicLayer(context)
                stack /= ClientQuicLayer(context)
                stack /= HttpLayer(context, HTTPMode.transparent)
            else:
                stack /= ServerTLSLayer(context)
                if starts_like_tls_record(data_client):
                    stack /= ClientTLSLayer(context)
                stack /= HttpLayer(context, HTTPMode.transparent)
        case "tcp":
            if starts_like_tls_record(data_client):
                stack /= ClientTLSLayer(context)
            stack /= TCPLayer(context)
        case "tls":
            stack /= ServerTLSLayer(context)
            if starts_like_tls_record(data_client):
                stack /= ClientTLSLayer(context)
            stack /= TCPLayer(context)
        case "udp":
            if starts_like_dtls_record(data_client):
                stack /= ClientTLSLayer(context)
            stack /= UDPLayer(context)
        case "dtls":
            stack /= ServerTLSLayer(context)
            if starts_like_dtls_record(data_client):
                stack /= ClientTLSLayer(context)
            stack /= UDPLayer(context)
        case "dns":
            # TODO: DNS-over-TLS / DNS-over-DTLS
            # is_tls_or_dtls = (
            #     context.client.transport_protocol == "tcp" and starts_like_tls_record(data_client)
            #     or
            #     context.client.transport_protocol == "udp" and starts_like_dtls_record(data_client)
            # )
            # if is_tls_or_dtls:
            #     stack /= ClientTLSLayer(context)
            stack /= DNSLayer(context)
        case "http3":
            stack /= ServerQuicLayer(context)
            stack /= ClientQuicLayer(context)
            stack /= HttpLayer(context, HTTPMode.transparent)
        case "quic":
            stack /= ServerQuicLayer(context)
            stack /= ClientQuicLayer(context)
            stack /= RawQuicLayer(context, force_raw=True)
        case _:  # pragma: no cover
            assert_never(spec.scheme)
    return stack[0]
@staticmethod
def _setup_explicit_http_proxy(context: Context, data_client: bytes) -> Layer:
    """Build the layer stack for explicit (regular/upstream) HTTP proxy modes."""
    stack = tunnel.LayerStack()
    if context.client.transport_protocol == "udp":
        # HTTP/3 proxying: the client speaks QUIC to us.
        stack /= layers.ClientQuicLayer(context)
    elif starts_like_tls_record(data_client):
        # Secure Web Proxy: the client starts with TLS right away.
        stack /= layers.ClientTLSLayer(context)
    mode = (
        HTTPMode.upstream
        if isinstance(context.layers[0], modes.HttpUpstreamProxy)
        else HTTPMode.regular
    )
    stack /= layers.HttpLayer(context, mode)
    return stack[0]
@staticmethod
def _is_destination_in_hosts(context: Context, hosts: Iterable[re.Pattern]) -> bool:
return any(
(context.server.address and rex.search(context.server.address[0]))
or (context.client.sni and rex.search(context.client.sni))
for rex in hosts
)
# https://www.iana.org/assignments/quic/quic.xhtml
# Version numbers that positively identify a QUIC long-header packet.
KNOWN_QUIC_VERSIONS = {
    0x00000001,  # QUIC v1
    0x51303433,  # Google QUIC Q043
    0x51303436,  # Google QUIC Q046
    0x51303530,  # Google QUIC Q050
    0x6B3343CF,  # QUIC v2
    0x709A50C4,  # QUIC v2 draft codepoint
}
# Destination ports for which ambiguous datagrams are assumed to be QUIC.
TYPICAL_QUIC_PORTS = {80, 443, 8443}
def _starts_like_quic(data_client: bytes, server_address: Address | None) -> bool:
    """
    Make an educated guess on whether this could be QUIC.

    This turns out to be quite hard in practice as 1-RTT packets are hardly
    distinguishable from noise.

    Returns:
        True, if the passed bytes could be the start of a QUIC packet.
        False, otherwise.
    """
    # Minimum size: 1 flag byte + 1+ packet number bytes + 16+ bytes encrypted payload
    if len(data_client) < 18:
        return False
    if starts_like_dtls_record(data_client):
        return False
    # TODO: Add more checks here to detect true negatives.
    if data_client[0] & 0x80:
        # Long header packet: the version field is authoritative.
        version = int.from_bytes(data_client[1:5], "big")
        if version in KNOWN_QUIC_VERSIONS:
            return True
        # https://www.rfc-editor.org/rfc/rfc9000.html#name-versions
        # Versions that follow the pattern 0x?a?a?a?a are reserved for use in forcing version negotiation
        if version & 0x0F0F0F0F == 0x0A0A0A0A:
            return True
    # Short header packets carry no version; we can't even rely on the QUIC
    # bit, see https://datatracker.ietf.org/doc/rfc9287/. Fall back to the
    # destination port as a heuristic.
    if not server_address:
        return False
    return server_address[1] in TYPICAL_QUIC_PORTS
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/browser.py | mitmproxy/addons/browser.py | import logging
import shutil
import subprocess
import tempfile
from mitmproxy import command
from mitmproxy import ctx
from mitmproxy.log import ALERT
def find_executable_cmd(*search_paths) -> list[str] | None:
for browser in search_paths:
if shutil.which(browser):
return [browser]
return None
def find_flatpak_cmd(*search_paths) -> list[str] | None:
if shutil.which("flatpak"):
for browser in search_paths:
if (
subprocess.run(
["flatpak", "info", browser],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
).returncode
== 0
):
return ["flatpak", "run", "-p", browser]
return None
class Browser:
browser: list[subprocess.Popen] = []
tdir: list[tempfile.TemporaryDirectory] = []
@command.command("browser.start")
def start(self, browser: str = "chrome") -> None:
    """Launch a proxy-configured browser: "chrome", "chromium" or "firefox"."""
    if self.browser:
        logging.log(ALERT, "Starting additional browser")
    launchers = {
        "chrome": self.launch_chrome,
        "chromium": self.launch_chrome,
        "firefox": self.launch_firefox,
    }
    launcher = launchers.get(browser)
    if launcher is None:
        logging.log(ALERT, "Invalid browser name.")
    else:
        launcher()
def launch_chrome(self) -> None:
    """
    Start an isolated instance of Chrome that points to the currently
    running proxy.
    """
    cmd = find_executable_cmd(
        "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
        # https://stackoverflow.com/questions/40674914/google-chrome-path-in-windows-10
        r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe",
        r"C:\Program Files (x86)\Google\Application\chrome.exe",
        # Linux binary names from Python's webbrowser module.
        "google-chrome",
        "google-chrome-stable",
        "chrome",
        "chromium",
        "chromium-browser",
        "google-chrome-unstable",
    ) or find_flatpak_cmd(
        "com.google.Chrome",
        "org.chromium.Chromium",
        "com.github.Eloston.UngoogledChromium",
        "com.google.ChromeDev",
    )
    if not cmd:
        logging.log(
            ALERT, "Your platform is not supported yet - please submit a patch."
        )
        return
    # The profile dir is kept on self.tdir so it isn't deleted while running.
    tdir = tempfile.TemporaryDirectory()
    self.tdir.append(tdir)
    self.browser.append(
        subprocess.Popen(
            [
                *cmd,
                "--user-data-dir=%s" % str(tdir.name),
                "--proxy-server={}:{}".format(
                    ctx.options.listen_host or "127.0.0.1",
                    ctx.options.listen_port or "8080",
                ),
                "--disable-fre",
                "--no-default-browser-check",
                "--no-first-run",
                "--disable-extensions",
                "about:blank",
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    )
def launch_firefox(self) -> None:
    """
    Start an isolated instance of Firefox that points to the currently
    running proxy.
    """
    cmd = find_executable_cmd(
        "/Applications/Firefox.app/Contents/MacOS/firefox",
        r"C:\Program Files\Mozilla Firefox\firefox.exe",
        "firefox",
        "mozilla-firefox",
        "mozilla",
    ) or find_flatpak_cmd("org.mozilla.firefox")
    if not cmd:
        logging.log(
            ALERT, "Your platform is not supported yet - please submit a patch."
        )
        return
    host = ctx.options.listen_host or "127.0.0.1"
    port = ctx.options.listen_port or 8080
    # Preferences for the throwaway profile: proxy settings plus switches
    # that disable telemetry, auto-update and other phone-home features.
    prefs = [
        'user_pref("datareporting.policy.firstRunURL", "");',
        'user_pref("network.proxy.type", 1);',
        'user_pref("network.proxy.share_proxy_settings", true);',
        'user_pref("datareporting.healthreport.uploadEnabled", false);',
        'user_pref("app.normandy.enabled", false);',
        'user_pref("app.update.auto", false);',
        'user_pref("app.update.enabled", false);',
        'user_pref("app.update.autoInstallEnabled", false);',
        # BUG FIX: a missing trailing comma here previously concatenated this
        # entry with the next one into a single list element.
        'user_pref("app.shield.optoutstudies.enabled", false);',
        'user_pref("extensions.blocklist.enabled", false);',
        'user_pref("browser.safebrowsing.downloads.remote.enabled", false);',
        'user_pref("browser.region.network.url", "");',
        'user_pref("browser.region.update.enabled", false);',
        'user_pref("browser.region.local-geocoding", false);',
        'user_pref("extensions.pocket.enabled", false);',
        'user_pref("network.captive-portal-service.enabled", false);',
        'user_pref("network.connectivity-service.enabled", false);',
        'user_pref("toolkit.telemetry.server", "");',
        'user_pref("dom.push.serverURL", "");',
        'user_pref("services.settings.enabled", false);',
        'user_pref("browser.newtab.preload", false);',
        'user_pref("browser.safebrowsing.provider.google4.updateURL", "");',
        'user_pref("browser.safebrowsing.provider.mozilla.updateURL", "");',
        'user_pref("browser.newtabpage.activity-stream.feeds.topsites", false);',
        'user_pref("browser.newtabpage.activity-stream.default.sites", "");',
        'user_pref("browser.newtabpage.activity-stream.showSponsoredTopSites", false);',
        'user_pref("browser.bookmarks.restore_default_bookmarks", false);',
        'user_pref("browser.bookmarks.file", "");',
    ]
    for service in ("http", "ssl"):
        prefs += [
            f'user_pref("network.proxy.{service}", "{host}");',
            f'user_pref("network.proxy.{service}_port", {port});',
        ]
    tdir = tempfile.TemporaryDirectory()
    with open(tdir.name + "/prefs.js", "w") as file:
        # BUG FIX: writelines() adds no separators, which used to dump every
        # pref onto a single line; write one pref per line instead.
        file.write("\n".join(prefs) + "\n")
    self.tdir.append(tdir)
    self.browser.append(
        subprocess.Popen(
            [
                *cmd,
                "--profile",
                str(tdir.name),
                "--new-window",
                "about:blank",
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    )
def done(self):
    """Kill every spawned browser process and remove the temporary profiles."""
    for proc in self.browser:
        proc.kill()
    for profile_dir in self.tdir:
        profile_dir.cleanup()
    self.browser = []
    self.tdir = []
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/cut.py | mitmproxy/addons/cut.py | import csv
import io
import logging
import os.path
from collections.abc import Sequence
from typing import Any
import pyperclip
import mitmproxy.types
from mitmproxy import certs
from mitmproxy import command
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import http
from mitmproxy.log import ALERT
logger = logging.getLogger(__name__)
def headername(spec: str):
    """Extract the header name from a "header[name]" cut spec.

    Raises:
        CommandError: if the spec is not of the form "header[...]".
    """
    prefix, suffix = "header[", "]"
    if not (spec.startswith(prefix) and spec.endswith(suffix)):
        raise exceptions.CommandError("Invalid header spec: %s" % spec)
    return spec[len(prefix) : -len(suffix)].strip()
def is_addr(v):
    """Return True if *v* looks like a (host, port, ...) address tuple."""
    if not isinstance(v, tuple):
        return False
    return len(v) >= 2
def extract(cut: str, f: flow.Flow) -> str | bytes:
    """Resolve a dotted cut expression (e.g. "request.host") against a flow.

    Most values are converted to str; bytes values are returned unchanged.
    Missing attributes resolve to None and end up as "".
    """
    # Hack for https://github.com/mitmproxy/mitmproxy/issues/6721:
    # Make "save body" keybind work for WebSocket flows.
    # Ideally the keybind would be smarter and this here can get removed.
    if (
        isinstance(f, http.HTTPFlow)
        and f.websocket
        and cut in ("request.content", "response.content")
    ):
        return f.websocket._get_formatted_messages()
    path = cut.split(".")
    current: Any = f
    for i, spec in enumerate(path):
        if spec.startswith("_"):
            raise exceptions.CommandError("Can't access internal attribute %s" % spec)
        part = getattr(current, spec, None)
        if i == len(path) - 1:
            # Last segment: apply the special-case conversions.
            if spec == "port" and is_addr(current):
                return str(current[1])
            if spec == "host" and is_addr(current):
                return str(current[0])
            elif spec.startswith("header["):
                if not current:
                    return ""
                return current.headers.get(headername(spec), "")
            elif isinstance(part, bytes):
                return part
            elif isinstance(part, bool):
                return "true" if part else "false"
            elif isinstance(part, certs.Cert):  # pragma: no cover
                return part.to_pem().decode("ascii")
            elif (
                isinstance(part, list)
                and len(part) > 0
                and isinstance(part[0], certs.Cert)
            ):
                # TODO: currently this extracts only the very first cert as PEM-encoded string.
                return part[0].to_pem().decode("ascii")
        current = part
    return str(current or "")
def extract_str(cut: str, f: flow.Flow) -> str:
    """Like extract(), but bytes results are rendered via repr()."""
    value = extract(cut, f)
    return repr(value) if isinstance(value, bytes) else value
class Cut:
@command.command("cut")
def cut(
    self,
    flows: Sequence[flow.Flow],
    cuts: mitmproxy.types.CutSpec,
) -> mitmproxy.types.Data:
    """
    Cut data from a set of flows. Cut specifications are attribute paths
    from the base of the flow object, with a few conveniences - "port"
    and "host" retrieve parts of an address tuple, ".header[key]"
    retrieves a header value. Return values converted to strings or
    bytes: SSL certificates are converted to PEM format, bools are "true"
    or "false", "bytes" are preserved, and all other values are
    converted to strings.
    """
    # One row per flow, one column per cut spec.
    return [[extract(c, f) for c in cuts] for f in flows]  # type: ignore
@command.command("cut.save")
def save(
    self,
    flows: Sequence[flow.Flow],
    cuts: mitmproxy.types.CutSpec,
    path: mitmproxy.types.Path,
) -> None:
    """
    Save cuts to file. If there are multiple flows or cuts, the format
    is UTF-8 encoded CSV. If there is exactly one row and one column,
    the data is written to file as-is, with raw bytes preserved. If the
    path is prefixed with a "+", values are appended if there is an
    existing file.
    """
    append = False
    if path.startswith("+"):
        append = True
        epath = os.path.expanduser(path[1:])
        path = mitmproxy.types.Path(epath)
    try:
        if len(cuts) == 1 and len(flows) == 1:
            # Single value: write raw so bytes survive unchanged.
            with open(path, "ab" if append else "wb") as fp:
                if fp.tell() > 0:
                    # We're appending to a file that already exists and has content
                    fp.write(b"\n")
                v = extract(cuts[0], flows[0])
                if isinstance(v, bytes):
                    fp.write(v)
                else:
                    fp.write(v.encode("utf8"))
            logger.log(ALERT, "Saved single cut.")
        else:
            # Multiple values: one CSV row per flow.
            with open(
                path, "a" if append else "w", newline="", encoding="utf8"
            ) as tfp:
                writer = csv.writer(tfp)
                for f in flows:
                    vals = [extract_str(c, f) for c in cuts]
                    writer.writerow(vals)
            logger.log(
                ALERT,
                "Saved %s cuts over %d flows as CSV." % (len(cuts), len(flows)),
            )
    except OSError as e:
        logger.error(str(e))
@command.command("cut.clip")
def clip(
    self,
    flows: Sequence[flow.Flow],
    cuts: mitmproxy.types.CutSpec,
) -> None:
    """
    Send cuts to the clipboard. If there are multiple flows or cuts, the
    format is UTF-8 encoded CSV. If there is exactly one row and one
    column, the data is written to file as-is, with raw bytes preserved.
    """
    buf = io.StringIO(newline="")
    if len(cuts) == 1 and len(flows) == 1:
        buf.write(extract_str(cuts[0], flows[0]))
        logger.log(ALERT, "Clipped single cut.")
    else:
        writer = csv.writer(buf)
        for f in flows:
            writer.writerow([extract_str(c, f) for c in cuts])
        logger.log(ALERT, "Clipped %s cuts as CSV." % len(cuts))
    try:
        pyperclip.copy(buf.getvalue())
    except pyperclip.PyperclipException as e:
        logger.error(str(e))
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/onboarding.py | mitmproxy/addons/onboarding.py | from mitmproxy import ctx
from mitmproxy.addons import asgiapp
from mitmproxy.addons.onboardingapp import app
APP_HOST = "mitm.it"
class Onboarding(asgiapp.WSGIApp):
    """Serves the mitmproxy onboarding app on the `mitm.it` domain."""

    name = "onboarding"

    def __init__(self):
        # Port None: handled by the WSGIApp base class.
        super().__init__(app, APP_HOST, None)

    def load(self, loader):
        loader.add_option(
            "onboarding", bool, True, "Toggle the mitmproxy onboarding app."
        )
        loader.add_option(
            "onboarding_host",
            str,
            APP_HOST,
            """
            Onboarding app domain. For transparent mode, use an IP when a DNS
            entry for the app domain is not present.
            """,
        )

    def configure(self, updated):
        # Both assignments are cheap, so we refresh them on every configure
        # call instead of inspecting `updated`.
        app.config["CONFDIR"] = ctx.options.confdir
        self.host = ctx.options.onboarding_host

    async def request(self, f):
        # Serve the app only while the `onboarding` option is enabled.
        if not ctx.options.onboarding:
            return
        await super().request(f)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/blocklist.py | mitmproxy/addons/blocklist.py | from collections.abc import Sequence
from typing import NamedTuple
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import version
from mitmproxy.net.http.status_codes import NO_RESPONSE
class BlockSpec(NamedTuple):
    """A single parsed block_list rule."""

    matches: flowfilter.TFilter  # predicate selecting the flows to block
    status_code: int  # HTTP status code sent for blocked requests
def parse_spec(option: str) -> BlockSpec:
    """
    Parse a block_list option string into a BlockSpec.

    The expected format is "/flow-filter/status", where the first character
    of the string defines the separator; exactly two segments are enforced.
    Raises ValueError on malformed input.
    """
    delim = option[0]
    segments = option[1:].split(delim, 2)
    if len(segments) != 2:
        raise ValueError("Invalid number of parameters (2 are expected)")
    filter_patt, status = segments
    try:
        code = int(status)
    except ValueError:
        raise ValueError(f"Invalid HTTP status code: {status}")
    # flowfilter.parse raises on an invalid filter expression.
    return BlockSpec(matches=flowfilter.parse(filter_patt), status_code=code)
class BlockList:
    """Addon implementing the `block_list` option."""

    def __init__(self) -> None:
        # Parsed rules, rebuilt whenever the option changes.
        self.items: list[BlockSpec] = []

    def load(self, loader):
        loader.add_option(
            "block_list",
            Sequence[str],
            [],
            """
            Block matching requests and return an empty response with the specified HTTP status.
            Option syntax is "/flow-filter/status-code", where flow-filter describes
            which requests this rule should be applied to and status-code is the HTTP status code to return for
            blocked requests. The separator ("/" in the example) can be any character.
            Setting a non-standard status code of 444 will close the connection without sending a response.
            """,
        )

    def configure(self, updated):
        if "block_list" not in updated:
            return
        self.items = []
        for option in ctx.options.block_list:
            try:
                spec = parse_spec(option)
            except ValueError as e:
                raise exceptions.OptionsError(
                    f"Cannot parse block_list option {option}: {e}"
                ) from e
            self.items.append(spec)

    def request(self, flow: http.HTTPFlow) -> None:
        # Skip flows that are already answered, errored, or no longer live.
        if flow.response or flow.error or not flow.live:
            return
        for spec in self.items:
            if not spec.matches(flow):
                continue
            flow.metadata["blocklisted"] = True
            if spec.status_code == NO_RESPONSE:
                # Status 444: close the connection without a response.
                flow.kill()
            else:
                flow.response = http.Response.make(
                    spec.status_code, headers={"Server": version.MITMPROXY}
                )
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/modifyheaders.py | mitmproxy/addons/modifyheaders.py | import logging
import re
from collections.abc import Sequence
from pathlib import Path
from typing import NamedTuple
from mitmproxy import ctx
from mitmproxy import exceptions
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy.http import Headers
from mitmproxy.utils import strutils
from mitmproxy.utils.spec import parse_spec
class ModifySpec(NamedTuple):
    """A parsed modification rule: flow filter, subject, and replacement."""

    matches: flowfilter.TFilter
    subject: bytes
    replacement_str: str

    def read_replacement(self) -> bytes:
        """
        Return the replacement value as bytes.

        A replacement_str starting with "@" is interpreted as a file path
        whose raw contents are returned; any other value is unescaped to
        bytes.

        Raises:
            - IOError if the file cannot be read.
        """
        raw = self.replacement_str
        if raw.startswith("@"):
            # Deliberately re-read on every call (no caching).
            return Path(raw[1:]).expanduser().read_bytes()
        return strutils.escaped_str_to_bytes(raw)
def parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec:
    """
    Parse a modify-style option string into a ModifySpec.

    Raises ValueError if the subject is not a valid regular expression
    (when subject_is_regex is set) or if an "@file" replacement cannot be
    read.
    """
    filt, subj_text, repl = parse_spec(option)
    subj = strutils.escaped_str_to_bytes(subj_text)

    if subject_is_regex:
        try:
            re.compile(subj)
        except re.error as e:
            raise ValueError(f"Invalid regular expression {subj!r} ({e})")

    spec = ModifySpec(filt, subj, repl)

    # Validate eagerly that an @file replacement is actually readable.
    try:
        spec.read_replacement()
    except OSError as e:
        raise ValueError(f"Invalid file path: {repl[1:]} ({e})")

    return spec
class ModifyHeaders:
    """Addon implementing the `modify_headers` option."""

    def __init__(self) -> None:
        # Parsed rules, rebuilt whenever the option changes.
        self.replacements: list[ModifySpec] = []

    def load(self, loader):
        loader.add_option(
            "modify_headers",
            Sequence[str],
            [],
            """
            Header modify pattern of the form "[/flow-filter]/header-name/[@]header-value", where the
            separator can be any character. The @ allows to provide a file path that is used to read
            the header value string. An empty header-value removes existing header-name headers.
            """,
        )

    def configure(self, updated):
        if "modify_headers" not in updated:
            return
        self.replacements = []
        for option in ctx.options.modify_headers:
            try:
                spec = parse_modify_spec(option, False)
            except ValueError as e:
                raise exceptions.OptionsError(
                    f"Cannot parse modify_headers option {option}: {e}"
                ) from e
            self.replacements.append(spec)

    def requestheaders(self, flow):
        # Skip flows that are already answered, errored, or no longer live.
        if flow.response or flow.error or not flow.live:
            return
        self.run(flow, flow.request.headers)

    def responseheaders(self, flow):
        if flow.error or not flow.live:
            return
        self.run(flow, flow.response.headers)

    def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:
        # Evaluate every filter before mutating anything, so all rules are
        # matched against the original, unmodified flow.
        hits = [spec.matches(flow) for spec in self.replacements]

        # Pass 1: remove every header named by a matching rule.
        for spec, hit in zip(self.replacements, hits):
            if hit:
                hdrs.pop(spec.subject, None)

        # Pass 2: add replacement values (an empty replacement only removes).
        for spec, hit in zip(self.replacements, hits):
            if not hit:
                continue
            try:
                value = spec.read_replacement()
            except OSError as e:
                logging.warning(f"Could not read replacement file: {e}")
                continue
            if value:
                hdrs.add(spec.subject, value)
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
mitmproxy/mitmproxy | https://github.com/mitmproxy/mitmproxy/blob/e6aa924bb411a9687b91920b8d094af37bc02b90/mitmproxy/addons/command_history.py | mitmproxy/addons/command_history.py | import logging
import os
import pathlib
from collections.abc import Sequence
from mitmproxy import command
from mitmproxy import ctx
class CommandHistory:
    """Addon that records, persists, and navigates the command history."""

    # Once the in-memory history reaches this size, it is truncated to the
    # most recent half when written out at shutdown.
    VACUUM_SIZE = 1024

    def __init__(self) -> None:
        self.history: list[str] = []
        self.filtered_history: list[str] = [""]
        self.current_index: int = 0

    def load(self, loader):
        loader.add_option(
            "command_history",
            bool,
            True,
            """Persist command history between mitmproxy invocations.""",
        )

    @property
    def history_file(self) -> pathlib.Path:
        # Resolved on each access so that confdir changes are picked up.
        return pathlib.Path(os.path.expanduser(ctx.options.confdir)) / "command_history"

    def running(self):
        # FIXME: We have a weird bug where the contract for configure is not followed and it is never called with
        # confdir or command_history as updated.
        self.configure("command_history")  # pragma: no cover

    def configure(self, updated):
        if "command_history" not in updated and "confdir" not in updated:
            return
        if ctx.options.command_history and self.history_file.is_file():
            self.history = self.history_file.read_text().splitlines()
        self.set_filter("")

    def done(self):
        if ctx.options.command_history and len(self.history) >= self.VACUUM_SIZE:
            # vacuum history so that it doesn't grow indefinitely.
            recent = self.history[-self.VACUUM_SIZE // 2 :]
            try:
                self.history_file.write_text("\n".join(recent) + "\n")
            except Exception as e:
                logging.warning(f"Failed writing to {self.history_file}: {e}")

    @command.command("commands.history.add")
    def add_command(self, command: str) -> None:
        if not command.strip():
            return
        self.history.append(command)
        if ctx.options.command_history:
            try:
                with self.history_file.open("a") as f:
                    f.write(f"{command}\n")
            except Exception as e:
                logging.warning(f"Failed writing to {self.history_file}: {e}")
        self.set_filter("")

    @command.command("commands.history.get")
    def get_history(self) -> Sequence[str]:
        """Get the entire command history."""
        return self.history.copy()

    @command.command("commands.history.clear")
    def clear_history(self):
        if self.history_file.exists():
            try:
                self.history_file.unlink()
            except Exception as e:
                logging.warning(f"Failed deleting {self.history_file}: {e}")
        self.history = []
        self.set_filter("")

    # Functionality to provide a filtered list that can be iterated through.

    @command.command("commands.history.filter")
    def set_filter(self, prefix: str) -> None:
        # Matching entries first, the typed prefix itself last; the cursor
        # starts on the prefix entry.
        self.filtered_history = [
            *(cmd for cmd in self.history if cmd.startswith(prefix)),
            prefix,
        ]
        self.current_index = len(self.filtered_history) - 1

    @command.command("commands.history.next")
    def get_next(self) -> str:
        # Clamp at the newest entry (the prefix itself).
        if self.current_index < len(self.filtered_history) - 1:
            self.current_index += 1
        return self.filtered_history[self.current_index]

    @command.command("commands.history.prev")
    def get_prev(self) -> str:
        # Clamp at the oldest entry.
        if self.current_index > 0:
            self.current_index -= 1
        return self.filtered_history[self.current_index]
| python | MIT | e6aa924bb411a9687b91920b8d094af37bc02b90 | 2026-01-04T14:40:00.086164Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.