sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
marimo-team/marimo:marimo/_internal/notifications.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for notifications."""
from marimo._messaging.notification import (
AlertNotification,
BannerNotification,
CacheClearedNotification,
CacheInfoNotification,
CellNotification,
ColumnPreview,
CompletedRunNotification,
CompletionResultNotification,
DataColumnPreviewNotification,
DatasetsNotification,
DataSourceConnectionsNotification,
FocusCellNotification,
FunctionCallResultNotification,
HumanReadableStatus,
InstallingPackageAlertNotification,
InterruptedNotification,
KernelCapabilitiesNotification,
KernelReadyNotification,
KernelStartupErrorNotification,
MissingPackageAlertNotification,
Notification,
NotificationMessage,
PackageStatusType,
QueryParamsAppendNotification,
QueryParamsClearNotification,
QueryParamsDeleteNotification,
QueryParamsSetNotification,
ReconnectedNotification,
ReloadNotification,
RemoveUIElementsNotification,
SecretKeysResultNotification,
SQLMetadata,
SQLTableListPreviewNotification,
SQLTablePreviewNotification,
StartupLogsNotification,
UIElementMessageNotification,
UpdateCellCodesNotification,
UpdateCellIdsNotification,
ValidateSQLResultNotification,
VariableDeclarationNotification,
VariablesNotification,
VariableValue,
VariableValuesNotification,
)
__all__ = [
"AlertNotification",
"BannerNotification",
"CacheClearedNotification",
"CacheInfoNotification",
"CellNotification",
"ColumnPreview",
"CompletedRunNotification",
"CompletionResultNotification",
"DataColumnPreviewNotification",
"DataSourceConnectionsNotification",
"DatasetsNotification",
"FocusCellNotification",
"FunctionCallResultNotification",
"HumanReadableStatus",
"InstallingPackageAlertNotification",
"InterruptedNotification",
"KernelCapabilitiesNotification",
"KernelReadyNotification",
"KernelStartupErrorNotification",
"MissingPackageAlertNotification",
"Notification",
"NotificationMessage",
"PackageStatusType",
"QueryParamsAppendNotification",
"QueryParamsClearNotification",
"QueryParamsDeleteNotification",
"QueryParamsSetNotification",
"ReconnectedNotification",
"ReloadNotification",
"RemoveUIElementsNotification",
"SQLMetadata",
"SQLTableListPreviewNotification",
"SQLTablePreviewNotification",
"SecretKeysResultNotification",
"StartupLogsNotification",
"UIElementMessageNotification",
"UpdateCellCodesNotification",
"UpdateCellIdsNotification",
"VariableDeclarationNotification",
"VariableValue",
"VariableValuesNotification",
"VariablesNotification",
"ValidateSQLResultNotification",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/notifications.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/packages.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for package management."""
from marimo._runtime.packages.package_manager import (
PackageDescription,
PackageManager,
)
from marimo._runtime.packages.package_managers import create_package_manager
__all__ = [
"PackageDescription",
"PackageManager",
"create_package_manager",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/packages.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/schemas.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for notebook schemas."""
import marimo._schemas.notebook as notebook
import marimo._schemas.serialization as serialization
import marimo._schemas.session as session
__all__ = [
"notebook",
"serialization",
"session",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/schemas.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/server/requests.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for server request types."""
from marimo._server.models.export import (
ExportAsHTMLRequest,
ExportAsIPYNBRequest,
ExportAsMarkdownRequest,
ExportAsScriptRequest,
)
from marimo._server.models.models import InstantiateNotebookRequest
__all__ = [
"ExportAsHTMLRequest",
"ExportAsIPYNBRequest",
"ExportAsMarkdownRequest",
"ExportAsScriptRequest",
"InstantiateNotebookRequest",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/server/requests.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/session/extensions.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for session extensions."""
from marimo._session.extensions.extensions import (
CachingExtension,
HeartbeatExtension,
LoggingExtension,
NotificationListenerExtension,
QueueExtension,
ReplayExtension,
SessionViewExtension,
)
from marimo._session.extensions.types import SessionExtension
__all__ = [
"CachingExtension",
"HeartbeatExtension",
"LoggingExtension",
"NotificationListenerExtension",
"QueueExtension",
"ReplayExtension",
"SessionExtension",
"SessionViewExtension",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/session/extensions.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/fixtures/notebook.py | import marimo
app = marimo.App()
@app.cell
def __():
import marimo as mo
return (mo,)
@app.cell
def __(mo):
slider = mo.ui.slider(0, 10)
return (slider,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_async.py | import marimo
app = marimo.App()
@app.cell
async def __():
import asyncio
await asyncio.sleep(0.1)
return (asyncio,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_async.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_sandboxed.py | # Copyright 2026 Marimo. All rights reserved.
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "polars",
# "marimo>=0.8.0",
# "quak",
# "vega-datasets",
# ]
# ///
import marimo
app = marimo.App()
@app.cell
def __():
import marimo as mo
return (mo,)
@app.cell
def __(mo):
slider = mo.ui.slider(0, 10)
return (slider,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_sandboxed.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_unparsable.py | import marimo
app = marimo.App()
app._unparsable_cell(
r"""
return
""",
name="__",
)
app._unparsable_cell(
r"""
partial_statement =
""",
name="__",
)
@app.cell
def __():
valid_statement = 1
return (valid_statement,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_unparsable.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_with_errors.py | import marimo
app = marimo.App()
@app.cell
def __():
import marimo as mo
return (mo,)
@app.cell
def __(mo):
slider = mo.ui.slider(0, 10)
return (slider,)
@app.cell
def __():
1 / 0
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_with_errors.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_with_md.py | import marimo
app = marimo.App()
@app.cell
def __(mo):
control_dep = None
mo.md("markdown")
return control_dep
@app.cell
def __(mo, control_dep):
control_dep
mo.md(f"parameterized markdown {123}")
return
@app.cell
def __():
mo.md("plain markdown")
return (mo,)
@app.cell
def __():
import marimo as mo
return (mo,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_with_md.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_with_media.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "altair==6.0.0",
# "marimo",
# "matplotlib==3.10.8",
# "numpy==2.4.1",
# "pandas==2.3.3",
# "plotly==6.5.2",
# "pyarrow==22.0.0",
# ]
# ///
import marimo
__generated_with = "0.19.4"
app = marimo.App(width="medium")
@app.cell
def cell_imports():
import marimo as mo
import matplotlib.pyplot as plt
import altair as alt
import plotly.graph_objs as go
import numpy as np
import pandas as pd
import pyarrow
np.random.seed(5)
return alt, go, mo, np, pd, plt
@app.cell
def pure_markdown_cell(mo):
mo.md("""
pure markdown cell
""")
return
@app.cell
def ends_with_markdown(mo):
_x = 10
mo.md("ends with markdown")
return
@app.cell
def cell_slider(mo):
slider = mo.ui.slider(0, 10)
slider
return
@app.cell
def cell_matplotlib(np, plt):
# Matplotlib plot
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x)
fig, ax = plt.subplots()
mpplot = ax.plot(x, y)
fig
return (fig,)
@app.cell
def basic_dataframe(pd):
df = pd.DataFrame({"x": [1]})
df
return (df,)
@app.cell
def _(df, mo):
mo.ui.table(df)
return
@app.cell
def _(df, mo):
mo.ui.dataframe(df)
return
@app.cell
def _(fig, mo):
# Interactive figure
mo.mpl.interactive(fig)
return
@app.cell
def cell_altair(alt, pd):
# Altair chart
_df = pd.DataFrame({"x": [1], "y": [1]})
chart = alt.Chart(_df).mark_circle().encode(x="x:O", y="y:Q")
chart
return (chart,)
@app.cell
def _(chart, mo):
# Wrapped in mo.ui.altair_chart()
mo.ui.altair_chart(chart)
return
@app.cell
def cell_plotly(go, np):
# Plotly chart
trace = go.Scatter(x=np.arange(2), y=np.random.randn(2), mode="lines+markers")
plot = go.Figure(data=[trace])
plot
return (plot,)
@app.cell
def _(mo, plot):
# Wrapped in mo.ui.plotly
mo.ui.plotly(plot)
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_with_media.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/fixtures/notebook_with_multiple_definitions.py | import marimo
app = marimo.App()
@app.cell
def __():
x = 1
return (x,)
@app.cell
def __():
x = 2
return (x,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/fixtures/notebook_with_multiple_definitions.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/appcomp/resetting_sliders/embed_test_a.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return
@app.cell
def _():
import embed_test_b
return (embed_test_b,)
@app.cell
def _(embed_test_b):
clone_a = embed_test_b.app.clone()
return (clone_a,)
@app.cell
async def _(clone_a):
embed_a = await clone_a.embed()
embed_a.output
return (embed_a,)
@app.cell
def _(embed_test_b):
clone_b = embed_test_b.app.clone()
return (clone_b,)
@app.cell
async def _(clone_b, embed_a):
(
await clone_b.embed(
defs={
"slider": None,
"value": embed_a.defs["value"],
"label": "their",
"kind": "warn",
}
)
).output
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/appcomp/resetting_sliders/embed_test_a.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/appcomp/resetting_sliders/embed_test_b.py | import marimo
__generated_with = "0.19.2"
app = marimo.App(width="full")
@app.cell
def _():
import marimo as mo
import time
return (mo,)
@app.cell
def _():
state = None
return (state,)
@app.cell
def _(mo):
print("Creating slider")
slider = mo.ui.slider(0, 10, 1, 3, label="A slider")
return (slider,)
@app.cell
def _(slider):
slider
return
@app.cell
def _(slider, state):
value = slider.value if state is None else state()
label = "our"
return label, value
@app.cell
def _():
kind = "info"
return (kind,)
@app.cell
def _(kind, label, mo, value):
mo.callout(
mo.md(f"""
The value of *{label}* slider is **{value}**!
"""), kind
)
return
@app.cell
def _():
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/appcomp/resetting_sliders/embed_test_b.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/appcomp/ui_elements_with_overrides/embed_test_a.py | import marimo
__generated_with = "0.19.2"
app = marimo.App(width="full")
@app.cell
def _():
# https://github.com/marimo-team/marimo/issues/7685
import marimo as mo
return
@app.cell
def _():
import embed_test_b
return (embed_test_b,)
@app.cell
def _(embed_test_b):
clone_a = embed_test_b.app.clone()
return (clone_a,)
@app.cell
async def _(clone_a):
embed_a = await clone_a.embed()
embed_a.output
return (embed_a,)
@app.cell
def _(embed_test_b):
clone_b = embed_test_b.app.clone()
return (clone_b,)
@app.cell
async def _(clone_b, embed_a):
(
await clone_b.embed(
defs={"value": embed_a.defs["value"], "label": "their", "kind": "warn"}
)
).output
return
@app.cell
def _():
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/appcomp/ui_elements_with_overrides/embed_test_a.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/appcomp/ui_elements_with_overrides/embed_test_b.py | import marimo
__generated_with = "0.18.4"
app = marimo.App(width="full")
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(mo):
slider = mo.ui.slider(0, 10, 1, 3, label="A slider")
slider
return (slider,)
@app.cell
def _(slider):
value = slider.value
label = "our"
return label, value
@app.cell
def _():
kind = "info"
return (kind,)
@app.cell
def _(kind, label, mo, value):
mo.callout(
mo.md(f"""
The value of *{label}* slider is **{value}**!
"""), kind
)
return
@app.cell
def _():
return
if __name__ == "__main__":
app.run() | {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/appcomp/ui_elements_with_overrides/embed_test_b.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:frontend/e2e-tests/py/slides.py | # /// script
# [tool.marimo.runtime]
# auto_instantiate = true
# ///
import marimo
__generated_with = "0.19.4"
app = marimo.App(layout_file="layouts/slides.slides.json")
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(mo):
mo.md("""
# Slides!
""")
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
We all love slides don't we 🎀
""")
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "frontend/e2e-tests/py/slides.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ast/fast_stack.py | # Copyright 2026 Marimo. All rights reserved.
# Source - https://stackoverflow.com/a
# Posted by Kache, modified by community. See post 'Timeline' for change history
# Retrieved 2026-01-17, License - CC BY-SA 4.0
from __future__ import annotations
import inspect
import itertools
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Generator
from types import FrameType
def fast_stack(max_depth: int | None = None) -> list[inspect.FrameInfo]:
"""Fast alternative to `inspect.stack()`
Use optional `max_depth` to limit search depth
Based on: github.com/python/cpython/blob/3.11/Lib/inspect.py
Compared to `inspect.stack()`:
* Does not read source files to load neighboring context
* Less accurate filename determination, still correct for most cases
* Does not compute 3.11+ code positions (PEP 657)
Compare:
In [3]: %timeit stack_depth(100, lambda: inspect.stack())
67.7 ms ± 1.35 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [4]: %timeit stack_depth(100, lambda: inspect.stack(0))
22.7 ms ± 747 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
In [5]: %timeit stack_depth(100, lambda: fast_stack())
108 µs ± 180 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
In [6]: %timeit stack_depth(100, lambda: fast_stack(10))
14.1 µs ± 33.4 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)
"""
def frame_infos(
frame: FrameType | None,
) -> Generator[inspect.FrameInfo, None, None]:
while frame := frame and frame.f_back:
yield inspect.FrameInfo(
frame,
inspect.getfile(frame),
frame.f_lineno,
frame.f_code.co_name,
None,
None,
)
return list(
itertools.islice(frame_infos(inspect.currentframe()), max_depth)
)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ast/fast_stack.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ast/test_fast_stack.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import inspect
from marimo._ast.fast_stack import fast_stack
class TestFastStack:
@staticmethod
def test_returns_list_of_frame_info() -> None:
result = fast_stack()
assert isinstance(result, list)
assert all(isinstance(frame, inspect.FrameInfo) for frame in result)
@staticmethod
def test_frame_info_has_expected_fields() -> None:
result = fast_stack()
assert len(result) > 0
frame_info = result[0]
assert hasattr(frame_info, "frame")
assert hasattr(frame_info, "filename")
assert hasattr(frame_info, "lineno")
assert hasattr(frame_info, "function")
@staticmethod
def test_contains_caller_function_name() -> None:
result = fast_stack()
function_names = [f.function for f in result]
# The calling test function should be in the stack
assert "test_contains_caller_function_name" in function_names
@staticmethod
def test_contains_caller_filename() -> None:
result = fast_stack()
filenames = [f.filename for f in result]
# At least one frame should be from this test file
assert any("test_fast_stack.py" in f for f in filenames)
@staticmethod
def test_max_depth_limits_results() -> None:
result_limited = fast_stack(max_depth=2)
result_full = fast_stack()
assert len(result_limited) <= 2
assert len(result_limited) <= len(result_full)
@staticmethod
def test_max_depth_none_returns_full_stack() -> None:
result = fast_stack(max_depth=None)
assert len(result) > 0
@staticmethod
def test_max_depth_zero_returns_empty() -> None:
result = fast_stack(max_depth=0)
assert result == []
@staticmethod
def test_context_fields_are_none() -> None:
# fast_stack does not load context (for performance)
result = fast_stack()
assert len(result) > 0
for frame_info in result:
assert frame_info.code_context is None
assert frame_info.index is None
@staticmethod
def test_nested_function_call() -> None:
def inner() -> list[inspect.FrameInfo]:
return fast_stack()
def outer() -> list[inspect.FrameInfo]:
return inner()
result = outer()
function_names = [f.function for f in result]
assert "inner" in function_names
assert "outer" in function_names
@staticmethod
def test_line_numbers_are_positive() -> None:
result = fast_stack()
for frame_info in result:
assert frame_info.lineno > 0
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/test_fast_stack.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_convert/ipynb/test_from_ir.py | from __future__ import annotations
import json
import pathlib
from typing import Any
import pytest
from marimo._ast.app import App, InternalApp
from marimo._ast.load import load_app
from marimo._convert.ipynb import convert_from_ir_to_ipynb
from marimo._convert.ipynb.from_ir import (
_clean_ansi_for_export,
_convert_latex_delimiters_for_jupyter,
_convert_marimo_output_to_ipynb,
_convert_marimo_tex_to_latex,
_is_marimo_component,
_maybe_extract_dataurl,
)
from marimo._messaging.cell_output import CellChannel, CellOutput
from marimo._output.md import _md
from tests.mocks import snapshotter
SELF_DIR = pathlib.Path(__file__).parent
snapshot_test = snapshotter(__file__)
pytest.importorskip("nbformat")
@pytest.mark.parametrize(
"py_path", (SELF_DIR / "fixtures" / "py").glob("*.py")
)
def test_convert_from_ir_to_ipynb_snapshots(py_path: pathlib.Path) -> None:
"""Test convert_from_ir_to_ipynb against all Python fixtures using snapshots."""
# Load the marimo app from file
app = load_app(py_path)
assert app
internal_app = InternalApp(app)
# Convert
sort_mode = "top-down"
ipynb_str = convert_from_ir_to_ipynb(internal_app, sort_mode=sort_mode)
# Parse as JSON to validate and format consistently
ipynb_json = json.loads(ipynb_str)
formatted_ipynb = json.dumps(ipynb_json, indent=2, sort_keys=True)
base_name = py_path.name.replace(".py", "")
snapshot_name = f"{base_name}_{sort_mode.replace('-', '_')}.ipynb.txt"
snapshot_test(snapshot_name, formatted_ipynb)
def test_export_ipynb_sort_modes() -> None:
app = App()
@app.cell()
def result(x, y):
z = x + y
return (z,)
@app.cell()
def __():
x = 1
return (x,)
@app.cell()
def __():
y = 1
return (y,)
internal_app = InternalApp(app)
# Test top-down mode preserves document order
content = convert_from_ir_to_ipynb(internal_app, sort_mode="top-down")
snapshot_test("notebook_top_down.ipynb.txt", content)
# Test topological mode respects dependencies
content = convert_from_ir_to_ipynb(internal_app, sort_mode="topological")
snapshot_test("notebook_topological.ipynb.txt", content)
@pytest.mark.parametrize(
("input_data", "expected"),
[
# Base64 data URL extraction
(
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUA",
"iVBORw0KGgoAAAANSUhEUgAAAAUA",
),
# SVG string from Base64 data URL
(
"data:image/svg+xml;base64,PHN2Zz48L3N2Zz4=",
"<svg></svg>",
),
# Non-data-URL string passes through
("hello world", "hello world"),
# Dict passes through
({"key": "value"}, {"key": "value"}),
# Int passes through
(123, 123),
# Data URL without base64 passes through
("data:text/plain,hello", "data:text/plain,hello"),
],
ids=[
"base64_data_url",
"svg_string_from_base64_data_url",
"regular_string",
"dict_passthrough",
"int_passthrough",
"data_url_no_base64",
],
)
def test_maybe_extract_dataurl(input_data: Any, expected: Any) -> None:
"""Test _maybe_extract_dataurl with various inputs."""
result = _maybe_extract_dataurl(input_data)
assert result == expected
@pytest.mark.parametrize(
("mimebundle", "expected_data", "expected_metadata"),
[
# Known mimetypes
(
{
"text/plain": "Hello",
"text/html": "<p>Hello</p>",
"image/png": "data:image/png;base64,iVBORw0KGgo=",
},
{
"text/plain": "Hello",
"text/html": "<p>Hello</p>",
"image/png": "iVBORw0KGgo=",
},
{},
),
# Multiple formats
(
{
"text/plain": "Figure(640x480)",
"text/html": "<div>Chart</div>",
"image/png": "data:image/png;base64,PNG_BASE64_DATA",
"image/svg+xml": "<svg>...</svg>",
"application/json": {"data": [1, 2, 3]},
},
{
"text/plain": "Figure(640x480)",
"text/html": "<div>Chart</div>",
"image/png": "PNG_BASE64_DATA",
"image/svg+xml": "<svg>...</svg>",
"application/json": {"data": [1, 2, 3]},
},
{},
),
# With metadata
(
{
"text/plain": "Figure",
"image/png": "PNG_DATA",
"__metadata__": {"width": 640, "height": 480, "dpi": 100},
},
{"text/plain": "Figure", "image/png": "PNG_DATA"},
{"width": 640, "height": 480, "dpi": 100},
),
# Nested metadata
(
{
"text/plain": "Figure",
"image/png": "PNG_DATA",
"__metadata__": {
"figure": {"width": 640, "height": 480},
"plot": {"type": "line", "color": "blue"},
"tags": ["important", "experiment-1"],
},
},
{"text/plain": "Figure", "image/png": "PNG_DATA"},
{
"figure": {"width": 640, "height": 480},
"plot": {"type": "line", "color": "blue"},
"tags": ["important", "experiment-1"],
},
),
],
ids=[
"known_mimetypes",
"multiple_formats",
"with_metadata",
"nested_metadata",
],
)
def test_convert_marimo_mimebundle_to_ipynb(
mimebundle: dict[str, Any],
expected_data: dict[str, Any],
expected_metadata: dict[str, Any],
) -> None:
"""Test marimo mimebundle conversion to ipynb format."""
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="application/vnd.marimo+mimebundle",
data=json.dumps(mimebundle),
)
result = _convert_marimo_output_to_ipynb(output, [])
assert result == [
{
"output_type": "display_data",
"metadata": expected_metadata,
"data": expected_data,
}
]
def test_convert_marimo_mimebundle_with_non_dict_metadata() -> None:
"""Test that non-dict metadata is ignored."""
mimebundle_data = json.dumps(
{
"text/plain": "Figure",
"image/png": "PNG_DATA",
"__metadata__": "not a dict",
}
)
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="application/vnd.marimo+mimebundle",
data=mimebundle_data,
)
result = _convert_marimo_output_to_ipynb(output, [])
assert (
result
== [
{
"output_type": "display_data",
"metadata": {}, # Verify metadata is empty when non-dict value is provided
"data": {
"text/plain": "Figure",
"image/png": "PNG_DATA",
"__metadata__": "not a dict",
},
}
]
)
def test_convert_marimo_mimebundle_empty() -> None:
"""Test that empty marimo mimebundle produces no output."""
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="application/vnd.marimo+mimebundle",
data=json.dumps({}),
)
result = _convert_marimo_output_to_ipynb(output, [])
assert result == []
def test_convert_marimo_mimebundle_dict() -> None:
"""Handle the case when the mimebundle is a dict, not JSON-dumped string."""
mimebundle_data = {
"text/plain": "Figure",
"image/png": "PNG_DATA",
"__metadata__": {"width": 640, "height": 480},
}
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="application/vnd.marimo+mimebundle",
data=mimebundle_data,
)
result = _convert_marimo_output_to_ipynb(output, [])
assert result == [
{
"output_type": "display_data",
"metadata": {"width": 640, "height": 480},
"data": {"text/plain": "Figure", "image/png": "PNG_DATA"},
}
]
@pytest.mark.parametrize(
("mimetype", "data", "expected_data"),
[
# Data URL extraction
(
"image/png",
"data:image/png;base64,REGULAR_PNG_DATA",
{"image/png": "REGULAR_PNG_DATA"},
),
# No data URL
(
"text/plain",
"Hello World",
{"text/plain": "Hello World"},
),
],
ids=["with_dataurl", "without_dataurl"],
)
def test_convert_regular_output(
mimetype: str, data: str, expected_data: dict[str, str]
) -> None:
"""Test regular output conversion."""
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype=mimetype,
data=data,
)
result = _convert_marimo_output_to_ipynb(output, [])
assert result == [
{
"output_type": "display_data",
"metadata": {},
"data": expected_data,
}
]
def test_convert_console_outputs() -> None:
"""Test that console outputs (stdout/stderr) are converted correctly."""
console_outputs = [
CellOutput(
channel=CellChannel.STDOUT,
mimetype="text/plain",
data="Console output\n",
),
CellOutput(
channel=CellChannel.STDERR,
mimetype="text/plain",
data="Warning message\n",
),
]
result = _convert_marimo_output_to_ipynb(None, console_outputs)
assert result == [
{
"output_type": "stream",
"name": "stdout",
"text": "Console output\n",
},
{
"output_type": "stream",
"name": "stderr",
"text": "Warning message\n",
},
]
def test_convert_marimo_mimebundle_with_both_output_and_console() -> None:
"""Test marimo mimebundle with both cell output and console outputs."""
main_output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="application/vnd.marimo+mimebundle",
data=json.dumps({"text/plain": "Result", "image/png": "PNG_DATA"}),
)
console_outputs = [
CellOutput(
channel=CellChannel.STDOUT,
mimetype="text/plain",
data="Console output\n",
),
]
# Convert console outputs
console_result = _convert_marimo_output_to_ipynb(None, console_outputs)
# Convert main output
main_result = _convert_marimo_output_to_ipynb(main_output, [])
assert console_result == [
{
"output_type": "stream",
"name": "stdout",
"text": "Console output\n",
}
]
assert main_result == [
{
"output_type": "display_data",
"metadata": {},
"data": {
"text/plain": "Result",
"image/png": "PNG_DATA",
},
}
]
@pytest.mark.parametrize(
("html_content", "expected"),
[
# Marimo components should be detected
("<marimo-plotly data-figure='{}'>", True),
("<marimo-table data-data='[]'>", True),
("<marimo-slider value='5'>", True),
('<marimo-output data-output="test">', True),
# List of strings (as Jupyter stores text/html)
(["<marimo-plotly data-figure='{}'>"], True),
(["<div>", "<marimo-chart>", "</div>"], True),
# Regular HTML should not be detected
("<div>Hello World</div>", False),
("<p>Some <b>text</b></p>", False),
("<script>console.log('test')</script>", False),
# Edge cases
("", False),
("marimo-plotly", False), # Not an HTML tag
("<div>marimo-test</div>", False), # Not a marimo tag
# Non-string types
(123, False),
(None, False),
({"key": "value"}, False),
],
ids=[
"marimo_plotly",
"marimo_table",
"marimo_slider",
"marimo_output",
"list_with_marimo",
"list_marimo_nested",
"regular_div",
"regular_paragraph",
"regular_script",
"empty_string",
"marimo_text_not_tag",
"marimo_in_text_not_tag",
"integer",
"none",
"dict",
],
)
def test_is_marimo_component(html_content: Any, expected: bool) -> None:
"""Test _is_marimo_component detection of marimo custom elements."""
assert _is_marimo_component(html_content) == expected
def test_convert_mimebundle_filters_marimo_components() -> None:
"""Test that marimo components in text/html are filtered out of mimebundle."""
# Mimebundle with marimo-plotly HTML and PNG fallback
mimebundle = {
"text/html": "<marimo-plotly data-figure='{\"data\": []}'>",
"image/png": "data:image/png;base64,iVBORw0KGgo=",
}
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="application/vnd.marimo+mimebundle",
data=json.dumps(mimebundle),
)
result = _convert_marimo_output_to_ipynb(output, [])
# text/html should be filtered out because it contains a marimo component
assert result == [
{
"output_type": "display_data",
"data": {
"image/png": "iVBORw0KGgo="
}, # image/png should remain (with data URL prefix stripped)
"metadata": {},
}
]
def test_convert_mimebundle_keeps_regular_html() -> None:
    """Test that regular HTML is preserved in mimebundle."""
    html = "<div><p>Regular HTML content</p></div>"
    cell_output = CellOutput(
        channel=CellChannel.OUTPUT,
        mimetype="application/vnd.marimo+mimebundle",
        data=json.dumps({"text/html": html, "image/png": "PNG_DATA"}),
    )
    converted = _convert_marimo_output_to_ipynb(cell_output, [])
    # Non-marimo HTML passes through untouched, alongside the PNG entry.
    assert converted == [
        {
            "output_type": "display_data",
            "data": {
                "text/html": html,
                "image/png": "PNG_DATA",
            },
            "metadata": {},
        }
    ]
def test_convert_mimebundle_marimo_component_only_png_remains() -> None:
    """Test mimebundle with only marimo HTML and PNG produces only PNG output."""
    # Exercise two alternate input shapes at once: the bundle is a dict
    # (not a JSON string) and the HTML is a list of fragments.
    cell_output = CellOutput(
        channel=CellChannel.OUTPUT,
        mimetype="application/vnd.marimo+mimebundle",
        data={
            "text/html": ["<marimo-table data-data='[]'>"],
            "image/png": "VALID_PNG_DATA",
        },
    )
    converted = _convert_marimo_output_to_ipynb(cell_output, [])
    expected = [
        {
            "output_type": "display_data",
            "data": {"image/png": "VALID_PNG_DATA"},
            "metadata": {},
        }
    ]
    assert converted == expected
def test_convert_mimebundle_marimo_component_preserves_other_mimes() -> None:
    """Test that filtering marimo HTML preserves other MIME types."""
    mimebundle = {
        "text/html": "<marimo-slider value='5'>",
        "text/plain": "Slider(value=5)",
        "image/png": "PNG_DATA",
        "application/json": {"value": 5},
    }
    output = CellOutput(
        channel=CellChannel.OUTPUT,
        mimetype="application/vnd.marimo+mimebundle",
        data=json.dumps(mimebundle),
    )
    result = _convert_marimo_output_to_ipynb(output, [])
    # Only the marimo-component text/html entry is removed; every other
    # MIME entry in the bundle must survive unchanged.
    assert result == [
        {
            "output_type": "display_data",
            "data": {
                "text/plain": "Slider(value=5)",
                "image/png": "PNG_DATA",
                "application/json": {"value": 5},
            },
            "metadata": {},
        }
    ]
def test_convert_console_media_output() -> None:
    """Test that MEDIA channel console outputs (e.g., plt.show()) are converted."""
    console_outputs = [
        CellOutput(
            channel=CellChannel.STDOUT,
            mimetype="text/plain",
            data="Before plot\n",
        ),
        # A MEDIA-channel mimebundle, as produced by an inline figure.
        CellOutput(
            channel=CellChannel.MEDIA,
            mimetype="application/vnd.marimo+mimebundle",
            data={
                "text/plain": "<Figure size 640x480 with 1 Axes>",
                "image/png": "data:image/png;base64,iVBORw0KGgo=",
            },
        ),
        CellOutput(
            channel=CellChannel.STDOUT,
            mimetype="text/plain",
            data="After plot\n",
        ),
    ]
    result = _convert_marimo_output_to_ipynb(None, console_outputs)
    # Relative ordering of console outputs is preserved:
    # stream, display_data, stream.
    assert result == [
        {
            "output_type": "stream",
            "name": "stdout",
            "text": "Before plot\n",
        },
        {
            "output_type": "display_data",
            "metadata": {},
            "data": {
                "text/plain": "<Figure size 640x480 with 1 Axes>",
                "image/png": "iVBORw0KGgo=",  # Base64 extracted
            },
        },
        {
            "output_type": "stream",
            "name": "stdout",
            "text": "After plot\n",
        },
    ]
def test_convert_console_media_with_marimo_component() -> None:
    """Test that marimo components in console MEDIA outputs are filtered."""
    media_output = CellOutput(
        channel=CellChannel.MEDIA,
        mimetype="application/vnd.marimo+mimebundle",
        data={
            "text/html": "<marimo-plotly data-figure='{}'>",
            "image/png": "PNG_FALLBACK_DATA",
        },
    )
    converted = _convert_marimo_output_to_ipynb(None, [media_output])
    # The marimo component HTML is dropped; the PNG fallback remains.
    assert converted == [
        {
            "output_type": "display_data",
            "metadata": {},
            "data": {"image/png": "PNG_FALLBACK_DATA"},
        }
    ]
def test_convert_console_output_channel() -> None:
    """Test that OUTPUT channel console outputs are also handled."""
    png_output = CellOutput(
        channel=CellChannel.OUTPUT,
        mimetype="image/png",
        data="data:image/png;base64,CONSOLE_PNG_DATA",
    )
    converted = _convert_marimo_output_to_ipynb(None, [png_output])
    # The data-URL envelope is stripped, leaving the raw base64 payload.
    assert converted == [
        {
            "output_type": "display_data",
            "metadata": {},
            "data": {"image/png": "CONSOLE_PNG_DATA"},
        }
    ]
class TestCleanAnsiForExport:
    """Tests for _clean_ansi_for_export (ANSI sanitization before export)."""

    @pytest.mark.parametrize(
        ("input_text", "expected"),
        [
            # Plain text passes through unchanged
            ("Hello World", "Hello World"),
            ("", ""),
            # Standard ANSI color codes are preserved (for nbconvert's template)
            ("\x1b[34mBlue text\x1b[0m", "\x1b[34mBlue text\x1b[0m"),
            (
                "\x1b[31mRed\x1b[0m and \x1b[32mGreen\x1b[0m",
                "\x1b[31mRed\x1b[0m and \x1b[32mGreen\x1b[0m",
            ),
            # Character set selection sequences ARE stripped (cause LaTeX errors)
            ("\x1b(B", ""),
            ("\x1b)B", ""),
            ("\x1b(A", ""),
            ("\x1b(0", ""),
            # Mixed: color codes preserved, character set sequences stripped
            (
                "\x1b[34m[D 260124 22:51:42 cell_runner:711]\x1b(B\x1b[m Running",
                "\x1b[34m[D 260124 22:51:42 cell_runner:711]\x1b[m Running",
            ),
            # Multiple character set sequences stripped
            ("\x1b(B\x1b[34mText\x1b(B\x1b[0m\x1b)A", "\x1b[34mText\x1b[0m"),
        ],
        ids=[
            "plain_text",
            "empty_string",
            "single_color",
            "multiple_colors",
            "charset_paren_b",
            "charset_close_b",
            "charset_paren_a",
            "charset_paren_0",
            "marimo_logger_output",
            "multiple_charset_sequences",
        ],
    )
    def test_clean_ansi_for_export(
        self, input_text: str, expected: str
    ) -> None:
        """Test _clean_ansi_for_export with various ANSI sequences."""
        result = _clean_ansi_for_export(input_text)
        assert result == expected

    def test_clean_ansi_for_export_non_string(self) -> None:
        """Test _clean_ansi_for_export with non-string inputs."""
        # Non-strings are stringified before cleaning.
        assert _clean_ansi_for_export({"key": "value"}) == "{'key': 'value'}"
        assert _clean_ansi_for_export([1, 2, 3]) == "[1, 2, 3]"
        assert _clean_ansi_for_export(None) == "None"
        assert _clean_ansi_for_export(123) == "123"

    def test_console_output_with_ansi_cleaned(self) -> None:
        """Test console output conversion cleans ANSI sequences."""
        # Simulated marimo logger output with problematic \x1b(B sequence
        raw_log = "\x1b[34m[D 260124 22:51:42 cell_runner:711]\x1b(B\x1b[m Test message\n"
        console_outputs = [
            CellOutput(
                channel=CellChannel.STDERR,
                mimetype="text/plain",
                data=raw_log,
            ),
        ]
        result = _convert_marimo_output_to_ipynb(None, console_outputs)
        assert result == [
            {
                "output_type": "stream",
                "name": "stderr",
                "text": "\x1b[34m[D 260124 22:51:42 cell_runner:711]\x1b[m Test message\n",
            }
        ]

    def test_console_outputs_multiple_with_ansi(self) -> None:
        """Test multiple console outputs with ANSI codes are all cleaned."""
        console_outputs = [
            CellOutput(
                channel=CellChannel.STDOUT,
                mimetype="text/plain",
                data="\x1b[32m[I log]\x1b(B\x1b[m stdout message\n",
            ),
            CellOutput(
                channel=CellChannel.STDERR,
                mimetype="text/plain",
                data="\x1b[34m[D log]\x1b(B\x1b[m stderr message\n",
            ),
        ]
        result = _convert_marimo_output_to_ipynb(None, console_outputs)
        assert result == [
            {
                "output_type": "stream",
                "name": "stdout",
                "text": "\x1b[32m[I log]\x1b[m stdout message\n",
            },
            {
                "output_type": "stream",
                "name": "stderr",
                "text": "\x1b[34m[D log]\x1b[m stderr message\n",
            },
        ]

    @pytest.mark.skip(
        reason="This test can take some time and requires some libraries like xelatex, useful for local testing"
    )
    def test_clean_ansi_does_not_crash_pdf_export(self) -> None:
        """Integration test: verify cleaned output doesn't crash nbconvert PDF export."""
        pytest.importorskip("nbconvert")
        import nbformat
        from nbconvert import PDFExporter

        # Simulated marimo logger output with problematic \x1b(B sequence
        raw_log = (
            "\x1b[34m[D 260124 22:51:42 cell_runner:711]\x1b(B\x1b[m Running\n"
        )
        # Verify raw log causes PDF export to crash
        notebook = nbformat.v4.new_notebook()
        cell = nbformat.v4.new_code_cell("print('test')")
        cell.outputs = [
            nbformat.v4.new_output("stream", name="stderr", text=raw_log)
        ]
        notebook.cells.append(cell)
        exporter = PDFExporter()
        # Fix: previously bound the exception (`as e`) without using it.
        with pytest.raises(OSError):
            exporter.from_notebook_node(notebook)
        # Clean the output (this is what from_ir.py does)
        cleaned_output = _clean_ansi_for_export(raw_log)
        # Create a notebook with stream output containing cleaned ANSI
        notebook = nbformat.v4.new_notebook()
        cell = nbformat.v4.new_code_cell("print('test')")
        cell.outputs = [
            nbformat.v4.new_output(
                "stream", name="stderr", text=cleaned_output
            )
        ]
        notebook.cells.append(cell)
        # This should not raise an error about invalid characters
        pdf_output, _resources = exporter.from_notebook_node(notebook)
        assert isinstance(pdf_output, bytes)
        assert len(pdf_output) > 0
class TestConvertMarimoTexToLatex:
    """Test conversion of marimo-tex HTML elements to standard LaTeX.

    These tests use the actual _md() function which causes LaTeX to be converted to marimo-tex HTML elements.
    """

    @pytest.mark.parametrize(
        ("latex_input", "expected_output"),
        [
            # Inline math
            (r"$f(x) = e^x$", "$f(x) = e^x$"),
            (r"$x^2 + y^2$", "$x^2 + y^2$"),
            # Block math
            (r"$$f(x) = e^x$$", "$$f(x) = e^x$$"),
            # Fractions
            (r"$\frac{x^2}{2!}$", r"$\frac{x^2}{2!}$"),
            # Greek letters
            (r"$\alpha + \beta = \gamma$", r"$\alpha + \beta = \gamma$"),
            # Square root
            (r"$\sqrt{x}$", r"$\sqrt{x}$"),
            # Subscript and superscript
            (r"$x_i^2$", r"$x_i^2$"),
            # Summation
            (r"$\sum_{i=1}^{n} x_i$", r"$\sum_{i=1}^{n} x_i$"),
            # Integral
            (r"$\int_0^1 x dx$", r"$\int_0^1 x dx$"),
            # Limits
            (
                r"$\lim_{x \to 0} \frac{\sin x}{x}$",
                r"$\lim_{x \to 0} \frac{\sin x}{x}$",
            ),
        ],
        ids=[
            "inline_exponential",
            "inline_polynomial",
            "block_exponential",
            "fractions",
            "greek_letters",
            "square_root",
            "subscript_superscript",
            "summation",
            "integral",
            "limits",
        ],
    )
    def test_simple_latex_conversion(
        self, latex_input: str, expected_output: str
    ) -> None:
        """Test simple LaTeX expressions are converted correctly."""
        # Round-trip: _md() wraps LaTeX in marimo-tex elements, the
        # converter must restore the original delimiters.
        html = _md(latex_input).text
        result = _convert_marimo_tex_to_latex(html)
        assert expected_output in result
        assert "marimo-tex" not in result

    def test_block_math_multiline(self) -> None:
        """Test multiline block math conversion."""
        html = _md(
            r"""$$
            f(x) = 1 + x
            $$"""
        ).text
        result = _convert_marimo_tex_to_latex(html)
        assert "$$" in result
        assert "f(x) = 1 + x" in result
        assert "marimo-tex" not in result

    def test_mixed_content(self) -> None:
        """Test text with embedded inline math."""
        html = _md(r"The equation $E = mc^2$ is famous.").text
        result = _convert_marimo_tex_to_latex(html)
        assert "$E = mc^2$" in result
        assert "The equation" in result
        assert "is famous" in result
        assert "marimo-tex" not in result

    def test_multiple_inline_math(self) -> None:
        """Test multiple inline math expressions."""
        html = _md(r"$a$ and $b$ and $c$").text
        result = _convert_marimo_tex_to_latex(html)
        assert "$a$" in result
        assert "$b$" in result
        assert "$c$" in result
        assert "marimo-tex" not in result

    def test_complex_document(self) -> None:
        """Test conversion of a complex document with multiple math types."""
        html = _md(
            r"""The exponential function $f(x) = e^x$ can be represented as
            $$
            f(x) = 1 + x + \frac{x^2}{2!} + \frac{x^3}{3!} + \ldots
            $$"""
        ).text
        result = _convert_marimo_tex_to_latex(html)
        assert "$f(x) = e^x$" in result
        assert "$$" in result
        assert r"\frac{x^2}{2!}" in result
        assert "marimo-tex" not in result

    def test_no_math_passes_through(self) -> None:
        """Test that text without math passes through unchanged."""
        html = _md("Just plain text here.").text
        result = _convert_marimo_tex_to_latex(html)
        assert "Just plain text here" in result

    def test_align_environment(self) -> None:
        """Test LaTeX align environment."""
        html = _md(
            r"""$$
            \begin{align}
            a &= b \\
            c &= d
            \end{align}
            $$"""
        ).text
        result = _convert_marimo_tex_to_latex(html)
        assert "$$" in result
        assert r"\begin{align}" in result
        assert "marimo-tex" not in result

    def test_nested_md_with_inline_math(self) -> None:
        """Test nested mo.md() calls with inline math."""
        inner = _md(r"$x^2$")
        outer_html = _md(f"The value is {inner}").text
        result = _convert_marimo_tex_to_latex(outer_html)
        assert "$x^2$" in result
        assert "marimo-tex" not in result
        # NOTE(review): "||(" is presumably marimo's internal inline-math
        # delimiter, which must not leak into the output — confirm.
        assert "||(" not in result

    def test_nested_md_with_block_math(self) -> None:
        """Test nested mo.md() calls with block math."""
        inner = _md(r"$$y^2$$")
        outer_html = _md(f"Result: {inner}").text
        result = _convert_marimo_tex_to_latex(outer_html)
        assert "y^2" in result
        assert "marimo-tex" not in result
        assert "||(" not in result
        assert "||[" not in result

    def test_fstring_md_with_variable(self) -> None:
        """Test mo.md() with f-string variable interpolation."""
        var = 42
        html = _md(f"Value is {var} and $x^2$").text
        result = _convert_marimo_tex_to_latex(html)
        assert "42" in result
        assert "$x^2$" in result
        assert "marimo-tex" not in result

    def test_complex_nested_md(self) -> None:
        """Test complex nested mo.md() with mixed content."""
        math_part = _md(r"$\frac{a}{b}$")
        text_with_math = _md(f"Equation: {math_part} is important").text
        result = _convert_marimo_tex_to_latex(text_with_math)
        assert r"\frac{a}{b}" in result
        assert "marimo-tex" not in result
class TestConvertLatexDelimitersForJupyter:
    """Test conversion of LaTeX delimiters for Jupyter compatibility."""

    @pytest.mark.parametrize(
        ("markdown_input", "expected"),
        [
            # Display math: \[...\] → $$...$$
            (
                r"Display math: \[f(x) = e^x\]",
                "Display math: $$f(x) = e^x$$",
            ),
            # Display math with whitespace - gets stripped
            (
                r"\[ f(x) = e^x \]",
                "$$f(x) = e^x$$",
            ),
            # Multiline display math
            (
                r"""\[
f(x) = 1 + x + \frac{x^2}{2!}
\]""",
                r"$$f(x) = 1 + x + \frac{x^2}{2!}$$",
            ),
            # Inline math: \(...\) → $...$
            (
                r"Inline math: \(f(x) = e^x\)",
                "Inline math: $f(x) = e^x$",
            ),
            # Inline math with whitespace - gets stripped
            (
                r"\( f(x) \)",
                "$f(x)$",
            ),
            # Mixed delimiters
            (
                r"Inline \(x^2\) and display \[y^2\]",
                "Inline $x^2$ and display $$y^2$$",
            ),
            # Already using $...$ passes through unchanged
            (
                "Already $x^2$ and $$y^2$$ work",
                "Already $x^2$ and $$y^2$$ work",
            ),
            # No LaTeX at all
            (
                "Plain text without math",
                "Plain text without math",
            ),
            # Complex expression
            (
                r"\(\sigma\sqrt{100}\)",
                r"$\sigma\sqrt{100}$",
            ),
            # Multiple inline
            (
                r"\(a\) and \(b\) and \(c\)",
                "$a$ and $b$ and $c$",
            ),
        ],
        ids=[
            "display_simple",
            "display_with_spaces",
            "display_multiline",
            "inline_simple",
            "inline_with_spaces",
            "mixed_delimiters",
            "already_dollar_signs",
            "no_latex",
            "complex_expression",
            "multiple_inline",
        ],
    )
    def test_convert_latex_delimiters(
        self, markdown_input: str, expected: str
    ) -> None:
        """Test LaTeX delimiter conversion."""
        result = _convert_latex_delimiters_for_jupyter(markdown_input)
        assert result == expected

    def test_convert_latex_in_code_blocks_limitation(self) -> None:
        r"""Test that documents a known limitation with code blocks.

        Note: The simple regex approach will convert \[...\] even inside
        code blocks. This is acceptable because:

        1. Code blocks in markdown cells are rare
        2. Having \[ and \] on different lines in code is also rare
        3. A proper fix would require a full markdown parser
        """
        # This test verifies the conversion works for standalone math
        markdown = r"""Some text
\[x^2\]
More text"""
        result = _convert_latex_delimiters_for_jupyter(markdown)
        assert "$$x^2$$" in result
        assert r"\[" not in result

    def test_convert_latex_real_world_example(self) -> None:
        """Test a real-world markdown example."""
        # Input and expected are kept line-for-line parallel; only the
        # \[...\] and \(...\) lines change.
        markdown = r"""## Markdown / LaTeX
**bold** and _italic_
$\sigma\sqrt{100}$
$$
\sigma\sqrt{100}
$$
\[ \sigma\sqrt{100} \]
\( \sigma\sqrt{100} \)
"""
        expected = r"""## Markdown / LaTeX
**bold** and _italic_
$\sigma\sqrt{100}$
$$
\sigma\sqrt{100}
$$
$$\sigma\sqrt{100}$$
$\sigma\sqrt{100}$
"""
        result = _convert_latex_delimiters_for_jupyter(markdown)
        assert result == expected
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_convert/ipynb/test_from_ir.py",
"license": "Apache License 2.0",
"lines": 946,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/basic.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
x = 10
return (x,)
@app.cell
def _(x, y):
z = y + x
return (z,)
@app.cell
def _(x):
y = x + 1
return (y,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/basic.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/empty_notebook.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/empty_notebook.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/error_ancestor.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
raise ValueError("ancestor error")
@app.cell
def _(x):
y = x + 1
return (y,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/error_ancestor.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/error_complex.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _(y): # noqa: F811
x = y
return (x,)
@app.cell
def _():
Y = 0 # noqa: N806
return (Y,)
@app.cell
def _(z):
Z = z # noqa: N806
return (Z,)
@app.cell
def _():
z = 1 / 0
return (z,)
@app.cell
def _(x, Y): # noqa: N803
y = x
y = Y # noqa: F841
return (y,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/error_complex.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/error_cycle.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _(y):
x = y
return (x,)
@app.cell
def _(x):
y = x
return (y,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/error_cycle.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/error_redefinition.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
x = 1
return (x,)
@app.cell
def _():
x = 2 # Redefines x
return (x,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/error_redefinition.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/error_undefined.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
x = undefined_variable # noqa: F821
return (x,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/error_undefined.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/error_value_with_stdout.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
print("hello before error")
raise ValueError("test error")
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/error_value_with_stdout.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/with_console_output.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
import sys
sys.stdout.write("hello stdout")
return (sys,)
@app.cell
def _(sys):
sys.stderr.write("hello stderr")
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/with_console_output.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/with_dependencies.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
x = 1
return (x,)
@app.cell
def _(x):
y = x + 1
return (y,)
@app.cell
def _(y):
z = y + 1
return (z,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/with_dependencies.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/with_layout.py | import marimo
__generated_with = "0.19.2"
app = marimo.App(layout_file="layouts/layout.json")
@app.cell
def _():
x = 1
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/with_layout.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/with_outputs.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(mo):
import altair as alt
import polars as pl
df = pl.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
chart = alt.Chart(df).mark_point().encode(x="x", y="y")
mo.ui.altair_chart(chart)
return alt, chart, df, pl
@app.cell
def _():
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [4, 5, 6])
plt.gca()
return ax, fig, plt
@app.cell
def _(mo):
mo.md("# Hello World")
return
@app.cell
def _():
print("hello stdout")
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/with_outputs.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/fixtures/apps/with_stop.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
import marimo as mo
mo.stop(True, "Stopped early")
return (mo,)
@app.cell
def _():
x = 10
return (x,)
@app.cell
def _(x):
y = x + 1
return (y,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/fixtures/apps/with_stop.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/test_export_ipynb.py | """Snapshot tests for ipynb export functionality."""
from __future__ import annotations
import pathlib
import sys
from pathlib import Path
import pytest
from marimo._ast.app import InternalApp
from marimo._ast.load import load_app
from marimo._convert.ipynb.from_ir import convert_from_ir_to_ipynb
from marimo._dependencies.dependencies import DependencyManager
from marimo._server.export import run_app_then_export_as_ipynb
from marimo._utils.marimo_path import MarimoPath
from tests.mocks import delete_lines_with_files, simplify_images, snapshotter
SELF_DIR = pathlib.Path(__file__).parent
FIXTURES_DIR = SELF_DIR / "fixtures" / "apps"
snapshot = snapshotter(__file__)
HAS_DEPS = (
DependencyManager.polars.has()
and DependencyManager.altair.has()
and DependencyManager.matplotlib.has()
)
pytest.importorskip("nbformat")
def _load_fixture_app(path: Path | str) -> InternalApp:
    """Load a fixture app by name.

    Args:
        path: Full path to a fixture file, or the bare fixture name
            (resolved against FIXTURES_DIR with a ``.py`` suffix).

    Returns:
        The loaded app wrapped in an InternalApp.
    """
    if isinstance(path, str):
        path = FIXTURES_DIR / f"{path}.py"
    app = load_app(path)
    # load_app returns None when the file defines no app.
    assert app is not None
    return InternalApp(app)
# Apps with heavy dependencies (matplotlib, pandas, polars, etc) that timeout in CI
HEAVY_DEPENDENCY_APPS = {"with_outputs"}
@pytest.mark.parametrize(
    "app_path",
    [
        path
        for path in FIXTURES_DIR.glob("*.py")
        if path.stem not in HEAVY_DEPENDENCY_APPS
    ],
    ids=lambda path: path.stem,
)
@pytest.mark.skipif(not HAS_DEPS, reason="optional dependencies not installed")
@pytest.mark.skipif(
    sys.version_info < (3, 11), reason="3.10 has different stack trace format"
)
async def test_export_ipynb(app_path: Path) -> None:
    """Test ipynb export with actual execution outputs."""
    internal_app = _load_fixture_app(app_path)
    # Test without session view
    content = convert_from_ir_to_ipynb(
        internal_app, sort_mode="top-down", session_view=None
    )
    assert content is not None
    # Test with actual run
    result = await run_app_then_export_as_ipynb(
        MarimoPath(app_path),
        sort_mode="top-down",
        cli_args={},
        argv=None,
    )
    assert result.download_filename == f"{app_path.stem}.ipynb"
    # Normalize machine-specific content (file paths, image payloads)
    # before comparing against the stored snapshot.
    content = delete_lines_with_files(result.text)
    content = simplify_images(content)
    snapshot(f"ipynb/{app_path.stem}.ipynb.txt", content)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/test_export_ipynb.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_convert/ipynb/from_ir.py | # Copyright 2026 Marimo. All rights reserved.
"""Export marimo notebooks to Jupyter ipynb format."""
from __future__ import annotations
import base64
import io
import json
import re
from html.parser import HTMLParser
from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast
from marimo._ast.cell import Cell, CellConfig
from marimo._ast.errors import CycleError, MultipleDefinitionError
from marimo._ast.names import is_internal_cell_name
from marimo._convert.common.format import get_markdown_from_cell
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.cell_output import CellChannel, CellOutput
from marimo._messaging.errors import (
Error as MarimoError,
MarimoExceptionRaisedError,
)
from marimo._messaging.mimetypes import METADATA_KEY
from marimo._runtime import dataflow
if TYPE_CHECKING:
from nbformat.notebooknode import NotebookNode # type: ignore
from marimo._ast.app import InternalApp
from marimo._session.state.session_view import SessionView
# Note: We intentionally omit "version" as it would vary across environments
# and break reproducibility. The marimo_version in metadata is sufficient.
DEFAULT_LANGUAGE_INFO = {
"codemirror_mode": {"name": "ipython", "version": 3},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
}
def convert_from_ir_to_ipynb(
    app: InternalApp,
    *,
    sort_mode: Literal["top-down", "topological"],
    session_view: Optional[SessionView] = None,
) -> str:
    """Export notebook as .ipynb, optionally including outputs.

    Args:
        app: The internal app to export
        sort_mode: How to order cells - "top-down" preserves notebook order,
            "topological" orders by dependencies
        session_view: Optional session view to include cell outputs

    Returns:
        JSON string of the .ipynb notebook
    """
    DependencyManager.nbformat.require("to convert marimo notebooks to ipynb")
    import nbformat  # type: ignore[import-not-found]

    from marimo import __version__

    notebook = nbformat.v4.new_notebook()  # type: ignore[no-untyped-call]
    notebook["cells"] = []
    # Add marimo-specific notebook metadata
    marimo_metadata: dict[str, Any] = {}
    marimo_metadata = {
        "marimo_version": __version__,
    }
    # Only persist app config values that differ from the defaults.
    app_config_diff = app.config.asdict_difference()
    if app_config_diff:
        marimo_metadata["app_config"] = app_config_diff
    # Include header if present (PEP 723 metadata, docstrings, etc.)
    if app._app._header:
        marimo_metadata["header"] = app._app._header
    notebook["metadata"]["marimo"] = marimo_metadata
    # Add standard Jupyter language_info (no kernelspec)
    notebook["metadata"]["language_info"] = DEFAULT_LANGUAGE_INFO
    # Determine cell order based on sort_mode
    if sort_mode == "top-down":
        cell_data_list = list(app.cell_manager.cell_data())
    else:
        # Topological sort - try to sort, fall back to top-down on cycle
        try:
            graph = app.graph
            sorted_ids = dataflow.topological_sort(graph, graph.cells.keys())
            # Build cell_data list in topological order
            cell_data_list = [
                app.cell_manager.cell_data_at(cid)
                for cid in sorted_ids
                if cid in graph.cells
            ]
        except (CycleError, MultipleDefinitionError):
            # Fall back to top-down order if graph is invalid
            cell_data_list = list(app.cell_manager.cell_data())
    for cell_data in cell_data_list:
        cid = cell_data.cell_id
        # Get outputs if session_view is provided
        outputs: list[NotebookNode] = []
        if session_view is not None:
            cell_output = session_view.get_cell_outputs([cid]).get(cid, None)
            cell_console_outputs = session_view.get_cell_console_outputs(
                [cid]
            ).get(cid, [])
            outputs = _convert_marimo_output_to_ipynb(
                cell_output, cell_console_outputs
            )
        notebook_cell = _create_ipynb_cell(
            cell_id=cid,
            code=cell_data.code,
            name=cell_data.name,
            config=cell_data.config,
            cell=cell_data.cell,
            outputs=outputs,
        )
        notebook["cells"].append(notebook_cell)
    # Serialize via nbformat so validation/normalization is applied.
    stream = io.StringIO()
    nbformat.write(notebook, stream)  # type: ignore[no-untyped-call]
    stream.seek(0)
    return stream.read()
def _create_ipynb_cell(
    cell_id: str,
    code: str,
    name: str,
    config: CellConfig,
    cell: Optional[Cell],
    outputs: list[NotebookNode],
) -> NotebookNode:
    """Create an ipynb cell with metadata.

    Markdown-only cells are emitted as markdown cells; everything else
    becomes a code cell.

    Args:
        cell_id: The cell's unique identifier
        code: The cell's source code
        name: The cell's name
        config: The cell's configuration
        cell: Optional Cell object for markdown detection
        outputs: List of cell outputs (ignored for markdown cells)

    Returns:
        An nbformat v4 markdown or code cell node.
    """
    import nbformat

    # Try to extract markdown if we have a valid Cell
    if cell is not None:
        markdown_string = get_markdown_from_cell(cell, code)
        if markdown_string is not None:
            # Jupyter renders $/$$ delimiters, not \(...\) / \[...\].
            markdown_string = _convert_latex_delimiters_for_jupyter(
                markdown_string
            )
            node = cast(
                nbformat.NotebookNode,
                nbformat.v4.new_markdown_cell(markdown_string, id=cell_id),  # type: ignore[no-untyped-call]
            )
            _add_marimo_metadata(node, name, config)
            return node
    node = cast(
        nbformat.NotebookNode,
        nbformat.v4.new_code_cell(code, id=cell_id),  # type: ignore[no-untyped-call]
    )
    if outputs:
        node.outputs = outputs
    _add_marimo_metadata(node, name, config)
    return node
def _add_marimo_metadata(
    node: NotebookNode, name: str, config: CellConfig
) -> None:
    """Add marimo-specific metadata to a notebook cell.

    Only non-default values are written, so unconfigured cells carry no
    extra metadata.
    """
    marimo_metadata: dict[str, Any] = {}
    # Persist only config fields that differ from the defaults.
    if config.is_different_from_default():
        marimo_metadata["config"] = config.asdict_without_defaults()
    # Auto-generated internal cell names are not worth persisting.
    if not is_internal_cell_name(name):
        marimo_metadata["name"] = name
    if marimo_metadata:
        node["metadata"]["marimo"] = marimo_metadata
# Output conversion helpers
def _maybe_extract_dataurl(data: Any) -> Any:
if isinstance(data, str) and data.startswith("data:image/svg+xml;base64,"):
# Decode SVG from base64 to plain text XML
payload = data[len("data:image/svg+xml;base64,") :]
return base64.b64decode(payload).decode()
if (
isinstance(data, str)
and data.startswith("data:")
and ";base64," in data
):
return data.split(";base64,")[1]
else:
return data
def _is_marimo_component(html_content: Any) -> bool:
"""Check if the content is a marimo component."""
if isinstance(html_content, list):
html_content = "".join(html_content)
if not isinstance(html_content, str):
return False
return "<marimo-" in html_content
class _HTMLTextExtractor(HTMLParser):
    """Extract plain text from HTML.

    Collects the character data of every text node fed to the parser;
    tags and attributes are discarded.
    """

    def __init__(self) -> None:
        super().__init__()
        # Accumulated text fragments, in document order.
        self.text_parts: list[str] = []

    def handle_data(self, data: str) -> None:
        # Called by HTMLParser for each run of character data.
        self.text_parts.append(data)

    def get_text(self) -> str:
        """Return all collected text concatenated together."""
        return "".join(self.text_parts)
def _strip_html_from_traceback(html_traceback: str) -> list[str]:
    """Convert HTML-formatted traceback to plain text lines.
    Also strips temporary file paths from tracebacks. Exports run cells in
    the kernel which compiles them with temp file paths. It's not easy to
    set anonymous source since exports are done in the kernel, so we strip
    the paths here instead.
    """
    # First drop all markup, keeping only the text nodes.
    parser = _HTMLTextExtractor()
    parser.feed(html_traceback)
    text = parser.get_text()
    # Strip temp file paths like /tmp/marimo_12345/__marimo__cell_Hbol_.py
    # Replace with empty string to get cleaner tracebacks
    # NOTE: [^"]* is greedy, so any non-quote text preceding the temp path
    # in the same region is stripped along with it (terser output).
    text = re.sub(r'[^"]*__marimo__cell_[^"]*\.py', "", text)
    return text.splitlines()
def _extract_traceback_from_console(
    console_outputs: list[CellOutput],
) -> list[str]:
    """Return the first traceback found in console output, as text lines.

    Scans stderr entries for marimo's traceback mimetype; returns an
    empty list when no traceback is present.
    """
    traceback_mimetype = "application/vnd.marimo+traceback"
    for entry in console_outputs:
        is_traceback = (
            entry.channel == CellChannel.STDERR
            and entry.mimetype == traceback_mimetype
        )
        if is_traceback:
            return _strip_html_from_traceback(str(entry.data))
    return []
def _get_error_info(
error: Union[MarimoError, dict[str, Any]],
) -> tuple[str, str]:
"""Extract ename and evalue from a marimo error."""
from marimo._messaging.msgspec_encoder import asdict
if isinstance(error, dict):
return error.get("type", "UnknownError"), error.get("msg", "")
elif isinstance(error, MarimoExceptionRaisedError):
return error.exception_type, error.msg.rstrip().strip(":")
else:
# For other error types, use the tag as ename and describe() as evalue
error_dict = asdict(error)
return error_dict.get("type", "Error"), error.describe()
def _convert_output_to_ipynb(
    output: CellOutput,
) -> Optional[NotebookNode]:
    """Convert certain outputs (OUTPUT/MEDIA channel) to IPython notebook format.
    Outputs like rich elements and LaTeX are converted to ensure they are compatible with IPython notebook format.
    Returns None if the output should be skipped or produces no data.
    """
    import nbformat

    if output.data is None:
        return None
    # Only rich cell output and media are representable here; other
    # channels (stdout/stderr/errors) are handled by the caller.
    if output.channel not in (CellChannel.OUTPUT, CellChannel.MEDIA):
        return None
    # Empty plain-text output would only render as a blank block.
    if output.mimetype == "text/plain" and (
        output.data == [] or output.data == ""
    ):
        return None
    data: dict[str, Any] = {}
    metadata: dict[str, Any] = {}
    if output.mimetype == "application/vnd.marimo+error":
        # Errors are handled separately via MARIMO_ERROR channel
        return None
    elif output.mimetype == "application/vnd.marimo+mimebundle":
        # A mimebundle maps mimetypes to payloads; it may arrive either
        # already parsed (dict) or as a JSON string.
        if isinstance(output.data, dict):
            mimebundle = output.data
        elif isinstance(output.data, str):
            mimebundle = json.loads(output.data)
        else:
            raise ValueError(f"Invalid data type: {type(output.data)}")
        for mime, content in mimebundle.items():
            if mime == METADATA_KEY and isinstance(content, dict):
                # Bundle-level metadata travels alongside the data.
                metadata = content
            elif mime == "text/html" and _is_marimo_component(content):
                # Skip marimo components because they cannot be rendered
                # in IPython notebook format
                continue
            else:
                data[mime] = _maybe_extract_dataurl(content)
    elif output.mimetype == "text/markdown" and isinstance(output.data, str):
        # Restore standard LaTeX delimiters in markdown output.
        data[output.mimetype] = _convert_marimo_tex_to_latex(output.data)
    else:
        data[output.mimetype] = _maybe_extract_dataurl(output.data)
    if not data:
        return None
    return cast(
        nbformat.NotebookNode,
        nbformat.v4.new_output(  # type: ignore[no-untyped-call]
            "display_data",
            data=data,
            metadata=metadata,
        ),
    )
def _clean_ansi_for_export(text: Any) -> str:
"""Clean ANSI escape codes for export, keeping color codes intact.
ANSI codes are terminal styling sequences (colors, bold, cursor movement)
used by logging libraries like rich, colorama, and marimo's own logger.
We keep standard color codes (like \\x1b[34m) so nbconvert's LaTeX template
can convert them to colors via its ansi2latex filter. However, we must strip
character set selection sequences (like \\x1b(B) which nbconvert doesn't
handle and cause LaTeX to fail with "invalid character" errors.
"""
if not isinstance(text, str):
return str(text)
# Strip character set selection sequences: ESC ( <char> or ESC ) <char>
# These have no visual effect and cause LaTeX compilation to fail
return re.sub(r"\x1b[()][A-Z0-9]", "", text)
def _convert_marimo_output_to_ipynb(
    cell_output: Optional[CellOutput], console_outputs: list[CellOutput]
) -> list[NotebookNode]:
    """Convert marimo output format to IPython notebook format."""
    import nbformat

    ipynb_outputs: list[NotebookNode] = []
    # Handle console outputs (stdout/stderr/media)
    for console_out in console_outputs:
        if console_out.channel == CellChannel.STDOUT:
            ipynb_outputs.append(
                cast(
                    nbformat.NotebookNode,
                    nbformat.v4.new_output(  # type: ignore[no-untyped-call]
                        "stream",
                        name="stdout",
                        # https://nbformat.readthedocs.io/en/latest/format_description.html#stream-output
                        text=_clean_ansi_for_export(console_out.data),
                    ),
                )
            )
        elif console_out.channel == CellChannel.STDERR:
            # Skip tracebacks - they're included in error outputs
            if console_out.mimetype == "application/vnd.marimo+traceback":
                continue
            ipynb_outputs.append(
                cast(
                    nbformat.NotebookNode,
                    nbformat.v4.new_output(  # type: ignore[no-untyped-call]
                        "stream",
                        name="stderr",
                        text=_clean_ansi_for_export(console_out.data),
                    ),
                )
            )
        elif console_out.channel in (CellChannel.OUTPUT, CellChannel.MEDIA):
            # Rich console output (e.g. media) converts like cell output.
            ipynb_compatible_output = _convert_output_to_ipynb(console_out)
            if ipynb_compatible_output is not None:
                ipynb_outputs.append(ipynb_compatible_output)
    if not cell_output:
        return ipynb_outputs
    if cell_output.data is None:
        return ipynb_outputs
    if cell_output.channel == CellChannel.MARIMO_ERROR:
        # One ipynb "error" output per marimo error; the traceback (if
        # any) is recovered from the console's stderr entries.
        traceback_lines = _extract_traceback_from_console(console_outputs)
        errors = cast(
            list[Union[MarimoError, dict[str, Any]]], cell_output.data
        )
        for error in errors:
            ename, evalue = _get_error_info(error)
            ipynb_outputs.append(
                cast(
                    nbformat.NotebookNode,
                    nbformat.v4.new_output(  # type: ignore[no-untyped-call]
                        "error",
                        ename=ename,
                        evalue=evalue,
                        traceback=traceback_lines,
                    ),
                )
            )
        return ipynb_outputs
    ipynb_compatible_output = _convert_output_to_ipynb(cell_output)
    if ipynb_compatible_output is not None:
        ipynb_outputs.append(ipynb_compatible_output)
    return ipynb_outputs
def _convert_latex_delimiters_for_jupyter(markdown_string: str) -> str:
"""Convert LaTeX delimiters that nbconvert can't handle. See https://github.com/jupyter/nbconvert/issues/477"""
# Convert display math \[...\] to $$...$$
# Preserve internal whitespace but trim the delimiter boundaries
def replace_display(match: re.Match[str]) -> str:
content = match.group(1)
return f"$${content.strip()}$$"
markdown_string = re.sub(
r"\\\[(.*?)\\\]", replace_display, markdown_string, flags=re.DOTALL
)
# Convert inline math \(...\) to $...$
# Remove spaces adjacent to delimiters
def replace_inline(match: re.Match[str]) -> str:
content = match.group(1)
return f"${content.strip()}$"
markdown_string = re.sub(
r"\\\((.*?)\\\)", replace_inline, markdown_string, flags=re.DOTALL
)
return markdown_string
def _convert_marimo_tex_to_latex(html_string: str) -> str:
"""Convert marimo-tex elements back to standard LaTeX delimiters.
Keep in sync with TexPlugin.tsx
Converts:
- <marimo-tex ...>||(content||)</marimo-tex> → $content$ (inline)
- <marimo-tex ...>||[content||]</marimo-tex> → $$content$$ (block)
- <marimo-tex ...>||(||(content||)||)</marimo-tex> → $$content$$ (nested display)
"""
def replace_tex(match: re.Match[str]) -> str:
content = match.group(1)
# Handle nested display math: ||(||(content||)||)
# Must check this FIRST and be more specific
if content.startswith("||(||(") and content.endswith("||)||)"):
inner = content[6:-6] # Strip ||(||( and ||)||)
return f"$${inner}$$"
# Handle block math: ||[content||]
elif content.startswith("||[") and content.endswith("||]"):
inner = content[3:-3]
return f"$${inner}$$"
# Handle inline math: ||(content||)
elif content.startswith("||(") and content.endswith("||)"):
inner = content[3:-3]
return f"${inner}$" # Single $ for inline!
else:
return content
# Match <marimo-tex ...>content</marimo-tex>
# Use non-greedy matching and handle potential attributes
pattern = r"<marimo-tex[^>]*>(.*?)</marimo-tex>"
return re.sub(pattern, replace_tex, html_string, flags=re.DOTALL)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_convert/ipynb/from_ir.py",
"license": "Apache License 2.0",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_convert/markdown/from_ir.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import ast
import os
import re
import textwrap
from typing import TYPE_CHECKING
from marimo import _loggers
from marimo._ast import codegen
from marimo._ast.compiler import const_or_id
from marimo._ast.names import is_internal_cell_name
from marimo._convert.common.format import get_markdown_from_cell
from marimo._schemas.serialization import NotebookSerializationV1
from marimo._types.ids import CellId_t
from marimo._version import __version__
if TYPE_CHECKING:
from marimo._ast.cell import CellImpl
from marimo._ast.visitor import Language
LOGGER = _loggers.marimo_logger()
def convert_from_ir_to_markdown(
    notebook: NotebookSerializationV1,
    filename: str | None = None,
) -> str:
    """Serialize a notebook IR to marimo-flavored markdown (or qmd).

    Emits a YAML frontmatter header (title, marimo version, non-default
    app config, recovered user frontmatter) followed by one block per
    cell: sanitized markdown cells become plain markdown, everything
    else becomes a fenced code block with attributes.
    """
    from marimo._ast.app_config import _AppConfig
    from marimo._ast.compiler import compile_cell
    from marimo._convert.markdown.to_ir import (
        formatted_code_block,
        is_sanitized_markdown,
    )
    from marimo._utils import yaml

    filename = filename or notebook.filename or "notebook.md"
    app_title = notebook.app.options.get("app_title", None)
    if not app_title:
        # No explicit title: derive one from the file name.
        app_title = _format_filename_title(filename)
    metadata: dict[str, str | list[str]] = {}
    metadata.update(
        {
            "title": app_title,
            "marimo-version": __version__,
        }
    )
    # Put data from AppFileManager into the yaml header.
    ignored_keys = {"app_title"}
    default_config = _AppConfig().asdict()
    # Get values defined in _AppConfig without explicitly extracting keys,
    # as long as it isn't the default.
    metadata.update(
        {
            k: v
            for k, v in notebook.app.options.items()
            if k not in ignored_keys and v != default_config.get(k)
        }
    )
    # Recover frontmatter metadata from header
    if notebook.header and notebook.header.value:
        try:
            frontmatter = yaml.load(notebook.header.value)
            if isinstance(frontmatter, dict):
                # Insert metadata before config so config takes precedence
                _recovered = dict(frontmatter)
                _recovered.update(metadata)
                metadata = _recovered
        except (yaml.YAMLError, AssertionError):
            # Not valid YAML dict — treat as script preamble
            metadata["header"] = notebook.header.value.strip()
    # Add the expected qmd filter to the metadata.
    is_qmd = filename.endswith(".qmd")
    if is_qmd:
        if "filters" not in metadata:
            metadata["filters"] = []
        if "marimo" not in str(metadata["filters"]):
            # Filters may be a comma-separated string; normalize to list.
            if isinstance(metadata["filters"], str):
                metadata["filters"] = metadata["filters"].split(",")
            if isinstance(metadata["filters"], list):
                metadata["filters"].append("marimo-team/marimo")
            else:
                LOGGER.warning(
                    "Unexpected type for filters: %s",
                    type(metadata["filters"]),
                )
    # Drop empty metadata entries before dumping the YAML header.
    header = yaml.marimo_compat_dump(
        {
            k: v
            for k, v in metadata.items()
            if v is not None and v != "" and v != []
        },
        sort_keys=False,
    )
    document = ["---", header.strip(), "---", ""]
    previous_was_markdown = False
    for cell in notebook.cells:
        code = cell.code
        # Config values are opt in, so only include if they are set.
        attributes = cell.options.copy()
        # Extract name from options if present (for unparsable cells)
        # and use it instead of cell.name
        cell_name = attributes.pop("name", None) or cell.name
        # Allow for attributes like column index.
        attributes = {k: repr(v).lower() for k, v in attributes.items() if v}
        if not is_internal_cell_name(cell_name):
            attributes["name"] = cell_name
        # No "cell" typically means not parseable. However newly added
        # cells require compilation before cell is set.
        # TODO: Refactor so it doesn't occur in export (codegen
        # does this too)
        # NB. Also need to recompile in the sql case since sql parsing is
        # cached.
        language: Language = "python"
        cell_impl: CellImpl | None = None
        try:
            cell_impl = compile_cell(code, cell_id=CellId_t("dummy"))
            language = cell_impl.language
        except SyntaxError:
            pass
        if cell_impl:
            # Markdown that starts a column is forced to code.
            column = attributes.get("column", None)
            if not column or column == "0":
                markdown = get_markdown_from_cell(cell_impl, code)
                # Unsanitized markdown is forced to code.
                if markdown and is_sanitized_markdown(markdown):
                    # Use blank HTML comment to separate markdown codeblocks
                    if previous_was_markdown:
                        document.append("<!---->")
                    previous_was_markdown = True
                    document.append(markdown)
                    continue
                # In which case we need to format it like our python blocks.
                elif cell_impl.markdown:
                    code = codegen.format_markdown(cell_impl)
            attributes["language"] = language
            # Definitely a code cell, but need to determine if it can be
            # formatted as non-python.
            if attributes["language"] == "sql":
                sql_options: dict[str, str] | None = (
                    _get_sql_options_from_cell(code)
                )
                if not sql_options:
                    # means not sql.
                    attributes.pop("language")
                else:
                    # Ignore default query value.
                    if sql_options.get("query") == "_df":
                        sql_options.pop("query")
                    attributes.update(sql_options)
                    code = "\n".join(cell_impl.raw_sqls).strip()
        # Definitely no "cell"; as such, treat as code, as everything in
        # marimo is code.
        else:
            attributes["unparsable"] = "true"
        # Dedent and strip code to prevent whitespace accumulation on roundtrips
        code = textwrap.dedent(code).strip()
        # Add a blank line between markdown and code
        if previous_was_markdown:
            document.append("")
        previous_was_markdown = False
        document.append(formatted_code_block(code, attributes, is_qmd=is_qmd))
    return "\n".join(document).strip()
def _format_filename_title(filename: str) -> str:
basename = os.path.basename(filename)
name, _ext = os.path.splitext(basename)
title = re.sub("[-_]", " ", name)
return title.title()
def _get_sql_options_from_cell(code: str) -> dict[str, str] | None:
# Note frontend/src/core/codemirror/language/sql.ts
# also extracts options via ast. Ideally, these should be synced.
options = {}
code = code.strip()
try:
(body,) = ast.parse(code).body
(target,) = body.targets # type: ignore[attr-defined]
options["query"] = target.id
if body.value.func.attr == "sql": # type: ignore[attr-defined]
value = body.value # type: ignore[attr-defined]
else:
return None
if value.keywords:
for keyword in value.keywords: # type: ignore[attr-defined]
options[keyword.arg] = const_or_id(keyword.value) # type: ignore[attr-defined]
output = options.pop("output", "True").lower()
if output == "false":
options["hide_output"] = "True"
return options
except (AssertionError, AttributeError, ValueError):
return None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_convert/markdown/from_ir.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_convert/ipynb/fixtures/py/complex_file_format.py | # /// script
# description = "Complex file format with setup cell"
# requires-python = ">=3.12"
# dependencies = [
# "marimo",
# "pandas>=2.1.0",
# "numpy>=2.1.0",
# ]
# ///
import marimo

# Conversion-test fixture: exercises the full marimo file format —
# setup block, cell config (hide_code/disabled), named cells, and
# top-level @app.function / @app.class_definition declarations.
__generated_with = "0.19.2"
app = marimo.App(width="medium", auto_download=["html"], sql_output="native")
with app.setup:
    # Complex file format with setup cell
    import marimo as mo
@app.cell(hide_code=True)
def _():
    mo.md("""
    # Documentation
    This cell has **hidden code** and uses markdown.
    - Feature 1
    - Feature 2
    """)
    return
@app.cell
def imports():
    """Named cell with imports."""
    import pandas as pd
    import numpy as np
    return np, pd
@app.cell(disabled=True)
def disabled_cell():
    """This cell is disabled."""
    x = 42
    should_not_run = True
    return
@app.cell
def data_loading(np, pd):
    """Named cell for data loading."""
    df = pd.DataFrame({"a": np.array([1, 2, 3]), "b": np.array([4, 5, 6])})
    return (df,)
@app.cell
def analysis(df):
    """Named cell with analysis and markdown output."""
    result = df["a"].sum()
    mo.md(f"The sum is **{result}**")
    return
@app.cell
def _():
    """Unnamed cell."""
    internal_var = 100
    return
@app.function
def add(x, y):
    """Pure function."""
    return x + y
@app.function(hide_code=True)
def remove(x, y):
    """Hidden function."""
    return x - y
@app.class_definition
class MyClass:
    """Pure class."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def add(self):
        return self.x + self.y
@app.class_definition(hide_code=True)
class MyHiddenClass:
    """Hidden class."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def add(self):
        return self.x + self.y
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_convert/ipynb/fixtures/py/complex_file_format.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_convert/ipynb/fixtures/py/complex_outputs.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(mo):
mo.md("""
# Testing Various Output Types
This notebook tests various output scenarios.
""")
return
@app.cell
def _():
"""Cell with print statements."""
print("Standard output")
print("Multiple lines")
return
@app.cell
def _(mo):
"""Cell with rich output."""
data = {"x": [1, 2, 3], "y": [4, 5, 6]}
mo.ui.table(data)
return (data,)
@app.cell
def _(data):
"""Cell with calculations and implicit output."""
result = sum(data["x"]) + sum(data["y"])
result
return
@app.cell
def _(mo):
"""Cell with multiple outputs."""
mo.md("## Section 1")
print("Debug info")
value = 42
value
return
@app.cell
def error_cell():
"""This would cause an error if run."""
# Note: This is valid Python, just demonstrates error handling
import sys
if hasattr(sys, "never_exists"):
raise ValueError("This should not happen")
success = True
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_convert/ipynb/fixtures/py/complex_outputs.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_convert/ipynb/fixtures/py/simple.py | import marimo
__generated_with = "0.19.2"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(mo):
mo.md("""
# Hello, World!
""")
return
@app.cell
def _():
x = 1
y = 2
z = x + y
return (z,)
@app.cell
def _(z):
print(z)
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_convert/ipynb/fixtures/py/simple.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_deep_merge.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._utils.deep_merge import deep_merge
def test_deep_merge_basic() -> None:
    """Nested dicts merge recursively; scalars from the update win."""
    base = {"a": 1, "b": {"c": 2, "d": 3}}
    patch = {"b": {"c": 10, "e": 4}, "f": 5}
    merged = deep_merge(base, patch)
    assert merged == {"a": 1, "b": {"c": 10, "d": 3, "e": 4}, "f": 5}
def test_deep_merge_keeps_original_keys_not_in_update() -> None:
    """Keys present only in the original survive the merge."""
    merged = deep_merge({"a": 1, "b": 2, "c": 3}, {"b": 20})
    assert merged == {"a": 1, "b": 20, "c": 3}
def test_deep_merge_adds_new_keys_from_update() -> None:
    """Keys present only in the update are added to the result."""
    merged = deep_merge({"a": 1}, {"b": 2})
    assert merged == {"a": 1, "b": 2}
def test_deep_merge_nested_dicts() -> None:
    """Merging recurses through multiple levels of nesting."""
    base = {"level1": {"level2": {"keep": "original", "override": "original"}}}
    patch = {"level1": {"level2": {"override": "updated", "new": "added"}}}
    assert deep_merge(base, patch) == {
        "level1": {
            "level2": {
                "keep": "original",
                "override": "updated",
                "new": "added",
            }
        }
    }
def test_deep_merge_replace_paths_deletes_missing_keys() -> None:
    """Test that replace_paths deletes keys not in update."""
    base = {
        "ai": {
            "custom_providers": {
                "provider1": {"api_key": "key1", "base_url": "url1"},
                "provider2": {"api_key": "key2", "base_url": "url2"},
            }
        }
    }
    patch = {
        "ai": {
            "custom_providers": {
                # provider2 intentionally absent from the update
                "provider1": {"api_key": "key1_updated", "base_url": "url1"},
            }
        }
    }
    # A plain merge keeps provider2 around...
    assert "provider2" in deep_merge(base, patch)["ai"]["custom_providers"]
    # ...but replace_paths replaces the subtree wholesale, dropping it.
    replaced = deep_merge(
        base, patch, replace_paths=frozenset({"ai.custom_providers"})
    )
    assert "provider2" not in replaced["ai"]["custom_providers"]
    assert replaced["ai"]["custom_providers"] == {
        "provider1": {"api_key": "key1_updated", "base_url": "url1"}
    }
def test_deep_merge_replace_paths_preserves_unmodified_fields() -> None:
    """Test that replace_paths preserves fields not in update.
    This is the key use case: editing base_url should preserve api_key
    (which is filtered out as masked placeholder).
    """
    original = {
        "ai": {
            "custom_providers": {
                "provider1": {"api_key": "secret", "base_url": "old_url"},
                "provider2": {"api_key": "key2", "base_url": "url2"},
            }
        }
    }
    # Frontend sends all providers, but api_key filtered as placeholder
    update = {
        "ai": {
            "custom_providers": {
                "provider1": {"base_url": "new_url"},  # api_key missing
                "provider2": {"api_key": "key2", "base_url": "url2"},
            }
        }
    }
    result = deep_merge(
        original, update, replace_paths=frozenset({"ai.custom_providers"})
    )
    # api_key should be preserved from original: per-provider dicts under
    # a replaced path still merge key-by-key rather than being clobbered.
    assert result["ai"]["custom_providers"]["provider1"] == {
        "api_key": "secret",
        "base_url": "new_url",
    }
    assert result["ai"]["custom_providers"]["provider2"] == {
        "api_key": "key2",
        "base_url": "url2",
    }
def test_deep_merge_replace_paths_with_empty_dict() -> None:
    """An empty dict at a replace path wipes out the original subtree."""
    base = {"ai": {"custom_providers": {"provider1": {"api_key": "key1"}}}}
    # The update removes all providers.
    patch = {"ai": {"custom_providers": {}}}
    merged = deep_merge(
        base, patch, replace_paths=frozenset({"ai.custom_providers"})
    )
    assert merged["ai"]["custom_providers"] == {}
def test_deep_merge_replace_paths_does_not_affect_other_paths() -> None:
    """Only the listed paths are replaced; sibling paths still merge."""
    base = {
        "ai": {
            "custom_providers": {"p1": {"key": "v1"}},
            "models": {"m1": "model1", "m2": "model2"},
        }
    }
    patch = {
        "ai": {
            "custom_providers": {"p2": {"key": "v2"}},
            "models": {"m1": "updated"},  # m2 absent from the update
        }
    }
    merged = deep_merge(
        base, patch, replace_paths=frozenset({"ai.custom_providers"})
    )
    # Replaced subtree: p1 dropped entirely, only p2 remains.
    assert merged["ai"]["custom_providers"] == {"p2": {"key": "v2"}}
    # Merged subtree: m2 preserved, m1 overwritten.
    assert merged["ai"]["models"] == {"m1": "updated", "m2": "model2"}
def test_deep_merge_replace_paths_nested_path() -> None:
    """replace_paths works for deeply nested dotted paths."""
    base = {
        "a": {"b": {"c": {"keep": "original", "replace_me": {"x": 1, "y": 2}}}}
    }
    patch = {"a": {"b": {"c": {"replace_me": {"z": 3}}}}}
    merged = deep_merge(
        base, patch, replace_paths=frozenset({"a.b.c.replace_me"})
    )
    # The subtree at the path is swapped out entirely...
    assert merged["a"]["b"]["c"]["replace_me"] == {"z": 3}
    # ...while siblings of the replaced key (absent from the update,
    # and whose parent is not replaced) survive.
    assert merged["a"]["b"]["c"]["keep"] == "original"
def test_deep_merge_replace_paths_when_key_not_in_original() -> None:
    """A replace path absent from the original is simply inserted."""
    merged = deep_merge(
        {"a": {"other": "value"}},
        {"a": {"custom_providers": {"p1": {"key": "v1"}}}},
        replace_paths=frozenset({"a.custom_providers"}),
    )
    assert merged == {
        "a": {"other": "value", "custom_providers": {"p1": {"key": "v1"}}}
    }
def test_deep_merge_replace_paths_when_key_not_in_update() -> None:
    """A replace path absent from the update leaves the original intact."""
    merged = deep_merge(
        {"a": {"custom_providers": {"p1": {"key": "v1"}}}},
        {"a": {"other": "new_value"}},
        replace_paths=frozenset({"a.custom_providers"}),
    )
    assert merged == {
        "a": {
            "custom_providers": {"p1": {"key": "v1"}},
            "other": "new_value",
        }
    }
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_deep_merge.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/ai/chat-variations.py | import marimo
__generated_with = "0.19.2"
app = marimo.App(width="medium", auto_download=["html"])
@app.cell
def _():
import uuid
import time
import asyncio
import pydantic_ai.ui.vercel_ai.response_types as vercel
return asyncio, time, uuid, vercel
@app.cell
def _(
async_no_streaming_text,
async_streaming_chunks,
async_streaming_text,
mo,
sync_no_streaming_object,
sync_no_streaming_text,
sync_streaming_text,
):
model = mo.ui.dropdown(
[
async_streaming_chunks,
async_streaming_text,
async_no_streaming_text,
sync_streaming_text,
sync_no_streaming_text,
sync_no_streaming_object,
],
value=async_streaming_text,
label="Select Model",
)
model
return (model,)
@app.cell
def _(mo, model):
chat = mo.ui.chat(model.value)
chat
return (chat,)
@app.cell
def _(chat):
chat.value
return
@app.cell
def _(chat):
chat.value[1].content if len(chat.value) else None
return
@app.cell
def _(uuid, vercel):
async def async_streaming_chunks(messages, config):
# Generate unique IDs for message parts
reasoning_id = f"reasoning_{uuid.uuid4().hex}"
text_id = f"text_{uuid.uuid4().hex}"
tool_id = f"tool_{uuid.uuid4().hex}"
# --- Stream reasoning/thinking ---
yield vercel.StartStepChunk()
yield vercel.ReasoningStartChunk(id=reasoning_id)
yield vercel.ReasoningDeltaChunk(
id=reasoning_id,
delta="The user is asking about Van Gogh. I should fetch information about his famous works.",
)
yield vercel.ReasoningEndChunk(id=reasoning_id)
# --- Stream tool call to fetch artwork information ---
yield vercel.ToolInputAvailableChunk(
tool_call_id=tool_id,
tool_name="search_artwork",
input={"artist": "Vincent van Gogh", "limit": 1},
)
yield vercel.ToolInputStartChunk(
tool_call_id=tool_id, tool_name="search_artwork"
)
yield vercel.ToolInputDeltaChunk(
tool_call_id=tool_id,
input_text_delta='{"artist": "Vincent van Gogh", "limit": 1}',
)
# --- Tool output (simulated artwork search result) ---
yield vercel.ToolOutputAvailableChunk(
tool_call_id=tool_id,
output={
"title": "The Starry Night",
"year": 1889,
"museum": "Museum of Modern Art",
},
)
# --- Stream text response ---
yield vercel.TextStartChunk(id=text_id)
yield vercel.TextDeltaChunk(
id=text_id,
delta="One of Vincent van Gogh's most iconic works is 'The Starry Night', painted in 1889. Here's the painting:\n\n",
)
# --- Embed the artwork image ---
yield vercel.FileChunk(
url="https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg/1280px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg",
media_type="image/jpeg",
)
yield vercel.TextDeltaChunk(
id=text_id,
delta="\nThis masterpiece is now housed at the Museum of Modern Art in New York and remains one of the most recognizable paintings in the world.",
)
yield vercel.TextEndChunk(id=text_id)
yield vercel.FinishStepChunk()
yield vercel.FinishChunk()
return (async_streaming_chunks,)
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(asyncio):
async def async_streaming_text(messages, config):
await asyncio.sleep(1)
yield "This is a simple text-only response."
await asyncio.sleep(1)
yield " It does not include reasoning or tool calls."
await asyncio.sleep(1)
yield " Have a nice day!"
return (async_streaming_text,)
@app.cell
def _(time):
def sync_streaming_text(messages, config):
time.sleep(1)
yield "This"
time.sleep(1)
yield " is simple "
time.sleep(1)
yield " streaming."
return (sync_streaming_text,)
@app.cell
def _(asyncio):
async def async_no_streaming_text(messages, config):
await asyncio.sleep(1)
return f"**echo**: _{messages[-1].content}_"
return (async_no_streaming_text,)
@app.cell
def _(time):
def sync_no_streaming_text(messages, config):
time.sleep(1)
return f"**echo**: _{messages[-1].content}_"
return (sync_no_streaming_text,)
@app.cell
def _(mo, time):
def sync_no_streaming_object(messages, config):
time.sleep(1)
return mo.ui.table([1, 2, 3, messages[-1].content])
return (sync_no_streaming_object,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ai/chat-variations.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_session/_venv.py | # Copyright 2026 Marimo. All rights reserved.
"""Virtual environment configuration utilities.
This module provides utilities for working with configured virtual environments
in marimo's sandbox mode. It handles:
- Finding Python interpreters in virtual environments
- Checking marimo installation status in venvs
- PYTHONPATH injection for kernel subprocesses
- Installing marimo into configured venvs
"""
from __future__ import annotations
import os
import subprocess
import sys
from importlib.metadata import version
from pathlib import Path
from typing import TYPE_CHECKING
from marimo import _loggers
from marimo._cli.print import echo
from marimo._utils.uv import find_uv_bin
from marimo._version import __version__
if TYPE_CHECKING:
from marimo._config.config import VenvConfig
LOGGER = _loggers.marimo_logger()
def get_ipc_kernel_deps() -> list[str]:
    """Get dependencies required for IPC kernel communication.

    Returns pyzmq pinned to the currently installed version so that the
    host and sandbox environments stay wire-compatible; falls back to a
    minimum-version constraint when pyzmq is not installed locally.
    """
    try:
        pinned = version("pyzmq")
    except Exception:
        # pyzmq missing locally: request any recent release instead.
        return ["pyzmq>=27.1.0"]
    return [f"pyzmq=={pinned}"]
def _find_python_in_venv(venv_path: str) -> str | None:
"""Find Python interpreter in a venv directory.
Args:
venv_path: Path to the virtualenv directory.
Returns:
Path to Python binary, or None if not found.
"""
venv = Path(venv_path)
if not venv.exists() or not venv.is_dir():
return None
if sys.platform == "win32":
python_path = venv / "Scripts" / "python.exe"
else:
python_path = venv / "bin" / "python"
if not python_path.exists():
return None
return str(python_path.absolute())
def get_configured_venv_python(
venv_config: VenvConfig,
base_path: str | None = None,
) -> str | None:
"""Get Python path from venv config.
Args:
venv_config: The venv config dict (from config.get("venv")).
base_path: Base path for resolving relative venv paths (e.g., script path).
Returns:
Path to Python interpreter in configured venv, or None if not configured.
Raises:
ValueError: If venv is configured but invalid.
"""
venv_path = venv_config.get("path")
if not venv_path:
return None
if base_path and not os.path.isabs(venv_path):
base_dir = os.path.dirname(os.path.abspath(base_path))
venv_path = os.path.join(base_dir, venv_path)
if not os.path.isdir(venv_path):
raise ValueError(f"Configured venv does not exist: {venv_path}")
python_path = _find_python_in_venv(venv_path)
if not python_path:
raise ValueError(
f"No Python interpreter found in configured venv: {venv_path}"
)
return python_path
def get_kernel_pythonpath() -> str:
    """Build a PYTHONPATH for a kernel subprocess.

    Collects the directories that currently provide marimo and its IPC
    dependencies (pyzmq, msgspec) so a kernel launched in a configured
    venv can import them via path injection.

    Returns:
        Colon-separated (or semicolon on Windows) path string; may be
        empty when none of the modules can be located.
    """
    # Locate each dependency through its __file__ rather than
    # site.getsitepackages(): the latter can report ephemeral paths,
    # e.g. when running via `uv run --with=pyzmq`. msgspec is included
    # as a reliable marker of the user's real environment, and marimo's
    # parent directory also covers editable installs.
    found: list[str] = []
    for dep in ("marimo", "zmq", "msgspec"):
        try:
            mod = __import__(dep)
        except ImportError:
            continue
        origin = getattr(mod, "__file__", None)
        if origin:
            # Parent of the package dir == the site-packages-style root.
            site_dir = str(Path(origin).parent.parent)
            if site_dir not in found:
                found.append(site_dir)
    return os.pathsep.join(found)
def has_marimo_installed(venv_python: str) -> bool:
    """Check whether the venv can import marimo and its IPC deps.

    Args:
        venv_python: Path to the venv's Python interpreter.

    Returns:
        True if marimo, msgspec, and zmq are all importable there.
    """
    probe = subprocess.run(
        [
            venv_python,
            "-c",
            "import marimo, msgspec, zmq; print(marimo.__version__)",
        ],
        capture_output=True,
        text=True,
    )
    if probe.returncode != 0:
        return False
    installed_version = probe.stdout.strip()
    # A mismatched marimo version still counts as installed, but warn:
    # host and kernel may not interoperate cleanly across versions.
    if installed_version != __version__:
        LOGGER.warning(
            f"marimo version mismatch: venv has {installed_version}, "
            f"current is {__version__}. "
            f"This may cause unexpected behavior. "
            f"Consider upgrading both environments to the same version: "
            f"uv pip install --upgrade marimo"
        )
    return True
def check_python_version_compatibility(venv_python: str) -> bool:
    """Check that the venv's Python major.minor matches the host's.

    Binary dependencies (pyzmq, msgspec) aren't cross-version
    compatible, so the kernel venv must run the same Python
    major.minor version as the host process.

    Args:
        venv_python: Path to the venv's Python interpreter.

    Returns:
        True when the versions match, False otherwise.
    """
    probe = subprocess.run(
        [
            venv_python,
            "-c",
            "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')",
        ],
        capture_output=True,
        text=True,
    )
    host = f"{sys.version_info.major}.{sys.version_info.minor}"
    return probe.stdout.strip() == host
def install_marimo_into_venv(venv_python: str) -> None:
    """Install marimo plus its IPC dependencies (pyzmq) into a venv.

    Installation failures are logged as warnings rather than raised;
    callers decide how to react if marimo is still missing afterwards.

    Args:
        venv_python: Path to the venv's Python interpreter.
    """
    uv = find_uv_bin()
    to_install = [f"marimo=={__version__}", *get_ipc_kernel_deps()]
    echo("Installing marimo into configured venv...", err=True)
    outcome = subprocess.run(
        [uv, "pip", "install", "--python", venv_python, *to_install],
        capture_output=True,
        text=True,
    )
    if outcome.returncode != 0:
        LOGGER.warning(
            f"Failed to install marimo into configured venv: {outcome.stderr}"
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_session/_venv.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_session/test_venv.py | from __future__ import annotations
import os
import sys
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from pathlib import Path
import pytest
from marimo._session._venv import (
check_python_version_compatibility,
get_configured_venv_python,
get_kernel_pythonpath,
has_marimo_installed,
)
def test_get_configured_venv_python_returns_none_when_not_configured() -> None:
    """Test returns None when venv not in config."""
    config: dict[str, Any] = {}  # No venv configured
    result = get_configured_venv_python(config)
    assert result is None


def test_get_configured_venv_python_returns_none_when_venv_empty() -> None:
    """Test returns None when venv is empty string."""
    # An empty "path" value is treated the same as no configuration.
    config: dict[str, Any] = {"path": ""}
    result = get_configured_venv_python(config)
    assert result is None
def test_get_configured_venv_python_returns_path_when_valid(
    tmp_path: Path,
) -> None:
    """Test returns Python path when venv is valid."""
    # Create a mock venv with Python
    venv_dir = tmp_path / "venv"
    venv_dir.mkdir()
    # Interpreter location differs by platform: Scripts/ on Windows,
    # bin/ elsewhere.
    if sys.platform == "win32":
        bin_dir = venv_dir / "Scripts"
        python_name = "python.exe"
    else:
        bin_dir = venv_dir / "bin"
        python_name = "python"
    bin_dir.mkdir()
    python_path = bin_dir / python_name
    python_path.touch()
    config: dict[str, Any] = {"path": str(venv_dir)}
    result = get_configured_venv_python(config)
    assert result is not None
    assert result.endswith(python_name)


def test_get_configured_venv_python_raises_on_missing_venv() -> None:
    """Test raises ValueError when configured venv doesn't exist."""
    config: dict[str, Any] = {"path": "/nonexistent/venv/path"}
    with pytest.raises(ValueError, match="does not exist"):
        get_configured_venv_python(config)


def test_get_configured_venv_python_raises_on_no_python(
    tmp_path: Path,
) -> None:
    """Test raises ValueError when venv has no Python interpreter."""
    venv_dir = tmp_path / "venv"
    venv_dir.mkdir()  # Empty venv, no bin/python
    config: dict[str, Any] = {"path": str(venv_dir)}
    with pytest.raises(ValueError, match="No Python interpreter"):
        get_configured_venv_python(config)
def test_get_configured_venv_python_resolves_relative_path(
    tmp_path: Path,
) -> None:
    """Test that relative venv paths are resolved from base_path."""
    # Create a mock venv with Python
    venv_dir = tmp_path / "venvs" / "myenv"
    venv_dir.mkdir(parents=True)
    if sys.platform == "win32":
        bin_dir = venv_dir / "Scripts"
        python_name = "python.exe"
    else:
        bin_dir = venv_dir / "bin"
        python_name = "python"
    bin_dir.mkdir()
    python_path = bin_dir / python_name
    python_path.touch()
    # Create a script file in tmp_path; it acts as the notebook whose
    # directory anchors the relative venv path.
    script_path = tmp_path / "notebook.py"
    script_path.touch()
    # Relative path from script location
    config: dict[str, Any] = {"path": "venvs/myenv"}
    result = get_configured_venv_python(config, base_path=str(script_path))
    assert result is not None
    assert result.endswith(python_name)
def test_get_kernel_pythonpath_includes_marimo_dir() -> None:
    """Test that kernel PYTHONPATH includes marimo's parent directory."""
    import marimo

    pythonpath = get_kernel_pythonpath()
    paths = pythonpath.split(os.pathsep)
    marimo_dir = os.path.dirname(os.path.dirname(marimo.__file__))
    assert marimo_dir in paths, f"marimo dir {marimo_dir} not in {paths}"


def test_get_kernel_pythonpath_detects_module_via_import(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that module detection works by importing and checking __file__."""
    from types import ModuleType

    # A stub module whose __file__ points into a fake site-packages tree.
    fake_zmq = ModuleType("zmq")
    fake_zmq.__file__ = "/fake/site-packages/zmq/__init__.py"
    monkeypatch.setitem(sys.modules, "zmq", fake_zmq)
    pythonpath = get_kernel_pythonpath()
    paths = pythonpath.split(os.pathsep)
    assert os.path.normpath("/fake/site-packages") in paths


def test_get_kernel_pythonpath_skips_module_without_file(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that modules without __file__ are skipped gracefully."""
    from types import ModuleType

    fake_zmq = ModuleType("zmq")
    # Don't set __file__ - simulates built-in or namespace package
    monkeypatch.setitem(sys.modules, "zmq", fake_zmq)
    pythonpath = get_kernel_pythonpath()
    assert pythonpath  # Should still have marimo dir at minimum
def test_get_kernel_pythonpath_handles_import_error(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that ImportError is handled gracefully."""
    import builtins

    import marimo

    monkeypatch.delitem(sys.modules, "zmq", raising=False)
    monkeypatch.delitem(sys.modules, "msgspec", raising=False)
    # Use the builtins module rather than __builtins__: the latter is a
    # dict in imported modules but a module in __main__, so indexing it
    # is fragile.
    original_import = builtins.__import__

    def fake_import(name: str, *args: Any, **kwargs: Any) -> Any:
        if name in ("zmq", "msgspec"):
            raise ImportError(f"No module named '{name}'")
        return original_import(name, *args, **kwargs)

    monkeypatch.setattr("builtins.__import__", fake_import)
    pythonpath = get_kernel_pythonpath()
    paths = pythonpath.split(os.pathsep)
    marimo_dir = os.path.dirname(os.path.dirname(marimo.__file__))
    assert marimo_dir in paths
def test_get_kernel_pythonpath_deduplicates(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test that duplicate paths are deduplicated."""
    from types import ModuleType

    # Two modules that resolve to the same site-packages directory.
    fake_zmq = ModuleType("zmq")
    fake_zmq.__file__ = "/shared/site-packages/zmq/__init__.py"
    fake_msgspec = ModuleType("msgspec")
    fake_msgspec.__file__ = "/shared/site-packages/msgspec/__init__.py"
    monkeypatch.setitem(sys.modules, "zmq", fake_zmq)
    monkeypatch.setitem(sys.modules, "msgspec", fake_msgspec)
    pythonpath = get_kernel_pythonpath()
    paths = pythonpath.split(os.pathsep)
    # The shared directory should appear exactly once.
    assert paths.count(os.path.normpath("/shared/site-packages")) == 1
def test_has_marimo_installed_returns_true_when_imports_succeed(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test returns True when marimo and deps can be imported."""
    import subprocess

    from marimo._version import __version__

    # Simulate the probe subprocess succeeding and reporting the same
    # marimo version as the host.
    def mock_run(
        *args: Any, **_kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        return subprocess.CompletedProcess(
            args=args[0],
            returncode=0,
            stdout=f"{__version__}\n",
            stderr="",
        )

    monkeypatch.setattr(subprocess, "run", mock_run)
    assert has_marimo_installed("/fake/python") is True


def test_has_marimo_installed_returns_false_when_import_fails(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test returns False when imports fail."""
    import subprocess

    # Simulate the probe failing with a missing-module error.
    def mock_run(
        *args: Any, **_kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        return subprocess.CompletedProcess(
            args=args[0],
            returncode=1,
            stdout="",
            stderr="ModuleNotFoundError: No module named 'marimo'",
        )

    monkeypatch.setattr(subprocess, "run", mock_run)
    assert has_marimo_installed("/fake/python") is False


def test_has_marimo_installed_returns_true_on_version_mismatch(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test returns True even when venv has different marimo version."""
    import subprocess

    def mock_run(
        *args: Any, **_kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        return subprocess.CompletedProcess(
            args=args[0],
            returncode=0,
            stdout="0.0.1\n",  # Different version
            stderr="",
        )

    monkeypatch.setattr(subprocess, "run", mock_run)
    # Should return True even with version mismatch (warning is logged)
    assert has_marimo_installed("/fake/python") is True
def test_check_python_version_compatibility_returns_true_when_match(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test returns True when Python versions match."""
    import subprocess

    current_version = f"{sys.version_info.major}.{sys.version_info.minor}"

    # Simulate the venv's interpreter reporting the host's version.
    def mock_run(
        *args: Any, **_kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        return subprocess.CompletedProcess(
            args=args[0],
            returncode=0,
            stdout=f"{current_version}\n",
            stderr="",
        )

    monkeypatch.setattr(subprocess, "run", mock_run)
    assert check_python_version_compatibility("/fake/python") is True


def test_check_python_version_compatibility_returns_false_when_mismatch(
    monkeypatch: pytest.MonkeyPatch,
) -> None:
    """Test returns False when Python versions differ."""
    import subprocess

    def mock_run(
        *args: Any, **_kwargs: Any
    ) -> subprocess.CompletedProcess[str]:
        return subprocess.CompletedProcess(
            args=args[0],
            returncode=0,
            stdout="2.7\n",  # Different version
            stderr="",
        )

    monkeypatch.setattr(subprocess, "run", mock_run)
    assert check_python_version_compatibility("/fake/python") is False
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_session/test_venv.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_versions.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._utils.versions import (
extract_extras,
has_version_specifier,
without_extras,
without_version_specifier,
)
class TestWithoutVersionSpecifier:
    """Tests for without_version_specifier function"""

    def test_removes_version_specifiers(self) -> None:
        # Every PEP 440 comparison operator should be stripped.
        cases = {
            "numpy==1.24.0": "numpy",
            "pandas>1.5.0": "pandas",
            "scipy>=1.10.0": "scipy",
            "requests<3.0.0": "requests",
            "urllib3<=2.0.0": "urllib3",
            "flask~=2.0.0": "flask",
            # Exclusion operator (PEP 440)
            "package!=1.0.0": "package",
        }
        for spec, expected in cases.items():
            assert without_version_specifier(spec) == expected

    def test_handles_whitespace(self) -> None:
        # Whitespace around version operators should be stripped
        for spec in ("package >= 1.0.0", "package >=1.0.0"):
            assert without_version_specifier(spec) == "package"

    def test_preserves_package_without_version(self) -> None:
        for name in ("django", ""):
            assert without_version_specifier(name) == name

    def test_handles_extras_and_special_chars(self) -> None:
        # Version is removed but extras are preserved.
        assert (
            without_version_specifier("requests[security]>=2.0.0")
            == "requests[security]"
        )
        # Only the first specifier matters for splitting.
        assert without_version_specifier("package>=1.0.0,<2.0.0") == "package"
        # Hyphenated and underscored names survive untouched.
        assert (
            without_version_specifier("scikit-learn>=1.0.0") == "scikit-learn"
        )
        assert (
            without_version_specifier("typing_extensions>=4.0.0")
            == "typing_extensions"
        )
class TestWithoutExtras:
    """Tests for without_extras function"""

    def test_removes_extras(self) -> None:
        cases = {
            # Single extra
            "requests[security]": "requests",
            # Multiple extras
            "requests[security,socks]": "requests",
            # Nested brackets: the split happens at the first "["
            "package[extra[nested]]": "package",
        }
        for spec, expected in cases.items():
            assert without_extras(spec) == expected

    def test_preserves_package_without_extras(self) -> None:
        for name in ("numpy", ""):
            assert without_extras(name) == name

    def test_handles_extras_with_versions_and_special_chars(self) -> None:
        # Everything from the first "[" onward is dropped, including any
        # trailing version specifier.
        assert without_extras("requests[security]>=2.0.0") == "requests"
        # Hyphens and underscores are preserved.
        assert without_extras("scikit-learn[all]") == "scikit-learn"
        assert without_extras("typing_extensions[test]") == "typing_extensions"
class TestExtractExtras:
    """Tests for extract_extras function"""

    def test_extracts_single_extra(self) -> None:
        assert extract_extras("requests[security]") == "[security]"

    def test_extracts_multiple_extras(self) -> None:
        assert extract_extras("requests[security,socks]") == "[security,socks]"

    def test_no_extras(self) -> None:
        for name in ("numpy", ""):
            assert extract_extras(name) == ""

    def test_extras_with_version_specifier(self) -> None:
        # extract_extras expects the version specifier to be stripped first.
        spec = "requests[security]>=2.0.0"
        stripped = without_version_specifier(spec)
        assert extract_extras(stripped) == "[security]"

    def test_extras_with_nested_brackets(self) -> None:
        # Everything after the first opening bracket is included.
        assert extract_extras("package[extra[nested]]") == "[extra[nested]]"

    def test_extras_with_special_chars(self) -> None:
        assert extract_extras("package[a-b_c.d]") == "[a-b_c.d]"
class TestHasVersionSpecifier:
    """Tests for has_version_specifier function"""

    def test_detects_version_specifiers(self) -> None:
        # Every PEP 440 operator, plus a multi-specifier requirement.
        specs = (
            "numpy==1.24.0",
            "pandas>1.5.0",
            "scipy>=1.10.0",
            "requests<3.0.0",
            "urllib3<=2.0.0",
            "flask~=2.0.0",
            # Exclusion operator (PEP 440)
            "package!=1.0.0",
            # Multiple specifiers
            "package>=1.0.0,<2.0.0",
        )
        for spec in specs:
            assert has_version_specifier(spec) is True

    def test_detects_no_version_specifier(self) -> None:
        # Bare names, extras, hyphens, and underscores are not specifiers.
        for spec in (
            "django",
            "",
            "requests[security]",
            "scikit-learn",
            "typing_extensions",
        ):
            assert has_version_specifier(spec) is False

    def test_handles_extras_with_version(self) -> None:
        assert has_version_specifier("requests[security]>=2.0.0") is True
class TestCombinedUsage:
    """Tests for combined usage of utility functions"""

    def test_chaining_functions(self) -> None:
        spec = "requests[security]>=2.0.0"
        # Order of stripping should not matter.
        extras_first = without_version_specifier(without_extras(spec))
        version_first = without_extras(without_version_specifier(spec))
        assert extras_first == "requests"
        assert version_first == "requests"

    @pytest.mark.parametrize(
        ("package", "expected_name"),
        [
            ("numpy==1.24.0", "numpy"),
            ("pandas[all]>=1.5.0", "pandas"),
            ("scikit-learn", "scikit-learn"),
            ("requests[security,socks]<3.0.0", "requests"),
            ("typing_extensions>=4.0.0", "typing_extensions"),
        ],
    )
    def test_extract_clean_package_name(
        self, package: str, expected_name: str
    ) -> None:
        """Test extracting clean package name from various formats"""
        # Strip the version first, then the extras.
        cleaned = without_extras(without_version_specifier(package))
        assert cleaned == expected_name
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_versions.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_plugins/ui/_impl/test_tabs.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._plugins import ui
def test_tabs_basic() -> None:
    widget = ui.tabs({"Tab 1": "Content 1", "Tab 2": "Content 2"})
    # With no explicit value, the first tab is the selection.
    assert widget.value == "Tab 1"


def test_tabs_with_initial_value() -> None:
    widget = ui.tabs(
        {"Tab 1": "Content 1", "Tab 2": "Content 2"}, value="Tab 2"
    )
    assert widget.value == "Tab 2"


def test_tabs_update() -> None:
    widget = ui.tabs({"Tab 1": "Content 1", "Tab 2": "Content 2"})
    assert widget.value == "Tab 1"
    # The frontend reports the selected tab by its stringified index.
    widget._update("1")
    assert widget.value == "Tab 2"
def test_tabs_empty() -> None:
    # Constructing tabs from an empty mapping must not raise.
    widget = ui.tabs({})
    assert widget.value == ""


def test_tabs_with_invalid_initial_value() -> None:
    # An unknown initial value falls back to selecting the first tab.
    widget = ui.tabs(
        {"Tab 1": "Content 1", "Tab 2": "Content 2"}, value="Invalid"
    )
    assert widget.value == "Tab 1"
def test_tabs_lazy() -> None:
    widget = ui.tabs({"Tab 1": "Content 1", "Tab 2": "Content 2"}, lazy=True)
    assert widget.value == "Tab 1"
    # Lazy tabs render their content through the marimo-lazy element.
    assert "marimo-lazy" in widget.text
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/ui/_impl/test_tabs.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_server/api/endpoints/ws/ws_formatter.py | # Copyright 2026 Marimo. All rights reserved.
"""WebSocket message formatting utilities.
This module handles the wire format for WebSocket transport:
wrapping serialized notification data with operation metadata.
"""
from __future__ import annotations

import json
from typing import TYPE_CHECKING

from marimo._messaging.serde import serialize_kernel_message
if TYPE_CHECKING:
from marimo._messaging.notification import NotificationMessage
def format_wire_message(op: str, data: bytes) -> str:
    """Format a serialized message for WebSocket transport.

    Wraps serialized notification data with operation metadata
    for the WebSocket wire protocol.

    Args:
        op: The operation name (e.g., "cell-op", "kernel-ready")
        data: The serialized notification data as bytes

    Returns:
        JSON string in wire format: {"op": "...", "data": ...}
    """
    # json.dumps the op so a name containing quotes or backslashes cannot
    # produce malformed JSON; `data` is already serialized JSON and is
    # embedded verbatim. Output is unchanged for ordinary op names.
    return f'{{"op": {json.dumps(op)}, "data": {data.decode("utf-8")}}}'
def serialize_notification_for_websocket(
    notification: NotificationMessage,
) -> str:
    """Serialize and format a notification for WebSocket transport.

    Convenience wrapper combining serialization and wire formatting
    for callers that start from a notification object.

    Args:
        notification: The notification to serialize and format

    Returns:
        JSON string in wire format: {"op": "...", "data": ...}
    """
    payload = serialize_kernel_message(notification)
    return format_wire_message(notification.name, payload)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws/ws_formatter.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_session/managers/ipc.py | # Copyright 2026 Marimo. All rights reserved.
"""IPC-based managers using ZeroMQ.
These implementations launch the kernel as a subprocess and communicate
via ZeroMQ channels. Each notebook gets its own sandboxed virtual environment.
"""
from __future__ import annotations
import os
import signal
import subprocess
import sys
from typing import TYPE_CHECKING, Optional, Union, cast
from marimo import _loggers
from marimo._cli.sandbox import (
build_sandbox_venv,
cleanup_sandbox_dir,
)
from marimo._config.config import VenvConfig
from marimo._config.manager import MarimoConfigReader
from marimo._config.settings import GLOBAL_SETTINGS
from marimo._messaging.types import KernelMessage
from marimo._runtime import commands
from marimo._session._venv import (
check_python_version_compatibility,
get_configured_venv_python,
get_ipc_kernel_deps,
get_kernel_pythonpath,
has_marimo_installed,
install_marimo_into_venv,
)
from marimo._session.model import SessionMode
from marimo._session.queue import ProcessLike, QueueType
from marimo._session.types import KernelManager, QueueManager
from marimo._utils.typed_connection import TypedConnection
if TYPE_CHECKING:
from marimo._ast.cell import CellConfig
from marimo._ipc.queue_manager import QueueManager as IPCQueueManagerType
from marimo._ipc.types import ConnectionInfo
from marimo._runtime.commands import AppMetadata
from marimo._types.ids import CellId_t
LOGGER = _loggers.marimo_logger()
def _get_venv_config(config_manager: MarimoConfigReader) -> VenvConfig:
    """Get the [tool.marimo.venv] config from a config manager."""
    full_config = config_manager.get_config(hide_secrets=False)
    return cast(VenvConfig, full_config.get("venv", {}))
class KernelStartupError(Exception):
    """Raised when kernel subprocess fails to start.

    The message carries a human-readable description of the failure
    (invalid configured venv, sandbox build error, or subprocess
    launch/handshake failure).
    """
class IPCQueueManagerImpl(QueueManager):
    """Manages queues for a session via ZeroMQ IPC.

    This wraps the ZeroMQ-based IPC QueueManager to provide queues
    for communication with the kernel subprocess.
    """

    def __init__(self, ipc: IPCQueueManagerType) -> None:
        # Underlying ZeroMQ-backed queue manager; all queues forward to it.
        self._ipc = ipc

    @classmethod
    def from_ipc(cls, ipc: IPCQueueManagerType) -> IPCQueueManagerImpl:
        """Create an IPCQueueManagerImpl from an IPC queue manager."""
        return cls(ipc)

    @property
    def control_queue(  # type: ignore[override]
        self,
    ) -> QueueType[commands.CommandMessage]:
        # Control messages for the kernel (run, set UI element, config, ...).
        return self._ipc.control_queue

    @property
    def set_ui_element_queue(  # type: ignore[override]
        self,
    ) -> QueueType[commands.BatchableCommand]:
        # Mirror of UI-element updates so the backend can merge/batch them
        # (see put_control_request).
        return self._ipc.set_ui_element_queue

    @property
    def completion_queue(  # type: ignore[override]
        self,
    ) -> QueueType[commands.CodeCompletionCommand]:
        # Code-completion requests travel on their own queue.
        return self._ipc.completion_queue

    @property
    def input_queue(  # type: ignore[override]
        self,
    ) -> QueueType[str]:
        # Text input destined for the kernel (see put_input).
        return self._ipc.input_queue

    @property
    def stream_queue(  # type: ignore[override]
        self,
    ) -> QueueType[Union[KernelMessage, None]]:
        # Messages streamed back from the kernel; the Optional type
        # suggests None acts as a sentinel — confirm in the IPC layer.
        return cast(
            QueueType[Union[KernelMessage, None]],
            self._ipc.stream_queue,
        )

    @property
    def win32_interrupt_queue(  # type: ignore[override]
        self,
    ) -> Optional[QueueType[bool]]:
        # Used on Windows to request interrupts, since SIGINT cannot be
        # delivered to the child there (see
        # IPCKernelManagerImpl.interrupt_kernel).
        return self._ipc.win32_interrupt_queue

    def close_queues(self) -> None:
        """Close all underlying IPC queues."""
        self._ipc.close_queues()

    def put_control_request(self, request: commands.CommandMessage) -> None:
        """Route a control request onto the appropriate queue(s)."""
        # Completions are on their own queue
        if isinstance(request, commands.CodeCompletionCommand):
            self.completion_queue.put(request)
            return
        self.control_queue.put(request)
        # Update UI elements are on both queues so they can be batched
        if isinstance(request, commands.UpdateUIElementCommand):
            self.set_ui_element_queue.put(request)

    def put_input(self, text: str) -> None:
        """Send user-provided input text to the kernel."""
        self.input_queue.put(text)
class IPCKernelManagerImpl(KernelManager):
    """IPC-based kernel manager to spawn sandboxed kernels.

    Launches the kernel as a subprocess and communicates via ZeroMQ channels.
    Each notebook gets its own sandboxed virtual environment.
    """

    def __init__(
        self,
        *,
        queue_manager: IPCQueueManagerImpl,
        connection_info: ConnectionInfo,
        mode: SessionMode,
        configs: dict[CellId_t, CellConfig],
        app_metadata: AppMetadata,
        config_manager: MarimoConfigReader,
        virtual_files_supported: bool = True,
        redirect_console_to_browser: bool = True,
    ) -> None:
        self.queue_manager = queue_manager
        self.connection_info = connection_info
        self.mode = mode
        self.configs = configs
        self.app_metadata = app_metadata
        self.config_manager = config_manager
        self.virtual_files_supported = virtual_files_supported
        self.redirect_console_to_browser = redirect_console_to_browser
        # Kernel subprocess handle; None until start_kernel() succeeds.
        self._process: subprocess.Popen[bytes] | None = None
        self.kernel_task: ProcessLike | None = None
        # Temp dir of the ephemeral sandbox venv (None for configured venvs).
        self._sandbox_dir: str | None = None
        # Interpreter the kernel runs under; exposed via venv_python.
        self._venv_python: str | None = None

    def start_kernel(self) -> None:
        """Launch the kernel subprocess and wait for its ready signal.

        Chooses the kernel's Python environment — an explicitly
        configured venv (installing marimo into it when writable) or an
        ephemeral uv-built sandbox — then sends kernel args over stdin
        and blocks until the child prints "KERNEL_READY".

        Raises:
            KernelStartupError: If the environment cannot be prepared or
                the subprocess fails to start.
        """
        from marimo._cli.print import echo, muted
        from marimo._ipc.types import KernelArgs

        kernel_args = KernelArgs(
            configs=self.configs,
            app_metadata=self.app_metadata,
            user_config=self.config_manager.get_config(hide_secrets=False),
            log_level=GLOBAL_SETTINGS.LOG_LEVEL,
            profile_path=None,
            connection_info=self.connection_info,
            is_run_mode=self.mode == SessionMode.RUN,
            virtual_files_supported=self.virtual_files_supported,
            redirect_console_to_browser=self.redirect_console_to_browser,
        )
        env = os.environ.copy()
        venv_config = _get_venv_config(self.config_manager)
        try:
            configured_python = get_configured_venv_python(
                venv_config, base_path=self.app_metadata.filename
            )
        except ValueError as e:
            raise KernelStartupError(str(e)) from e
        # Ephemeral sandboxes are always writable; configured venvs respect the
        # flag.
        writable = True
        # An explicitly configured venv takes precedence over an ephemeral
        # sandbox.
        if configured_python:
            echo(
                f"Using configured venv: {muted(configured_python)}",
                err=True,
            )
            venv_python = configured_python
            writable = venv_config.get("writable", False)
            # Configured environments are assumed to be read-only.
            # If not, then install marimo by default to ensure that the
            # environment can spawn a marimo kernel.
            if writable:
                try:
                    install_marimo_into_venv(venv_python)
                except Exception as e:
                    raise KernelStartupError(
                        f"Failed to install marimo into configured venv.\n\n{e}"
                    ) from e
            elif not has_marimo_installed(venv_python):
                # Check Python version compatibility for binary deps
                if not check_python_version_compatibility(venv_python):
                    # If we have gotten to this point
                    # - We have a prescribed venv
                    # - The venv is not writable
                    # - The venv does not contain marimo nor zmq
                    # As such there is nothing we can do, as we can't get marimo
                    # into the runtime without installing it somewhere else.
                    raise KernelStartupError(
                        f"Configured venv uses a different Python version than marimo.\n"
                        f"Binary dependencies (pyzmq, msgspec) aren't cross-version compatible.\n\n"
                        f"Options:\n"
                        f"  1. Set writable=true in [tool.marimo.venv] to allow marimo to install deps\n"
                        f"  2. Install marimo in your venv: uv pip install marimo --python {venv_python}\n"
                        f"  3. Remove [tool.marimo.venv].path to use an ephemeral sandbox instead"
                    )
                # Inject PYTHONPATH for marimo and dependencies from the
                # current runtime as a last chance effort to expose marimo
                # to the kernel.
                kernel_path = get_kernel_pythonpath()
                existing = env.get("PYTHONPATH", "")
                if existing:
                    env["PYTHONPATH"] = f"{kernel_path}{os.pathsep}{existing}"
                else:
                    env["PYTHONPATH"] = kernel_path
        else:
            # Fall back to building ephemeral sandbox venv
            # with IPC dependencies.
            # NB. "Ephemeral" sandboxes (or rather tmp sandboxes built by uv)
            # are always writable, and as such install marimo as a default,
            # making them much easier than a configured venv we cannot manage.
            try:
                self._sandbox_dir, venv_python = build_sandbox_venv(
                    self.app_metadata.filename,
                    additional_deps=get_ipc_kernel_deps(),
                )
            except Exception as e:
                cleanup_sandbox_dir(self._sandbox_dir)
                raise KernelStartupError(
                    f"Failed to build sandbox environment.\n\n{e}"
                ) from e
            echo(
                f"Running kernel in sandbox: {muted(venv_python)}",
                err=True,
            )
        # Store the venv python for package manager targeting
        self._venv_python = venv_python
        # Launch the kernel entry point under the chosen interpreter.
        cmd = [venv_python, "-m", "marimo._ipc.launch_kernel"]
        if writable:
            # Setting this attempts to make auto-installations work even if
            # other normally detected criteria are not true.
            # IPC by itself does not seem to trigger them.
            env["MARIMO_MANAGE_SCRIPT_METADATA"] = "true"
        LOGGER.debug(f"Launching kernel: {' '.join(cmd)}")
        try:
            self._process = subprocess.Popen(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
            )
            # Send connection info via stdin
            assert self._process.stdin is not None
            self._process.stdin.write(kernel_args.encode_json())
            self._process.stdin.flush()
            self._process.stdin.close()
            # Wait for ready signal
            assert self._process.stdout is not None
            ready = self._process.stdout.readline().decode().strip()
            if ready != "KERNEL_READY":
                assert self._process.stderr is not None
                stderr = self._process.stderr.read().decode()
                raise KernelStartupError(
                    f"Kernel failed to start.\n\n"
                    f"Command: {' '.join(cmd)}\n\n"
                    f"Stderr:\n{stderr}"
                )
            LOGGER.debug("Kernel ready")
            # Create a ProcessLike wrapper for the subprocess
            self.kernel_task = _SubprocessWrapper(self._process)
        except KernelStartupError:
            # Already a KernelStartupError, just cleanup and re-raise
            cleanup_sandbox_dir(self._sandbox_dir)
            raise
        except Exception as e:
            # Wrap other exceptions as KernelStartupError
            cleanup_sandbox_dir(self._sandbox_dir)
            raise KernelStartupError(
                f"Failed to start kernel subprocess.\n\n{e}"
            ) from e

    @property
    def pid(self) -> int | None:
        """Process ID of the kernel subprocess, or None if not started."""
        if self._process is None:
            return None
        return self._process.pid

    @property
    def profile_path(self) -> str | None:
        # Profiling not currently supported with IPC kernel
        return None

    @property
    def venv_python(self) -> str | None:
        """Python executable path for the kernel's venv."""
        return self._venv_python

    def is_alive(self) -> bool:
        """Whether the kernel subprocess has started and is still running."""
        if self._process is None:
            return False
        return self._process.poll() is None

    def interrupt_kernel(self) -> None:
        """Interrupt the running kernel.

        On Windows the interrupt is queued (SIGINT cannot be delivered
        to the child there); elsewhere SIGINT is sent directly.
        """
        if self._process is None:
            return
        if self._process.pid is not None:
            q = self.queue_manager.win32_interrupt_queue
            if sys.platform == "win32" and q is not None:
                LOGGER.debug("Queueing interrupt request for kernel.")
                q.put_nowait(True)
            else:
                LOGGER.debug("Sending SIGINT to kernel")
                os.kill(self._process.pid, signal.SIGINT)

    def close_kernel(self) -> None:
        """Stop the kernel, close its queues, and clean up any sandbox."""
        if self._process is not None:
            self.queue_manager.put_control_request(
                commands.StopKernelCommand()
            )
            self.queue_manager.close_queues()
            # Terminate process if still alive
            if self._process.poll() is None:
                self._process.terminate()
                try:
                    self._process.wait(timeout=5)
                except subprocess.TimeoutExpired:
                    # Force-kill if graceful termination stalls.
                    self._process.kill()
        # Always attempt cleanup, even if _process is None
        cleanup_sandbox_dir(self._sandbox_dir)
        self._sandbox_dir = None

    @property
    def kernel_connection(self) -> TypedConnection[KernelMessage]:
        # IPC kernel uses stream_queue instead of kernel_connection
        raise NotImplementedError(
            "IPC kernel uses stream_queue, not kernel_connection"
        )
class _SubprocessWrapper(ProcessLike):
    """Wrapper to make subprocess.Popen compatible with ProcessLike."""

    def __init__(self, process: subprocess.Popen[bytes]) -> None:
        self._process = process

    @property
    def pid(self) -> int | None:
        return self._process.pid

    def is_alive(self) -> bool:
        # poll() returns None while the process is still running.
        return self._process.poll() is None

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def join(self, timeout: Optional[float] = None) -> None:
        # Map ProcessLike.join onto Popen.wait.
        self._process.wait(timeout=timeout)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_session/managers/ipc.py",
"license": "Apache License 2.0",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_session/managers/queue.py | # Copyright 2026 Marimo. All rights reserved.
"""Queue manager implementation using multiprocessing or threading queues."""
from __future__ import annotations
import queue
import sys
from multiprocessing import get_context
from multiprocessing.queues import Queue as MPQueue
from typing import Optional, Union
from marimo._messaging.types import KernelMessage
from marimo._runtime import commands
from marimo._session.types import QueueManager
class QueueManagerImpl(QueueManager):
    """Manages queues for a session using multiprocessing or threading queues."""
    def __init__(self, *, use_multiprocessing: bool):
        # A "spawn" context is used for subprocess kernels; for in-process
        # (threaded) kernels plain queue.Queue objects suffice.
        context = get_context("spawn") if use_multiprocessing else None
        # Control messages for the kernel (run, set UI element, set config, etc
        # ) are sent through the control queue
        self.control_queue: Union[
            MPQueue[commands.CommandMessage],
            queue.Queue[commands.CommandMessage],
        ] = context.Queue() if context is not None else queue.Queue()
        # UI element updates and model commands are stored in both the
        # control queue and this queue, so that the backend can
        # merge/batch requests (last-write-wins per element/model ID).
        self.set_ui_element_queue: Union[
            MPQueue[commands.BatchableCommand],
            queue.Queue[commands.BatchableCommand],
        ] = context.Queue() if context is not None else queue.Queue()
        # Code completion requests are sent through a separate queue
        self.completion_queue: Union[
            MPQueue[commands.CodeCompletionCommand],
            queue.Queue[commands.CodeCompletionCommand],
        ] = context.Queue() if context is not None else queue.Queue()
        # Windows-only interrupt channel: the kernel polls this queue in
        # place of receiving SIGINT (None on other platforms).
        self.win32_interrupt_queue: (
            Union[MPQueue[bool], queue.Queue[bool]] | None
        )
        if sys.platform == "win32":
            self.win32_interrupt_queue = (
                context.Queue() if context is not None else queue.Queue()
            )
        else:
            self.win32_interrupt_queue = None
        # Input messages for the user's Python code are sent through the
        # input queue
        self.input_queue: Union[MPQueue[str], queue.Queue[str]] = (
            context.Queue(maxsize=1)
            if context is not None
            else queue.Queue(maxsize=1)
        )
        # Only created for in-process kernels: carries kernel output
        # messages back to the session.
        self.stream_queue: Optional[
            queue.Queue[Union[KernelMessage, None]]
        ] = None
        if not use_multiprocessing:
            self.stream_queue = queue.Queue()
    def close_queues(self) -> None:
        """Close all kernel-facing queues without waiting for them to drain."""
        if isinstance(self.control_queue, MPQueue):
            # cancel join thread because we don't care if the queues still have
            # things in it: don't want to make the child process wait for the
            # queues to empty
            self.control_queue.cancel_join_thread()
            self.control_queue.close()
        else:
            # kernel thread cleans up read/write conn and IOloop handler on
            # exit; we don't join the thread because we don't want to block
            self.control_queue.put(commands.StopKernelCommand())
        if isinstance(self.set_ui_element_queue, MPQueue):
            self.set_ui_element_queue.cancel_join_thread()
            self.set_ui_element_queue.close()
        if isinstance(self.input_queue, MPQueue):
            # again, don't make the child process wait for the queues to empty
            self.input_queue.cancel_join_thread()
            self.input_queue.close()
        if isinstance(self.completion_queue, MPQueue):
            self.completion_queue.cancel_join_thread()
            self.completion_queue.close()
        if isinstance(self.win32_interrupt_queue, MPQueue):
            self.win32_interrupt_queue.cancel_join_thread()
            self.win32_interrupt_queue.close()
    def put_control_request(self, request: commands.CommandMessage) -> None:
        """Put a control request in the control queue."""
        # Completions are on their own queue
        if isinstance(request, commands.CodeCompletionCommand):
            self.completion_queue.put(request)
            return
        self.control_queue.put(request)
        # UI element updates and model commands are on both queues
        # so they can be batched
        if isinstance(
            request,
            (commands.UpdateUIElementCommand, commands.ModelCommand),
        ):
            self.set_ui_element_queue.put(request)
    def put_input(self, text: str) -> None:
        """Put an input request in the input queue."""
        self.input_queue.put(text)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_session/managers/queue.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_server/api/endpoints/ws/test_ws_formatter.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
from marimo._messaging.notification import (
AlertNotification,
KernelStartupErrorNotification,
)
from marimo._server.api.endpoints.ws.ws_formatter import (
format_wire_message,
serialize_notification_for_websocket,
)
class TestFormatWireMessage:
    """Tests for format_wire_message function."""
    def test_basic_formatting(self) -> None:
        """Test basic wire message formatting."""
        wire = format_wire_message("test-op", b'{"key": "value"}')
        decoded = json.loads(wire)
        assert decoded["op"] == "test-op"
        assert decoded["data"] == {"key": "value"}
    def test_with_nested_data(self) -> None:
        """Test formatting with nested JSON data."""
        payload = b'{"cell_id": "abc123", "output": {"type": "text", "data": "hello"}}'
        decoded = json.loads(format_wire_message("cell-op", payload))
        assert decoded["op"] == "cell-op"
        assert decoded["data"]["cell_id"] == "abc123"
        assert decoded["data"]["output"]["type"] == "text"
    def test_with_special_characters_in_data(self) -> None:
        """Test formatting with special characters in data."""
        payload = b'{"message": "Hello\\nWorld\\twith\\ttabs"}'
        decoded = json.loads(format_wire_message("alert", payload))
        assert decoded["op"] == "alert"
        assert decoded["data"]["message"] == "Hello\nWorld\twith\ttabs"
    def test_with_unicode_data(self) -> None:
        """Test formatting with unicode characters."""
        payload = '{"text": "Hello 世界 🌍"}'.encode()
        decoded = json.loads(format_wire_message("notification", payload))
        assert decoded["op"] == "notification"
        assert decoded["data"]["text"] == "Hello 世界 🌍"
    def test_with_empty_object(self) -> None:
        """Test formatting with empty object data."""
        decoded = json.loads(format_wire_message("empty", b"{}"))
        assert decoded["op"] == "empty"
        assert decoded["data"] == {}
class TestSerializeNotificationForWebsocket:
    """Tests for serialize_notification_for_websocket function."""
    def test_kernel_startup_error_notification(self) -> None:
        """Test serializing KernelStartupErrorNotification."""
        notif = KernelStartupErrorNotification(
            error="Failed to start kernel: module not found"
        )
        decoded = json.loads(serialize_notification_for_websocket(notif))
        assert decoded["op"] == "kernel-startup-error"
        assert (
            decoded["data"]["error"]
            == "Failed to start kernel: module not found"
        )
    def test_alert_notification(self) -> None:
        """Test serializing AlertNotification."""
        notif = AlertNotification(
            title="Test Alert",
            description="This is a test alert message",
        )
        decoded = json.loads(serialize_notification_for_websocket(notif))
        assert decoded["op"] == "alert"
        assert decoded["data"]["title"] == "Test Alert"
        assert decoded["data"]["description"] == "This is a test alert message"
    def test_alert_notification_with_variant(self) -> None:
        """Test serializing AlertNotification with danger variant."""
        notif = AlertNotification(
            title="Error",
            description="Something went wrong",
            variant="danger",
        )
        decoded = json.loads(serialize_notification_for_websocket(notif))
        assert decoded["op"] == "alert"
        assert decoded["data"]["variant"] == "danger"
    def test_produces_valid_json(self) -> None:
        """Test that output is always valid JSON."""
        notif = KernelStartupErrorNotification(
            error='Error with "quotes" and special chars: <>&'
        )
        # json.loads raising here would fail the test.
        decoded = json.loads(serialize_notification_for_websocket(notif))
        assert "op" in decoded
        assert "data" in decoded
class TestIntegration:
    """Integration tests for the ws_formatter module."""
    def test_roundtrip_matches_expected_wire_format(self) -> None:
        """Test that the wire format matches what the frontend expects.
        The frontend expects messages in the format:
        {"op": "operation-name", "data": {...notification fields...}}
        """
        notif = KernelStartupErrorNotification(error="test error")
        decoded = json.loads(serialize_notification_for_websocket(notif))
        # Exactly the two expected top-level keys, each correctly typed.
        assert set(decoded.keys()) == {"op", "data"}
        assert isinstance(decoded["op"], str)
        assert isinstance(decoded["data"], dict)
        # The op on the wire mirrors the notification's registered name.
        assert decoded["op"] == notif.name
        assert decoded["op"] == "kernel-startup-error"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/ws/test_ws_formatter.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_session/managers/test_ipc.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import time
import pytest
@pytest.mark.requires("zmq")
class TestIPCConnection:
    def test_input_channel_direction(self) -> None:
        """Test that input flows from host to kernel (not vice versa).
        Regression test for #7972 where the input channel Push/Pull
        directions were inverted, causing input() to fail in sandbox mode.
        """
        from marimo._ipc.connection import Connection
        host, info = Connection.create()
        kernel = Connection.connect(info)
        # Allow ZeroMQ connections to establish
        time.sleep(0.05)
        try:
            # The host pushes the user's reply to an input() prompt...
            message = "user response"
            host.input.queue.put(message)
            # ...and the kernel side must be the one to receive it.
            assert kernel.input.queue.get(timeout=1.0) == message
        finally:
            host.close()
            kernel.close()
@pytest.mark.requires("zmq")
class TestIPCKernelManagerImpl:
    @staticmethod
    def _build_manager():
        """Construct an IPCKernelManagerImpl from mocks without starting a kernel."""
        from unittest.mock import MagicMock
        from marimo._session.managers.ipc import (
            IPCKernelManagerImpl,
            IPCQueueManagerImpl,
        )
        from marimo._session.model import SessionMode
        configs: dict = {}
        return IPCKernelManagerImpl(
            queue_manager=IPCQueueManagerImpl(MagicMock()),
            connection_info=MagicMock(),
            mode=SessionMode.EDIT,
            configs=configs,
            app_metadata=MagicMock(),
            config_manager=MagicMock(),
        )
    def test_venv_python_initial_value(self) -> None:
        """Test that venv_python is None before kernel starts."""
        assert self._build_manager().venv_python is None
    def test_venv_python_property_returns_stored_value(self) -> None:
        """Test that venv_python property returns the stored _venv_python value."""
        manager = self._build_manager()
        # Simulate what start_kernel records after creating the sandbox venv.
        manager._venv_python = "/path/to/sandbox/venv/python"
        assert manager.venv_python == "/path/to/sandbox/venv/python"
@pytest.mark.requires("zmq")
class TestIPCQueueManagerImpl:
    def test_from_ipc_factory(self) -> None:
        """Test that IPCQueueManagerImpl.from_ipc() creates a valid instance."""
        from marimo._ipc import QueueManager as IPCQueueManager
        from marimo._session.managers.ipc import IPCQueueManagerImpl
        backing, connection_info = IPCQueueManager.create()
        manager = IPCQueueManagerImpl.from_ipc(backing)
        # Every queue the session layer relies on must be exposed.
        for exposed_queue in (
            manager.control_queue,
            manager.completion_queue,
            manager.input_queue,
            manager.stream_queue,
            manager.set_ui_element_queue,
        ):
            assert exposed_queue is not None
        # connection_info should be valid
        assert connection_info is not None
        # Clean up
        manager.close_queues()
    def test_from_ipc_equals_direct_init(self) -> None:
        """Test that from_ipc() and __init__() produce equivalent results."""
        from marimo._ipc import QueueManager as IPCQueueManager
        from marimo._session.managers.ipc import IPCQueueManagerImpl
        backing, _ = IPCQueueManager.create()
        built_with_factory = IPCQueueManagerImpl.from_ipc(backing)
        built_directly = IPCQueueManagerImpl(backing)
        # Both construction paths wrap the very same IPC manager.
        assert built_with_factory._ipc is built_directly._ipc
        # Clean up
        built_with_factory.close_queues()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_session/managers/test_ipc.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/third_party/plotly/area_chart.py | # /// script
# requires-python = ">=3.9"
# dependencies = [
# "marimo",
# "pandas==2.3.3",
# "plotly==6.5.1",
# ]
# ///
import marimo
__generated_with = "0.19.7"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    return (mo,)
@app.cell
def _(mo):
    mo.md("""
    # Reactive Plotly Area Charts
    Use `mo.ui.plotly` to make area charts reactive. Select data by dragging
    a box on the chart, and get the selected points in Python!
    Area charts are scatter traces with `fill='tozeroy'` or similar fill options.
    """)
    return
@app.cell
def _():
    import plotly.graph_objects as go
    import pandas as pd
    return go, pd
@app.cell
def _(pd):
    # Create sample data
    # 12 months of synthetic revenue/cost figures, shared by all charts below.
    data = pd.DataFrame(
        {
            "month": list(range(1, 13)),
            "revenue": [45, 52, 48, 65, 72, 68, 80, 85, 78, 90, 95, 88],
            "costs": [30, 35, 32, 40, 45, 42, 50, 48, 52, 55, 58, 54],
        }
    )
    data
    return (data,)
@app.cell(hide_code=True)
def _(data, go, mo):
    # 1. Basic area chart with fill='tozeroy'
    fig1 = go.Figure()
    fig1.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["revenue"],
            fill="tozeroy",
            mode="lines",
            name="Revenue",
            line=dict(color="#636EFA", width=2),
        )
    )
    fig1.update_layout(
        title="Monthly Revenue (Area Chart)",
        xaxis_title="Month",
        yaxis_title="Revenue ($1000s)",
    )
    # Wrapping in mo.ui.plotly makes box selections available in Python.
    area_chart = mo.ui.plotly(fig1)
    area_chart
    return (area_chart,)
@app.cell
def _(area_chart, mo):
    mo.md(f"""
    ## Basic Area Chart (fill='tozeroy')
    **Instructions:** Use the box select tool (in the toolbar) to select a range.
    ### Selected Points:
    {area_chart.value}
    ### Selection Range:
    {area_chart.ranges}
    ### Indices:
    {area_chart.indices}
    """)
    return
@app.cell(hide_code=True)
def _(data, go, mo):
    # 2. Stacked area chart
    # Two traces share stackgroup="one" so profit stacks on top of costs.
    fig2 = go.Figure()
    fig2.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["costs"],
            fill="tozeroy",
            stackgroup="one",
            mode="lines",
            name="Costs",
            line=dict(color="#EF553B", width=2),
        )
    )
    fig2.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["revenue"] - data["costs"],
            fill="tonexty",
            stackgroup="one",
            mode="lines",
            name="Profit",
            line=dict(color="#00CC96", width=2),
        )
    )
    fig2.update_layout(
        title="Costs vs Profit (Stacked Area)",
        xaxis_title="Month",
        yaxis_title="Amount ($1000s)",
    )
    stacked_area = mo.ui.plotly(fig2)
    stacked_area
    return (stacked_area,)
@app.cell
def _(mo, stacked_area):
    mo.md(f"""
    ## Stacked Area Chart (stackgroup)
    **Instructions:** Use the box select tool to select a range.
    Points from both areas will be returned!
    ### Selected Points:
    {stacked_area.value}
    ### Number of selected points:
    {len(stacked_area.value)}
    ### Selection Range:
    {stacked_area.ranges}
    """)
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/third_party/plotly/area_chart.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:examples/third_party/plotly/line_chart.py | # /// script
# requires-python = ">=3.9"
# dependencies = [
# "marimo",
# "pandas==2.3.3",
# "plotly==6.5.1",
# ]
# ///
import marimo
__generated_with = "0.19.7"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    return (mo,)
@app.cell
def _(mo):
    mo.md("""
    # Reactive Plotly Line Charts
    Use `mo.ui.plotly` to make line charts reactive. Select data by dragging
    a box on the chart, and get the selected points in Python!
    """)
    return
@app.cell
def _():
    import plotly.graph_objects as go
    import pandas as pd
    return go, pd
@app.cell
def _(pd):
    # Create sample time series data
    # 12 months of synthetic sales/expense figures, shared by all charts below.
    data = pd.DataFrame(
        {
            "month": list(range(1, 13)),
            "sales": [45, 52, 48, 65, 72, 68, 80, 85, 78, 90, 95, 88],
            "expenses": [30, 35, 32, 40, 45, 42, 50, 48, 52, 55, 58, 54],
        }
    )
    data
    return (data,)
@app.cell(hide_code=True)
def _(data, go, mo):
    # 1. Simple line chart
    fig1 = go.Figure()
    fig1.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["sales"],
            mode="lines",
            name="Sales",
            line=dict(color="#636EFA", width=2),
        )
    )
    fig1.update_layout(
        title="Monthly Sales", xaxis_title="Month", yaxis_title="Sales ($1000s)"
    )
    # Wrapping in mo.ui.plotly makes box selections available in Python.
    line_chart = mo.ui.plotly(fig1)
    line_chart
    return (line_chart,)
@app.cell
def _(line_chart, mo):
    mo.md(f"""
    ## Simple Line Chart
    **Instructions:** Use the box select tool (in the toolbar) to select a range.
    ### Selected Points:
    {line_chart.value}
    ### Selection Range:
    {line_chart.ranges}
    """)
    return
@app.cell(hide_code=True)
def _(data, go, mo):
    # 2. Line chart with markers
    fig2 = go.Figure()
    fig2.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["sales"],
            mode="lines+markers",
            name="Sales",
            line=dict(color="#636EFA", width=2),
            marker=dict(size=8),
        )
    )
    fig2.update_layout(
        title="Monthly Sales (with markers)",
        xaxis_title="Month",
        yaxis_title="Sales ($1000s)",
    )
    line_markers = mo.ui.plotly(fig2)
    line_markers
    return (line_markers,)
@app.cell
def _(line_markers, mo):
    mo.md(f"""
    ## Line Chart with Markers
    **Instructions:** Use the box select tool to select a range.
    ### Selected Points:
    {line_markers.value}
    """)
    return
@app.cell(hide_code=True)
def _(data, go, mo):
    # 3. Multiple lines
    # Two traces on one figure; a selection spans points from both.
    fig3 = go.Figure()
    fig3.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["sales"],
            mode="lines",
            name="Sales",
            line=dict(color="#636EFA", width=2),
        )
    )
    fig3.add_trace(
        go.Scatter(
            x=data["month"],
            y=data["expenses"],
            mode="lines",
            name="Expenses",
            line=dict(color="#EF553B", width=2),
        )
    )
    fig3.update_layout(
        title="Sales vs Expenses",
        xaxis_title="Month",
        yaxis_title="Amount ($1000s)",
    )
    multi_line = mo.ui.plotly(fig3)
    multi_line
    return (multi_line,)
@app.cell
def _(mo, multi_line):
    mo.md(f"""
    ## Multiple Lines
    **Instructions:** Use the box select tool to select a range.
    Points from both lines will be returned!
    ### Selected Points:
    {multi_line.value}
    ### Number of selected points:
    {len(multi_line.value)}
    """)
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/third_party/plotly/line_chart.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:examples/third_party/plotly/bar_chart.py | # /// script
# requires-python = ">=3.9"
# dependencies = [
# "marimo",
# "plotly==6.5.1",
# ]
# ///
import marimo
__generated_with = "0.19.7"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    import plotly.graph_objects as go
    return go, mo
@app.cell
def _(mo):
    mo.md("""
    # Plotly Bar Chart Selection
    This example demonstrates reactive bar chart selections with `mo.ui.plotly`.
    Select bars by clicking or dragging a box to see the selected data.
    """)
    return
@app.cell
def _(go, mo):
    mo.md("## Simple Vertical Bar Chart")
    # Create a simple bar chart with categorical data
    fig_simple = go.Figure(
        data=go.Bar(
            x=["Product A", "Product B", "Product C", "Product D", "Product E"],
            y=[20, 35, 30, 25, 40],
            marker_color="steelblue",
        )
    )
    fig_simple.update_layout(
        title="Sales by Product",
        xaxis_title="Product",
        yaxis_title="Sales ($k)",
    )
    # Wrap with mo.ui.plotly to make it reactive
    bar_chart = mo.ui.plotly(fig_simple)
    return (bar_chart,)
@app.cell
def _(bar_chart, mo):
    mo.md(f"""
    ### Interactive Chart
    {bar_chart}
    ### Selected Bars
    Select bars by dragging a box over them:
    ```python
    {bar_chart.value}
    ```
    """)
    return
@app.cell
def _(go, mo):
    mo.md("## Stacked Bar Chart")
    # Create a stacked bar chart
    # barmode="stack" (below) stacks the two product traces per quarter.
    fig_stacked = go.Figure()
    fig_stacked.add_trace(
        go.Bar(
            x=["Q1", "Q2", "Q3", "Q4"],
            y=[15, 20, 18, 22],
            name="Product A",
            marker_color="steelblue",
        )
    )
    fig_stacked.add_trace(
        go.Bar(
            x=["Q1", "Q2", "Q3", "Q4"],
            y=[10, 15, 12, 18],
            name="Product B",
            marker_color="lightcoral",
        )
    )
    fig_stacked.update_layout(
        title="Quarterly Sales by Product",
        xaxis_title="Quarter",
        yaxis_title="Sales ($k)",
        barmode="stack",
    )
    stacked_chart = mo.ui.plotly(fig_stacked)
    return (stacked_chart,)
@app.cell
def _(mo, stacked_chart):
    mo.md(f"""
    ### Stacked Bar Chart
    {stacked_chart}
    **Note:** When you select a stacked bar, all segments at that position are returned!
    ### Selected Data
    ```python
    {stacked_chart.value}
    ```
    """)
    return
@app.cell
def _(go, mo):
    mo.md("## Grouped Bar Chart")
    # Create a grouped bar chart
    # barmode="group" (below) places the two year traces side by side.
    fig_grouped = go.Figure()
    fig_grouped.add_trace(
        go.Bar(
            x=["Jan", "Feb", "Mar", "Apr"],
            y=[20, 25, 22, 28],
            name="2024",
            marker_color="steelblue",
        )
    )
    fig_grouped.add_trace(
        go.Bar(
            x=["Jan", "Feb", "Mar", "Apr"],
            y=[18, 23, 20, 25],
            name="2025",
            marker_color="lightcoral",
        )
    )
    fig_grouped.update_layout(
        title="Monthly Sales Comparison",
        xaxis_title="Month",
        yaxis_title="Sales ($k)",
        barmode="group",
    )
    grouped_chart = mo.ui.plotly(fig_grouped)
    return (grouped_chart,)
@app.cell
def _(grouped_chart, mo):
    mo.md(f"""
    ### Grouped Bar Chart
    {grouped_chart}
    **Note:** When you select a category, all bars in that group are returned!
    ### Selected Data
    ```python
    {grouped_chart.value}
    ```
    """)
    return
@app.cell
def _(go, mo):
    mo.md("## Horizontal Bar Chart")
    # Create a horizontal bar chart
    fig_horizontal = go.Figure(
        data=go.Bar(
            x=[30, 45, 35, 50, 40],
            y=["Team A", "Team B", "Team C", "Team D", "Team E"],
            orientation="h",
            marker_color="mediumseagreen",
        )
    )
    fig_horizontal.update_layout(
        title="Team Performance",
        xaxis_title="Score",
        yaxis_title="Team",
    )
    horizontal_chart = mo.ui.plotly(fig_horizontal)
    return (horizontal_chart,)
@app.cell
def _(horizontal_chart, mo):
    mo.md(f"""
    ### Horizontal Bar Chart
    {horizontal_chart}
    ### Selected Data
    ```python
    {horizontal_chart.value}
    ```
    """)
    return
@app.cell
def _(mo):
    mo.md("""
    ## How It Works
    - **Selection**: Drag a box over bars to select them
    - **Categorical axes**: Each bar spans from (index - 0.5) to (index + 0.5)
    - **Stacked/Grouped**: All bars at a position are returned when that position is selected
    - **Data format**: Returns a list of `{"x": value, "y": value, "curveNumber": trace_index}`
    This allows you to build reactive dashboards where selecting bars filters other
    visualizations or displays detailed information about the selected data.
    """)
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/third_party/plotly/bar_chart.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_sql/engines/adbc.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from types import ModuleType
from typing import TYPE_CHECKING, Any, Literal, Optional, Protocol, Union, cast
from marimo import _loggers
from marimo._data.models import (
Database,
DataTable,
DataTableColumn,
DataTableType,
DataType,
Schema,
)
from marimo._sql.engines.types import InferenceConfig, SQLConnection
from marimo._sql.utils import CHEAP_DISCOVERY_DATABASES, convert_to_output
from marimo._types.ids import VariableName
LOGGER = _loggers.marimo_logger()
if TYPE_CHECKING:
from collections.abc import Sequence
import pandas as pd
import polars as pl
import pyarrow as pa
# Depth levels accepted by adbc_get_objects(); controls how much of the
# catalog -> schema -> table -> column tree the driver returns.
AdbcGetObjectsDepth = Literal[
    "all", "catalogs", "db_schemas", "tables", "columns"
]
class AdbcDbApiCursor(Protocol):
    """Structural type for the cursor exposed by ADBC DB-API wrappers."""
    # DB-API cursor description; exact shape depends on the driver.
    description: Any
    def execute(
        self, query: str, parameters: Sequence[Any] = ...
    ) -> AdbcDbApiCursor: ...
    def fetch_arrow_table(self) -> pa.Table: ...
    def close(self) -> None: ...
class AdbcDbApiConnection(Protocol):
    """ADBC DB-API wrapper connection.
    For the canonical DB-API types/signatures, see:
    https://arrow.apache.org/adbc/current/python/api/adbc_driver_manager.html#adbc_driver_manager.dbapi.Connection
    """
    def cursor(self) -> AdbcDbApiCursor: ...
    def commit(self) -> None: ...
    def rollback(self) -> None: ...
    def close(self) -> None: ...
    # Current catalog/schema; not every driver supports reading these
    # (see AdbcConnectionCatalog.get_default_database below).
    adbc_current_catalog: str
    adbc_current_db_schema: str
    def adbc_get_objects(
        self,
        *,
        depth: AdbcGetObjectsDepth = "all",
        catalog_filter: str | None = None,
        db_schema_filter: str | None = None,
        table_name_filter: str | None = None,
        table_types_filter: list[str] | None = None,
        column_name_filter: str | None = None,
    ) -> pa.RecordBatchReader: ...
    def adbc_get_table_schema(
        self, table_name: str, *, db_schema_filter: str | None = None
    ) -> pa.Schema: ...
    def adbc_get_info(self) -> dict[str | int, Any]: ...
def _resolve_table_type(table_type: str) -> DataTableType:
if "view" in table_type.lower():
return "view"
return "table"
def _adbc_info_to_dialect(*, info: dict[str | int, Any]) -> str:
"""Infer marimo's dialect identifier from ADBC metadata.
Notes:
ADBC DB-API wrappers expose driver/database metadata via ``adbc_get_info()``,
including a ``vendor_name`` and ``driver_name`` (see ADBC quickstart:
https://arrow.apache.org/adbc/current/python/quickstart.html).
In marimo, ``engine.dialect`` is used primarily for editor/formatter dialect
selection and for display in the UI.
"""
vendor_name = info.get("vendor_name")
vendor = vendor_name if isinstance(vendor_name, str) else None
if vendor is not None and vendor.strip():
return vendor.strip().lower()
return "sql"
def _schema_field_to_data_type(external_type: str) -> DataType:
"""Map an Arrow-like dtype string to marimo DataType."""
t = external_type.lower()
if "bool" in t:
return "boolean"
if "int" in t or "uint" in t:
return "integer"
if "float" in t or "double" in t or "decimal" in t:
return "number"
if "timestamp" in t:
return "datetime"
if t.startswith("date") or " date" in t:
return "date"
if t.startswith("time") or " time" in t:
return "time"
return "string"
class AdbcConnectionCatalog:
    """Catalog implementation backed by ADBC DB-API wrapper extensions."""
    def __init__(
        self,
        *,
        adbc_connection: AdbcDbApiConnection,
        dialect: str,
        engine_name: Optional[VariableName],
    ) -> None:
        # Connection used for all metadata queries; dialect/engine_name are
        # echoed into the DataTable/Database objects this catalog builds.
        self._adbc_connection = adbc_connection
        self._dialect = dialect
        self._engine_name = engine_name
    def get_default_database(self) -> Optional[str]:
        """Return the connection's current catalog, or None if unsupported."""
        try:
            return self._adbc_connection.adbc_current_catalog
        except Exception:
            # Some drivers (like arrow-adbc-driver-sqlite) do not support the standardized option for current
            # catalog/schema; treat as unavailable.
            LOGGER.debug("Failed to read ADBC current catalog", exc_info=True)
            return None
    def get_default_schema(self) -> Optional[str]:
        """Return the connection's current schema, or None if unsupported."""
        try:
            return self._adbc_connection.adbc_current_db_schema
        except Exception:
            LOGGER.debug("Failed to read ADBC current schema", exc_info=True)
            return None
    def _resolve_should_auto_discover(
        self, value: Union[bool, Literal["auto"]]
    ) -> bool:
        # "auto" resolves to True only for dialects where discovery is cheap.
        if value == "auto":
            return self._dialect.lower() in CHEAP_DISCOVERY_DATABASES
        return value
    def get_databases(
        self,
        *,
        include_schemas: Union[bool, Literal["auto"]],
        include_tables: Union[bool, Literal["auto"]],
        include_table_details: Union[bool, Literal["auto"]],
    ) -> list[Database]:
        """Discover databases (and optionally schemas/tables/details)."""
        databases: list[Database] = []
        include_schemas_bool = self._resolve_should_auto_discover(
            include_schemas
        )
        include_tables_bool = self._resolve_should_auto_discover(
            include_tables
        )
        include_table_details_bool = self._resolve_should_auto_discover(
            include_table_details
        )
        # The flags form a hierarchy: tables require schemas, and details
        # require tables.
        if not include_schemas_bool:
            include_tables_bool = False
        if not include_tables_bool:
            include_table_details_bool = False
        # Request only as much of the object tree as the flags need.
        depth: AdbcGetObjectsDepth
        if not include_schemas_bool:
            depth = "catalogs"
        elif not include_tables_bool:
            depth = "db_schemas"
        else:
            depth = "tables"
        objects_pylist = (
            self._adbc_connection.adbc_get_objects(depth=depth)
            .read_all()
            .to_pylist()
        )
        for catalog_row in objects_pylist:
            # Drivers may report a null catalog/schema name; normalize to "".
            catalog_name_obj = catalog_row.get("catalog_name")
            catalog_name = (
                "" if catalog_name_obj is None else str(catalog_name_obj)
            )
            schemas: list[Schema] = []
            if include_schemas_bool:
                schema_rows = catalog_row.get("catalog_db_schemas") or []
                for schema_row in schema_rows:
                    schema_name_obj = schema_row.get("db_schema_name")
                    schema_name = (
                        "" if schema_name_obj is None else str(schema_name_obj)
                    )
                    tables: list[DataTable] = []
                    if include_tables_bool:
                        table_rows = schema_row.get("db_schema_tables") or []
                        for table_row in table_rows:
                            table_name_obj = table_row.get("table_name")
                            if table_name_obj is None:
                                continue
                            table_name = str(table_name_obj)
                            table_type_obj = (
                                table_row.get("table_type") or "TABLE"
                            )
                            table_type = str(table_type_obj)
                            if include_table_details_bool:
                                # Fetch per-table column metadata; a failed
                                # lookup silently omits the table.
                                details = self.get_table_details(
                                    table_name=table_name,
                                    schema_name=schema_name,
                                    database_name=catalog_name,
                                )
                                if details is not None:
                                    details.type = _resolve_table_type(
                                        table_type
                                    )
                                    tables.append(details)
                            else:
                                tables.append(
                                    DataTable(
                                        source_type="connection",
                                        source=self._dialect,
                                        name=table_name,
                                        num_rows=None,
                                        num_columns=None,
                                        variable_name=None,
                                        engine=self._engine_name,
                                        type=_resolve_table_type(table_type),
                                        columns=[],
                                        primary_keys=[],
                                        indexes=[],
                                    )
                                )
                    schemas.append(Schema(name=schema_name, tables=tables))
            databases.append(
                Database(
                    name=catalog_name,
                    dialect=self._dialect,
                    schemas=schemas,
                    engine=self._engine_name,
                )
            )
        return databases
    def get_tables_in_schema(
        self, *, schema: str, database: str, include_table_details: bool
    ) -> list[DataTable]:
        """List the tables in one schema, optionally with column details."""
        tables: list[DataTable] = []
        # Empty filter strings are passed as None so the driver treats them
        # as "no filter".
        objects_pylist = (
            self._adbc_connection.adbc_get_objects(
                depth="tables",
                catalog_filter=database or None,
                db_schema_filter=schema or None,
            )
            .read_all()
            .to_pylist()
        )
        for catalog_row in objects_pylist:
            schema_rows = catalog_row.get("catalog_db_schemas") or []
            for schema_row in schema_rows:
                table_rows = schema_row.get("db_schema_tables") or []
                for table_row in table_rows:
                    table_name_obj = table_row.get("table_name")
                    if table_name_obj is None:
                        continue
                    table_name = str(table_name_obj)
                    table_type_obj = table_row.get("table_type") or "TABLE"
                    table_type = str(table_type_obj)
                    if include_table_details:
                        details = self.get_table_details(
                            table_name=table_name,
                            schema_name=schema,
                            database_name=database,
                        )
                        if details is not None:
                            details.type = _resolve_table_type(table_type)
                            tables.append(details)
                    else:
                        tables.append(
                            DataTable(
                                source_type="connection",
                                source=self._dialect,
                                name=table_name,
                                num_rows=None,
                                num_columns=None,
                                variable_name=None,
                                engine=self._engine_name,
                                type=_resolve_table_type(table_type),
                                columns=[],
                                primary_keys=[],
                                indexes=[],
                            )
                        )
        return tables
    def get_table_details(
        self, *, table_name: str, schema_name: str, database_name: str
    ) -> Optional[DataTable]:
        """Build a DataTable with column info, or None if the lookup fails."""
        # database_name is unused: adbc_get_table_schema only accepts a
        # schema filter (it is still reported in the warning below).
        _ = database_name
        try:
            schema = self._adbc_connection.adbc_get_table_schema(
                table_name, db_schema_filter=schema_name or None
            )
        except Exception:
            LOGGER.warning(
                "Failed to get table schema for %s.%s.%s",
                database_name,
                schema_name,
                table_name,
                exc_info=True,
            )
            return None
        cols: list[DataTableColumn] = []
        try:
            # Iterating a pyarrow Schema yields its fields.
            for field in cast(Any, schema):
                external_type = str(getattr(field, "type", "string"))
                cols.append(
                    DataTableColumn(
                        name=str(getattr(field, "name", "")),
                        type=_schema_field_to_data_type(external_type),
                        external_type=external_type,
                        sample_values=[],
                    )
                )
        except Exception:
            LOGGER.warning("Failed to parse ADBC table schema", exc_info=True)
            cols = []
        return DataTable(
            source_type="connection",
            source=self._dialect,
            name=table_name,
            num_rows=None,
            num_columns=len(cols) if cols else None,
            variable_name=None,
            engine=self._engine_name,
            columns=cols,
            primary_keys=[],
            indexes=[],
        )
class AdbcDBAPIEngine(SQLConnection[AdbcDbApiConnection]):
    """ADBC DB-API wrapper connection."""
    def __init__(
        self,
        connection: AdbcDbApiConnection,
        engine_name: Optional[VariableName] = None,
    ) -> None:
        """Wrap an ADBC DB-API connection.

        Args:
            connection: A live ADBC DB-API connection object.
            engine_name: Optional variable name the connection is bound to.
        """
        super().__init__(connection, engine_name)
        # Catalog discovery (databases/schemas/tables) is delegated to a
        # helper that shares this engine's connection, dialect, and name.
        self._catalog = AdbcConnectionCatalog(
            adbc_connection=self._connection,
            dialect=self.dialect,
            engine_name=self._engine_name,
        )
    @property
    def source(self) -> str:
        """Engine-family identifier ("adbc")."""
        return "adbc"
    @property
    def dialect(self) -> str:
        """Best-effort SQL dialect derived from ADBC driver metadata.

        Queried from the driver on each access (not cached); falls back
        to the generic "sql" when the driver raises or returns a
        non-dict payload.
        """
        try:
            info = self._connection.adbc_get_info()
            if isinstance(info, dict):
                return _adbc_info_to_dialect(info=info)
        except Exception:
            LOGGER.debug("Failed to read ADBC driver metadata", exc_info=True)
        return "sql"
    @staticmethod
    def is_compatible(var: Any) -> bool:
        """Duck-type check: does ``var`` look like an ADBC DB-API connection?

        Probes the connection and cursor surface without executing any
        query. Never raises; any probing failure yields False.
        """
        if isinstance(var, ModuleType):
            return False
        # Ibis Deferred expression object should not be handled as datasource #7791
        var_type = type(var)
        var_type_name = f"{var_type.__module__}.{var_type.__qualname__}"
        if var_type_name == "ibis.common.deferred.Deferred":
            return False
        try:
            # First, validate the connection-level surface area to avoid
            # accidentally classifying regular DB-API connections as ADBC.
            required_connection_methods = (
                "cursor",
                "commit",
                "rollback",
                "close",
                # ADBC DB-API extension methods.
                "adbc_get_objects",
                "adbc_get_table_schema",
                "adbc_get_info",
            )
            if not all(
                callable(getattr(var, method, None))
                for method in required_connection_methods
            ):
                return False
            # Then, validate the cursor shape (ADBC-specific).
            # We do not execute queries; we also best-effort close the cursor
            # to avoid leaking resources during compatibility checks.
            cursor = var.cursor()
            try:
                required_cursor_methods = ("execute", "fetch_arrow_table")
                return all(
                    callable(getattr(cursor, method, None))
                    for method in required_cursor_methods
                )
            finally:
                # Never fail compatibility checks due to close errors
                try:
                    cursor.close()
                except Exception:
                    LOGGER.debug(
                        "Failed to close cursor during ADBC compatibility check",
                        exc_info=True,
                    )
        except Exception:
            LOGGER.debug("ADBC compatibility check failed", exc_info=True)
            return False
    @property
    def inference_config(self) -> InferenceConfig:
        """Auto-discovery policy: schemas always, tables heuristically,
        columns never (each column fetch is one driver round-trip)."""
        return InferenceConfig(
            auto_discover_schemas=True,
            auto_discover_tables="auto",
            auto_discover_columns=False,
        )
    def get_default_database(self) -> Optional[str]:
        """Delegate to the catalog helper."""
        return self._catalog.get_default_database()
    def get_default_schema(self) -> Optional[str]:
        """Delegate to the catalog helper."""
        return self._catalog.get_default_schema()
    def get_databases(
        self,
        *,
        include_schemas: Union[bool, Literal["auto"]],
        include_tables: Union[bool, Literal["auto"]],
        include_table_details: Union[bool, Literal["auto"]],
    ) -> list[Database]:
        """Enumerate databases (and optionally schemas/tables/columns)."""
        return self._catalog.get_databases(
            include_schemas=include_schemas,
            include_tables=include_tables,
            include_table_details=include_table_details,
        )
    def get_tables_in_schema(
        self, *, schema: str, database: str, include_table_details: bool
    ) -> list[DataTable]:
        """List the tables of one schema via the catalog helper."""
        return self._catalog.get_tables_in_schema(
            schema=schema,
            database=database,
            include_table_details=include_table_details,
        )
    def get_table_details(
        self, *, table_name: str, schema_name: str, database_name: str
    ) -> Optional[DataTable]:
        """Fetch column metadata for one table via the catalog helper."""
        return self._catalog.get_table_details(
            table_name=table_name,
            schema_name=schema_name,
            database_name=database_name,
        )
    def execute(
        self, query: str, parameters: Optional[Sequence[Any]] = None
    ) -> Any:
        """Execute ``query`` and return results in the configured format.

        Statements without a result set (no cursor description) return
        None after a best-effort commit. Result sets are fetched as one
        Arrow table and converted lazily to polars/pandas/native. The
        cursor is always closed, even when execution raises.
        """
        sql_output_format = self.sql_output_format()
        cursor = self._connection.cursor()
        def _try_commit() -> None:
            # Commit is best-effort: failures are logged, never raised.
            try:
                self._connection.commit()
            except Exception:
                LOGGER.info("Unable to commit transaction", exc_info=True)
        try:
            cursor.execute(query, parameters or ())
            # No description => no result set (DDL / DML statements).
            if not getattr(cursor, "description", None):
                _try_commit()
                return None
            arrow_table = cursor.fetch_arrow_table()
            def convert_to_polars() -> pl.DataFrame | pl.Series:
                import polars as pl
                return pl.from_arrow(arrow_table)
            def convert_to_pandas() -> pd.DataFrame:
                return arrow_table.to_pandas()
            result = convert_to_output(
                sql_output_format=sql_output_format,
                to_polars=convert_to_polars,
                to_pandas=convert_to_pandas,
                to_native=lambda: arrow_table,
            )
            _try_commit()
            return result
        finally:
            try:
                cursor.close()
            except Exception:
                LOGGER.info("Failed to close cursor", exc_info=True)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_sql/engines/adbc.py",
"license": "Apache License 2.0",
"lines": 459,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_sql/test_adbc.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, cast
import pytest
from marimo._data.models import Database, DataTable, Schema
from marimo._sql.engines.adbc import (
AdbcConnectionCatalog,
AdbcDBAPIEngine,
_adbc_info_to_dialect,
)
from marimo._sql.get_engines import get_engines_from_variables
from marimo._types.ids import VariableName
if TYPE_CHECKING:
from collections.abc import Iterator, Sequence
import pyarrow as pa
@dataclass
class FakeAdbcSchemaField:
    """Minimal stand-in for a pyarrow.Field: just ``name`` and ``type``."""
    name: str
    type: str
class FakeAdbcTableSchema:
    """Iterable fake of a pyarrow.Schema: yields its fields in order."""
    def __init__(self, fields: list[FakeAdbcSchemaField]) -> None:
        self._fields = fields
    def __iter__(self) -> Iterator[FakeAdbcSchemaField]:
        yield from self._fields
class FakeAdbcObjectsTable:
    """Fake pyarrow table wrapping a prebuilt python list of row dicts."""
    def __init__(self, pylist: list[dict[str, Any]]) -> None:
        self._rows = pylist
    def to_pylist(self) -> list[dict[str, Any]]:
        return self._rows
class FakeAdbcObjectsReader:
    """Fake of the RecordBatchReader returned by adbc_get_objects()."""
    def __init__(self, pylist: list[dict[str, Any]]) -> None:
        self._table = FakeAdbcObjectsTable(pylist)
    def read_all(self) -> FakeAdbcObjectsTable:
        # Emulates reader.read_all() returning a materialized table.
        return self._table
class FakeAdbcDbApiCursor:
    """In-memory stand-in for an ADBC DB-API cursor.

    Records which lifecycle methods were invoked so tests can assert on
    the engine's cursor usage (execute / fetch_arrow_table / close).
    """
    def __init__(
        self,
        *,
        description: list[tuple[str, Any]] | None,
        arrow_table: pa.Table | None = None,
    ) -> None:
        self.description = description
        self._arrow_table: pa.Table | None = arrow_table
        # Interaction flags asserted by tests.
        self.did_execute = False
        self.did_fetch_arrow = False
        self.did_close = False
    def execute(
        self, query: str, parameters: Sequence[Any] = ()
    ) -> FakeAdbcDbApiCursor:
        """Record the call; the query text itself is ignored."""
        _ = query, parameters
        self.did_execute = True
        return self
    def fetch_arrow_table(self) -> pa.Table:
        """Return the preconfigured Arrow table (must have been supplied)."""
        self.did_fetch_arrow = True
        assert self._arrow_table is not None
        return self._arrow_table
    def close(self) -> None:
        self.did_close = True
class FakeAdbcDbApiConnection:
    """In-memory fake of an ADBC DB-API connection.

    Implements the PEP 249 surface (cursor/commit/rollback/close) plus
    the ADBC extension methods, recording calls for test assertions and
    emulating driver-side filtering of the object catalog.
    """
    adbc_current_catalog: str
    adbc_current_db_schema: str
    def __init__(
        self,
        *,
        cursor: FakeAdbcDbApiCursor,
        objects_pylist: list[dict[str, Any]],
        table_schema: FakeAdbcTableSchema,
    ) -> None:
        self._cursor = cursor
        self._objects_pylist = objects_pylist
        self._table_schema = table_schema
        # Interaction flags asserted by tests.
        self.did_commit = False
        self.did_rollback = False
        self.did_close = False
        self.dialect = "postgresql"
        self.adbc_current_catalog = "db1"
        self.adbc_current_db_schema = "public"
        self.did_create_cursor = False
        # Captures the kwargs of the most recent adbc_get_objects() call.
        self.last_get_objects_kwargs: dict[str, Any] | None = None
        self.adbc_get_info_calls = 0
    def cursor(self) -> FakeAdbcDbApiCursor:
        self.did_create_cursor = True
        return self._cursor
    def commit(self) -> None:
        self.did_commit = True
    def rollback(self) -> None:
        self.did_rollback = True
    def close(self) -> None:
        self.did_close = True
    # ADBC DB-API extension methods
    def adbc_get_objects(
        self,
        *,
        depth: str = "all",
        catalog_filter: str | None = None,
        db_schema_filter: str | None = None,
        table_name_filter: str | None = None,
        table_types_filter: list[str] | None = None,
        column_name_filter: str | None = None,
    ) -> pa.RecordBatchReader:
        """Emulate driver-side filtering and depth truncation of the catalog."""
        self.last_get_objects_kwargs = {
            "depth": depth,
            "catalog_filter": catalog_filter,
            "db_schema_filter": db_schema_filter,
            "table_name_filter": table_name_filter,
            "table_types_filter": table_types_filter,
            "column_name_filter": column_name_filter,
        }
        catalogs: list[dict[str, Any]] = []
        for catalog_row in self._objects_pylist:
            catalog_name_obj = catalog_row.get("catalog_name")
            catalog_name = (
                "" if catalog_name_obj is None else str(catalog_name_obj)
            )
            if catalog_filter is not None and catalog_name != catalog_filter:
                continue
            next_catalog_row: dict[str, Any] = dict(catalog_row)
            # depth="catalogs": truncate everything below the catalog level.
            if depth == "catalogs":
                next_catalog_row["catalog_db_schemas"] = []
                catalogs.append(next_catalog_row)
                continue
            schemas = catalog_row.get("catalog_db_schemas") or []
            next_schemas: list[dict[str, Any]] = []
            for schema_row in schemas:
                schema_name_obj = schema_row.get("db_schema_name")
                schema_name = (
                    "" if schema_name_obj is None else str(schema_name_obj)
                )
                if (
                    db_schema_filter is not None
                    and schema_name != db_schema_filter
                ):
                    continue
                next_schema_row: dict[str, Any] = dict(schema_row)
                # depth="db_schemas": truncate everything below the schema level.
                if depth == "db_schemas":
                    next_schema_row["db_schema_tables"] = []
                    next_schemas.append(next_schema_row)
                    continue
                tables = schema_row.get("db_schema_tables") or []
                next_tables: list[dict[str, Any]] = []
                for table_row in tables:
                    table_name_obj = table_row.get("table_name")
                    if table_name_obj is None:
                        continue
                    table_name = str(table_name_obj)
                    if (
                        table_name_filter is not None
                        and table_name != table_name_filter
                    ):
                        continue
                    table_type_obj = table_row.get("table_type")
                    table_type = (
                        "" if table_type_obj is None else str(table_type_obj)
                    )
                    if (
                        table_types_filter is not None
                        and table_type not in table_types_filter
                    ):
                        continue
                    next_tables.append(dict(table_row))
                next_schema_row["db_schema_tables"] = next_tables
                next_schemas.append(next_schema_row)
            next_catalog_row["catalog_db_schemas"] = next_schemas
            catalogs.append(next_catalog_row)
        # ADBC DB-API wrapper returns a pyarrow.RecordBatchReader; our fake
        # emulates the reader's `read_all().to_pylist()` interface.
        return cast(Any, FakeAdbcObjectsReader(catalogs))
    def adbc_get_table_schema(
        self, table_name: str, *, db_schema_filter: str | None = None
    ) -> pa.Schema:
        """Return the preconfigured schema regardless of name/filter."""
        _ = table_name, db_schema_filter
        return cast(Any, self._table_schema)
    def adbc_get_info(self) -> dict[str | int, Any]:
        # Counting calls lets tests observe how often dialect is probed.
        self.adbc_get_info_calls += 1
        return cast(dict[str | int, Any], {"vendor_name": "PostgreSQL"})
def test_adbc_info_to_dialect() -> None:
    """Dialect resolution lowercases vendor_name; driver_name is ignored."""
    # (info payload, expected dialect) pairs.
    cases: list[tuple[dict[str, Any], str]] = [
        ({"vendor_name": "PostgreSQL", "driver_name": "SQLite"}, "postgresql"),
        ({"vendor_name": "Microsoft SQL Server"}, "microsoft sql server"),
        # Missing/blank/non-string vendor_name falls back to "sql".
        ({"vendor_name": " "}, "sql"),
        ({"vendor_name": 123}, "sql"),
        ({"driver_name": "AcmeDB"}, "sql"),
        ({}, "sql"),
    ]
    for info, expected in cases:
        assert _adbc_info_to_dialect(info=info) == expected
def test_get_engines_from_variables_prefers_adbc_dbapi_engine() -> None:
    """A duck-typed ADBC connection should resolve to AdbcDBAPIEngine."""
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=[],
        table_schema=FakeAdbcTableSchema([]),
    )
    engines = get_engines_from_variables([(VariableName("conn"), conn)])
    assert len(engines) == 1
    _, engine = engines[0]
    assert isinstance(engine, AdbcDBAPIEngine)
def test_adbc_catalog_parses_adbc_get_objects() -> None:
    """End-to-end parse of an adbc_get_objects payload into Database models."""
    objects_pylist = [
        {
            "catalog_name": "db1",
            "catalog_db_schemas": [
                {
                    "db_schema_name": "public",
                    "db_schema_tables": [
                        {"table_name": "t1", "table_type": "TABLE"},
                        {"table_name": "v1", "table_type": "VIEW"},
                    ],
                }
            ],
        }
    ]
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=objects_pylist,
        table_schema=FakeAdbcTableSchema(
            [
                FakeAdbcSchemaField(name="id", type="int64"),
                FakeAdbcSchemaField(name="name", type="utf8"),
            ]
        ),
    )
    engine = AdbcDBAPIEngine(conn, engine_name=VariableName("adbc_conn"))
    databases = engine.get_databases(
        include_schemas=True,
        include_tables=True,
        include_table_details=False,
    )
    # Tables-without-details should use a single depth="tables" call.
    assert conn.last_get_objects_kwargs is not None
    assert conn.last_get_objects_kwargs["depth"] == "tables"
    assert databases == [
        Database(
            name="db1",
            dialect="postgresql",
            engine=VariableName("adbc_conn"),
            schemas=[
                Schema(
                    name="public",
                    tables=[
                        DataTable(
                            source_type="connection",
                            source="postgresql",
                            name="t1",
                            num_rows=None,
                            num_columns=None,
                            variable_name=None,
                            engine=VariableName("adbc_conn"),
                            type="table",
                            columns=[],
                            primary_keys=[],
                            indexes=[],
                        ),
                        DataTable(
                            source_type="connection",
                            source="postgresql",
                            name="v1",
                            num_rows=None,
                            num_columns=None,
                            variable_name=None,
                            engine=VariableName("adbc_conn"),
                            type="view",
                            columns=[],
                            primary_keys=[],
                            indexes=[],
                        ),
                    ],
                )
            ],
        )
    ]
def test_adbc_execute_prefers_arrow_fetch(monkeypatch) -> None:
    """execute() must fetch via Arrow, convert, commit, and close."""
    sentinel_result = object()
    def fake_convert_to_output(*args: Any, **kwargs: Any) -> Any:
        _ = args, kwargs
        return sentinel_result
    # Intercept output conversion so we can observe it is routed through.
    monkeypatch.setattr(
        "marimo._sql.engines.adbc.convert_to_output", fake_convert_to_output
    )
    cursor = FakeAdbcDbApiCursor(
        description=[("col", None)],
        arrow_table=cast("pa.Table", object()),
    )
    conn = FakeAdbcDbApiConnection(
        cursor=cursor,
        objects_pylist=[],
        table_schema=FakeAdbcTableSchema([]),
    )
    engine = AdbcDBAPIEngine(conn)
    monkeypatch.setattr(engine, "sql_output_format", lambda: "auto")
    result = engine.execute("SELECT 1")
    assert result is sentinel_result
    assert cursor.did_fetch_arrow is True
    assert conn.did_commit is True
    assert cursor.did_close is True
def test_adbc_execute_native_returns_arrow_table(monkeypatch) -> None:
    """With "native" output format, execute() returns the Arrow table as-is."""
    arrow_table = cast("pa.Table", object())
    cursor = FakeAdbcDbApiCursor(
        description=[("col", None)], arrow_table=arrow_table
    )
    conn = FakeAdbcDbApiConnection(
        cursor=cursor,
        objects_pylist=[],
        table_schema=FakeAdbcTableSchema([]),
    )
    engine = AdbcDBAPIEngine(conn)
    monkeypatch.setattr(engine, "sql_output_format", lambda: "native")
    result = engine.execute("SELECT 1")
    # Identity check: no conversion/copy happens for native output.
    assert result is arrow_table
    assert cursor.did_execute is True
    assert cursor.did_fetch_arrow is True
    assert conn.did_commit is True
    assert cursor.did_close is True
def test_adbc_is_compatible_does_not_create_cursor() -> None:
    """Compatibility probing must not execute queries or fetch results.

    NOTE(review): despite the test name, a cursor IS created (and closed)
    to probe its shape — the assertions below pin exactly that behavior.
    """
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=[],
        table_schema=FakeAdbcTableSchema([]),
    )
    # is_compatible() validates cursor shape; it should not execute or fetch.
    assert AdbcDBAPIEngine.is_compatible(conn) is True
    assert conn.did_create_cursor is True
    assert conn._cursor.did_execute is False  # type: ignore[attr-defined]
    assert conn._cursor.did_fetch_arrow is False  # type: ignore[attr-defined]
    assert conn._cursor.did_close is True  # type: ignore[attr-defined]
def test_adbc_catalog_auto_discovery_uses_cheap_dialect_heuristic() -> None:
    """"auto" discovery resolves by dialect: cheap locals yes, warehouses no."""
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=[],
        table_schema=FakeAdbcTableSchema([]),
    )
    # sqlite is treated as cheap to introspect: "auto" resolves to True.
    cheap = AdbcConnectionCatalog(
        adbc_connection=conn,
        dialect="sqlite",
        engine_name=None,
    )
    assert cheap._resolve_should_auto_discover("auto") is True
    assert cheap._resolve_should_auto_discover(True) is True
    assert cheap._resolve_should_auto_discover(False) is False
    # snowflake is treated as expensive: "auto" resolves to False.
    expensive = AdbcConnectionCatalog(
        adbc_connection=conn,
        dialect="snowflake",
        engine_name=None,
    )
    assert expensive._resolve_should_auto_discover("auto") is False
def test_adbc_catalog_get_databases_uses_depth_catalogs_when_no_schemas() -> (
    None
):
    """Excluding schemas should request the cheapest depth ("catalogs")."""
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=[
            {
                "catalog_name": "db1",
                "catalog_db_schemas": [
                    {"db_schema_name": "public", "db_schema_tables": []}
                ],
            },
            {"catalog_name": "db2", "catalog_db_schemas": []},
        ],
        table_schema=FakeAdbcTableSchema([]),
    )
    engine = AdbcDBAPIEngine(conn)
    databases = engine.get_databases(
        include_schemas=False,
        include_tables=True,
        include_table_details=True,
    )
    assert conn.last_get_objects_kwargs is not None
    assert conn.last_get_objects_kwargs["depth"] == "catalogs"
    assert [db.name for db in databases] == ["db1", "db2"]
    # Schema info is dropped even when present in the driver payload.
    assert [db.schemas for db in databases] == [[], []]
def test_adbc_catalog_get_databases_uses_depth_db_schemas_when_no_tables() -> (
    None
):
    """Excluding tables should request depth="db_schemas" (no table rows)."""
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=[
            {
                "catalog_name": "db1",
                "catalog_db_schemas": [
                    {"db_schema_name": "public", "db_schema_tables": []},
                    {"db_schema_name": "empty", "db_schema_tables": []},
                ],
            }
        ],
        table_schema=FakeAdbcTableSchema([]),
    )
    engine = AdbcDBAPIEngine(conn)
    databases = engine.get_databases(
        include_schemas=True,
        include_tables=False,
        include_table_details=False,
    )
    assert conn.last_get_objects_kwargs is not None
    assert conn.last_get_objects_kwargs["depth"] == "db_schemas"
    assert [s.name for s in databases[0].schemas] == ["public", "empty"]
    assert [s.tables for s in databases[0].schemas] == [[], []]
def test_adbc_get_tables_in_schema_passes_filters() -> None:
    """Database/schema filters are pushed down to adbc_get_objects."""
    conn = FakeAdbcDbApiConnection(
        cursor=FakeAdbcDbApiCursor(description=None),
        objects_pylist=[
            {
                "catalog_name": "db1",
                "catalog_db_schemas": [
                    {
                        "db_schema_name": "public",
                        "db_schema_tables": [
                            {"table_name": "t1", "table_type": "TABLE"}
                        ],
                    },
                    {
                        "db_schema_name": "other",
                        "db_schema_tables": [
                            {"table_name": "t2", "table_type": "TABLE"}
                        ],
                    },
                ],
            },
            {
                "catalog_name": "db2",
                "catalog_db_schemas": [
                    {
                        "db_schema_name": "public",
                        "db_schema_tables": [
                            {"table_name": "t3", "table_type": "TABLE"}
                        ],
                    }
                ],
            },
        ],
        table_schema=FakeAdbcTableSchema([]),
    )
    engine = AdbcDBAPIEngine(conn)
    tables = engine.get_tables_in_schema(
        schema="public", database="db1", include_table_details=False
    )
    assert conn.last_get_objects_kwargs is not None
    assert conn.last_get_objects_kwargs["depth"] == "tables"
    assert conn.last_get_objects_kwargs["catalog_filter"] == "db1"
    assert conn.last_get_objects_kwargs["db_schema_filter"] == "public"
    # Only the db1.public table survives the filters.
    assert [t.name for t in tables] == ["t1"]
def _find_table_location(
    *, databases: list[Any], table_name: str
) -> tuple[str, str]:
    """Return the (database, schema) names containing *table_name*."""
    locations = (
        (db.name, schema.name)
        for db in databases
        for schema in db.schemas
        for table in schema.tables
        if table.name == table_name
    )
    found = next(locations, None)
    if found is None:
        raise AssertionError(f"Did not find table {table_name!r} in catalog")
    return found
def _table_summary(table: DataTable) -> dict[str, Any]:
    """Return a stable representation of a DataTable for assertions."""
    names = [column.name for column in table.columns]
    types = {column.name: column.type for column in table.columns}
    return {
        "name": table.name,
        "type": table.type,
        "num_columns": table.num_columns,
        "column_names": names,
        "column_types": types,
    }
def test_adbc_sqlite_driver_catalog_interface() -> None:
    """Smoke test marimo ADBC catalog interface against the real SQLite driver."""
    pytest.importorskip(
        "pyarrow", reason="ADBC DBAPI wrapper requires PyArrow"
    )
    adbc_sqlite_dbapi = pytest.importorskip("adbc_driver_sqlite.dbapi")
    # Explicitly use a fresh in-memory database per test run.
    conn: Any = adbc_sqlite_dbapi.connect(":memory:")
    try:
        def get_table(
            *,
            databases: list[Any],
            database_name: str,
            schema_name: str,
            table_name: str,
        ) -> DataTable:
            # Resolve one table by fully-qualified location, or fail loudly.
            for db in databases:
                if getattr(db, "name", None) != database_name:
                    continue
                for schema in getattr(db, "schemas", []):
                    if getattr(schema, "name", None) != schema_name:
                        continue
                    for table in getattr(schema, "tables", []):
                        if getattr(table, "name", None) == table_name:
                            return cast(DataTable, table)
            raise AssertionError(
                f"Did not find table {table_name!r} "
                f"in {database_name!r}.{schema_name!r}"
            )
        def sorted_table_summaries(
            tables: list[DataTable],
        ) -> list[dict[str, Any]]:
            # Stable ordering so table lists compare independent of order.
            return sorted(
                (_table_summary(t) for t in tables),
                key=lambda d: str(d["name"]),
            )
        engine = AdbcDBAPIEngine(conn)
        assert engine.source == "adbc"
        assert engine.dialect == "sqlite"
        assert AdbcDBAPIEngine.is_compatible(conn) is True
        # Minimal schema for catalog discovery.
        cursor = conn.cursor()
        try:
            cursor.execute("CREATE TABLE t (id INTEGER, name TEXT)")
            cursor.execute("CREATE TABLE t2 (x REAL)")
            # NOTE: The SQLite ADBC driver infers Arrow types from observed data.
            # Without any rows, some declared TEXT columns can come back as int64.
            # Insert at least one row to make schema inference deterministic.
            cursor.execute("INSERT INTO t VALUES (1, 'hello')")
            cursor.execute("INSERT INTO t2 VALUES (1.5)")
            cursor.execute(
                """
                CREATE TABLE t_types (
                    int_col INTEGER,
                    real_col REAL,
                    text_col TEXT,
                    blob_col BLOB,
                    bool_col BOOLEAN,
                    date_col DATE,
                    time_col TIME,
                    ts_col TIMESTAMP,
                    numeric_col NUMERIC
                )
                """
            )
            cursor.execute(
                """
                INSERT INTO t_types (
                    int_col,
                    real_col,
                    text_col,
                    blob_col,
                    bool_col,
                    date_col,
                    time_col,
                    ts_col,
                    numeric_col
                ) VALUES (
                    1,
                    1.5,
                    'hello',
                    X'0001',
                    1,
                    '2026-01-04',
                    '12:34:56',
                    '2026-01-04 12:34:56',
                    123.45
                )
                """
            )
            conn.commit()
        finally:
            cursor.close()
        # Default database/schema should be readable (may be None on SQLite).
        _ = engine.get_default_database()
        _ = engine.get_default_schema()
        # When schemas are excluded, we should not return any schema information.
        dbs = engine.get_databases(
            include_schemas=False,
            include_tables=True,
            include_table_details=True,
        )
        assert all(db.schemas == [] for db in dbs)
        # When tables are excluded, we should not return any tables.
        dbs = engine.get_databases(
            include_schemas=True,
            include_tables=False,
            include_table_details=True,
        )
        assert all(
            all(schema.tables == [] for schema in db.schemas) for db in dbs
        )
        # When table details are excluded, tables should have no columns.
        dbs_without_details = engine.get_databases(
            include_schemas=True,
            include_tables=True,
            include_table_details=False,
        )
        db_name, schema_name = _find_table_location(
            databases=dbs_without_details, table_name="t"
        )
        db_name_types, schema_name_types = _find_table_location(
            databases=dbs_without_details, table_name="t_types"
        )
        assert _table_summary(
            get_table(
                databases=dbs_without_details,
                database_name=db_name,
                schema_name=schema_name,
                table_name="t",
            )
        ) == {
            "name": "t",
            "type": "table",
            "num_columns": None,
            "column_names": [],
            "column_types": {},
        }
        assert _table_summary(
            get_table(
                databases=dbs_without_details,
                database_name=db_name_types,
                schema_name=schema_name_types,
                table_name="t_types",
            )
        ) == {
            "name": "t_types",
            "type": "table",
            "num_columns": None,
            "column_names": [],
            "column_types": {},
        }
        # With details enabled, columns should be populated.
        dbs_with_details = engine.get_databases(
            include_schemas=True,
            include_tables=True,
            include_table_details=True,
        )
        assert _table_summary(
            get_table(
                databases=dbs_with_details,
                database_name=db_name,
                schema_name=schema_name,
                table_name="t",
            )
        ) == {
            "name": "t",
            "type": "table",
            "num_columns": 2,
            "column_names": ["id", "name"],
            "column_types": {"id": "integer", "name": "string"},
        }
        types_summary = _table_summary(
            get_table(
                databases=dbs_with_details,
                database_name=db_name_types,
                schema_name=schema_name_types,
                table_name="t_types",
            )
        )
        assert types_summary == {
            "name": "t_types",
            "type": "table",
            "num_columns": 9,
            "column_names": [
                "int_col",
                "real_col",
                "text_col",
                "blob_col",
                "bool_col",
                "date_col",
                "time_col",
                "ts_col",
                "numeric_col",
            ],
            "column_types": {
                "int_col": "integer",
                "real_col": "number",
                "text_col": "string",
                "blob_col": "string",
                "bool_col": "integer",
                "date_col": "string",
                "time_col": "string",
                "ts_col": "string",
                "numeric_col": "number",
            },
        }
        tables = engine.get_tables_in_schema(
            schema=schema_name, database=db_name, include_table_details=False
        )
        assert sorted_table_summaries(tables) == [
            {
                "name": "t",
                "type": "table",
                "num_columns": None,
                "column_names": [],
                "column_types": {},
            },
            {
                "name": "t2",
                "type": "table",
                "num_columns": None,
                "column_names": [],
                "column_types": {},
            },
            {
                "name": "t_types",
                "type": "table",
                "num_columns": None,
                "column_names": [],
                "column_types": {},
            },
        ]
        tables_with_details = engine.get_tables_in_schema(
            schema=schema_name, database=db_name, include_table_details=True
        )
        assert sorted_table_summaries(tables_with_details) == [
            {
                "name": "t",
                "type": "table",
                "num_columns": 2,
                "column_names": ["id", "name"],
                "column_types": {"id": "integer", "name": "string"},
            },
            {
                "name": "t2",
                "type": "table",
                "num_columns": 1,
                "column_names": ["x"],
                "column_types": {"x": "number"},
            },
            types_summary,
        ]
    finally:
        conn.close()
def test_adbc_sqlite_driver_execute_polars(monkeypatch) -> None:
    """Smoke test marimo ADBC engine against the real ADBC SQLite driver."""
    pytest.importorskip(
        "pyarrow", reason="ADBC DBAPI wrapper requires PyArrow"
    )
    pl = pytest.importorskip("polars")
    adbc_sqlite_dbapi = pytest.importorskip("adbc_driver_sqlite.dbapi")
    # Explicitly use a fresh in-memory database per test run.
    conn: Any = adbc_sqlite_dbapi.connect(":memory:")
    try:
        engine = AdbcDBAPIEngine(conn)
        assert engine.source == "adbc"
        assert engine.dialect == "sqlite"
        assert AdbcDBAPIEngine.is_compatible(conn) is True
        engine.execute("CREATE TABLE t (id INTEGER)")
        engine.execute("INSERT INTO t VALUES (1), (2)")
        from marimo._sql.get_engines import engine_to_data_source_connection
        # Ensure we can produce a DataSourceConnection for the UI/kernel message path.
        connection = engine_to_data_source_connection(
            VariableName("adbc_sqlite"), engine
        )
        assert connection.source == "adbc"
        assert connection.dialect == "sqlite"
        assert connection.name == "adbc_sqlite"
        assert "sqlite" in connection.display_name.lower()
        # Force polars output and verify the round-trip values.
        monkeypatch.setattr(engine, "sql_output_format", lambda: "polars")
        df = engine.execute("SELECT id FROM t ORDER BY id")
        assert isinstance(df, pl.DataFrame)
        assert df.to_dicts() == [{"id": 1}, {"id": 2}]
    finally:
        conn.close()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_adbc.py",
"license": "Apache License 2.0",
"lines": 750,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/issues/7345-dataframe-pivot-transform.py | import marimo
__generated_with = "0.18.4"
app = marimo.App(width="columns")
@app.cell(column=0)
def _():
    # Shared imports for every column of this smoke-test notebook.
    import marimo as mo
    import polars as pl
    import pandas as pd
    import ibis as ib
    from vega_datasets import data
    return data, ib, mo, pd, pl
@app.cell
def _(data, ib, pd, pl):
    # Materialize the same cars dataset in all three dataframe libraries.
    cars = data.cars()
    df_pandas = pd.DataFrame(cars)
    df_polars = pl.DataFrame(cars)
    df_ibis = ib.memtable(cars)
    return df_ibis, df_pandas, df_polars
@app.cell(column=1)
def _(df_pandas, mo):
    # Interactive transform UI over the pandas frame.
    mo.ui.dataframe(df_pandas)
    return
@app.cell
def _(df_polars, mo):
    # Interactive transform UI over the polars frame.
    mo.ui.dataframe(df_polars)
    return
@app.cell
def _(df_ibis, mo):
    # Interactive transform UI over the ibis table.
    mo.ui.dataframe(df_ibis)
    return
@app.cell(column=2)
def _(df_pandas):
    # Pandas version of the pivot transform: mean Acceleration per Year
    # (rows) and Origin (columns), rows ordered by Year.
    df_pandas_next = df_pandas
    df_pandas_next = df_pandas_next.pivot_table(
        index=["Year"],
        columns=["Origin"],
        values=["Acceleration"],
        aggfunc="mean",
        sort=False,
        fill_value=None,
    ).sort_index(axis=0)
    # Flatten the (value, origin) MultiIndex columns into single strings
    # suffixed with the aggregation name.
    df_pandas_next.columns = [
        f"{'_'.join(map(str, col)).strip()}_mean"
        if isinstance(col, tuple)
        else f"{col}_mean"
        for col in df_pandas_next.columns
    ]
    df_pandas_next = df_pandas_next.reset_index()
    df_pandas_next
    return
@app.cell
def _(df_polars):
    # Polars version: pivot Origin into columns of mean Acceleration.
    df_polars_next = df_polars
    df_polars_next = df_polars_next.pivot(
        on=["Origin"],
        index=["Year"],
        values=["Acceleration"],
        aggregate_function="mean",
    ).sort(["Year"])
    # Strip the {"..."} decoration polars puts on pivoted column names and
    # normalize to the Acceleration_<origin>_mean convention.
    replacements = str.maketrans({"{": "", "}": "", '"': "", ",": "_"})
    df_polars_next = df_polars_next.rename(
        lambda col: f"Acceleration_{col.translate(replacements)}_mean"
        if col not in ["Year"]
        else col
    )
    df_polars_next
    return
@app.cell
def _(df_ibis):
    # Ibis version: pivot_wider with a mean aggregation.
    df_ibis_next = df_ibis
    df_ibis_next = df_ibis_next.pivot_wider(
        names_from=["Origin"],
        id_cols=["Year"],
        values_from=["Acceleration"],
        names_prefix="Acceleration",
        values_agg="mean",
    )
    # Rename mapping is new_name=old_name: append the _mean suffix to
    # every pivoted (non-Year) column.
    df_ibis_next = df_ibis_next.rename(
        **{
            f"{col}_mean": col
            for col in df_ibis_next.columns
            if col not in ["Year"]
        }
    )
    df_ibis_next.to_polars()
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/7345-dataframe-pivot-transform.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:examples/ai/chat/pydantic-ai-chat.py | import marimo
__generated_with = "0.19.2"
app = marimo.App(width="medium")
with app.setup(hide_code=True):
import marimo as mo
import os
import httpx
from pydantic_ai import Agent, RunContext, BinaryImage
from pydantic_ai.models.google import GoogleModel, GoogleModelSettings
from pydantic_ai.providers.google import GoogleProvider
from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings
from pydantic_ai.providers.anthropic import AnthropicProvider
from pydantic_ai.providers.openai import OpenAIProvider
from pydantic_ai.models.openai import (
OpenAIResponsesModel,
OpenAIResponsesModelSettings,
)
from pydantic import BaseModel
from pydantic_ai.models import Model
from pydantic_ai.settings import ModelSettings
@app.cell(hide_code=True)
def _():
    # Intro copy rendered as markdown (string content is user-facing UI text).
    mo.md(r"""
    # Pydantic-AI 🤖
    [Pydantic AI](https://ai.pydantic.dev/) is a modern framework to build applications that interact with LLMs. Key features include
    * ✨ **Structured Outputs:** Force LLMs to return clean, structured data (like JSON) that conforms to your Pydantic models.
    * ✅ **Validation & Type-Safety:** Use Pydantic's validation and Python's type hints to ensure data integrity and make your code robust.
    * 🧠 **Reasoning & Tool Use:** Define output models for complex reasoning tasks and reliable function calling (tool use).
    The following example uses [`mo.ui.chat`](https://docs.marimo.io/api/inputs/chat.html#marimo.ui.chat) to build a chatbot backed by Pydantic-AI.
    """)
    return
@app.cell(hide_code=True)
def _():
    # Feature toggles and the model picker for the demo.
    structured = mo.ui.checkbox(label="Structured outputs")
    thinking = mo.ui.checkbox(label="Reasoning")
    fetch_dog_tool = mo.ui.checkbox(label="Fetch dog pics tool")
    models = mo.ui.dropdown(
        options={
            "Gemini 2.5 Flash": "gemini-2.5-flash",
            "Claude Haiku 4.5": "claude-haiku-4-5",
            "GPT 5 Nano": "gpt-5-nano",
            "GPT 5 (multimodal)": "gpt-5",
        },
        value="Gemini 2.5 Flash",
        label="Choose a model",
    )
    mo.vstack([models, structured, thinking, fetch_dog_tool])
    return fetch_dog_tool, models, structured, thinking
@app.cell(hide_code=True)
def _(models):
    # Map the selected model to its provider's API-key environment variable.
    model_name = models.value
    if model_name.startswith("gemini"):
        env_key = "GOOGLE_AI_API_KEY"
    elif model_name.startswith("claude"):
        env_key = "ANTHROPIC_API_KEY"
    elif model_name.startswith("gpt"):
        env_key = "OPENAI_API_KEY"
    else:
        raise NotImplementedError
    os_key = os.environ.get(env_key)
    input_key = mo.ui.text(label="API key", kind="password")
    # Only display the manual key input when the env var is not set.
    input_key if not os_key else None
    return input_key, model_name, os_key
@app.function
def get_model(
    model_name: str, thinking: bool, api_key: str
) -> tuple[Model, ModelSettings]:
    """Build the pydantic-ai model + settings for the selected provider.

    Dispatches on the lowercase model-name prefix (gemini/claude/gpt)
    and enables each provider's reasoning ("thinking") configuration
    when requested. Raises NotImplementedError for unknown prefixes.
    """
    model_name = model_name.lower()
    if model_name.startswith("gemini"):
        provider = GoogleProvider(api_key=api_key)
        model = GoogleModel(model_name, provider=provider)
        settings = GoogleModelSettings(
            google_thinking_config={
                "include_thoughts": True if thinking else False
            }
        )
    elif model_name.startswith("claude"):
        model = AnthropicModel(
            model_name, provider=AnthropicProvider(api_key=api_key)
        )
        settings = AnthropicModelSettings(
            anthropic_thinking={"type": "enabled", "budget_tokens": 1024}
            if thinking
            else {"type": "disabled"},
        )
    elif model_name.startswith("gpt"):
        model = OpenAIResponsesModel(
            model_name, provider=OpenAIProvider(api_key=api_key)
        )
        settings = (
            OpenAIResponsesModelSettings(
                openai_reasoning_effort="low",
                openai_reasoning_summary="detailed",
            )
            if thinking
            else OpenAIResponsesModelSettings()
        )
    else:
        raise NotImplementedError
    return model, settings
@app.cell(hide_code=True)
def _(
    fetch_dog_tool,
    input_key,
    model_name,
    models,
    os_key,
    structured,
    thinking,
):
    # Output schema used when the "Structured outputs" checkbox is on.
    class CodeOutput(BaseModel):
        code: str
        time_complexity: str
        memory_complexity: str
        algorithm_complexity: int
    # Prefer a key typed into the UI; fall back to the environment.
    api_key = input_key.value or os_key
    model, settings = get_model(models.value, thinking.value, api_key)
    output_type = str
    if "image" in model_name or model_name == "gpt-5":
        output_type = BinaryImage | str
    elif structured.value:
        output_type = [CodeOutput, str]
    agent = Agent(
        model,
        output_type=output_type,
        instructions="You are a senior software engineer experienced in Python, React and Typescript.",
        model_settings=settings,
    )
    # Optionally register a tool the model can call for dog picture URLs.
    if fetch_dog_tool.value:
        @agent.tool
        def fetch_dog_picture_url(ctx: RunContext[str]) -> str:
            """Returns URL of dog picture"""
            response_json = httpx.get(
                "https://dog.ceo/api/breeds/image/random"
            ).json()
            if "message" in response_json:
                return response_json["message"]
            else:
                return "Error fetching dog URL"
    return (agent,)
@app.cell
def _(agent):
    # Wrap the pydantic-ai agent in marimo's chat UI element.
    chatbot = mo.ui.chat(
        mo.ai.llm.pydantic_ai(agent),
        prompts=[
            "Write the fibonacci function in Python",
            "Who is Ada Lovelace?",
            "What is marimo?",
            "I need dogs (render as markdown)",
        ],
        allow_attachments=True,
        show_configuration_controls=True,
    )
    chatbot
    return (chatbot,)
@app.cell
def _(chatbot):
    # Display the chat element's current value reactively.
    chatbot.value
    return
@app.cell
def _():
    mo.md("""
    ## Custom Model Sample
    """)
    return
@app.cell
def _():
    import uuid
    import pydantic_ai.ui.vercel_ai.response_types as vercel
    async def custom_model(messages, config):
        """Hand-rolled chat model that streams Vercel-AI protocol chunks.

        Demonstrates reasoning, a simulated tool call, text deltas, and
        an embedded image — all yielded as response-type chunks.
        """
        # Generate unique IDs for message parts
        reasoning_id = f"reasoning_{uuid.uuid4().hex}"
        text_id = f"text_{uuid.uuid4().hex}"
        tool_id = f"tool_{uuid.uuid4().hex}"
        # --- Stream reasoning/thinking ---
        yield vercel.StartStepChunk()
        yield vercel.ReasoningStartChunk(id=reasoning_id)
        yield vercel.ReasoningDeltaChunk(
            id=reasoning_id,
            delta="The user is asking about Van Gogh. I should fetch information about his famous works.",
        )
        yield vercel.ReasoningEndChunk(id=reasoning_id)
        # --- Stream tool call to fetch artwork information ---
        # NOTE(review): the "available" chunk is emitted before the
        # start/delta chunks here; confirm the frontend tolerates this
        # ordering (protocols usually expect start -> delta -> available).
        yield vercel.ToolInputAvailableChunk(
            tool_call_id=tool_id,
            tool_name="search_artwork",
            input={"artist": "Vincent van Gogh", "limit": 1},
        )
        yield vercel.ToolInputStartChunk(
            tool_call_id=tool_id, tool_name="search_artwork"
        )
        yield vercel.ToolInputDeltaChunk(
            tool_call_id=tool_id,
            input_text_delta='{"artist": "Vincent van Gogh", "limit": 1}',
        )
        # --- Tool output (simulated artwork search result) ---
        yield vercel.ToolOutputAvailableChunk(
            tool_call_id=tool_id,
            output={
                "title": "The Starry Night",
                "year": 1889,
                "museum": "Museum of Modern Art",
            },
        )
        # --- Stream text response ---
        yield vercel.TextStartChunk(id=text_id)
        yield vercel.TextDeltaChunk(
            id=text_id,
            delta="One of Vincent van Gogh's most iconic works is 'The Starry Night', painted in 1889. Here's the painting:\n\n",
        )
        # --- Embed the artwork image ---
        yield vercel.FileChunk(
            url="https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg/1280px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg",
            media_type="image/jpeg",
        )
        yield vercel.TextDeltaChunk(
            id=text_id,
            delta="\nThis masterpiece is now housed at the Museum of Modern Art in New York and remains one of the most recognizable paintings in the world.",
        )
        yield vercel.TextEndChunk(id=text_id)
        yield vercel.FinishStepChunk()
        yield vercel.FinishChunk()
    custom_chat = mo.ui.chat(custom_model)
    custom_chat
    return (custom_chat,)
@app.cell
def _(custom_chat):
    # Display the custom chat element's current value reactively.
    custom_chat.value
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/ai/chat/pydantic-ai-chat.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_ai/test_ai_types.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Literal, cast
import pytest
from marimo._ai._types import (
ChatAttachment,
ChatMessage,
ChatPart,
FilePart,
ReasoningPart,
TextPart,
ToolInvocationPart,
)
from marimo._dependencies.dependencies import DependencyManager
class TestChatMessageCreate:
    """Tests for ChatMessage.create class method.

    Covers dict-to-dataclass part coercion, the optional
    part_validator_class hook, and drop/skip behavior for parts that
    fail validation.
    """
    def test_basic_usage_without_validator(self):
        """Test create without part_validator_class."""
        parts: list[Any] = [{"type": "text", "text": "Hello"}]
        message = ChatMessage.create(
            role="user",
            message_id="msg-123",
            content="Hello world",
            parts=parts,
        )
        # Dict parts are coerced to the built-in TextPart dataclass.
        assert message == ChatMessage(
            role="user",
            id="msg-123",
            content="Hello world",
            parts=[TextPart(type="text", text="Hello")],
        )
    def test_with_empty_parts(self):
        """Test create with empty parts list."""
        message = ChatMessage.create(
            role="assistant",
            message_id="msg-456",
            content="Response",
            parts=[],
        )
        assert message == ChatMessage(
            role="assistant",
            id="msg-456",
            content="Response",
            parts=[],
        )
    def test_with_none_content(self):
        """Test create with None content."""
        parts: list[Any] = [{"type": "text", "text": "Content in parts"}]
        message = ChatMessage.create(
            role="user",
            message_id="msg-789",
            content=None,
            parts=parts,
        )
        assert message == ChatMessage(
            role="user",
            id="msg-789",
            content=None,
            parts=[TextPart(type="text", text="Content in parts")],
        )
    def test_parts_already_correct_type_with_validator(self):
        """Test with parts that are already the correct type."""
        @dataclass
        class MockPart:
            type: Literal["mock"]
            value: str
        existing_part = MockPart(type="mock", value="test")
        message = ChatMessage.create(
            role="user",
            message_id="msg-123",
            content=None,
            parts=[cast(ChatPart, existing_part)],
            part_validator_class=MockPart,
        )
        # The part should be kept as-is since it's already the right type
        assert message == ChatMessage(
            role="user",
            id="msg-123",
            content=None,
            parts=[existing_part],  # type: ignore
        )
    @pytest.mark.skipif(
        not DependencyManager.pydantic_ai.has(),
        reason="pydantic_ai is not installed",
    )
    def test_dict_parts_with_dataclass_validator(self):
        """Test converting dict parts using a dataclass validator."""
        dict_part: dict[str, str] = {"type": "text", "text": "Hello from dict"}
        message = ChatMessage.create(
            role="user",
            message_id="msg-123",
            content=None,
            parts=[cast(ChatPart, dict_part)],
            part_validator_class=TextPart,
        )
        assert message == ChatMessage(
            role="user",
            id="msg-123",
            content=None,
            parts=[TextPart(type="text", text="Hello from dict")],
        )
    @pytest.mark.skipif(
        not DependencyManager.pydantic_ai.has(),
        reason="pydantic_ai is not installed",
    )
    def test_mixed_parts_with_validator(self):
        """Test with a mix of already-typed and dict parts."""
        @dataclass
        class MockPart:
            type: Literal["mock"]
            value: str
        existing_part = MockPart(type="mock", value="existing")
        dict_part: dict[str, str] = {"type": "mock", "value": "from_dict"}
        message = ChatMessage.create(
            role="user",
            message_id="msg-123",
            content=None,
            parts=[cast(ChatPart, existing_part), cast(ChatPart, dict_part)],
            part_validator_class=MockPart,
        )
        assert message == ChatMessage(
            role="user",
            id="msg-123",
            content=None,
            parts=[existing_part, MockPart(type="mock", value="from_dict")],  # type: ignore
        )
    def test_invalid_dict_part_is_skipped(self):
        """Test that invalid dict parts are skipped during validation."""
        @dataclass
        class StrictPart:
            type: Literal["strict"]
            required_field: str
        # This dict is missing required_field
        invalid_dict: dict[str, str] = {"type": "strict"}
        valid_part = StrictPart(type="strict", required_field="valid")
        message = ChatMessage.create(
            role="user",
            message_id="msg-123",
            content=None,
            parts=[cast(ChatPart, valid_part), cast(ChatPart, invalid_dict)],
            part_validator_class=StrictPart,
        )
        # Invalid dict should be skipped, only valid part remains
        assert message.parts is not None
        assert len(message.parts) == 1
        assert message.parts[0] is valid_part
    def test_non_dict_non_validator_part_is_dropped_with_logging(self):
        """Test that parts that are neither validator instances nor dicts are dropped."""
        @dataclass
        class ExpectedPart:
            type: Literal["expected"]
            value: str
        @dataclass
        class UnexpectedPart:
            type: Literal["unexpected"]
            data: int
        valid_part = ExpectedPart(type="expected", value="valid")
        unexpected_part = UnexpectedPart(type="unexpected", data=42)
        message = ChatMessage.create(
            role="user",
            message_id="msg-123",
            content=None,
            parts=[
                cast(ChatPart, valid_part),
                cast(ChatPart, unexpected_part),
            ],
            part_validator_class=ExpectedPart,
        )
        # Unexpected part (different dataclass) should be dropped, only valid part remains
        assert message.parts is not None
        assert len(message.parts) == 1
        assert message.parts[0] is valid_part
    def test_all_roles(self):
        """Test create with all valid roles."""
        for role in ["user", "assistant", "system"]:
            message = ChatMessage.create(
                role=role,  # type: ignore
                message_id=f"msg-{role}",
                content=f"{role} message",
                parts=[],
            )
            assert message == ChatMessage(
                role=role,  # type: ignore
                id=f"msg-{role}",
                content=f"{role} message",
                parts=[],
            )
class TestChatMessageFromRequestWithPydanticAI:
    """Tests for create with pydantic-ai types.

    All tests are skipped when pydantic-ai is absent; imports are kept
    inside the test bodies for the same reason.
    """
    @pytest.mark.skipif(
        not DependencyManager.pydantic_ai.has(),
        reason="pydantic-ai is not installed",
    )
    def test_with_ui_message_part_validator(self):
        """Test create with UIMessagePart from pydantic-ai."""
        from pydantic_ai.ui.vercel_ai.request_types import (
            TextUIPart,
            UIMessagePart,
        )
        dict_part: dict[str, str] = {
            "type": "text",
            "text": "Hello pydantic-ai",
        }
        message = ChatMessage.create(
            role="user",
            message_id="msg-pydantic",
            content=None,
            parts=[cast(ChatPart, dict_part)],
            part_validator_class=UIMessagePart,
        )
        # The dict is validated into the concrete TextUIPart subtype.
        assert message == ChatMessage(
            role="user",
            id="msg-pydantic",
            content=None,
            parts=cast(
                Any, [TextUIPart(type="text", text="Hello pydantic-ai")]
            ),
        )
    @pytest.mark.skipif(
        not DependencyManager.pydantic_ai.has(),
        reason="pydantic-ai is not installed",
    )
    def test_with_existing_ui_message_part(self):
        """Test that existing UIMessagePart instances are kept as-is."""
        from pydantic_ai.ui.vercel_ai.request_types import (
            TextUIPart,
            UIMessagePart,
        )
        existing_part = TextUIPart(type="text", text="Already typed")
        message = ChatMessage.create(
            role="assistant",
            message_id="msg-existing",
            content=None,
            parts=[cast(ChatPart, existing_part)],
            part_validator_class=UIMessagePart,
        )
        # Check identity - the same object should be kept
        assert message.parts is not None
        assert len(message.parts) == 1
        assert message.parts[0] is existing_part
    @pytest.mark.skipif(
        not DependencyManager.pydantic_ai.has(),
        reason="pydantic-ai is not installed",
    )
    def test_with_multiple_ui_part_types(self):
        """Test create with multiple UI part types."""
        from pydantic_ai.ui.vercel_ai.request_types import (
            TextUIPart,
            UIMessagePart,
        )
        parts: list[dict[str, str]] = [
            {"type": "text", "text": "First part"},
            {"type": "text", "text": "Second part"},
        ]
        message = ChatMessage.create(
            role="user",
            message_id="msg-multi",
            content=None,
            parts=cast(Any, parts),
            part_validator_class=UIMessagePart,
        )
        assert message == ChatMessage(
            role="user",
            id="msg-multi",
            content=None,
            parts=cast(
                Any,
                [
                    TextUIPart(type="text", text="First part"),
                    TextUIPart(type="text", text="Second part"),
                ],
            ),
        )
    @pytest.mark.skipif(
        not DependencyManager.pydantic_ai.has(),
        reason="pydantic-ai is not installed",
    )
    def test_with_reasoning_ui_part(self):
        """Test create with reasoning UI part."""
        from pydantic_ai.ui.vercel_ai.request_types import (
            ReasoningUIPart,
            UIMessagePart,
        )
        parts: list[dict[str, str]] = [
            {"type": "reasoning", "text": "Let me think about this..."},
        ]
        message = ChatMessage.create(
            role="assistant",
            message_id="msg-reasoning",
            content=None,
            parts=cast(Any, parts),
            part_validator_class=UIMessagePart,
        )
        assert message == ChatMessage(
            role="assistant",
            id="msg-reasoning",
            content=None,
            parts=cast(
                Any,
                [
                    ReasoningUIPart(
                        type="reasoning", text="Let me think about this..."
                    )
                ],
            ),
        )
class TestChatMessagePostInit:
    """Tests for ChatMessage.__post_init__ part conversion.

    Unlike the create() tests above, these exercise the coercion that
    happens automatically when constructing ChatMessage directly.
    """
    def test_converts_text_part_dict(self):
        """Test that text part dicts are converted to TextPart."""
        message = ChatMessage(
            role="user",
            content="Hello",
            parts=[cast(ChatPart, {"type": "text", "text": "Part text"})],
        )
        assert message == ChatMessage(
            role="user",
            content="Hello",
            parts=[TextPart(type="text", text="Part text")],
        )
    def test_converts_reasoning_part_dict(self):
        """Test that reasoning part dicts are converted to ReasoningPart."""
        message = ChatMessage(
            role="assistant",
            content=None,
            parts=[
                cast(ChatPart, {"type": "reasoning", "text": "Thinking..."})
            ],
        )
        assert message == ChatMessage(
            role="assistant",
            content=None,
            parts=[ReasoningPart(type="reasoning", text="Thinking...")],
        )
    def test_converts_file_part_dict(self):
        """Test that file part dicts are converted to FilePart."""
        message = ChatMessage(
            role="user",
            content=None,
            parts=[
                cast(
                    ChatPart,
                    {
                        "type": "file",
                        "media_type": "image/png",
                        "url": "data:image/png;base64,abc123",
                    },
                ),
            ],
        )
        assert message == ChatMessage(
            role="user",
            content=None,
            parts=[
                FilePart(
                    type="file",
                    media_type="image/png",
                    url="data:image/png;base64,abc123",
                )
            ],
        )
    def test_converts_tool_invocation_part_dict(self):
        """Test that tool invocation part dicts are converted."""
        message = ChatMessage(
            role="assistant",
            content=None,
            parts=cast(
                Any,
                [
                    {
                        "type": "tool-call",
                        "tool_call_id": "call-123",
                        "state": "output-available",
                        "input": {"query": "test"},
                        "output": {"result": "success"},
                    }
                ],
            ),
        )
        assert message == ChatMessage(
            role="assistant",
            content=None,
            parts=[
                ToolInvocationPart(
                    type="tool-call",
                    tool_call_id="call-123",
                    state="output-available",
                    input={"query": "test"},
                    output={"result": "success"},
                )
            ],
        )
    def test_keeps_already_typed_parts(self):
        """Test that already-typed parts are kept as-is."""
        text_part = TextPart(type="text", text="Already typed")
        message = ChatMessage(
            role="user",
            content="Hello",
            parts=[text_part],
        )
        # Check identity - the same object should be kept
        assert message.parts is not None
        assert len(message.parts) == 1
        assert message.parts[0] is text_part
    def test_handles_invalid_parts_gracefully(self):
        """Test that invalid parts are dropped gracefully."""
        message = ChatMessage(
            role="user",
            content="Hello",
            parts=cast(
                Any,
                [
                    {"type": "text", "text": "Valid"},
                    {"type": "unknown_type", "data": "invalid"},
                ],
            ),
        )
        # Valid part should be kept, invalid should be dropped
        assert message == ChatMessage(
            role="user",
            content="Hello",
            parts=[TextPart(type="text", text="Valid")],
        )
    def test_with_none_parts(self):
        """Test that None parts is handled."""
        message = ChatMessage(role="user", content="Hello", parts=[])
        assert message == ChatMessage(role="user", content="Hello", parts=[])
    def test_with_empty_parts(self):
        """Test that empty parts list stays empty."""
        message = ChatMessage(role="user", content="Hello", parts=[])
        assert message == ChatMessage(role="user", content="Hello", parts=[])
    def test_id_defaults_to_empty_string(self):
        """Test that id defaults to empty string when not provided."""
        message = ChatMessage(role="user", content="Hello")
        assert message.id == ""
    def test_id_can_be_set_explicitly(self):
        """Test that id can be set explicitly."""
        message = ChatMessage(role="user", content="Hello", id="custom-id")
        assert message.id == "custom-id"
class TestChatMessageDict:
    """Tests for ChatMessage serialization via dict()."""
    def test_parts_and_attachments_serialized_to_dict(self):
        """Parts and attachments are converted to dicts via asdict."""
        message = ChatMessage(
            role="user",
            content="Hello",
            id="msg-1",
            parts=[TextPart(type="text", text="Part text")],
            attachments=[
                ChatAttachment(url="https://example.com/file.pdf", name="doc"),
            ],
        )
        # dict[str, Any](message) presumably relies on ChatMessage
        # iterating as (key, value) pairs — verify against the dataclass.
        out = dict[str, Any](message)
        # content_type is not passed above, so it must be inferred
        # (from the .pdf URL, per the expected value below).
        assert out == {
            "role": "user",
            "id": "msg-1",
            "content": "Hello",
            "parts": [{"type": "text", "text": "Part text"}],
            "attachments": [
                {
                    "url": "https://example.com/file.pdf",
                    "name": "doc",
                    "content_type": "application/pdf",
                }
            ],
            "metadata": None,
        }
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/test_ai_types.py",
"license": "Apache License 2.0",
"lines": 468,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_paths.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
import tempfile
from pathlib import Path
import pytest
from marimo._utils.paths import normalize_path, pretty_path
def test_normalize_path_makes_absolute() -> None:
    """A relative input must come back as an absolute path under CWD."""
    rel = Path("foo") / "bar"
    normalized = normalize_path(rel)
    assert normalized == Path.cwd() / "foo" / "bar"
    assert normalized.is_absolute()
def test_normalize_path_removes_parent_and_current_refs() -> None:
    """Test that .. and . components are normalized."""
    path_with_refs = Path("foo") / "bar" / ".." / "baz" / "." / "qux"
    result = normalize_path(path_with_refs)
    assert result.is_absolute()
    assert ".." not in str(result)
    # ".." collapses bar, "." is dropped: foo/baz/qux.
    assert str(result) == str(Path.cwd() / "foo" / "baz" / "qux")
def test_normalize_path_handles_already_absolute() -> None:
    """Test that absolute paths stay absolute and get normalized."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)
        # Create a path with .. in absolute path
        absolute_with_parents = temp_path / "foo" / "bar" / ".." / "baz"
        result = normalize_path(absolute_with_parents)
        assert result.is_absolute()
        assert ".." not in str(result)
        # Should resolve to temp_path/foo/baz
        assert result == temp_path / "foo" / "baz"
def test_normalize_path_does_not_resolve_symlinks() -> None:
    """Test that symlinks are NOT resolved (key security feature)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)
        # Create a real directory
        real_dir = temp_path / "real_directory"
        real_dir.mkdir()
        # Create a file in the real directory
        real_file = real_dir / "test.txt"
        real_file.write_text("test content")
        # Create a symlink to the real directory
        symlink_dir = temp_path / "symlinked_directory"
        try:
            symlink_dir.symlink_to(real_dir)
        except OSError:
            # On Windows, creating symlinks might require admin privileges
            pytest.skip("Cannot create symlinks on this system")
        # Path through symlink
        path_through_symlink = symlink_dir / "test.txt"
        # normalize_path should NOT resolve the symlink
        normalized = normalize_path(path_through_symlink)
        # Should contain the symlink name, not the real directory
        assert "symlinked_directory" in str(normalized)
        assert "real_directory" not in str(normalized)
        # Compare with resolve() which DOES follow symlinks
        resolved = path_through_symlink.resolve()
        assert "real_directory" in str(resolved)
        # They should be different
        assert normalized != resolved
def test_normalize_path_skips_cloudpathlib_paths() -> None:
    """Test that cloud paths from cloudpathlib are returned unchanged.
    os.path.normpath corrupts URI schemes like s3:// by reducing them to s3:/
    This test verifies that cloudpathlib paths bypass normalization.
    """
    from unittest.mock import MagicMock
    # Create a mock cloud path that simulates cloudpathlib.S3Path
    # We can't inherit from PurePosixPath because it normalizes on construction
    mock_path = MagicMock()
    # The module name is what normalize_path presumably inspects to
    # detect cloudpathlib types — confirm against its implementation.
    mock_path.__class__.__module__ = "cloudpathlib.s3.s3path"
    # normalize_path should return the path unchanged
    result = normalize_path(mock_path)
    # Should be the exact same object, returned unchanged
    assert result is mock_path
def test_normalize_path_does_not_skip_regular_paths() -> None:
    """Test that regular Path objects are still normalized properly."""
    # Ensure the cloudpathlib check doesn't affect regular paths
    relative_path = Path("foo") / "bar"
    result = normalize_path(relative_path)
    # Should be converted to absolute
    assert result.is_absolute()
    # Module should be pathlib, not cloudpathlib
    assert result.__class__.__module__.startswith("pathlib")
class TestPrettyPath:
    """Tests for pretty_path function.

    pretty_path shortens absolute paths relative to CWD (or an explicit
    base_dir) for display purposes.
    """
    def test_empty_filename_returns_empty(self) -> None:
        """Test that empty string returns empty string."""
        assert pretty_path("") == ""
    def test_relative_path_unchanged(self) -> None:
        """Test that relative paths are returned unchanged."""
        assert pretty_path("foo/bar.py") == "foo/bar.py"
    def test_absolute_path_inside_cwd_becomes_relative(self) -> None:
        """Test that absolute paths inside CWD become relative."""
        cwd = Path.cwd()
        abs_path = str(cwd / "subdir" / "file.py")
        result = pretty_path(abs_path)
        # Should be relative to CWD
        assert result == os.path.join("subdir", "file.py")
    def test_absolute_path_outside_cwd_stays_absolute(self) -> None:
        """Test that absolute paths outside CWD stay absolute."""
        with tempfile.TemporaryDirectory() as tmp:
            # This is outside CWD
            abs_path = os.path.join(tmp, "file.py")
            result = pretty_path(abs_path)
            # Should stay absolute or have .. prefix (depending on location)
            # Either way, it should contain the filename
            assert "file.py" in result
    def test_base_dir_makes_path_relative_to_it(self) -> None:
        """Test that base_dir parameter makes paths relative to that dir."""
        with tempfile.TemporaryDirectory() as tmp:
            subdir = os.path.join(tmp, "subdir")
            os.makedirs(subdir)
            notebook = os.path.join(subdir, "notebook.py")
            Path(notebook).touch()
            # With base_dir pointing to subdir, should return just filename
            result = pretty_path(notebook, base_dir=subdir)
            assert result == "notebook.py"
    def test_base_dir_with_nested_path(self) -> None:
        """Test base_dir with nested subdirectories."""
        with tempfile.TemporaryDirectory() as tmp:
            nested = os.path.join(tmp, "a", "b", "c")
            os.makedirs(nested)
            notebook = os.path.join(nested, "notebook.py")
            Path(notebook).touch()
            # With base_dir pointing to parent, should return relative path
            result = pretty_path(notebook, base_dir=tmp)
            expected = os.path.join("a", "b", "c", "notebook.py")
            assert result == expected
    def test_base_dir_file_outside_falls_back_to_cwd_relative(self) -> None:
        """Test that files outside base_dir fall back to CWD-relative."""
        with tempfile.TemporaryDirectory() as base:
            with tempfile.TemporaryDirectory() as other:
                # File is in 'other', not in 'base'
                file_path = os.path.join(other, "outside.py")
                Path(file_path).touch()
                result = pretty_path(file_path, base_dir=base)
                # Should contain the filename (exact path depends on CWD)
                assert "outside.py" in result
    def test_base_dir_accepts_path_object(self) -> None:
        """Test that base_dir accepts Path objects."""
        with tempfile.TemporaryDirectory() as tmp:
            subdir = Path(tmp) / "subdir"
            subdir.mkdir()
            notebook = subdir / "notebook.py"
            notebook.touch()
            # Should work with Path object
            result = pretty_path(str(notebook), base_dir=subdir)
            assert result == "notebook.py"
    def test_base_dir_accepts_string(self) -> None:
        """Test that base_dir accepts string paths."""
        with tempfile.TemporaryDirectory() as tmp:
            subdir = os.path.join(tmp, "subdir")
            os.makedirs(subdir)
            notebook = os.path.join(subdir, "notebook.py")
            Path(notebook).touch()
            # Should work with string
            result = pretty_path(notebook, base_dir=subdir)
            assert result == "notebook.py"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_paths.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/ipython/mpl_plot.py | import marimo
__generated_with = "0.18.4"
app = marimo.App()
@app.cell
def _():
    import matplotlib.pyplot as plt
    plt.plot([1, 2])
    # plt.gca() is the cell's last expression, so marimo renders the
    # current axes (and hence the plot) as the cell's output.
    plt.gca()
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ipython/mpl_plot.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/pdf_export/basic_example.py | import marimo
__generated_with = "0.19.6"
app = marimo.App(auto_download=["ipynb"])
@app.cell
def _():
    import marimo as mo
    import matplotlib.pyplot as plt
    import plotly.express as px
    return mo, plt, px
@app.cell(hide_code=True)
def _(mo):
    mo.md(r"""
    This notebook contains:
    - Markdown
    - a marimo UI element
    - a Plotly scatter plot
    - a matplotlib plot
    - a dataframe output
    This Markdown also contains some math: $f(x)$
    $$
    g(x) = 0
    $$
    """)
    return
@app.cell
def _(mo):
    mo.ui.text(label="$f(x)$")
    return
@app.cell
def _():
    # How are errors serialized?
    # Intentional ZeroDivisionError to exercise error rendering in exports.
    1 / 0
    return
@app.cell
def _():
    print("This is console output")
    return
@app.cell
def _(px):
    df = px.data.iris()
    fig = px.scatter(
        df, x="sepal_width", y="sepal_length", color="species", symbol="species"
    )
    fig
    return (df,)
@app.cell
def _(df, plt):
    plt.scatter(x=df["sepal_width"], y=df["sepal_length"])
    print("Plot in console output")
    plt.show()
    return
@app.cell
def _(df, plt):
    plt.scatter(x=df["sepal_width"], y=df["sepal_length"])
    return
@app.cell
def _(mo):
    # Styled HTML rendered through an iframe to test HTML export.
    iframe = mo.iframe("""
    <div style='border: 2px solid #4CAF50; padding: 20px; border-radius: 10px; background: linear-gradient(135deg, #e0f7fa, #80deea);'>
    <h1 style='color: #00796b; font-family: Arial, sans-serif;'>Welcome to My Interactive Frame</h1>
    <p style='font-size: 16px; color: #004d40;'>This is a more complex div element with styled borders, gradients, and custom fonts.</p>
    <ul style='color: #004d40;'>
    <li>Feature 1: Stylish layout</li>
    <li>Feature 2: Custom fonts and colors</li>
    <li>Feature 3: Rounded corners and padding</li>
    </ul>
    <button style='background-color: #00796b; color: white; border: none; padding: 10px 20px; border-radius: 5px; cursor: pointer;' onclick="alert('Button clicked!')">Click Me</button>
    </div>
    """)
    iframe
    return (iframe,)
@app.cell
def _(iframe, mo):
    mo.Html(iframe.text)
    return
@app.cell
def _(mo):
    mo.iframe(
        '<iframe src="demo_iframe.html" height="200" width="300" title="Iframe Example"></iframe>'
    )
    return
@app.cell
def _(mo):
    mo.iframe(
        '<iframe id="inlineFrameExample" title="Inline Frame Example" width="800" height="600" src="https://www.openstreetmap.org/export/embed.html?bbox=-0.004017949104309083%2C51.47612752641776%2C0.00030577182769775396%2C51.478569861898606&layer=mapnik"></iframe>'
    )
    return
@app.cell
def _(df):
    df
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/pdf_export/basic_example.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:examples/third_party/plotly/heatmap.py | import marimo
__generated_with = "0.18.4"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    import plotly.graph_objects as go
    import numpy as np
    # 1. Create your heatmap data
    z = np.random.rand(10, 10) * 100
    # 2. Create the plotly figure
    fig = go.Figure(
        data=go.Heatmap(
            z=z,
            x=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"],
            y=[
                "Mon",
                "Tue",
                "Wed",
                "Thu",
                "Fri",
                "Sat",
                "Sun",
                "Mon2",
                "Tue2",
                "Wed2",
            ],
            colorscale="Viridis",
        )
    )
    # 3. Wrap it with mo.ui.plotly
    heatmap = mo.ui.plotly(fig)
    heatmap
    return heatmap, mo
@app.cell
def _(heatmap, mo):
    # 4. Display it
    # heatmap.value reflects the user's current selection in the chart.
    mo.md(f"""
    ## Sales Heatmap
    {heatmap}
    ### Selected Cells:
    {heatmap.value}
    """)
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/third_party/plotly/heatmap.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/issues/7668_altair_concat_interactions.py | import marimo
__generated_with = "0.18.4"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    import altair as alt
    import numpy as np
    import pyarrow
    import pandas as pd
    return alt, mo, np, pd
@app.cell
def _(alt, np, pd):
    # Overview+detail pattern: the lower chart's brush drives the
    # upper chart's x-domain.
    source = pd.DataFrame({"x": np.random.rand(100), "y": np.random.rand(100)})
    brush = alt.selection_interval(encodings=["x"], value={"x": [0, 0.5]})
    base = (
        alt.Chart(source, width=600, height=200)
        .mark_area()
        .encode(x="x:Q", y="y:Q")
    )
    upper = base.encode(alt.X("x:Q").scale(domain=brush))
    lower = base.properties(height=60).add_params(brush)
    # Brush interaction across views works:
    upper & lower
    return lower, upper
@app.cell
def _(lower, mo, upper):
    # Brush interaction (no longer) breaks:
    mo.ui.altair_chart(upper & lower)
    return
@app.cell
def _(alt, lower, mo, upper):
    # Brush interaction works:
    mo.ui.altair_chart(alt.vconcat(upper & lower))
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/7668_altair_concat_interactions.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/issues/7661_point_selection.py | import marimo
__generated_with = "0.18.4"
app = marimo.App()
@app.cell
def _():
    import marimo as mo
    import pandas as pd
    import numpy as np
    import altair as alt
    return alt, mo, np, pd
@app.cell
def _(np, pd):
    # Create mock data for the heatmap
    # Seeded so the demo renders the same grid every run.
    np.random.seed(42)
    rows = 10
    cols = 8
    x_labels = [f"X{i}" for i in range(1, cols + 1)]
    y_labels = [f"Y{j}" for j in range(1, rows + 1)]
    data = []
    for y in y_labels:
        for x in x_labels:
            value = np.random.randint(0, 100)
            data.append({"X": x, "Y": y, "Value": value})
    heatmap_df = pd.DataFrame(data)
    heatmap_df
    return (heatmap_df,)
@app.cell
def _(alt, heatmap_df, mo):
    # Create an interactive Altair heatmap with multi-point selection
    selection = alt.selection_point(
        fields=["X", "Y"], bind="legend", toggle=True, clear="click"
    )
    heatmap_chart = (
        alt.Chart(heatmap_df)
        .mark_rect()
        .encode(
            x=alt.X("X:O", title="X Label"),
            y=alt.Y("Y:O", title="Y Label"),
            color=alt.Color("Value:Q", scale=alt.Scale(scheme="viridis")),
            tooltip=["X", "Y", "Value"],
        )
        .add_params(selection)
        .encode(opacity=alt.condition(selection, alt.value(1), alt.value(0.3)))
    )
    heatmap_ui = mo.ui.altair_chart(heatmap_chart)
    heatmap_ui
    return (heatmap_ui,)
@app.cell
def _(heatmap_ui):
    # Display the selected points from the heatmap
    heatmap_ui.value
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/7661_point_selection.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/appcomp/setup_embed_setup/main.py | # Copyright 2026 Marimo. All rights reserved.
"""
Smoke test for setup cell embedding.
This tests the scenario where:
- Main app has a setup cell
- Inner app has a setup cell
- Main app embeds the inner app
Bug: The main app's setup cell should NOT rerun when the inner app is embedded.
"""
import marimo
__generated_with = "0.18.4"
app = marimo.App(width="medium")
with app.setup:
    # The tracker counts how many times this setup block executes;
    # the test below asserts it stays at 1 after embedding.
    setup_tracker = {"count": 0}
    setup_tracker["count"] += 1
    import marimo as mo
    from inner_with_setup import app as inner_app
@app.cell(hide_code=True)
def _():
    mo.md("""
    # Setup Cell Embedding Smoke Test
    This notebook tests embedding an app with a setup cell from a notebook
    that also has a setup cell
    The outer app's setup cell should only run **once**, not rerun when
    the inner app is embedded.
    """)
    return
@app.cell
def _():
    setup_count_before_embed = setup_tracker["count"]
    mo.md(f"Setup run count before embed: **{setup_count_before_embed}**")
    return (setup_count_before_embed,)
@app.cell
async def _():
    # Embed the inner app (which also has a setup cell)
    result = await inner_app.embed()
    result.output
    return (result,)
@app.cell
def _(result):
    mo.md(f"""
    Inner app value: **{result.defs.get('inner_value')}**
    """)
    return
@app.cell
def _(setup_count_before_embed):
    setup_count_after_embed = setup_tracker["count"]
    # This is the key assertion - setup should still be 1
    status = "PASS" if setup_count_after_embed == 1 else "FAIL"
    color = "green" if status == "PASS" else "red"
    mo.md(f"""
    ## Test Result: <span style="color: {color}">{status}</span>
    - Setup run count before embed: {setup_count_before_embed}
    - Setup run count after embed: {setup_count_after_embed}
    Expected setup to run exactly **1** time.
    """)
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/appcomp/setup_embed_setup/main.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_runtime/virtual_file/storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Protocol
from marimo._utils.platform import is_pyodide
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator
# Default chunk size for streaming reads (read_chunked).
DEFAULT_CHUNK_SIZE = 256 * 1024  # 256KB
if not is_pyodide():
    # the shared_memory module is not supported in the Pyodide distribution
    from multiprocessing import shared_memory
class VirtualFileStorage(Protocol):
    """Protocol for virtual file storage backends.

    Backends map string keys (virtual file names) to byte buffers. Reads
    take an explicit ``byte_length`` because some backends may over-allocate
    their underlying buffer, so the stored length is tracked by the caller.
    """
    def store(self, key: str, buffer: bytes) -> None:
        """Store buffer data by key."""
        ...
    def read(self, key: str, byte_length: int) -> bytes:
        """Read the first ``byte_length`` bytes stored under ``key``.

        Raises:
            KeyError: If key not found
        """
        ...
    def read_chunked(
        self,
        key: str,
        byte_length: int,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ) -> Iterator[bytes]:
        """Read buffer data by key in chunks.

        Yields chunks of bytes, avoiding allocating the full buffer at once.
        Useful for streaming large files over HTTP.

        Raises:
            KeyError: If key not found
        """
        ...
    def remove(self, key: str) -> None:
        """Remove stored data by key."""
        ...
    def shutdown(self, keys: Iterable[str] | None = None) -> None:
        """Clean up storage resources.

        Args:
            keys: If provided, only remove these keys. If None, clear all.
                Implementations may ignore this if storage is not shared.
        """
        ...
    def has(self, key: str) -> bool:
        """Check if key exists in storage."""
        ...
    @property
    def stale(self) -> bool:
        """Whether storage has been fully shut down and is no longer usable."""
        ...
class SharedMemoryStorage(VirtualFileStorage):
    """Storage backend using multiprocessing shared memory.

    Used in `edit` mode when kernel runs in a separate process: the kernel
    stores a buffer under a name (the key), and other processes attach to
    the segment by that same name.
    """

    def __init__(self) -> None:
        # key -> SharedMemory handle. Handles are kept so segments can be
        # unlink()ed later (and, on Windows, to keep the segment alive).
        self._storage: dict[str, shared_memory.SharedMemory] = {}
        self._shutting_down = False
        self._stale = False

    @property
    def stale(self) -> bool:
        return self._stale

    def store(self, key: str, buffer: bytes) -> None:
        """Copy ``buffer`` into a shared-memory segment named ``key``.

        Storing the same key twice is a no-op (the first buffer wins).
        """
        if key in self._storage:
            return  # Already stored
        # Immediately writes the contents of the file to an in-memory
        # buffer; not lazy.
        #
        # To retrieve the buffer from another process, use:
        #
        # ```
        # try:
        #     shm = shared_memory.SharedMemory(name=key)
        #     buffer_contents = bytes(shm.buf)
        # except FileNotFoundError:
        #     # virtual file was removed
        # ```
        shm = shared_memory.SharedMemory(
            name=key,
            create=True,
            # SharedMemory requires size > 0 on creation; allocate at least
            # one byte so storing an empty buffer doesn't raise ValueError.
            # Reads are bounded by the caller-supplied byte_length, so the
            # padding byte is never observed.
            size=max(len(buffer), 1),
        )
        shm.buf[: len(buffer)] = buffer
        # we can safely close this shm, since we don't need to access its
        # buffer; we do need to keep it around so we can unlink it later
        if sys.platform != "win32":
            # don't call close() on Windows, due to a bug in the Windows
            # Python implementation. On Windows, close() actually unlinks
            # (destroys) the shared_memory:
            # https://stackoverflow.com/questions/63713241/segmentation-fault-using-python-shared-memory/63717188#63717188
            shm.close()
        # We have to keep a reference to the shared memory to prevent it from
        # being destroyed on Windows
        self._storage[key] = shm

    def read(self, key: str, byte_length: int) -> bytes:
        """Return the first ``byte_length`` bytes of segment ``key``.

        Works cross-process: attaches by name, so the segment need not have
        been stored by this instance.

        Raises:
            KeyError: If no segment named ``key`` exists.
            RuntimeError: On Pyodide, where shared memory is unavailable.
        """
        if is_pyodide():
            raise RuntimeError(
                "Shared memory is not supported on this platform"
            )
        # Read from shared memory by name (works cross-process)
        shm = None
        try:
            shm = shared_memory.SharedMemory(name=key)
            # Slice the memoryview first, then copy — avoids allocating
            # a bytes object for the entire buffer when only a prefix
            # is needed.
            buffer_contents = bytes(shm.buf[:byte_length])
        except FileNotFoundError as err:
            raise KeyError(f"Virtual file not found: {key}") from err
        finally:
            if shm is not None:
                shm.close()
        return buffer_contents

    def read_chunked(
        self,
        key: str,
        byte_length: int,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ) -> Iterator[bytes]:
        """Yield the first ``byte_length`` bytes of ``key`` in chunks.

        Raises:
            KeyError: If no segment named ``key`` exists.
            RuntimeError: On Pyodide, where shared memory is unavailable.
        """
        if is_pyodide():
            raise RuntimeError(
                "Shared memory is not supported on this platform"
            )
        shm = None
        view = None
        try:
            shm = shared_memory.SharedMemory(name=key)
            view = shm.buf[:byte_length]
            for i in range(0, byte_length, chunk_size):
                yield bytes(view[i : i + chunk_size])
        except FileNotFoundError as err:
            raise KeyError(f"Virtual file not found: {key}") from err
        finally:
            # Release the memoryview before closing the shared memory,
            # otherwise close() fails with "cannot close exported pointers".
            if view is not None:
                view.release()
            if shm is not None:
                shm.close()

    def remove(self, key: str) -> None:
        """Unlink (destroy) the segment for ``key``, if this instance owns it."""
        if key in self._storage:
            if sys.platform == "win32":
                # On Windows, close() must precede unlink(); on POSIX the
                # handle was already closed in store().
                self._storage[key].close()
            self._storage[key].unlink()
            del self._storage[key]

    def shutdown(self, keys: Iterable[str] | None = None) -> None:
        """Destroy all owned segments; idempotent and reentrancy-safe."""
        del keys  # Always clear all - not shared
        if self._shutting_down:
            return
        try:
            self._shutting_down = True
            for shm in self._storage.values():
                if sys.platform == "win32":
                    shm.close()
                shm.unlink()
            self._storage.clear()
        finally:
            # Mark stale so the manager drops this backend on next access.
            self._stale = True
            self._shutting_down = False

    def has(self, key: str) -> bool:
        """Whether this instance owns a segment for ``key``."""
        return key in self._storage
class InMemoryStorage(VirtualFileStorage):
    """Storage backend using simple in-memory dictionary.

    Used in `run` mode when kernel runs in the same process as the server.
    """

    def __init__(self) -> None:
        # Plain process-local mapping of key -> raw bytes.
        self._storage: dict[str, bytes] = {}

    @property
    def stale(self) -> bool:
        # A process-local dict can always be reused; it is never shared.
        return False

    def store(self, key: str, buffer: bytes) -> None:
        """Store (or overwrite) the bytes for ``key``."""
        self._storage[key] = buffer

    def read(self, key: str, byte_length: int) -> bytes:
        """Return the first ``byte_length`` bytes stored under ``key``."""
        try:
            data = self._storage[key]
        except KeyError:
            raise KeyError(f"Virtual file not found: {key}") from None
        return data[:byte_length]

    def read_chunked(
        self,
        key: str,
        byte_length: int,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ) -> Iterator[bytes]:
        """Yield the first ``byte_length`` bytes of ``key``, ``chunk_size``
        bytes at a time."""
        if key not in self._storage:
            raise KeyError(f"Virtual file not found: {key}")
        data = self._storage[key]
        # Never read past the end of the stored buffer.
        limit = min(byte_length, len(data))
        offset = 0
        while offset < limit:
            upper = min(offset + chunk_size, limit)
            yield data[offset:upper]
            offset = upper

    def remove(self, key: str) -> None:
        """Drop ``key`` if present; no-op otherwise."""
        self._storage.pop(key, None)

    def shutdown(self, keys: Iterable[str] | None = None) -> None:
        """Remove the given keys, or everything when ``keys`` is None."""
        if keys is None:
            self._storage.clear()
            return
        for k in keys:
            self.remove(k)

    def has(self, key: str) -> bool:
        """Whether ``key`` is currently stored."""
        return key in self._storage
class VirtualFileStorageManager:
    """Singleton manager for virtual file storage access."""

    _instance: VirtualFileStorageManager | None = None
    _storage: VirtualFileStorage | None = None

    def __new__(cls) -> VirtualFileStorageManager:
        # Lazily create the one shared instance.
        instance = cls._instance
        if instance is None:
            instance = super().__new__(cls)
            cls._instance = instance
        return instance

    @property
    def storage(self) -> VirtualFileStorage | None:
        """The active backend, dropped automatically once it goes stale."""
        current = self._storage
        if current is not None and current.stale:
            self._storage = None
            return None
        return current

    @storage.setter
    def storage(self, value: VirtualFileStorage | None) -> None:
        self._storage = value

    def read(self, filename: str, byte_length: int) -> bytes:
        """Read from storage, with cross-process fallback for EDIT mode server.

        Raises:
            KeyError: If file not found
            RuntimeError: When ``SharedMemoryStorage`` is used on the
                Pyodide platform.
        """
        backend = self.storage
        if backend is not None:
            return backend.read(filename, byte_length)
        # Never initialized, so we are in a separate thread from the
        # kernel: attach to the named shared-memory segment directly.
        return SharedMemoryStorage().read(filename, byte_length)

    def read_chunked(
        self,
        filename: str,
        byte_length: int,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ) -> Iterator[bytes]:
        """Read from storage in chunks, with cross-process fallback.

        Yields chunks of bytes for streaming, so the whole file is never
        held in memory as a single bytes object.

        Raises:
            KeyError: If file not found
            RuntimeError: When ``SharedMemoryStorage`` is used on the
                Pyodide platform.
        """
        backend = self.storage
        if backend is None:
            # Cross-process fallback, mirroring read().
            backend = SharedMemoryStorage()
        yield from backend.read_chunked(filename, byte_length, chunk_size)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/virtual_file/storage.py",
"license": "Apache License 2.0",
"lines": 248,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_runtime/test_storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._runtime.virtual_file.storage import (
InMemoryStorage,
SharedMemoryStorage,
VirtualFileStorageManager,
)
class TestInMemoryStorageReadChunked:
    """Chunked-read behavior of the in-memory backend."""
    def test_read_chunked_basic(self) -> None:
        storage = InMemoryStorage()
        storage.store("test_key", b"hello world")
        chunks = list(storage.read_chunked("test_key", 11))
        assert b"".join(chunks) == b"hello world"
    def test_read_chunked_with_byte_length(self) -> None:
        storage = InMemoryStorage()
        storage.store("test_key", b"hello world")
        # Only the first 5 bytes are requested.
        chunks = list(storage.read_chunked("test_key", 5))
        assert b"".join(chunks) == b"hello"
    def test_read_chunked_multiple_chunks(self) -> None:
        storage = InMemoryStorage()
        data = b"a" * 100
        storage.store("test_key", data)
        # Use a small chunk size to force multiple chunks
        chunks = list(storage.read_chunked("test_key", 100, chunk_size=30))
        assert b"".join(chunks) == data
        assert len(chunks) == 4  # 30 + 30 + 30 + 10
    def test_read_chunked_nonexistent_raises_keyerror(self) -> None:
        storage = InMemoryStorage()
        # read_chunked is a generator, so the error surfaces on iteration.
        with pytest.raises(KeyError, match="Virtual file not found"):
            list(storage.read_chunked("nonexistent", 10))
    def test_read_chunked_chunk_sizes(self) -> None:
        """Verify each chunk is at most chunk_size bytes."""
        storage = InMemoryStorage()
        data = b"x" * 250
        storage.store("test_key", data)
        chunk_size = 64
        chunks = list(
            storage.read_chunked("test_key", 250, chunk_size=chunk_size)
        )
        # Every chunk except possibly the last is exactly chunk_size bytes.
        for chunk in chunks[:-1]:
            assert len(chunk) == chunk_size
        # Last chunk may be smaller
        assert len(chunks[-1]) <= chunk_size
        assert b"".join(chunks) == data
class TestInMemoryStorage:
    """Basic store/read/remove/shutdown behavior of the in-memory backend."""
    def test_store_and_read(self) -> None:
        storage = InMemoryStorage()
        storage.store("test_key", b"hello world")
        result = storage.read("test_key", 11)
        assert result == b"hello world"
    def test_read_with_byte_length(self) -> None:
        storage = InMemoryStorage()
        storage.store("test_key", b"hello world")
        # byte_length truncates the returned buffer.
        result = storage.read("test_key", 5)
        assert result == b"hello"
    def test_read_nonexistent_raises_keyerror(self) -> None:
        storage = InMemoryStorage()
        with pytest.raises(KeyError, match="Virtual file not found"):
            storage.read("nonexistent", 10)
    def test_store_overwrites(self) -> None:
        # Unlike SharedMemoryStorage, the in-memory backend overwrites.
        storage = InMemoryStorage()
        storage.store("test_key", b"original")
        storage.store("test_key", b"updated")
        result = storage.read("test_key", 7)
        assert result == b"updated"
    def test_remove(self) -> None:
        storage = InMemoryStorage()
        storage.store("test_key", b"hello")
        assert storage.has("test_key")
        storage.remove("test_key")
        assert not storage.has("test_key")
    def test_remove_nonexistent_no_error(self) -> None:
        storage = InMemoryStorage()
        storage.remove("nonexistent")  # Should not raise
    def test_has(self) -> None:
        storage = InMemoryStorage()
        assert not storage.has("test_key")
        storage.store("test_key", b"hello")
        assert storage.has("test_key")
    def test_shutdown_clears_storage(self) -> None:
        storage = InMemoryStorage()
        storage.store("key1", b"data1")
        storage.store("key2", b"data2")
        assert storage.has("key1")
        assert storage.has("key2")
        # shutdown() with no keys clears everything.
        storage.shutdown()
        assert not storage.has("key1")
        assert not storage.has("key2")
class TestSharedMemoryStorage:
    """Shared-memory backend tests.

    Each test calls shutdown() in a finally block so the OS-level shared
    memory segments are always unlinked, even on assertion failure.
    """
    def test_store_and_read(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.store("marimo_test_1", b"hello world")
            result = storage.read("marimo_test_1", 11)
            assert result == b"hello world"
        finally:
            storage.shutdown()
    def test_read_with_byte_length(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.store("marimo_test_2", b"hello world")
            result = storage.read("marimo_test_2", 5)
            assert result == b"hello"
        finally:
            storage.shutdown()
    def test_read_nonexistent_raises_keyerror(self) -> None:
        storage = SharedMemoryStorage()
        try:
            with pytest.raises(KeyError, match="Virtual file not found"):
                storage.read("nonexistent_key_xyz", 10)
        finally:
            storage.shutdown()
    def test_store_duplicate_skipped(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.store("marimo_test_3", b"original")
            # Second store should be a no-op (not overwrite)
            storage.store("marimo_test_3", b"updated_longer")
            result = storage.read("marimo_test_3", 8)
            assert result == b"original"
        finally:
            storage.shutdown()
    def test_remove(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.store("marimo_test_4", b"hello")
            assert storage.has("marimo_test_4")
            storage.remove("marimo_test_4")
            assert not storage.has("marimo_test_4")
        finally:
            storage.shutdown()
    def test_remove_nonexistent_no_error(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.remove("nonexistent")  # Should not raise
        finally:
            storage.shutdown()
    def test_has(self) -> None:
        storage = SharedMemoryStorage()
        try:
            assert not storage.has("marimo_test_5")
            storage.store("marimo_test_5", b"hello")
            assert storage.has("marimo_test_5")
        finally:
            storage.shutdown()
    def test_shutdown_clears_storage(self) -> None:
        storage = SharedMemoryStorage()
        storage.store("marimo_test_6", b"data1")
        storage.store("marimo_test_7", b"data2")
        assert storage.has("marimo_test_6")
        assert storage.has("marimo_test_7")
        storage.shutdown()
        assert not storage.has("marimo_test_6")
        assert not storage.has("marimo_test_7")
    def test_cross_process_read(self) -> None:
        """Test that shared memory can be read by name from a fresh instance."""
        storage1 = SharedMemoryStorage()
        try:
            storage1.store("marimo_test_cross", b"cross process data")
            # Create a new instance and read by name
            storage2 = SharedMemoryStorage()
            result = storage2.read("marimo_test_cross", 18)
            assert result == b"cross process data"
        finally:
            # storage1 owns the segment, so only it needs shutting down.
            storage1.shutdown()
    def test_shutdown_is_reentrant(self) -> None:
        """Test that shutdown can be called multiple times safely."""
        storage = SharedMemoryStorage()
        storage.store("marimo_test_8", b"data")
        storage.shutdown()
        storage.shutdown()  # Should not raise
    def test_read_chunked_basic(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.store("marimo_chunk_1", b"hello world")
            chunks = list(storage.read_chunked("marimo_chunk_1", 11))
            assert b"".join(chunks) == b"hello world"
        finally:
            storage.shutdown()
    def test_read_chunked_with_byte_length(self) -> None:
        storage = SharedMemoryStorage()
        try:
            storage.store("marimo_chunk_2", b"hello world")
            chunks = list(storage.read_chunked("marimo_chunk_2", 5))
            assert b"".join(chunks) == b"hello"
        finally:
            storage.shutdown()
    def test_read_chunked_multiple_chunks(self) -> None:
        storage = SharedMemoryStorage()
        try:
            data = b"a" * 100
            storage.store("marimo_chunk_3", data)
            chunks = list(
                storage.read_chunked("marimo_chunk_3", 100, chunk_size=30)
            )
            assert b"".join(chunks) == data
            assert len(chunks) == 4  # 30 + 30 + 30 + 10
        finally:
            storage.shutdown()
    def test_read_chunked_nonexistent_raises_keyerror(self) -> None:
        storage = SharedMemoryStorage()
        try:
            # Consume the generator so the KeyError actually surfaces.
            with pytest.raises(KeyError, match="Virtual file not found"):
                list(storage.read_chunked("nonexistent_chunk", 10))
        finally:
            storage.shutdown()
    def test_read_chunked_cross_process(self) -> None:
        """Test chunked read works across fresh instances."""
        storage1 = SharedMemoryStorage()
        try:
            data = b"cross process chunked"
            storage1.store("marimo_chunk_cross", data)
            storage2 = SharedMemoryStorage()
            chunks = list(
                storage2.read_chunked(
                    "marimo_chunk_cross", len(data), chunk_size=5
                )
            )
            assert b"".join(chunks) == data
        finally:
            storage1.shutdown()
    def test_read_chunked_data_integrity(self) -> None:
        """Test that chunked read produces identical data to regular read."""
        storage = SharedMemoryStorage()
        try:
            data = bytes(range(256)) * 4  # 1024 bytes of varied data
            storage.store("marimo_chunk_integ", data)
            regular = storage.read("marimo_chunk_integ", len(data))
            chunked = b"".join(
                storage.read_chunked(
                    "marimo_chunk_integ", len(data), chunk_size=100
                )
            )
            assert regular == chunked == data
        finally:
            storage.shutdown()
class TestVirtualFileStorageManager:
    """Manager tests.

    The manager is a process-wide singleton, so every test saves and
    restores the previous backend to avoid leaking state between tests.
    """
    def test_singleton(self) -> None:
        manager1 = VirtualFileStorageManager()
        manager2 = VirtualFileStorageManager()
        assert manager1 is manager2
    def test_storage_property(self) -> None:
        manager = VirtualFileStorageManager()
        original_storage = manager.storage
        try:
            storage = InMemoryStorage()
            manager.storage = storage
            assert manager.storage is storage
        finally:
            manager.storage = original_storage
    def test_read_with_storage(self) -> None:
        manager = VirtualFileStorageManager()
        original_storage = manager.storage
        try:
            storage = InMemoryStorage()
            storage.store("test_file", b"test data")
            manager.storage = storage
            result = manager.read("test_file", 9)
            assert result == b"test data"
        finally:
            manager.storage = original_storage
    def test_read_without_storage_falls_back_to_shared_memory(self) -> None:
        manager = VirtualFileStorageManager()
        original_storage = manager.storage
        # Store data in shared memory directly
        shm_storage = SharedMemoryStorage()
        try:
            shm_storage.store("marimo_fallback_test", b"fallback data")
            # Set manager storage to None to trigger fallback
            manager.storage = None
            result = manager.read("marimo_fallback_test", 13)
            assert result == b"fallback data"
        finally:
            manager.storage = original_storage
            shm_storage.shutdown()
    def test_read_chunked_with_storage(self) -> None:
        manager = VirtualFileStorageManager()
        original_storage = manager.storage
        try:
            storage = InMemoryStorage()
            storage.store("test_file", b"test data chunked")
            manager.storage = storage
            chunks = list(manager.read_chunked("test_file", 17))
            assert b"".join(chunks) == b"test data chunked"
        finally:
            manager.storage = original_storage
    def test_read_chunked_falls_back_to_shared_memory(self) -> None:
        manager = VirtualFileStorageManager()
        original_storage = manager.storage
        shm_storage = SharedMemoryStorage()
        try:
            shm_storage.store("marimo_fb_chunk", b"fallback chunked")
            manager.storage = None
            chunks = list(
                manager.read_chunked("marimo_fb_chunk", 16, chunk_size=4)
            )
            assert b"".join(chunks) == b"fallback chunked"
            assert len(chunks) == 4  # 16 / 4 = 4 chunks
        finally:
            manager.storage = original_storage
            shm_storage.shutdown()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_storage.py",
"license": "Apache License 2.0",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_server/app_defaults.py | # Copyright 2024 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
from marimo._config.config import ExportType, SqlOutputType, WidthType
if TYPE_CHECKING:
from marimo._config.manager import MarimoConfigManager
@dataclass
class AppDefaults:
    """Default configuration for app file managers."""

    # Each field defaults to None, meaning "no explicit default configured".
    width: Optional[WidthType] = None
    auto_download: Optional[list[ExportType]] = None
    sql_output: Optional[SqlOutputType] = None

    @staticmethod
    def from_config_manager(config: MarimoConfigManager) -> AppDefaults:
        """Derive app defaults from a resolved marimo config manager."""
        defaults = AppDefaults(
            width=config.default_width,
            auto_download=config.default_auto_download,
            sql_output=config.default_sql_output,
        )
        return defaults
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/app_defaults.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/files/directory_scanner.py | # Copyright 2024 Marimo. All rights reserved.
from __future__ import annotations
import os
import time
from pathlib import Path
from typing import Optional
from marimo import _loggers
from marimo._server.files.os_file_system import natural_sort_file
from marimo._server.models.files import FileInfo
from marimo._utils.http import HTTPException, HTTPStatus
from marimo._utils.marimo_path import MarimoPath
LOGGER = _loggers.marimo_logger()
def is_marimo_app(full_path: str) -> bool:
    """Detect whether ``full_path`` is a marimo app.

    Rules:
    - Markdown (``.md``/``.qmd``) files are marimo apps if the first
      512 bytes contain ``marimo-version:``.
    - Python (``.py``) files are marimo apps if the header (first 512
      bytes) contains both ``marimo.App`` and ``import marimo``.
    - If the header contains ``# /// script``, the full file is scanned
      for the same Python markers, to handle large script headers.
    - Any errors while reading result in ``False``.
    """
    HEADER_BYTES = 512

    def has_python_markers(content: bytes) -> bool:
        # Both markers must appear for a Python file to qualify.
        return b"marimo.App" in content and b"import marimo" in content

    try:
        path = MarimoPath(full_path)
        # Cheap extension check first, so unrelated files cost no I/O.
        if not (path.is_python() or path.is_markdown()):
            return False
        with open(full_path, "rb") as f:
            header = f.read(HEADER_BYTES)
        if path.is_markdown():
            return b"marimo-version:" in header
        # Python file from here on.
        if has_python_markers(header):
            return True
        if b"# /// script" in header:
            # A large inline-script metadata block may push the markers
            # past the first 512 bytes; scan the whole file instead.
            return has_python_markers(path.read_bytes())
        return False
    except Exception as e:
        LOGGER.debug("Error reading file %s: %s", full_path, e)
        return False
class DirectoryScanner:
    """Scans directories for marimo files with filtering and limits.

    Features:
    - Recursive directory traversal (max depth)
    - File type filtering (.py, .md, .qmd)
    - Skip common directories (venv, node_modules, etc.)
    - File count limits and timeouts
    - Marimo app detection
    """

    MAX_DEPTH = 5
    MAX_FILES = 1000
    MAX_EXECUTION_TIME = 10  # seconds
    SKIP_DIRS = {
        # Python virtual environments
        "venv",
        ".venv",
        ".virtualenv",
        "__pypackages__",
        # Python cache and build
        "__pycache__",
        "build",
        "dist",
        "eggs",
        # Package management
        "node_modules",
        "site-packages",
        # Testing and tooling
        ".tox",
        ".nox",
        ".pytest_cache",
        ".mypy_cache",
        # Version control
        ".git",
    }

    def __init__(
        self,
        directory: str,
        include_markdown: bool = False,
        max_files: Optional[int] = None,
        max_depth: Optional[int] = None,
        max_execution_time: Optional[int] = None,
    ):
        """Initialize DirectoryScanner.

        Args:
            directory: The directory to scan
            include_markdown: Whether to include .md and .qmd files
            max_files: Maximum number of files to find
            max_depth: Maximum directory depth to recurse
            max_execution_time: Maximum time in seconds before timeout
        """
        self.directory = directory
        self.include_markdown = include_markdown
        self.max_files = max_files if max_files is not None else self.MAX_FILES
        self.max_depth = max_depth if max_depth is not None else self.MAX_DEPTH
        self.max_execution_time = (
            max_execution_time
            if max_execution_time is not None
            else self.MAX_EXECUTION_TIME
        )
        # Stores partial results in case of timeout
        self.partial_results: list[FileInfo] = []

    @property
    def allowed_extensions(self) -> tuple[str, ...]:
        """Get allowed file extensions based on settings."""
        if self.include_markdown:
            return (".py", ".md", ".qmd")
        return (".py",)

    def scan(self) -> list[FileInfo]:
        """Scan directory and return file tree.

        Returns:
            List of FileInfo with nested children

        Raises:
            HTTPException: On timeout with REQUEST_TIMEOUT status.
                On timeout, partial_results will contain files found so far.
        """
        start_time = time.time()
        file_count = 0  # total marimo files found so far, across the tree
        self.partial_results = []  # Reset partial results

        def recurse(
            directory: str, depth: int = 0
        ) -> Optional[list[FileInfo]]:
            nonlocal file_count
            if depth > self.max_depth:
                return None
            # Check file limit
            if file_count >= self.max_files:
                LOGGER.warning(
                    f"Reached maximum file limit ({self.max_files})"
                )
                return None
            if time.time() - start_time > self.max_execution_time:
                # partial_results already holds everything found so far;
                # callers can surface it after catching this exception.
                raise HTTPException(
                    status_code=HTTPStatus.REQUEST_TIMEOUT,
                    detail=f"Request timed out: Loading workspace files took too long. Showing first {file_count} files.",  # noqa: E501
                )
            try:
                entries = os.scandir(directory)
            except OSError as e:
                LOGGER.debug("OSError scanning directory: %s", str(e))
                return None
            files: list[FileInfo] = []
            folders: list[FileInfo] = []
            # Close the scandir iterator deterministically: it holds an open
            # directory handle, and leaving it to the GC leaks the handle
            # (and raises ResourceWarning) — in particular when the timeout
            # exception propagates through this frame mid-iteration.
            with entries:
                for entry in entries:
                    # Skip hidden files and directories
                    if entry.name.startswith("."):
                        continue
                    if entry.is_dir():
                        if (
                            entry.name in self.SKIP_DIRS
                            or depth == self.max_depth
                        ):
                            continue
                        children = recurse(entry.path, depth + 1)
                        # Only surface folders that contain marimo files.
                        if children:
                            entry_path = Path(entry.path)
                            relative_path = str(
                                entry_path.relative_to(self.directory)
                            )
                            folders.append(
                                FileInfo(
                                    id=relative_path,
                                    path=relative_path,
                                    name=entry.name,
                                    is_directory=True,
                                    is_marimo_file=False,
                                    children=children,
                                )
                            )
                    elif entry.name.endswith(self.allowed_extensions):
                        if is_marimo_app(entry.path):
                            file_count += 1
                            entry_path = Path(entry.path)
                            relative_path = str(
                                entry_path.relative_to(self.directory)
                            )
                            file_info = FileInfo(
                                id=relative_path,
                                path=relative_path,
                                name=entry.name,
                                is_directory=False,
                                is_marimo_file=True,
                                last_modified=entry.stat().st_mtime,
                            )
                            files.append(file_info)
                            # Also add to partial results for timeout recovery
                            self.partial_results.append(file_info)
                            # Check if we've reached the limit
                            if file_count >= self.max_files:
                                break
            # Sort folders then files, based on natural sort (alpha, then num)
            return sorted(folders, key=natural_sort_file) + sorted(
                files, key=natural_sort_file
            )

        return recurse(self.directory) or []
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/files/directory_scanner.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/files/path_validator.py | # Copyright 2024 Marimo. All rights reserved.
from __future__ import annotations
from pathlib import Path
from typing import Optional
from marimo import _loggers
from marimo._utils.http import HTTPException, HTTPStatus
LOGGER = _loggers.marimo_logger()
class PathValidator:
    """Validates file paths for security and access control.

    Handles:
    - Directory containment validation (prevent path traversal)
    - Temporary directory allowlisting (for tutorials)
    - Symlink resolution and security

    Containment checks deliberately do NOT resolve symlinks (see
    _normalize_path_without_resolving_symlinks); only ``..`` and ``.``
    components are normalized before comparing paths.
    """
    def __init__(self, base_directory: Optional[Path] = None):
        """Initialize PathValidator.

        Args:
            base_directory: The base directory to validate paths against.
                If None, validation is skipped.
        """
        self.base_directory = base_directory
        # Extra directories (e.g. tutorial temp dirs) that are always allowed.
        self._allowed_temp_dirs: set[Path] = set()
    def register_temp_dir(self, temp_dir: str) -> None:
        """Register a temp directory as allowed for file access.

        Args:
            temp_dir: The absolute path to the temp directory to allow.
        """
        # Normalize the path to ensure consistency
        normalized_path = self._normalize_path_without_resolving_symlinks(
            Path(temp_dir), Path.cwd()
        )
        self._allowed_temp_dirs.add(normalized_path)
        LOGGER.debug("Registered allowed temp directory: %s", normalized_path)
    def is_file_in_allowed_temp_dir(self, filepath: str) -> bool:
        """Check if a file is inside an allowed temp directory.

        Args:
            filepath: The file path to check.

        Returns:
            True if the file is in an allowed temp directory, False otherwise.
            Any error during normalization is logged and treated as False
            (fail closed).
        """
        if not self._allowed_temp_dirs:
            return False
        try:
            file_normalized = self._normalize_path_without_resolving_symlinks(
                Path(filepath), Path.cwd()
            )
            for temp_dir in list(self._allowed_temp_dirs):
                try:
                    # relative_to raises ValueError when not a descendant.
                    file_normalized.relative_to(temp_dir)
                    return True
                except ValueError:
                    # Not a child of this temp directory, try next
                    continue
            return False
        except Exception as e:
            LOGGER.warning(
                "Error checking if file %s is in allowed temp dir: %s",
                filepath,
                e,
            )
            return False
    def _normalize_path_without_resolving_symlinks(
        self, path: Path, base: Path
    ) -> Path:
        """Normalize a path without resolving symlinks.

        Makes the path absolute relative to base and normalizes .. components,
        but does NOT resolve symlinks.

        Args:
            path: The path to normalize
            base: The base directory for relative paths

        Returns:
            Normalized absolute path without symlink resolution

        Raises:
            HTTPException: BAD_REQUEST if the normalized path is not absolute.
        """
        import os
        from marimo._utils.tmpdir import _convert_to_long_pathname
        # Make absolute relative to base if needed
        if not path.is_absolute():
            path = base / path
        # Use os.path.normpath to normalize .. and . without resolving symlinks
        # Then convert back to Path
        normalized = Path(os.path.normpath(str(path)))
        # On Windows, convert short (8.3) path names to long path names.
        # This handles cases where Path.cwd() returns short names like
        # "C:\Users\RUNNER~1\..." but Path.resolve() returns long names
        # like "C:\Users\runneradmin\...". Without this normalization,
        # two paths referring to the same location may fail containment checks.
        normalized = Path(_convert_to_long_pathname(str(normalized)))
        # Ensure the normalized path is still absolute. While normpath generally
        # preserves absoluteness, we enforce this invariant explicitly for safety.
        if not normalized.is_absolute():
            LOGGER.error(
                "Normalized path is not absolute: %s (original: %s, base: %s)",
                normalized,
                path,
                base,
            )
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST,
                detail="Invalid path: normalized path is not absolute",
            )
        return normalized
    def _check_containment(
        self,
        directory_abs: Path,
        filepath_abs: Path,
        directory: Path,
        filepath: Path,
    ) -> None:
        """Check that filepath is inside directory.

        ``directory`` and ``filepath`` are the original (un-normalized)
        paths, used only for error messages; the comparison itself runs on
        the ``*_abs`` normalized paths.

        Raises:
            HTTPException: FORBIDDEN if filepath equals or escapes directory.
        """
        if filepath_abs == directory_abs:
            raise HTTPException(
                status_code=HTTPStatus.FORBIDDEN,
                detail=f"Access denied: File {filepath} is the same as directory {directory}",
            )
        try:
            filepath_abs.relative_to(directory_abs)
        except ValueError:
            raise HTTPException(
                status_code=HTTPStatus.FORBIDDEN,
                detail=f"Access denied: File {filepath} is outside the allowed directory {directory}",
            ) from None
    def validate_inside_directory(
        self, directory: Path, filepath: Path
    ) -> None:
        """
        Validate that a filepath is inside a directory.

        Handles all combinations of absolute/relative paths for both directory
        and filepath. By default, symlinks are preserved.

        Args:
            directory: The directory path (can be absolute or relative)
            filepath: The file path to validate (can be absolute or relative)

        Raises:
            HTTPException: If the filepath is outside the directory or if there's
                an error resolving paths (e.g. broken symlinks, permission errors)
        """
        try:
            # Handle empty paths - Path("") resolves to ".", so check for that
            if str(directory) == "." and str(filepath) == ".":
                # Both are current directory - this is ambiguous
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST,
                    detail="Empty or ambiguous directory or filepath provided",
                )
            # Resolve directory to absolute path
            # If directory is relative, resolve it relative to current working directory
            directory_resolved = directory.resolve(strict=False)
            # If directory doesn't exist, we can't validate - this is an error
            if not directory_resolved.exists():
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST,
                    detail=f"Directory {directory} does not exist",
                )
            if not directory_resolved.is_dir():
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST,
                    detail=f"Path {directory} is not a directory",
                )
            try:
                # Normalize without resolving symlinks
                directory_normalized = (
                    self._normalize_path_without_resolving_symlinks(
                        directory, Path.cwd()
                    )
                )
                # If it was an absolute directory, then the base is that directory
                # otherwise, the base is the current working directory
                if directory.is_absolute():
                    filepath_base = directory
                else:
                    filepath_base = Path.cwd()
                filepath_normalized = (
                    self._normalize_path_without_resolving_symlinks(
                        filepath, filepath_base
                    )
                )
                self._check_containment(
                    directory_normalized,
                    filepath_normalized,
                    directory,
                    filepath,
                )
            except OSError as e:
                # Handle errors like permission errors, etc.
                raise HTTPException(
                    status_code=HTTPStatus.BAD_REQUEST,
                    detail=f"Error resolving path {filepath}: {str(e)}",
                ) from e
        except HTTPException:
            # Re-raise HTTPException as-is
            raise
        except Exception as e:
            # Catch any other unexpected errors
            raise HTTPException(
                status_code=HTTPStatus.SERVER_ERROR,
                detail=f"Unexpected error validating path: {str(e)}",
            ) from e
    def validate_file_access(self, filepath: Path) -> None:
        """Validate file can be accessed (combines checks).

        Checks if the file is in an allowed temp directory, and if not,
        validates it's inside the base directory. When no base directory is
        configured, validation is skipped entirely.

        Args:
            filepath: The file path to validate

        Raises:
            HTTPException: If validation fails
        """
        if self.base_directory is None:
            return
        # Check if file is in an allowed temp directory first
        if self.is_file_in_allowed_temp_dir(str(filepath)):
            return
        # Otherwise, validate it's inside the base directory
        self.validate_inside_directory(self.base_directory, filepath)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/files/path_validator.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_server/test_directory_scanner.py | # Copyright 2024 Marimo. All rights reserved.
from __future__ import annotations
import os
import shutil
import tempfile
import unittest
from marimo._server.files.directory_scanner import (
DirectoryScanner,
is_marimo_app,
)
def test_python_app_detected():
    """Test that Python marimo apps are detected.

    Writes a minimal marimo notebook to a temporary ``.py`` file and
    checks that ``is_marimo_app`` recognizes it.
    """
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".py", delete=False
    ) as f:
        f.write("import marimo\napp = marimo.App()\n")
        f.flush()
        name = f.name
    try:
        # Probe after the handle is closed (required to reopen on Windows).
        assert is_marimo_app(name)
    finally:
        # Always remove the temp file, even when the assertion fails.
        os.unlink(name)
def test_python_non_app():
    """Test that non-marimo Python files return False.

    A plain script without a ``marimo.App`` definition must not be
    classified as a marimo app.
    """
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".py", delete=False
    ) as f:
        f.write("import sys\nprint('hello')\n")
        f.flush()
        name = f.name
    try:
        # Probe after the handle is closed (required to reopen on Windows).
        assert not is_marimo_app(name)
    finally:
        # Always remove the temp file, even when the assertion fails.
        os.unlink(name)
def test_markdown_app_detected():
    """Test that markdown files with marimo-version are detected.

    A markdown notebook is identified by a ``marimo-version`` key in its
    YAML front matter.
    """
    with tempfile.NamedTemporaryFile(
        mode="w", suffix=".md", delete=False
    ) as f:
        f.write("---\nmarimo-version: 0.1.0\n---\n")
        f.flush()
        name = f.name
    try:
        # Probe after the handle is closed (required to reopen on Windows).
        assert is_marimo_app(name)
    finally:
        # Always remove the temp file, even when the assertion fails.
        os.unlink(name)
class TestDirectoryScanner(unittest.TestCase):
    """Filesystem-backed tests for DirectoryScanner.scan().

    setUp builds a temporary tree containing two marimo apps, one markdown
    notebook, and a nested directory with one more app; tearDown removes
    the whole tree.
    """
    def setUp(self):
        # Fresh temp tree per test; removed in tearDown.
        self.temp_root = tempfile.mkdtemp()
        self.test_dir = os.path.join(self.temp_root, "test_directory")
        os.makedirs(self.test_dir)
        # Create marimo files
        with open(os.path.join(self.test_dir, "app1.py"), "w") as f:
            f.write("import marimo\napp = marimo.App()\n")
        with open(os.path.join(self.test_dir, "app2.py"), "w") as f:
            f.write("import marimo\napp = marimo.App()\n")
        # Create markdown file
        with open(os.path.join(self.test_dir, "notebook.md"), "w") as f:
            f.write("---\nmarimo-version: 0.1.0\n---\n")
        # Create nested directory with marimo file
        self.nested_dir = os.path.join(self.test_dir, "nested")
        os.makedirs(self.nested_dir)
        with open(os.path.join(self.nested_dir, "nested_app.py"), "w") as f:
            f.write("import marimo\napp = marimo.App()\n")
    def tearDown(self):
        # Remove the entire temp tree created by setUp.
        shutil.rmtree(self.temp_root)
    def test_basic_scan(self):
        """Test basic directory scanning."""
        scanner = DirectoryScanner(self.test_dir)
        files = scanner.scan()
        # Only top-level, non-directory entries; markdown is excluded by
        # default, so just the two .py apps are expected.
        file_names = [f.name for f in files if not f.is_directory]
        assert len(file_names) == 2
        assert "app1.py" in file_names
        assert "app2.py" in file_names
    def test_scan_with_markdown(self):
        """Test scanning with markdown files included."""
        scanner = DirectoryScanner(self.test_dir, include_markdown=True)
        files = scanner.scan()
        file_names = [f.name for f in files if not f.is_directory]
        assert len(file_names) == 3
        assert "notebook.md" in file_names
    def test_scan_nested_directories(self):
        """Test scanning nested directories."""
        scanner = DirectoryScanner(self.test_dir)
        files = scanner.scan()
        nested_dirs = [
            f for f in files if f.is_directory and f.name == "nested"
        ]
        assert len(nested_dirs) == 1
        # The nested directory has exactly one child: nested_app.py.
        assert nested_dirs[0].children is not None
        assert nested_dirs[0].children[0].name == "nested_app.py"
    def test_max_files_limit(self):
        """Test that max_files limit is enforced."""
        # Add 10 more apps on top of the fixture's 3 files so the limit
        # of 5 is definitely exceeded.
        for i in range(10):
            with open(os.path.join(self.test_dir, f"app{i + 3}.py"), "w") as f:
                f.write("import marimo\napp = marimo.App()\n")
        scanner = DirectoryScanner(self.test_dir, max_files=5)
        files = scanner.scan()
        # Recursively count non-directory entries across the (possibly
        # nested) result tree; directories themselves do not count.
        def count_files(file_list: list) -> int:
            total = 0
            for f in file_list:
                if f.is_directory:
                    if f.children:
                        total += count_files(f.children)
                else:
                    total += 1
            return total
        assert count_files(files) == 5
    def test_skip_common_directories(self):
        """Test that common directories are skipped."""
        for dirname in ["venv", "node_modules", "__pycache__", ".git"]:
            skip_dir = os.path.join(self.test_dir, dirname)
            os.makedirs(skip_dir)
            with open(os.path.join(skip_dir, "app.py"), "w") as f:
                f.write("import marimo\napp = marimo.App()\n")
        scanner = DirectoryScanner(self.test_dir)
        files = scanner.scan()
        file_paths = [f.path for f in files if not f.is_directory]
        # NOTE(review): __pycache__ and .git are created above but only
        # venv and node_modules are asserted absent — confirm whether the
        # other two should be checked as well.
        for path in file_paths:
            assert "venv" not in path
            assert "node_modules" not in path
    def test_skip_hidden_files(self):
        """Test that hidden files are skipped."""
        with open(os.path.join(self.test_dir, ".hidden_app.py"), "w") as f:
            f.write("import marimo\napp = marimo.App()\n")
        scanner = DirectoryScanner(self.test_dir)
        files = scanner.scan()
        file_names = [f.name for f in files if not f.is_directory]
        assert ".hidden_app.py" not in file_names
    def test_partial_results_populated_during_scan(self):
        """Test that partial_results is populated during scanning."""
        scanner = DirectoryScanner(self.test_dir)
        # partial_results starts empty
        assert scanner.partial_results == []
        # NOTE(review): the return value is unused here; scan() is invoked
        # for its side effect of filling partial_results.
        files = scanner.scan()
        # After scan, partial_results contains all found files (flat list)
        assert len(scanner.partial_results) >= 2
        # All items in partial_results should be non-directory files
        for f in scanner.partial_results:
            assert not f.is_directory
            assert f.is_marimo_file
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/test_directory_scanner.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/test_path_validator.py | # Copyright 2024 Marimo. All rights reserved.
from __future__ import annotations
import os
import shutil
import tempfile
import unittest
from pathlib import Path
import pytest
from marimo._server.files.path_validator import PathValidator
from marimo._utils.http import HTTPException, HTTPStatus
class TestPathValidator(unittest.TestCase):
    """Tests for PathValidator containment and traversal checks.

    Layout built by setUp (removed in tearDown):
        temp_root/test_directory/test.py       -- the "inside" file
        temp_root/outside_directory/outside.py -- the "outside" file
    Several tests chdir into temp_root; tearDown restores the original cwd
    before deleting the tree. The validator's policy under test preserves
    symlinks (it does not resolve them), which several tests rely on.
    """
    def setUp(self):
        # Build the inside/outside fixture tree and remember the cwd so
        # chdir-based tests can be undone in tearDown.
        self.temp_root = tempfile.mkdtemp()
        self.test_dir = os.path.join(self.temp_root, "test_directory")
        os.makedirs(self.test_dir)
        self.test_file = os.path.join(self.test_dir, "test.py")
        with open(self.test_file, "w") as f:
            f.write("# test")
        self.outside_dir = os.path.join(self.temp_root, "outside_directory")
        os.makedirs(self.outside_dir)
        self.outside_file = os.path.join(self.outside_dir, "outside.py")
        with open(self.outside_file, "w") as f:
            f.write("# outside")
        self.original_cwd = os.getcwd()
    def tearDown(self):
        # Restore cwd first: rmtree would otherwise delete the directory
        # some tests chdir'd into.
        os.chdir(self.original_cwd)
        shutil.rmtree(self.temp_root)
    def test_file_inside_directory(self):
        """Test that files inside directory pass validation."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        filepath = Path(self.test_file).resolve()
        validator.validate_inside_directory(directory, filepath)
    def test_file_outside_directory(self):
        """Test that files outside directory fail validation."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        filepath = Path(self.outside_file).resolve()
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_path_traversal_attack(self):
        """Test that path traversal attacks are prevented."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        filepath = Path(self.test_dir) / ".." / ".." / "etc" / "passwd"
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_symlink_inside_directory(self):
        """Test that symlinks pointing inside directory are allowed."""
        validator = PathValidator()
        directory = Path(self.test_dir)
        symlink_path = Path(self.test_dir) / "symlink.py"
        symlink_path.symlink_to(self.test_file)
        validator.validate_inside_directory(directory, symlink_path)
        symlink_path.unlink()
    def test_symlink_to_outside_allowed(self):
        """Test that symlinks pointing outside directory are allowed.
        Since symlinks are preserved (not resolved), the symlink path itself
        is inside the directory, so access is allowed.
        """
        validator = PathValidator()
        directory = Path(self.test_dir)
        symlink_path = Path(self.test_dir) / "symlink.py"
        symlink_path.symlink_to(self.outside_file)
        # Should not raise - symlink path is inside directory
        validator.validate_inside_directory(directory, symlink_path)
        symlink_path.unlink()
    def test_temp_directory_registration(self):
        """Test registering and checking temp directories."""
        validator = PathValidator()
        validator.register_temp_dir(self.outside_dir)
        assert validator.is_file_in_allowed_temp_dir(self.outside_file)
        assert not validator.is_file_in_allowed_temp_dir(self.test_file)
    def test_validate_file_access_with_temp_dir(self):
        """Test that temp directory files bypass validation."""
        validator = PathValidator(base_directory=Path(self.test_dir))
        validator.register_temp_dir(self.outside_dir)
        filepath = Path(self.outside_file)
        # Should not raise - file is in allowed temp dir
        validator.validate_file_access(filepath)
    def test_validate_file_access_outside_base_dir(self):
        """Test that files outside base directory are blocked."""
        validator = PathValidator(base_directory=Path(self.test_dir))
        filepath = Path(self.outside_file)
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_file_access(filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_symlink_directory_outside_allowed(self):
        """Test that files through symlinked directories are allowed.
        Since symlinks are preserved (not resolved), the path through the
        symlink is inside the base directory.
        """
        # Create a symlink to the outside directory inside the test directory
        symlink_path = Path(self.test_dir) / "shared"
        symlink_path.symlink_to(self.outside_dir)
        # Create a file reference through the symlink
        file_through_symlink = symlink_path / "outside.py"
        # Symlinks are preserved (not resolved), so the path
        # /test_dir/shared/outside.py is inside /test_dir/
        validator = PathValidator()
        validator.validate_inside_directory(
            Path(self.test_dir), file_through_symlink
        )  # Should not raise
        symlink_path.unlink()
    def test_absolute_directory_with_relative_filepath(self):
        """Test that relative filepaths are normalized relative to absolute directory."""
        validator = PathValidator()
        # Use absolute directory
        directory = Path(self.test_dir).resolve()
        # Use relative filepath (relative to the directory)
        filepath = Path("test.py")
        # Should validate successfully - test.py is inside test_dir
        validator.validate_inside_directory(directory, filepath)
    def test_absolute_directory_with_relative_filepath_traversal(self):
        """Test path traversal prevention with absolute dir and relative filepath."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Try to traverse outside using relative path
        filepath = Path("../outside_directory/outside.py")
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_relative_directory_with_relative_filepath(self):
        """Test that both relative paths work correctly from cwd."""
        validator = PathValidator()
        # Change to temp_root so relative paths make sense
        os.chdir(self.temp_root)
        directory = Path("test_directory")
        filepath = Path("test_directory/test.py")
        # Should validate successfully
        validator.validate_inside_directory(directory, filepath)
    def test_relative_directory_with_relative_filepath_outside(self):
        """Test that relative filepath outside relative directory fails."""
        validator = PathValidator()
        os.chdir(self.temp_root)
        directory = Path("test_directory")
        # File is outside the test_directory
        filepath = Path("outside_directory/outside.py")
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_absolute_directory_with_nested_relative_filepath(self):
        """Test nested relative filepath with absolute directory."""
        validator = PathValidator()
        # Create a nested directory structure
        nested_dir = os.path.join(self.test_dir, "nested", "deeper")
        os.makedirs(nested_dir)
        nested_file = os.path.join(nested_dir, "nested.py")
        with open(nested_file, "w") as f:
            f.write("# nested")
        directory = Path(self.test_dir).resolve()
        # Relative path to nested file
        filepath = Path("nested/deeper/nested.py")
        # Should validate successfully
        validator.validate_inside_directory(directory, filepath)
    def test_absolute_directory_with_dot_relative_filepath(self):
        """Test that ./ prefix in relative filepath works correctly."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Relative path with ./ prefix
        filepath = Path("./test.py")
        # Should validate successfully
        validator.validate_inside_directory(directory, filepath)
    def test_relative_directory_with_absolute_filepath_inside(self):
        """Test relative directory with absolute filepath inside it."""
        validator = PathValidator()
        os.chdir(self.temp_root)
        directory = Path("test_directory")
        # Absolute path to file inside the relative directory
        filepath = Path(self.test_file).resolve()
        # Should validate successfully
        validator.validate_inside_directory(directory, filepath)
    def test_relative_directory_with_absolute_filepath_outside(self):
        """Test relative directory with absolute filepath outside it."""
        validator = PathValidator()
        os.chdir(self.temp_root)
        directory = Path("test_directory")
        # Absolute path to file outside the directory
        filepath = Path(self.outside_file).resolve()
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_path_traversal_staying_inside(self):
        """Test path traversal that goes up but stays inside directory."""
        validator = PathValidator()
        # Create nested structure: test_dir/subdir/file.py
        subdir = os.path.join(self.test_dir, "subdir")
        os.makedirs(subdir)
        directory = Path(self.test_dir).resolve()
        # Path goes up from subdir but stays in test_dir
        filepath = Path("subdir/../test.py")
        # Should validate successfully - normalized path is test_dir/test.py
        validator.validate_inside_directory(directory, filepath)
    def test_path_traversal_middle_of_path(self):
        """Test path traversal in the middle of a path."""
        validator = PathValidator()
        # Create structure: test_dir/dir1/dir2/file.py
        dir1 = os.path.join(self.test_dir, "dir1")
        dir2 = os.path.join(self.test_dir, "dir2")
        os.makedirs(dir1)
        os.makedirs(dir2)
        file_in_dir2 = os.path.join(dir2, "file.py")
        with open(file_in_dir2, "w") as f:
            f.write("# file")
        directory = Path(self.test_dir).resolve()
        # Path has traversal in the middle: dir1/../dir2/file.py
        filepath = Path("dir1/../dir2/file.py")
        # Should validate successfully
        validator.validate_inside_directory(directory, filepath)
    def test_multiple_level_path_traversal(self):
        """Test multiple levels of path traversal to escape."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Try to go up multiple levels
        filepath = Path("../../../../../../etc/passwd")
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
    def test_filepath_equals_directory(self):
        """Test that filepath cannot be the same as directory."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        filepath = directory
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.FORBIDDEN
        assert "same as directory" in str(exc_info.value.detail)
    def test_nonexistent_directory(self):
        """Test that validation fails for non-existent directory."""
        validator = PathValidator()
        nonexistent_dir = Path(self.temp_root) / "does_not_exist"
        filepath = Path("test.py")
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(nonexistent_dir, filepath)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "does not exist" in str(exc_info.value.detail)
    def test_directory_is_file(self):
        """Test that validation fails when directory is actually a file."""
        validator = PathValidator()
        # Use test_file as directory (it's actually a file)
        not_a_directory = Path(self.test_file)
        filepath = Path("test.py")
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(not_a_directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "not a directory" in str(exc_info.value.detail)
    def test_empty_paths(self):
        """Test that empty/ambiguous paths are rejected."""
        validator = PathValidator()
        # Path("") resolves to Path(".")
        directory = Path(".")
        filepath = Path(".")
        with pytest.raises(HTTPException) as exc_info:
            validator.validate_inside_directory(directory, filepath)
        assert exc_info.value.status_code == HTTPStatus.BAD_REQUEST
        assert "Empty or ambiguous" in str(exc_info.value.detail)
    def test_broken_symlink_filepath(self):
        """Test handling of broken symlinks in filepath."""
        validator = PathValidator()
        directory = Path(self.test_dir)
        # Create a symlink to a non-existent file
        broken_symlink = Path(self.test_dir) / "broken_link.py"
        broken_symlink.symlink_to("/nonexistent/path/file.py")
        # The symlink path itself is inside the directory, so it should be allowed
        # (symlinks are not resolved)
        validator.validate_inside_directory(directory, broken_symlink)
        broken_symlink.unlink()
    def test_symlink_directory(self):
        """Test validation when the directory itself is a symlink."""
        validator = PathValidator()
        # Create a symlink to test_dir
        symlink_dir = Path(self.temp_root) / "symlink_to_test_dir"
        symlink_dir.symlink_to(self.test_dir)
        # Use the symlink as the directory
        filepath = symlink_dir / "test.py"
        # Should validate successfully
        validator.validate_inside_directory(symlink_dir, filepath)
        symlink_dir.unlink()
    def test_redundant_path_components(self):
        """Test paths with redundant components like ./ and //."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Test various redundant path formats
        test_cases = [
            Path("./././test.py"),  # Multiple ./
            Path("test.py"),  # Normal case for comparison
        ]
        for filepath in test_cases:
            # All should validate successfully
            validator.validate_inside_directory(directory, filepath)
    def test_trailing_slash_in_filepath(self):
        """Test filepath with trailing slash."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Create a subdirectory
        subdir = os.path.join(self.test_dir, "subdir")
        os.makedirs(subdir, exist_ok=True)
        # Path with trailing slash (refers to directory)
        filepath = Path("subdir/")
        # Should validate successfully - subdir is inside test_dir
        validator.validate_inside_directory(directory, filepath)
    def test_absolute_filepath_with_traversal(self):
        """Test absolute filepath with traversal components."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Create an absolute path with .. in it
        # E.g., /path/to/test_dir/subdir/../test.py
        subdir = os.path.join(self.test_dir, "subdir")
        os.makedirs(subdir, exist_ok=True)
        # Use resolved directory to ensure consistent path representation
        filepath = directory / "subdir" / ".." / "test.py"
        # Should validate successfully after normalization
        validator.validate_inside_directory(directory, filepath)
    def test_case_sensitive_paths(self):
        """Test that path validation is case-sensitive on case-sensitive filesystems."""
        validator = PathValidator()
        directory = Path(self.test_dir).resolve()
        # Create a file with specific casing
        case_file = os.path.join(self.test_dir, "CamelCase.py")
        with open(case_file, "w") as f:
            f.write("# test")
        # Use the exact casing
        filepath = Path("CamelCase.py")
        # Should validate successfully
        validator.validate_inside_directory(directory, filepath)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/test_path_validator.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_utils/net.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import errno
import os
import socket
import sys
from typing import TYPE_CHECKING
from marimo import _loggers
if TYPE_CHECKING:
from typing import Optional
LOGGER = _loggers.marimo_logger()
_DEFAULT_BACKLOG = 128
# From Tornado (Apache 2.0)
# https://github.com/tornadoweb/tornado/blob/8a953697888d463f48090d500268892a7384e6b1/tornado/netutil.py#L56
def _errno_from_exception(e: BaseException) -> Optional[int]:
"""Provides the errno from an Exception object.
There are cases that the errno attribute was not set so we pull
the errno out of the args but if someone instantiates an Exception
without any args you will get a tuple error. So this function
abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, "errno"):
return e.errno # type: ignore
elif e.args:
return e.args[0] # type:ignore[no-any-return]
else:
return None
# From Tornado (Apache 2.0), battle-tested by Jupyter, streamlit, others
# https://github.com/tornadoweb/tornado/blob/8a953697888d463f48090d500268892a7384e6b1/tornado/netutil.py#L56
def _bind_sockets(
    port: int,
    address: Optional[str] = None,
    family: socket.AddressFamily = socket.AF_UNSPEC,
    backlog: int = _DEFAULT_BACKLOG,
    flags: int | None = None,
    reuse_port: bool = False,
) -> list[socket.socket]:
    """Creates listening sockets bound to the given port and address.
    Returns a list of socket objects (multiple sockets are returned if
    the given address maps to multiple IP addresses, which is most common
    for mixed IPv4 and IPv6 use).
    Address may be either an IP address or hostname. If it's a hostname,
    the server will listen on all IP addresses associated with the
    name. Address may be an empty string or None to listen on all
    available interfaces. Family may be set to either `socket.AF_INET`
    or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
    both will be used if available.
    The ``backlog`` argument has the same meaning as for
    `socket.listen() <socket.socket.listen>`.
    ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
    ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
    in the list. If your platform doesn't support this option ValueError will
    be raised.
    """
    if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
        raise ValueError("the platform doesn't support SO_REUSEPORT")
    sockets = []
    # getaddrinfo treats None (with AI_PASSIVE) as "all interfaces";
    # normalize the empty-string spelling to that.
    if address == "":
        address = None
    if not socket.has_ipv6 and family == socket.AF_UNSPEC:
        # Python can be compiled with --disable-ipv6, which causes
        # operations on AF_INET6 sockets to fail, but does not
        # automatically exclude those results from getaddrinfo
        # results.
        # http://bugs.python.org/issue16208
        family = socket.AF_INET
    if flags is None:
        flags = socket.AI_PASSIVE
    bound_port = None
    unique_addresses = set()  # type:ignore[type-arg]
    # Sort by address family so bind order is deterministic across
    # platforms; getaddrinfo may also return duplicate entries, which are
    # skipped below.
    for res in sorted(
        socket.getaddrinfo(
            address, port, family, socket.SOCK_STREAM, 0, flags
        ),
        key=lambda x: x[0],
    ):
        if res in unique_addresses:
            continue
        unique_addresses.add(res)
        af, socktype, proto, _, sockaddr = res
        if (
            sys.platform == "darwin"
            and address == "localhost"
            and af == socket.AF_INET6
            and sockaddr[3] != 0  # type: ignore
        ):
            # Mac OS X includes a link-local address fe80::1%lo0 in the
            # getaddrinfo results for 'localhost'. However, the firewall
            # doesn't understand that this is a local address and will
            # prompt for access (often repeatedly, due to an apparent
            # bug in its ability to remember granting access to an
            # application). Skip these addresses.
            continue
        try:
            sock = socket.socket(af, socktype, proto)
        except OSError as e:
            # Skip address families the kernel doesn't support.
            if _errno_from_exception(e) == errno.EAFNOSUPPORT:
                continue
            raise
        if os.name != "nt":
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            except OSError as e:
                if _errno_from_exception(e) != errno.ENOPROTOOPT:
                    # Hurd doesn't support SO_REUSEADDR.
                    raise
        if reuse_port:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        if af == socket.AF_INET6:
            # On linux, ipv6 sockets accept ipv4 too by default,
            # but this makes it impossible to bind to both
            # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
            # separate sockets *must* be used to listen for both ipv4
            # and ipv6. For consistency, always disable ipv4 on our
            # ipv6 sockets and use a separate ipv4 socket when needed.
            #
            # Python 2.x on windows doesn't have IPPROTO_IPV6.
            if hasattr(socket, "IPPROTO_IPV6"):
                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
        # automatic port allocation with port=None
        # should bind on the same port on IPv4 and IPv6
        host, requested_port = sockaddr[:2]
        if requested_port == 0 and bound_port is not None:
            sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
        # NOTE(review): sockets are left non-blocking; callers presumably
        # accept connections via an event loop — confirm.
        sock.setblocking(False)
        try:
            sock.bind(sockaddr)
        except OSError as e:
            if (
                _errno_from_exception(e) == errno.EADDRNOTAVAIL
                and address == "localhost"
                and sockaddr[0] == "::1"
            ):
                # On some systems (most notably docker with default
                # configurations), ipv6 is partially disabled:
                # socket.has_ipv6 is true, we can create AF_INET6
                # sockets, and getaddrinfo("localhost", ...,
                # AF_PASSIVE) resolves to ::1, but we get an error
                # when binding.
                #
                # Swallow the error, but only for this specific case.
                # If EADDRNOTAVAIL occurs in other situations, it
                # might be a real problem like a typo in a
                # configuration.
                sock.close()
                continue
            else:
                raise
        # Remember the kernel-assigned port so subsequent families bind
        # to the same number when port=0 was requested.
        bound_port = sock.getsockname()[1]
        sock.listen(backlog)
        sockets.append(sock)
    return sockets
def find_free_port(port: int, attempts: int = 100, addr: str = "") -> int:
    """Find a free port starting at `port`.

    Use addr="" or "0.0.0.0" to use all interfaces.

    Args:
        port: First port to try; clamped into the valid range 1-65535.
        attempts: Maximum number of candidate ports to probe.
        addr: Address/interface to bind ("" or "0.0.0.0" for all).

    Returns:
        A port number that was bindable when probed. The port is released
        before returning, so another process could grab it in the interim
        (inherent TOCTOU of port probing).

    Raises:
        RuntimeError: If no free port is found within `attempts` tries or
            the top of the port range is reached.
    """
    # Valid port range is 1-65535
    port = max(1, min(port, 65535))
    if attempts == 0:
        raise RuntimeError("Could not find a free port")
    # Based on logic from Jupyter server:
    # https://github.com/jupyter-server/jupyter_server/blob/56e2478a728ff292d8270e62d27dd50c316ee6b7/jupyter_server/serverapp.py#L2670
    try:
        sockets = _bind_sockets(port, addr)
        # _bind_sockets can return several sockets (e.g. separate IPv4 and
        # IPv6 listeners); close them all so no file descriptors leak.
        for sock in sockets:
            sock.close()
        return port
    except OSError as e:
        # NOTE(review): if _bind_sockets fails partway through, sockets it
        # bound before the failure may leak — that would need a fix there.
        if e.errno == errno.EADDRINUSE:
            LOGGER.debug("Port %d already in use, trying another port.", port)
        elif e.errno in (
            errno.EACCES,
            getattr(errno, "WSAEACCES", errno.EACCES),
        ):
            LOGGER.warning("Permission to listen on port %d denied.", port)
    next_port = min(port + 1, 65535)
    if next_port == port:
        raise RuntimeError("No more ports available")
    return find_free_port(next_port, attempts - 1, addr=addr)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/net.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/config.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from starlette.datastructures import State
from uvicorn import Server
from marimo._config.manager import MarimoConfigManager
from marimo._server.session_manager import SessionManager
@dataclass(frozen=True)
class StarletteServerStateInit:
    """Initial state required by the Starlette server.

    Bundling every required attribute into one frozen dataclass guarantees
    that ``app.state`` is always populated completely.
    """

    port: int
    host: str
    base_url: str
    asset_url: str | None
    headless: bool
    quiet: bool
    session_manager: SessionManager
    config_manager: MarimoConfigManager
    remote_url: str | None
    mcp_server_enabled: bool
    skew_protection: bool
    enable_auth: bool

    def apply(self, state: State) -> None:
        """Copy every field of this dataclass onto ``state``."""
        for name in vars(self):
            setattr(state, name, getattr(self, name))
class StarletteServerState(StarletteServerStateInit):
    """Typed state for the Starlette server."""
    # NOTE(review): `server` is an annotation only (no value assigned
    # here); presumably it is attached at runtime elsewhere and declared
    # here purely for static typing — confirm.
    server: Server
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/config.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/print.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
# use spaces instead of a tab to play well with carriage returns;
# \r\t doesn't appear to overwrite characters at the start of a line,
# but \r{TAB} does ...
TAB = " "
def print_tabbed(string: str, n_tabs: int = 1) -> None:
    """Print `string` indented by `n_tabs` levels of the space-based TAB."""
    indent = TAB * n_tabs
    print_(indent + string)
def print_(*args: Any, **kwargs: Any) -> None:
    """Echo via click when it is available, else fall back to print."""
    echo = print  # noqa: T201
    try:
        # click is an optional dependency; prefer its echo when present.
        import click
        echo = click.echo
    except ImportError:
        pass
    echo(*args, **kwargs)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/print.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ai/_pydantic_ai_utils.py | # Copyright 2025 Marimo. All rights reserved.
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Callable
from marimo import _loggers
from marimo._messaging.msgspec_encoder import asdict
from marimo._server.ai.tools.types import ToolDefinition
from marimo._server.models.completion import UIMessage as ServerUIMessage
LOGGER = _loggers.marimo_logger()
if TYPE_CHECKING:
from pydantic_ai import FunctionToolset
from pydantic_ai.ui.vercel_ai.request_types import UIMessage, UIMessagePart
def generate_id(prefix: str) -> str:
    """Return a unique id of the form ``<prefix>_<32 hex chars>``."""
    suffix = uuid.uuid4().hex
    return "_".join((prefix, suffix))
def form_toolsets(
    tools: list[ToolDefinition],
    tool_invoker: Callable[[str, dict[str, Any]], Any],
) -> tuple[FunctionToolset, bool]:
    """
    Because we have a list of tool definitions and call them in a separate event loop,
    we create a closure to invoke the tool (backend) or raise a CallDeferred (frontend).
    Ref: https://ai.pydantic.dev/toolsets/#function-toolset
    Returns a tuple of the toolset and whether deferred tool requests are needed.
    """
    from pydantic_ai import CallDeferred, FunctionToolset
    toolset = FunctionToolset()
    deferred_tool_requests = False
    for tool in tools:
        if tool.source == "frontend":
            deferred_tool_requests = True
            # Frontend tools cannot run here: raise CallDeferred so the
            # client executes the tool and reports the result back.
            # `_tool_name=tool.name` binds the loop variable as a default
            # argument, avoiding the classic late-binding closure bug.
            async def tool_fn(
                _tool_name: str = tool.name, **kwargs: Any
            ) -> Any:
                raise CallDeferred(
                    metadata={
                        "source": "frontend",
                        "tool_name": _tool_name,
                        "kwargs": kwargs,
                    }
                )
        else:
            # Backend tools are invoked directly; the default argument
            # again pins the tool name per loop iteration.
            async def tool_fn(
                _tool_name: str = tool.name, **kwargs: Any
            ) -> Any:
                # NOTE(review): tool_invoker is awaited here, so despite
                # the Callable[...] annotation it must return an awaitable
                # — confirm against callers.
                result = await tool_invoker(_tool_name, kwargs)
                # Convert to JSON-serializable object
                return asdict(result)
        tool_fn.__name__ = tool.name
        toolset.add_function(
            tool_fn, name=tool.name, description=tool.description
        )
    return toolset, deferred_tool_requests
def convert_to_pydantic_messages(
    messages: list[ServerUIMessage],
    part_processor: Callable[[UIMessagePart], UIMessagePart] | None = None,
) -> list[UIMessage]:
    """
    Convert server-side UI messages into pydantic-ai UIMessage objects.

    The frontend SDK tends to generate messages with a messageId even
    though it's not valid; fall back through messageId -> id -> a freshly
    generated id to prevent validation errors. If a part processor is
    provided, it is applied to the (typed) parts of each message.
    """
    from pydantic_ai.ui.vercel_ai.request_types import UIMessage

    def _apply_processor(part: UIMessagePart) -> UIMessagePart:
        # A misbehaving processor must never break message conversion:
        # log and keep the original part.
        try:
            return part_processor(part)
        except Exception as e:
            LOGGER.error(f"Error processing part {part}: {e}")
            return part

    converted: list[UIMessage] = []
    for raw in messages:
        ui_message = UIMessage(
            id=(
                raw.get("messageId")
                or raw.get("id")
                or generate_id("message")
            ),
            role=raw.get("role", "assistant"),
            parts=raw.get("parts", []),
            metadata=raw.get("metadata"),
        )
        # Process parts after construction so the processor sees typed
        # parts rather than raw dicts.
        if part_processor and ui_message.parts:
            ui_message.parts = [
                _apply_processor(part) for part in ui_message.parts
            ]
        converted.append(ui_message)
    return converted
def create_simple_prompt(text: str) -> UIMessage:
    """Wrap *text* in a single-part user ``UIMessage`` (no parts if empty)."""
    from pydantic_ai.ui.vercel_ai.request_types import TextUIPart, UIMessage

    if text:
        parts: list[UIMessagePart] = [TextUIPart(text=text)]
    else:
        parts = []
    return UIMessage(id=generate_id("message"), role="user", parts=parts)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_pydantic_ai_utils.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ai/test_pydantic_utils.py | # Copyright 2025 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from unittest.mock import AsyncMock
import pytest
pytest.importorskip("pydantic_ai", reason="pydantic_ai not installed")
from marimo._ai._pydantic_ai_utils import (
convert_to_pydantic_messages,
create_simple_prompt,
form_toolsets,
generate_id,
)
from marimo._server.ai.tools.types import ToolDefinition
class TestGenerateId:
    """Tests for the ``generate_id`` helper."""

    def test_generate_id_with_prefix(self):
        generated = generate_id("test")
        assert generated.startswith("test_")
        # The suffix is a UUID hex string, always 32 characters long.
        expected_length = len("test_") + 32
        assert len(generated) == expected_length

    def test_generate_id_returns_unique_values(self):
        # Collect into a set; collisions would shrink it below 100.
        unique_ids = {generate_id("prefix") for _ in range(100)}
        assert len(unique_ids) == 100

    def test_generate_id_with_empty_prefix(self):
        # Even with no prefix, the separator underscore is still emitted.
        assert generate_id("").startswith("_")
def _has_pydantic_function_like() -> bool:
"""Check if pydantic has the _function_like attribute required by pydantic-ai."""
try:
from pydantic._internal import _decorators
return hasattr(_decorators, "_function_like")
except ImportError:
return False
@pytest.mark.skipif(
    not _has_pydantic_function_like(),
    reason="pydantic version missing _function_like (required by pydantic-ai)",
)
class TestFormToolsets:
    """Tests for ``form_toolsets``: building a pydantic-ai toolset from
    marimo ToolDefinitions, and flagging frontend (deferred) tools."""

    def test_form_toolsets_empty_list(self):
        """No tools: an (empty) toolset is still returned, nothing deferred."""
        tool_invoker = AsyncMock()
        toolset, deferred = form_toolsets([], tool_invoker)
        assert deferred is False
        assert toolset is not None

    def test_form_toolsets_with_backend_tool(self):
        """A backend tool does not set the deferred flag."""
        tool_invoker = AsyncMock()
        tool = ToolDefinition(
            name="test_tool",
            description="A test tool",
            parameters={"type": "object", "properties": {}},
            source="backend",
            mode=["manual"],
        )
        toolset, deferred = form_toolsets([tool], tool_invoker)
        assert toolset is not None
        assert deferred is False

    def test_form_toolsets_with_frontend_tool(self):
        """A frontend tool sets the deferred flag (executed client-side)."""
        tool_invoker = AsyncMock()
        tool = ToolDefinition(
            name="frontend_tool",
            description="A frontend tool",
            parameters={"type": "object", "properties": {}},
            source="frontend",
            mode=["manual"],
        )
        toolset, deferred = form_toolsets([tool], tool_invoker)
        assert toolset is not None
        assert deferred is True

    def test_form_toolsets_with_multiple_tools(self):
        """One frontend tool among many is enough to set deferred."""
        tool_invoker = AsyncMock()
        tools = [
            ToolDefinition(
                name="backend_tool",
                description="A backend tool",
                parameters={"type": "object", "properties": {}},
                source="backend",
                mode=["manual"],
            ),
            ToolDefinition(
                name="frontend_tool",
                description="A frontend tool",
                parameters={"type": "object", "properties": {}},
                source="frontend",
                mode=["manual"],
            ),
            ToolDefinition(
                name="mcp_tool",
                description="An MCP tool",
                parameters={"type": "object", "properties": {}},
                source="mcp",
                mode=["manual"],
            ),
        ]
        toolset, deferred = form_toolsets(tools, tool_invoker)
        assert toolset is not None
        assert deferred is True  # has frontend tool

    def test_form_toolsets_with_only_backend_and_mcp_tools(self):
        """Backend and MCP tools run server-side: nothing deferred."""
        tool_invoker = AsyncMock()
        tools = [
            ToolDefinition(
                name="backend_tool",
                description="A backend tool",
                parameters={"type": "object", "properties": {}},
                source="backend",
                mode=["manual"],
            ),
            ToolDefinition(
                name="mcp_tool",
                description="An MCP tool",
                parameters={"type": "object", "properties": {}},
                source="mcp",
                mode=["manual"],
            ),
        ]
        toolset, deferred = form_toolsets(tools, tool_invoker)
        assert toolset is not None
        assert deferred is False  # no frontend tools

    async def test_backend_tool_invokes_tool_invoker(self):
        """Calling a backend tool delegates to tool_invoker and converts the
        dataclass result into a plain dict (via ``asdict``)."""

        @dataclass
        class MockResult:
            value: str

        tool_invoker = AsyncMock(return_value=MockResult(value="result"))
        tool = ToolDefinition(
            name="backend_tool",
            description="A backend tool",
            parameters={"type": "object", "properties": {}},
            source="backend",
            mode=["manual"],
        )
        toolset, deferred = form_toolsets([tool], tool_invoker)
        assert deferred is False
        tools = toolset.tools
        assert len(tools) == 1
        assert "backend_tool" in tools
        backend_tool = tools["backend_tool"]
        assert backend_tool.name == "backend_tool"
        assert backend_tool.description == "A backend tool"
        # Actually call the tool function
        result = await backend_tool.function(arg1="test", arg2=123)  # type: ignore[call-arg]
        # Verify tool_invoker was called with correct arguments
        tool_invoker.assert_called_once_with(
            "backend_tool", {"arg1": "test", "arg2": 123}
        )
        # Verify result is converted to dict via asdict
        assert result == {"value": "result"}

    async def test_frontend_tool_raises_call_deferred(self):
        """Calling a frontend tool raises ``CallDeferred`` carrying the tool
        name and kwargs, and never touches the server-side invoker."""
        from pydantic_ai import CallDeferred

        tool_invoker = AsyncMock()
        tool = ToolDefinition(
            name="frontend_tool",
            description="A frontend tool",
            parameters={"type": "object", "properties": {}},
            source="frontend",
            mode=["manual"],
        )
        toolset, deferred = form_toolsets([tool], tool_invoker)
        assert deferred is True
        tools = toolset.tools
        assert len(tools) == 1
        assert "frontend_tool" in tools
        frontend_tool = tools["frontend_tool"]
        assert frontend_tool.name == "frontend_tool"
        assert frontend_tool.description == "A frontend tool"
        # Call the tool function and verify it raises CallDeferred
        with pytest.raises(CallDeferred) as exc_info:
            await frontend_tool.function(arg="value")  # type: ignore[call-arg]
        # Verify CallDeferred has correct metadata
        assert exc_info.value.metadata == {
            "source": "frontend",
            "tool_name": "frontend_tool",
            "kwargs": {"arg": "value"},
        }
        # Verify tool_invoker was NOT called for frontend tools
        tool_invoker.assert_not_called()
class TestConvertToPydanticMessages:
    """Tests for ``convert_to_pydantic_messages``: id normalization,
    field defaulting, and optional per-part processing."""

    def test_convert_empty_messages(self):
        """An empty input list converts to an empty output list."""
        result = convert_to_pydantic_messages([])
        assert result == []

    def test_convert_message_with_message_id(self):
        """``messageId`` (frontend SDK field) becomes the message id."""
        from pydantic_ai.ui.vercel_ai.request_types import TextUIPart

        messages = [
            {
                "messageId": "msg_123",
                "role": "user",
                "parts": [{"type": "text", "text": "Hello"}],
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert len(result) == 1
        assert result[0].id == "msg_123"
        assert result[0].role == "user"
        assert result[0].parts == [
            TextUIPart(
                type="text", text="Hello", state=None, provider_metadata=None
            )
        ]

    def test_convert_message_with_id(self):
        """A plain ``id`` field is honored when ``messageId`` is absent."""
        messages = [
            {
                "id": "id_456",
                "role": "assistant",
                "parts": [{"type": "text", "text": "Hi there"}],
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert len(result) == 1
        assert result[0].id == "id_456"
        assert result[0].role == "assistant"

    def test_convert_message_generates_id_when_missing(self):
        """With neither ``messageId`` nor ``id``, an id is generated."""
        messages = [
            {
                "role": "user",
                "parts": [{"type": "text", "text": "Hello"}],
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert len(result) == 1
        assert result[0].id.startswith("message_")

    def test_convert_message_prefers_message_id_over_id(self):
        """``messageId`` wins when both id fields are present."""
        messages = [
            {
                "messageId": "message_id_value",
                "id": "id_value",
                "role": "user",
                "parts": [],
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert result[0].id == "message_id_value"

    def test_convert_message_defaults_role_to_assistant(self):
        """A missing role defaults to "assistant"."""
        messages = [
            {
                "id": "test_id",
                "parts": [],
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert result[0].role == "assistant"

    def test_convert_message_defaults_parts_to_empty_list(self):
        """Missing parts default to an empty list."""
        messages = [
            {
                "id": "test_id",
                "role": "user",
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert result[0].parts == []

    def test_convert_message_with_metadata(self):
        """Metadata, when provided, is passed through unchanged."""
        messages = [
            {
                "id": "test_id",
                "role": "user",
                "parts": [],
                "metadata": {"key": "value"},
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert result[0].metadata == {"key": "value"}

    def test_convert_message_without_metadata(self):
        """Missing metadata maps to None."""
        messages = [
            {
                "id": "test_id",
                "role": "user",
                "parts": [],
            }
        ]
        result = convert_to_pydantic_messages(messages)
        assert result[0].metadata is None

    def test_convert_multiple_messages(self):
        """Each message resolves its id independently, in order."""
        messages = [
            {
                "messageId": "msg_1",
                "role": "user",
                "parts": [{"type": "text", "text": "Hello"}],
            },
            {
                "id": "msg_2",
                "role": "assistant",
                "parts": [{"type": "text", "text": "Hi!"}],
            },
            {
                "role": "user",
                "parts": [{"type": "text", "text": "How are you?"}],
            },
        ]
        result = convert_to_pydantic_messages(messages)
        assert len(result) == 3
        assert result[0].id == "msg_1"
        assert result[1].id == "msg_2"
        assert result[2].id.startswith("message_")

    def test_convert_with_part_processor(self):
        """Test that part_processor is called on each part."""
        from pydantic_ai.ui.vercel_ai.request_types import (
            TextUIPart,
            UIMessage,
        )

        call_count = 0

        def processor(part):
            nonlocal call_count
            call_count += 1
            # Modify the text
            if isinstance(part, TextUIPart):
                return TextUIPart(
                    type="text",
                    text=f"processed: {part.text}",
                    state=part.state,
                    provider_metadata=part.provider_metadata,
                )
            return part

        messages = [
            {
                "id": "msg_1",
                "role": "user",
                "parts": [
                    {"type": "text", "text": "Hello"},
                    {"type": "text", "text": "World"},
                ],
            }
        ]
        result = convert_to_pydantic_messages(
            messages, part_processor=processor
        )
        assert call_count == 2
        assert result == [
            UIMessage(
                id="msg_1",
                role="user",
                metadata=None,
                parts=[
                    TextUIPart(
                        type="text",
                        text="processed: Hello",
                        state=None,
                        provider_metadata=None,
                    ),
                    TextUIPart(
                        type="text",
                        text="processed: World",
                        state=None,
                        provider_metadata=None,
                    ),
                ],
            )
        ]

    def test_convert_with_part_processor_error_returns_original(self):
        """Test that part_processor errors are caught and original part is returned."""
        from pydantic_ai.ui.vercel_ai.request_types import (
            TextUIPart,
            UIMessage,
        )

        def failing_processor(_part):
            raise ValueError("Processing failed!")

        messages = [
            {
                "id": "msg_1",
                "role": "user",
                "parts": [{"type": "text", "text": "Hello"}],
            }
        ]
        result = convert_to_pydantic_messages(
            messages, part_processor=failing_processor
        )
        # Should return original part when processor fails
        assert result == [
            UIMessage(
                id="msg_1",
                role="user",
                metadata=None,
                parts=[
                    TextUIPart(
                        type="text",
                        text="Hello",
                        state=None,
                        provider_metadata=None,
                    )
                ],
            )
        ]

    def test_convert_with_part_processor_empty_parts(self):
        """Test that part_processor is not called when parts is empty."""
        call_count = 0

        def processor(_part):
            nonlocal call_count
            call_count += 1
            return _part

        messages = [
            {
                "id": "msg_1",
                "role": "user",
                "parts": [],
            }
        ]
        result = convert_to_pydantic_messages(
            messages, part_processor=processor
        )
        assert call_count == 0
        assert len(result) == 1
        assert result[0].parts == []

    def test_convert_with_part_processor_multiple_messages(self):
        """Test that part_processor is applied to all messages."""
        from pydantic_ai.ui.vercel_ai.request_types import TextUIPart

        processed_parts = []

        def processor(part):
            if isinstance(part, TextUIPart):
                processed_parts.append(part.text)
            return part

        messages = [
            {
                "id": "msg_1",
                "role": "user",
                "parts": [{"type": "text", "text": "First"}],
            },
            {
                "id": "msg_2",
                "role": "assistant",
                "parts": [{"type": "text", "text": "Second"}],
            },
        ]
        convert_to_pydantic_messages(messages, part_processor=processor)
        assert processed_parts == ["First", "Second"]
class TestCreateSimplePrompt:
    """Tests for ``create_simple_prompt``."""

    def test_create_simple_prompt(self):
        from pydantic_ai.ui.vercel_ai.request_types import TextUIPart

        prompt = create_simple_prompt("Hello, world!")
        assert prompt.id.startswith("message_")
        assert prompt.role == "user"
        # Exactly one text part wrapping the given prompt.
        assert prompt.parts == [TextUIPart(type="text", text="Hello, world!")]

    def test_create_simple_prompt_with_empty_text(self):
        prompt = create_simple_prompt("")
        assert prompt.id.startswith("message_")
        assert prompt.role == "user"
        # Empty text yields no parts at all rather than an empty part.
        assert prompt.parts == []
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/test_pydantic_utils.py",
"license": "Apache License 2.0",
"lines": 430,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/api/endpoints/test_auto_instantiate.py | # Copyright 2024 Marimo. All rights reserved.
"""Tests for auto-instantiate behavior in run mode vs edit mode."""
from __future__ import annotations
from typing import TYPE_CHECKING
from unittest.mock import MagicMock, patch
from marimo._messaging.notification import KernelReadyNotification
from marimo._session.model import SessionMode
from marimo._utils.parse_dataclass import parse_raw
from tests._server.mocks import token_header
if TYPE_CHECKING:
from starlette.testclient import TestClient
def _create_headers(session_id: str) -> dict[str, str]:
    """Create headers with both token and session ID."""
    headers = dict(token_header("fake-token"))
    headers["Marimo-Session-Id"] = session_id
    return headers
def _create_ws_url(session_id: str) -> str:
return f"/ws?session_id={session_id}&access_token=fake-token"
class TestAutoInstantiateEditMode:
    """Tests for auto-instantiate in edit mode."""

    def test_kernel_ready_auto_instantiated_false_in_edit_mode(
        self, client: TestClient
    ) -> None:
        """In edit mode, auto_instantiated should be False."""
        session_id = "test-edit-mode"
        ws_url = _create_ws_url(session_id)
        with client.websocket_connect(ws_url) as websocket:
            # The first message after connecting is the kernel-ready op.
            data = websocket.receive_json()
            assert data["op"] == "kernel-ready"
            kernel_ready = parse_raw(data["data"], KernelReadyNotification)
            # In edit mode, auto_instantiated should be False
            assert kernel_ready.auto_instantiated is False

    def test_instantiate_endpoint_allowed_in_edit_mode(
        self, client: TestClient
    ) -> None:
        """In edit mode, /instantiate endpoint should be allowed."""
        session_id = "test-instantiate-edit"
        ws_url = _create_ws_url(session_id)
        headers = _create_headers(session_id)
        with client.websocket_connect(ws_url) as websocket:
            # Wait for the session to be fully established
            data = websocket.receive_json()
            assert data["op"] == "kernel-ready"
            # The /instantiate endpoint should work in edit mode
            response = client.post(
                "/api/kernel/instantiate",
                headers=headers,
                json={"objectIds": [], "values": [], "autoRun": False},
            )
            assert response.status_code == 200
class TestAutoInstantiateRunMode:
    """Tests for auto-instantiate in run mode."""

    def test_kernel_ready_auto_instantiated_true_in_run_mode(
        self, client: TestClient
    ) -> None:
        """In run mode, auto_instantiated should be True."""
        # NOTE(review): imported lazily; presumably to avoid importing the
        # conftest at module load time — confirm.
        from tests._server.conftest import get_session_manager

        session_manager = get_session_manager(client)
        session_id = "test-run-mode"
        # Switch to run mode
        session_manager.mode = SessionMode.RUN
        ws_url = _create_ws_url(session_id)
        with client.websocket_connect(ws_url) as websocket:
            data = websocket.receive_json()
            assert data["op"] == "kernel-ready"
            kernel_ready = parse_raw(data["data"], KernelReadyNotification)
            # In run mode, auto_instantiated should be True
            assert kernel_ready.auto_instantiated is True

    def test_instantiate_endpoint_blocked_in_run_mode(
        self, client: TestClient
    ) -> None:
        """In run mode, /instantiate endpoint should return 401 (Unauthorized).

        The @requires("edit") decorator checks for edit permissions, and in run mode
        users only have "read" permissions, so they get 401 Unauthorized.
        """
        from tests._server.conftest import get_session_manager

        session_manager = get_session_manager(client)
        session_id = "test-instantiate-run"
        headers = _create_headers(session_id)
        session_manager.mode = SessionMode.RUN
        ws_url = _create_ws_url(session_id)
        with client.websocket_connect(ws_url) as websocket:
            # Wait for the session to be fully established
            data = websocket.receive_json()
            assert data["op"] == "kernel-ready"
            # The /instantiate endpoint should be blocked in run mode
            # Returns 401 because @requires("edit") checks permissions
            response = client.post(
                "/api/kernel/instantiate",
                headers=headers,
                json={"objectIds": [], "values": [], "autoRun": False},
            )
            assert response.status_code == 401
class TestAutoInstantiateHTTPRequest:
    """Tests for HTTP request propagation during auto-instantiate."""

    def test_auto_instantiate_passes_http_request(self) -> None:
        """Verify _auto_instantiate passes HTTPRequest from websocket.

        This verifies the fix for the issue where mo.app_meta().request
        returned None in run mode because _auto_instantiate was passing
        http_request=None instead of extracting it from the websocket.
        """
        from marimo._server.api.endpoints.ws.ws_session_connector import (
            SessionConnector,
        )

        mock_session = MagicMock()
        mock_http_request = MagicMock()
        connector = SessionConnector(
            manager=MagicMock(),
            handler=MagicMock(),
            params=MagicMock(),
            websocket=MagicMock(),
        )
        # Patch the HTTPRequest factory so we can both control its return
        # value and observe which websocket it was built from.
        with patch(
            "marimo._runtime.commands.HTTPRequest.from_request",
            return_value=mock_http_request,
        ) as mock_from_request:
            connector._auto_instantiate(mock_session)
            mock_from_request.assert_called_once_with(connector.websocket)
            # The converted request object must be forwarded verbatim.
            assert (
                mock_session.instantiate.call_args.kwargs["http_request"]
                is mock_http_request
            )
class TestInstantiateNotebookRequest:
    """Tests for InstantiateNotebookRequest with codes field."""

    def test_instantiate_request_with_codes(self) -> None:
        """InstantiateNotebookRequest should accept optional codes field."""
        from marimo._server.models.models import InstantiateNotebookRequest

        # Without codes
        request = InstantiateNotebookRequest(
            object_ids=[],
            values=[],
            auto_run=True,
        )
        assert request.codes is None
        # With codes
        request_with_codes = InstantiateNotebookRequest(
            object_ids=[],
            values=[],
            auto_run=True,
            codes={"cell1": "print('hello')"},
        )
        assert request_with_codes.codes == {"cell1": "print('hello')"}

    def test_instantiate_with_codes_field(self, client: TestClient) -> None:
        """Test that instantiate endpoint accepts codes field.

        This test verifies the API accepts the codes parameter without
        creating a new session (which would fail due to multiprocessing issues
        in the test environment).
        """
        # Fixture is required by the signature but intentionally unused here.
        del client
        from marimo._server.models.models import InstantiateNotebookRequest

        # Test that the model accepts codes
        request_with_codes = InstantiateNotebookRequest(
            object_ids=[],
            values=[],
            auto_run=True,
            codes={"cell1": "print('test')", "cell2": "x = 1"},
        )
        assert request_with_codes.codes == {
            "cell1": "print('test')",
            "cell2": "x = 1",
        }
        assert request_with_codes.auto_run is True

    def test_instantiate_endpoint_without_codes_uses_file_codes(
        self, client: TestClient
    ) -> None:
        """Test that instantiate without codes uses file codes."""
        session_id = "test-file-codes"
        ws_url = _create_ws_url(session_id)
        headers = _create_headers(session_id)
        with client.websocket_connect(ws_url) as websocket:
            # Get the kernel-ready message
            data = websocket.receive_json()
            assert data["op"] == "kernel-ready"
            # Send instantiate without codes (should use file codes)
            response = client.post(
                "/api/kernel/instantiate",
                headers=headers,
                json={
                    "objectIds": [],
                    "values": [],
                    "autoRun": True,
                    # No codes field
                },
            )
            assert response.status_code == 200
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_auto_instantiate.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_convert/script.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._ast import codegen
from marimo._ast.app import InternalApp
from marimo._ast.load import load_notebook_ir
from marimo._runtime.dataflow import topological_sort
from marimo._schemas.serialization import NotebookSerialization
from marimo._version import __version__
def convert_from_ir_to_script(ir: NotebookSerialization) -> str:
    """Flatten a notebook IR into a single ``# %%``-delimited Python script.

    Cells are emitted in topological (dataflow) order. Raises a
    ``click.ClickException`` when any cell is a coroutine, since async code
    cannot run in a flat script.
    """
    app = InternalApp(load_notebook_ir(ir))
    # Reject notebooks containing async cells up front.
    for cell in app.cell_manager.cells():
        if cell and cell._is_coroutine:
            from click import ClickException

            raise ClickException(
                "Cannot export a notebook with async code to a flat script"
            )
    graph = app.graph
    header = ""
    if ir.filename:
        header = codegen.get_header_comments(ir.filename) or ""
    ordered_cell_ids = topological_sort(graph, graph.cells.keys())
    sections: list[str] = [
        "# %%\n" + graph.cells[cid].code for cid in ordered_cell_ids
    ]
    preamble = f'{header}\n__generated_with = "{__version__}"\n\n'
    return preamble + "\n\n".join(sections)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_convert/script.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ast/codegen_data/test_unparsable_cell_with_triple_quotes.py | import marimo
__generated_with = "0.0.0"
app = marimo.App()
@app.cell
def _():
x = 1
return
app._unparsable_cell(
"""
\"\"\"
```python {.marimo}
print(\"Hello, World!\")
""",
name="_"
)
app._unparsable_cell(
r"""
it's an unparsable cell
""",
name="_"
)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/codegen_data/test_unparsable_cell_with_triple_quotes.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/api/endpoints/test_cache_endpoints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from marimo._types.ids import SessionId
from tests._server.mocks import token_header, with_read_session, with_session
if TYPE_CHECKING:
from starlette.testclient import TestClient
# Session id shared by every test in this module.
SESSION_ID = SessionId("session-123")
# Request headers identifying that session and carrying a fake auth token.
HEADERS = {
    "Marimo-Session-Id": SESSION_ID,
    **token_header("fake-token"),
}
@with_session(SESSION_ID)
def test_clear_cache(client: TestClient) -> None:
    """Clearing the cache succeeds for an edit-mode session."""
    resp = client.post("/api/cache/clear", headers=HEADERS, json={})
    assert resp.status_code == 200, resp.text
    body = resp.json()
    assert body["success"] is True
@with_session(SESSION_ID)
def test_get_cache_info(client: TestClient) -> None:
    """Fetching cache info succeeds for an edit-mode session."""
    resp = client.post("/api/cache/info", headers=HEADERS, json={})
    assert resp.status_code == 200, resp.text
    body = resp.json()
    assert body["success"] is True
@with_read_session(SESSION_ID)
def test_cache_forbidden_in_read_mode(client: TestClient) -> None:
    """Cache endpoints require edit permissions; read mode gets 401."""
    for endpoint in ("/api/cache/clear", "/api/cache/info"):
        resp = client.post(endpoint, headers=HEADERS, json={})
        assert resp.status_code == 401, resp.text
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_cache_endpoints.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/api/endpoints/test_mpl_endpoints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from unittest.mock import AsyncMock, MagicMock, patch
from marimo._server.api.endpoints.mpl import WS_MAX_SIZE, figure_endpoints
if TYPE_CHECKING:
from starlette.testclient import TestClient
class TestMatplotlibProxyEndpoints:
    """Tests for matplotlib proxy endpoints."""

    def setup_method(self) -> None:
        """Clear figure_endpoints before each test."""
        # figure_endpoints is module-global state mapping figure id -> port;
        # reset it so tests cannot leak registrations into each other.
        figure_endpoints.clear()

    def teardown_method(self) -> None:
        """Clean up after each test."""
        figure_endpoints.clear()

    @staticmethod
    def test_http_requires_auth(client: TestClient) -> None:
        """Test that HTTP proxy requires authentication."""
        figure_endpoints[1] = "8888"
        # Mock validate_auth to return False (unauthenticated)
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth",
            return_value=False,
        ):
            response = client.get("/mpl/1/test_path")
            assert response.status_code == 401, response.text
            assert "Unauthorized" in response.text

    @staticmethod
    def test_unauthorized_figure_returns_403(client: TestClient) -> None:
        """Test that accessing unregistered figure returns 403."""
        # Mock auth to pass, but figure not registered
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth", return_value=True
        ):
            response = client.get("/mpl/999/some_path")
            assert response.status_code == 403, response.text
            assert "Unauthorized" in response.text

    @staticmethod
    def test_connection_error_returns_503(client: TestClient) -> None:
        """Test connection failure returns 503."""
        figure_endpoints[1] = "8888"
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth", return_value=True
        ):
            with patch("urllib.request.urlopen") as mock_urlopen:
                from urllib.error import URLError

                # Simulate the backing matplotlib server being down.
                mock_urlopen.side_effect = URLError("Connection refused")
                response = client.get("/mpl/1/test_path")
                assert response.status_code == 503, response.text
                assert "Matplotlib server is not available" in response.text

    @staticmethod
    def test_successful_proxy(client: TestClient) -> None:
        """Test successful proxying to matplotlib server."""
        figure_endpoints[1] = "8888"
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth", return_value=True
        ):
            with patch("urllib.request.urlopen") as mock_urlopen:
                mock_response = MagicMock()
                mock_response.status = 200
                mock_response.headers = {"content-type": "text/html"}
                mock_response.read.return_value = b"<html>Success</html>"
                # urlopen is used as a context manager by the proxy.
                mock_urlopen.return_value.__enter__.return_value = (
                    mock_response
                )
                response = client.get("/mpl/1/test")
                assert response.status_code == 200, response.text
                assert response.text == "<html>Success</html>"

    @staticmethod
    def test_query_params_forwarded(client: TestClient) -> None:
        """Test query parameters are forwarded to matplotlib server."""
        figure_endpoints[1] = "8888"
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth", return_value=True
        ):
            with patch("urllib.request.urlopen") as mock_urlopen:
                mock_response = MagicMock()
                mock_response.status = 200
                mock_response.headers = {}
                mock_response.read.return_value = b""
                mock_urlopen.return_value.__enter__.return_value = (
                    mock_response
                )
                client.get("/mpl/1/test?param1=value1&param2=value2")
                # Inspect the urllib Request object the proxy constructed.
                request_obj = mock_urlopen.call_args[0][0]
                assert "param1=value1" in request_obj.full_url
                assert "param2=value2" in request_obj.full_url

    @staticmethod
    def test_headers_filtered(client: TestClient) -> None:
        """Test that problematic headers (host, content-length) are filtered."""
        figure_endpoints[1] = "8888"
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth", return_value=True
        ):
            with patch("urllib.request.urlopen") as mock_urlopen:
                mock_response = MagicMock()
                mock_response.status = 200
                mock_response.headers = {}
                mock_response.read.return_value = b""
                mock_urlopen.return_value.__enter__.return_value = (
                    mock_response
                )
                client.get("/mpl/1/test")
                request_obj = mock_urlopen.call_args[0][0]
                assert "Host" not in request_obj.headers
                assert "Content-length" not in request_obj.headers

    @staticmethod
    def test_websocket_requires_auth(client: TestClient) -> None:
        """Test that WebSocket connection requires authentication."""
        from starlette.websockets import WebSocketDisconnect

        # Mock validate_auth to return False (unauthenticated)
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth",
            return_value=False,
        ):
            try:
                with client.websocket_connect("/mpl/8888/ws"):
                    pass
            except WebSocketDisconnect:
                # Expected - connection should be rejected
                pass

    @staticmethod
    def test_websocket_authenticated_connects(client: TestClient) -> None:
        """Test that authenticated WebSocket connection succeeds."""
        from starlette.websockets import WebSocketDisconnect

        # Mock validate_auth to return True (authenticated)
        with patch(
            "marimo._server.api.endpoints.mpl.validate_auth", return_value=True
        ):
            with patch("websockets.connect") as mock_connect:
                mock_ws = MagicMock()
                mock_context = MagicMock()
                mock_context.__aenter__ = AsyncMock(return_value=mock_ws)
                mock_context.__aexit__ = AsyncMock(return_value=None)
                mock_connect.return_value = mock_context

                async def mock_iter():
                    # Empty async generator: the unreachable `yield` makes
                    # this a generator; `return` ends iteration immediately.
                    return
                    yield

                mock_ws.__aiter__ = mock_iter
                try:
                    with client.websocket_connect("/mpl/8888/ws?figure=123"):
                        pass
                except WebSocketDisconnect:
                    pass
                # Verify websockets.connect was called with correct port
                mock_connect.assert_called()
                call_url = mock_connect.call_args[0][0]
                assert "localhost:8888" in call_url
                # Verify max_size is set to handle large matplotlib PNGs
                call_kwargs = mock_connect.call_args[1]
                assert call_kwargs.get("max_size") == WS_MAX_SIZE
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_mpl_endpoints.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/api/endpoints/test_secrets_endpoints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from unittest.mock import patch
import pytest
from marimo._types.ids import SessionId
from tests._server.mocks import token_header, with_read_session, with_session
if TYPE_CHECKING:
from starlette.testclient import TestClient
# Session id shared by every test in this module.
SESSION_ID = SessionId("session-123")
# Request headers identifying that session and carrying a fake auth token.
HEADERS = {
    "Marimo-Session-Id": SESSION_ID,
    **token_header("fake-token"),
}
@with_session(SESSION_ID)
def test_list_secret_keys(client: TestClient) -> None:
    """Listing secret keys succeeds for an edit-mode session."""
    resp = client.post(
        "/api/secrets/keys",
        headers=HEADERS,
        json={"requestId": "test-request-id"},
    )
    assert resp.status_code == 200, resp.text
    assert "success" in resp.json()
@with_session(SESSION_ID)
def test_create_secret(client: TestClient) -> None:
    """Creating a secret succeeds and delegates to write_secret."""
    payload = {
        "key": "TEST_SECRET",
        "value": "secret_value",
        "provider": "env",
        "name": "test_secret",
    }
    with patch(
        "marimo._server.api.endpoints.secrets.write_secret"
    ) as mock_write_secret:
        resp = client.post(
            "/api/secrets/create", headers=HEADERS, json=payload
        )
        assert resp.status_code == 200, resp.text
        assert resp.json()["success"] is True
        # The endpoint must hand the secret off to the writer.
        assert mock_write_secret.called
@with_session(SESSION_ID)
def test_delete_secret_not_implemented(client: TestClient) -> None:
    """Deleting a secret is not implemented yet: the endpoint returns 501."""
    resp = client.post(
        "/api/secrets/delete", headers=HEADERS, json={"key": "test_secret"}
    )
    assert resp.status_code == 501, resp.text
@with_read_session(SESSION_ID)
def test_secrets_forbidden_in_read_mode(client: TestClient) -> None:
    """Secret endpoints require edit permissions; read mode gets 401."""
    keys_resp = client.post(
        "/api/secrets/keys",
        headers=HEADERS,
        json={"requestId": "test-request-id"},
    )
    assert keys_resp.status_code == 401, keys_resp.text
    create_resp = client.post(
        "/api/secrets/create",
        headers=HEADERS,
        json={
            "key": "TEST_SECRET",
            "value": "test_value",
            "provider": "env",
            "name": "test",
        },
    )
    assert create_resp.status_code == 401, create_resp.text
@with_session(SESSION_ID)
def test_create_secret_write_failure(client: TestClient) -> None:
    """A failing write_secret propagates out of the request handler."""
    with patch(
        "marimo._server.api.endpoints.secrets.write_secret",
        side_effect=Exception("Write failed"),
    ):
        with pytest.raises(Exception, match="Write failed"):
            client.post(
                "/api/secrets/create",
                headers=HEADERS,
                json={
                    "key": "TEST_SECRET",
                    "value": "test_value",
                    "provider": "env",
                    "name": "test",
                },
            )
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_secrets_endpoints.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_runtime/test_commands.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._runtime.commands import kebab_case
def test_kebab_case() -> None:
    """kebab_case drops the "Command" suffix and kebab-cases the rest."""
    cases = {
        "SomeSQLCommand": "some-sql",
        "SomeSQL": "some-sql",
        "MyNotificationCommand": "my-notification",
    }
    for raw, expected in cases.items():
        assert kebab_case(raw) == expected
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_commands.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_server/api/endpoints/ws/ws_connection_validator.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from starlette.websockets import WebSocket
from marimo import _loggers
from marimo._server.api.auth import validate_auth
from marimo._server.api.deps import AppState
from marimo._server.codes import WebSocketCodes
from marimo._server.file_router import MarimoFileKey
from marimo._types.ids import SessionId
LOGGER = _loggers.marimo_logger()
# Query-string parameter names expected on the websocket connection URL.
SESSION_QUERY_PARAM_KEY = "session_id"
FILE_QUERY_PARAM_KEY = "file"
KIOSK_QUERY_PARAM_KEY = "kiosk"
@dataclass
class ConnectionParams:
    """Parameters extracted from WebSocket connection request."""
    # Client session id, from the `session_id` query parameter.
    session_id: SessionId
    # Target notebook file, from the `file` query parameter or the
    # file router's unique file key.
    file_key: MarimoFileKey
    # True when the `kiosk` query parameter equals "true".
    kiosk: bool
    # From the file's runtime config: whether to auto-instantiate the app.
    auto_instantiate: bool
    # From the file's experimental config key `rtc_v2`.
    rtc_enabled: bool
class WebSocketConnectionValidator:
    """Validates and extracts connection parameters from WebSocket requests.

    Responsible for (1) authenticating the connection and (2) pulling the
    session/file/kiosk parameters out of the connection's query string,
    closing the websocket with an appropriate code when validation fails.
    """

    def __init__(self, websocket: WebSocket, app_state: AppState):
        self.websocket = websocket
        self.app_state = app_state

    async def validate_auth(self) -> bool:
        """Validate authentication, close socket if invalid.

        Returns:
            True if authentication is valid or not required, False otherwise.
        """
        if self.app_state.enable_auth and not validate_auth(self.websocket):
            await self.websocket.close(
                WebSocketCodes.UNAUTHORIZED, "MARIMO_UNAUTHORIZED"
            )
            return False
        return True

    def _resolve_file_key(self) -> Optional[MarimoFileKey]:
        """Resolve the target file key for this connection.

        Prefers the explicit `file` query parameter, falling back to the
        file router's unique file key (available when the server manages a
        single file). Returns None when neither source yields a key.
        """
        return (
            self.app_state.query_params(FILE_QUERY_PARAM_KEY)
            or self.app_state.session_manager.file_router.get_unique_file_key()
        )

    async def extract_connection_params(
        self,
    ) -> Optional[ConnectionParams]:
        """Extract and validate connection parameters.

        Closes the websocket with a NORMAL_CLOSE code when a required
        parameter is missing.

        Returns:
            ConnectionParams if all parameters are valid, None otherwise.
        """
        # Extract session_id (required)
        raw_session_id = self.app_state.query_params(SESSION_QUERY_PARAM_KEY)
        if raw_session_id is None:
            await self.websocket.close(
                WebSocketCodes.NORMAL_CLOSE, "MARIMO_NO_SESSION_ID"
            )
            return None
        session_id = SessionId(raw_session_id)

        # Extract file_key (required)
        file_key = self._resolve_file_key()
        if file_key is None:
            await self.websocket.close(
                WebSocketCodes.NORMAL_CLOSE, "MARIMO_NO_FILE_KEY"
            )
            return None

        # Extract kiosk mode (optional, defaults to False)
        kiosk = self.app_state.query_params(KIOSK_QUERY_PARAM_KEY) == "true"

        # Extract config-based parameters for this file
        config = self.app_state.config_manager_at_file(file_key).get_config()
        rtc_enabled = config.get("experimental", {}).get("rtc_v2", False)
        auto_instantiate = config["runtime"]["auto_instantiate"]

        return ConnectionParams(
            session_id=session_id,
            file_key=file_key,
            kiosk=kiosk,
            auto_instantiate=auto_instantiate,
            rtc_enabled=rtc_enabled,
        )

    async def extract_file_key_only(self) -> Optional[MarimoFileKey]:
        """Extract only the file_key parameter (for RTC endpoint).

        Returns:
            MarimoFileKey if valid, None otherwise.
        """
        file_key = self._resolve_file_key()
        if file_key is None:
            LOGGER.warning("RTC: Closing websocket - no file key")
            await self.websocket.close(
                WebSocketCodes.NORMAL_CLOSE, "MARIMO_NO_FILE_KEY"
            )
            return None
        return file_key
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws/ws_connection_validator.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/api/endpoints/ws/ws_kernel_ready.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import sys
from typing import TYPE_CHECKING
from marimo import _loggers
from marimo._ast.cell import CellConfig
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.notification import (
KernelCapabilitiesNotification,
KernelReadyNotification,
)
from marimo._plugins.core.web_component import JSONType
from marimo._session.model import SessionMode
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from marimo._server.file_router import MarimoFileKey
from marimo._server.rtc.doc import LoroDocManager
from marimo._server.session_manager import SessionManager
from marimo._session import Session
LOGGER = _loggers.marimo_logger()
# Loro (the RTC backend) is only usable on Python 3.11+.
LORO_ALLOWED = sys.version_info >= (3, 11)
def build_kernel_ready(
    session: Session,
    manager: SessionManager,
    resumed: bool,
    ui_values: dict[str, JSONType],
    last_executed_code: dict[CellId_t, str],
    last_execution_time: dict[CellId_t, float],
    kiosk: bool,
    rtc_enabled: bool,
    file_key: MarimoFileKey,
    mode: SessionMode,
    doc_manager: LoroDocManager,
    auto_instantiated: bool = False,
) -> KernelReadyNotification:
    """Assemble the KernelReady notification sent to a connecting frontend.

    Args:
        session: Current session
        manager: Session manager
        resumed: Whether this is a resumed session
        ui_values: UI element values
        last_executed_code: Last executed code for each cell
        last_execution_time: Last execution time for each cell
        kiosk: Whether this is kiosk mode
        rtc_enabled: Whether RTC is enabled
        file_key: File key for the session
        mode: Session mode (edit/run)
        doc_manager: LoroDoc manager for RTC
        auto_instantiated: Whether the kernel has already been instantiated
            server-side (run mode). If True, the frontend does not need
            to instantiate the app.

    Returns:
        KernelReady message operation.
    """
    cell_codes, cell_names, cell_configs, cell_ids = _extract_cell_data(
        session, manager
    )
    # Kick off RTC document creation when collaboration applies to this mode
    if _should_init_rtc(rtc_enabled, mode):
        _try_init_rtc_doc(cell_ids, cell_codes, file_key, doc_manager)
    file_manager = session.app_file_manager
    return KernelReadyNotification(
        codes=cell_codes,
        names=cell_names,
        configs=cell_configs,
        layout=file_manager.read_layout_config(),
        cell_ids=cell_ids,
        resumed=resumed,
        ui_values=ui_values,
        last_executed_code=last_executed_code,
        last_execution_time=last_execution_time,
        app_config=file_manager.app.config,
        kiosk=kiosk,
        capabilities=KernelCapabilitiesNotification(),
        auto_instantiated=auto_instantiated,
    )
def _extract_cell_data(
    session: Session,
    manager: SessionManager,
) -> tuple[
    tuple[str, ...],
    tuple[str, ...],
    tuple[CellConfig, ...],
    tuple[CellId_t, ...],
]:
    """Extract per-cell data based on mode.

    When the session manager says code should not be sent to the frontend
    (run mode), each cell's code is replaced with an empty string; names,
    configs, and ids are always included.

    Args:
        session: Current session
        manager: Session manager

    Returns:
        Tuple of (codes, names, configs, cell_ids). Four empty tuples when
        the notebook has no cells (previously this raised ValueError from
        unpacking an empty zip).
    """
    app = session.app_file_manager.app
    # In run mode, source code is hidden from the frontend.
    include_code = manager.should_send_code_to_frontend()
    rows = [
        (
            cell_data.code if include_code else "",
            cell_data.name,
            cell_data.config,
            cell_data.cell_id,
        )
        for cell_data in app.cell_manager.cell_data()
    ]
    if not rows:
        # zip(*[]) yields nothing to unpack; return explicit empties.
        return (), (), (), ()
    codes, names, configs, cell_ids = zip(*rows)
    return codes, names, configs, cell_ids
def is_rtc_available() -> bool:
    """Report whether RTC (Loro) can be used on this system.

    Requires both a supported Python version and the loro package.

    Returns:
        True if Loro is available, False otherwise.
    """
    if not LORO_ALLOWED:
        return False
    return DependencyManager.loro.has()
def _should_init_rtc(rtc_enabled: bool, mode: SessionMode) -> bool:
    """Decide whether the RTC document should be initialized.

    RTC is only initialized when it is enabled, the session is in edit
    mode, and Loro is available on this system.

    Args:
        rtc_enabled: Whether RTC is currently enabled
        mode: Session mode (edit/run)

    Returns:
        True if RTC should be initialized, False otherwise.
    """
    if not rtc_enabled:
        return False
    if mode != SessionMode.EDIT:
        return False
    return is_rtc_available()
def _try_init_rtc_doc(
    cell_ids: tuple[CellId_t, ...],
    codes: tuple[str, ...],
    file_key: MarimoFileKey,
    doc_manager: LoroDocManager,
) -> None:
    """Best-effort initialization of the RTC document with cell data.

    Logs a warning and returns (without raising) if Loro cannot be used.

    Args:
        cell_ids: Cell IDs to initialize
        codes: Cell codes to initialize
        file_key: File key for the document
        doc_manager: LoroDoc manager
    """
    if not LORO_ALLOWED:
        LOGGER.warning("RTC: Python version is not supported (requires 3.11+)")
        return
    if not DependencyManager.loro.has():
        LOGGER.warning(
            "RTC: Loro is not installed, disabling real-time collaboration"
        )
        return
    # Fire-and-forget: doc creation runs concurrently on the event loop.
    asyncio.create_task(doc_manager.create_doc(file_key, cell_ids, codes))
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws/ws_kernel_ready.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/api/endpoints/ws/ws_message_loop.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any, Callable
from starlette.websockets import WebSocketDisconnect, WebSocketState
from marimo import _loggers
from marimo._messaging.notification import (
CompletionResultNotification,
FocusCellNotification,
)
from marimo._messaging.serde import deserialize_kernel_notification_name
from marimo._messaging.types import KernelMessage
from marimo._server.api.endpoints.ws.ws_formatter import format_wire_message
if TYPE_CHECKING:
from starlette.websockets import WebSocket
LOGGER = _loggers.marimo_logger()
# Operations that are only sent to kiosk-mode consumers; dropped for
# regular frontends.
KIOSK_ONLY_OPERATIONS = {
    FocusCellNotification.name,
}
# Operations that are never sent to kiosk-mode consumers.
KIOSK_EXCLUDED_OPERATIONS = {
    CompletionResultNotification.name,
}
class WebSocketMessageLoop:
    """Handles the async message send/receive loops for WebSocket.

    Runs two concurrent tasks: one drains the kernel's message queue and
    forwards messages to the frontend; the other waits on the socket to
    detect disconnection. Each task can cancel the other on disconnect via
    the ``on_disconnect`` callback.
    """
    def __init__(
        self,
        websocket: WebSocket,
        message_queue: asyncio.Queue[KernelMessage],
        kiosk: bool,
        on_disconnect: Callable[[Exception, Callable[[], Any]], None],
        on_check_status_update: Callable[[], None],
    ):
        # on_disconnect receives the triggering exception and a cleanup
        # callable that cancels the sibling task.
        self.websocket = websocket
        self.message_queue = message_queue
        self.kiosk = kiosk
        self.on_disconnect = on_disconnect
        self.on_check_status_update = on_check_status_update
        self._listen_messages_task: asyncio.Task[None] | None = None
        self._listen_disconnect_task: asyncio.Task[None] | None = None
    async def start(self) -> None:
        """Start the message loops.
        Runs two concurrent tasks:
        - listen_for_messages: Sends messages from kernel to frontend
        - listen_for_disconnect: Detects when WebSocket disconnects
        """
        self._listen_messages_task = asyncio.create_task(
            self._listen_for_messages()
        )
        self._listen_disconnect_task = asyncio.create_task(
            self._listen_for_disconnect()
        )
        # Completes when both tasks finish (or one cancels the other).
        await asyncio.gather(
            self._listen_messages_task,
            self._listen_disconnect_task,
        )
    async def _listen_for_messages(self) -> None:
        """Listen for messages from kernel and send to frontend."""
        while True:
            data = await self.message_queue.get()
            op: str = deserialize_kernel_notification_name(data)
            # Drop operations that don't apply to this consumer's mode.
            if self._should_filter_operation(op):
                continue
            # Serialize message
            try:
                text = format_wire_message(op, data)
            except Exception as e:
                # NOTE(review): message says "deserialize" but this is the
                # serialization step; kept as-is (runtime string).
                LOGGER.error("Failed to deserialize message: %s", str(e))
                LOGGER.error("Message: %s", data)
                continue
            # Send to WebSocket
            try:
                await self.websocket.send_text(text)
            except WebSocketDisconnect as e:
                # Notify owner; cleanup cancels the disconnect-listener task.
                self.on_disconnect(e, self._cancel_disconnect_task)
            except RuntimeError as e:
                # Starlette can raise a runtime error if a message is sent
                # when the socket is closed. In case the disconnection
                # error hasn't made its way to listen_for_disconnect, do
                # the cleanup here.
                if (
                    self.websocket.application_state
                    == WebSocketState.DISCONNECTED
                ):
                    self.on_disconnect(e, self._cancel_disconnect_task)
                else:
                    LOGGER.error(
                        "Error sending message to frontend: %s", str(e)
                    )
            except Exception as e:
                LOGGER.error("Error sending message to frontend: %s", str(e))
                raise e
    async def _listen_for_disconnect(self) -> None:
        """Listen for WebSocket disconnect."""
        try:
            # Check for marimo updates when connection starts
            self.on_check_status_update()
            # Wait for disconnection; receive_text raises on disconnect.
            await self.websocket.receive_text()
        except WebSocketDisconnect as e:
            self.on_disconnect(e, self._cancel_messages_task)
        except Exception as e:
            LOGGER.error("Error listening for disconnect: %s", str(e))
            raise e
    def _should_filter_operation(self, op: str) -> bool:
        """Determine if operation should be filtered based on kiosk mode.
        Args:
            op: Operation name to check
        Returns:
            True if the operation should be filtered (not sent), False
            otherwise.
        """
        # Kiosk-only ops are dropped for regular consumers ...
        if op in KIOSK_ONLY_OPERATIONS and not self.kiosk:
            LOGGER.debug(
                "Ignoring operation %s, not in kiosk mode",
                op,
            )
            return True
        # ... and kiosk-excluded ops are dropped for kiosk consumers.
        if op in KIOSK_EXCLUDED_OPERATIONS and self.kiosk:
            LOGGER.debug(
                "Ignoring operation %s, in kiosk mode",
                op,
            )
            return True
        return False
    def _cancel_messages_task(self) -> None:
        """Cancel the messages task."""
        if (
            self._listen_messages_task
            and not self._listen_messages_task.done()
        ):
            self._listen_messages_task.cancel()
    def _cancel_disconnect_task(self) -> None:
        """Cancel the disconnect task."""
        if (
            self._listen_disconnect_task
            and not self._listen_disconnect_task.done()
        ):
            self._listen_disconnect_task.cancel()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws/ws_message_loop.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/api/endpoints/ws/ws_rtc_handler.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING
from starlette.websockets import WebSocketDisconnect
from marimo import _loggers
if TYPE_CHECKING:
from loro import DiffEvent, ExportMode, LoroDoc
from starlette.websockets import WebSocket
from marimo._server.file_router import MarimoFileKey
from marimo._server.rtc.doc import LoroDocManager
LOGGER = _loggers.marimo_logger()
class RTCWebSocketHandler:
    """Handles Real-Time Collaboration WebSocket connections.

    Bridges a single client's websocket and the shared LoroDoc for a file:
    sends an initial snapshot, relays updates between the doc and the
    client, and cleans up the client's queue on disconnect.
    """

    def __init__(
        self,
        websocket: WebSocket,
        file_key: MarimoFileKey,
        doc_manager: LoroDocManager,
    ):
        self.websocket = websocket
        self.file_key = file_key
        self.doc_manager = doc_manager

    async def handle(self) -> None:
        """Handle RTC WebSocket connection lifecycle.

        Manages the full lifecycle of an RTC WebSocket connection:
        1. Accept the connection
        2. Get or create LoroDoc
        3. Send initial sync to client
        4. Subscribe to updates
        5. Handle bidirectional updates
        6. Cleanup on disconnect
        """
        from loro import ExportMode

        await self.websocket.accept()
        # Get or create the LoroDoc and add the client to it
        LOGGER.debug("RTC: getting document")
        update_queue: asyncio.Queue[bytes] = asyncio.Queue()
        doc = await self.doc_manager.get_or_create_doc(self.file_key)
        self.doc_manager.add_client_to_doc(self.file_key, update_queue)

        # Send initial sync to client
        await self._send_initial_sync(doc, ExportMode)

        # Set up update handling
        def handle_doc_update(event: DiffEvent) -> None:
            # Fix: use lazy %-formatting; passing `event` without a
            # placeholder made the logging module raise a formatting error.
            LOGGER.debug("RTC: doc updated %s", event)

        # Subscribe to LoroDoc updates
        subscription = doc.subscribe_root(handle_doc_update)
        # Create async task to send updates to the client
        send_task = asyncio.create_task(
            self._send_updates_to_client(update_queue)
        )
        try:
            # Listen for updates from the client
            await self._receive_updates_from_client(doc, update_queue)
        except WebSocketDisconnect:
            LOGGER.debug("RTC: WebSocket disconnected")
        except Exception as e:
            LOGGER.warning(
                f"RTC: Exception in websocket loop for file {self.file_key}: {str(e)}"
            )
        finally:
            LOGGER.debug("RTC: Cleaning up resources")
            # Cleanup resources
            send_task.cancel()
            subscription.unsubscribe()
            await self.doc_manager.remove_client(self.file_key, update_queue)

    async def _send_initial_sync(
        self, doc: LoroDoc, export_mode: type[ExportMode]
    ) -> None:
        """Send initial document sync to client.

        Args:
            doc: LoroDoc instance
            export_mode: ExportMode from loro module
        """
        # Use shallow snapshot for fewer bytes
        LOGGER.debug("RTC: sending initial sync")
        init_sync_msg = doc.export(
            export_mode.ShallowSnapshot(frontiers=doc.state_frontiers)
        )
        await self.websocket.send_bytes(init_sync_msg)
        LOGGER.debug("RTC: initial sync sent")

    async def _send_updates_to_client(
        self, update_queue: asyncio.Queue[bytes]
    ) -> None:
        """Send updates from the document to the client.

        Args:
            update_queue: Queue of updates to send to client
        """
        try:
            while True:
                update = await update_queue.get()
                await self.websocket.send_bytes(update)
        except Exception as e:
            LOGGER.warning(
                f"RTC: Could not send loro update to client for file {self.file_key}: {str(e)}",
            )

    async def _receive_updates_from_client(
        self, doc: LoroDoc, update_queue: asyncio.Queue[bytes]
    ) -> None:
        """Receive and process updates from the client.

        Args:
            doc: LoroDoc instance
            update_queue: Queue used to avoid sending updates back to sender
        """
        while True:
            message = await self.websocket.receive_bytes()
            # Broadcast to other clients (but not back to sender)
            await self.doc_manager.broadcast_update(
                self.file_key, message, update_queue
            )
            # Check if the message is an awareness update
            if message.startswith(b"awareness:"):
                LOGGER.debug("RTC: received awareness update")
            else:
                # Apply the update to the LoroDoc
                doc.import_(message)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws/ws_rtc_handler.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/api/endpoints/ws/ws_session_connector.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from starlette.websockets import WebSocket
from marimo._server.api.endpoints.ws.ws_connection_validator import (
ConnectionParams,
)
from marimo._server.api.endpoints.ws_endpoint import WebSocketHandler
from marimo._server.session_manager import SessionManager
from marimo._session import Session
from starlette.websockets import WebSocketDisconnect
from marimo import _loggers
from marimo._messaging.types import NoopStream
from marimo._runtime.params import QueryParams
from marimo._server.codes import WebSocketCodes
from marimo._server.models.models import InstantiateNotebookRequest
from marimo._session.model import ConnectionState, SessionMode
LOGGER = _loggers.marimo_logger()
class ConnectionType(Enum):
    """Type of session connection established."""
    # Attached to an existing session in kiosk mode (UI interaction only).
    KIOSK = "kiosk"
    # Re-attached to a live session with the same session id.
    RECONNECT = "reconnect"
    # Joined an existing session for the same file (RTC edit mode).
    RTC_EXISTING = "rtc_existing"
    # Resumed a previously-disconnected session.
    RESUME = "resume"
    # A brand-new session was created.
    NEW = "new"
class SessionConnector:
    """Handles different session connection strategies.

    Given the connection parameters of a freshly-accepted websocket,
    decides (in priority order) whether to attach as kiosk, reconnect,
    join via RTC, resume, or create a new session, delegating the actual
    wiring to the WebSocketHandler.
    """
    def __init__(
        self,
        manager: SessionManager,
        handler: WebSocketHandler,
        params: ConnectionParams,
        websocket: WebSocket,
    ):
        self.manager = manager
        self.handler = handler
        self.params = params
        self.websocket = websocket
    def connect(self) -> tuple[Session, ConnectionType]:
        """Determine connection type and establish session connection.
        Connection strategies are tried in a fixed priority order
        (kiosk, reconnect, RTC, resume, new).
        Returns:
            Tuple of (Session, ConnectionType) indicating the session
            and how it was connected.
        Raises:
            WebSocketDisconnect: If the connection cannot be established.
        """
        # 1. Kiosk mode
        if self.params.kiosk:
            return self._connect_kiosk()
        # 2. Reconnect to existing session with same ID
        existing_by_id = self.manager.get_session(self.params.session_id)
        if existing_by_id is not None:
            return self._reconnect_session(existing_by_id)
        # 3. Connect to existing session (RTC mode)
        existing_by_file = self.manager.get_session_by_file_key(
            self.params.file_key
        )
        if (
            existing_by_file is not None
            and self.params.rtc_enabled
            and self.manager.mode == SessionMode.EDIT
        ):
            return self._connect_rtc_session(existing_by_file)
        # 4. Resume previous session
        resumable = self.manager.maybe_resume_session(
            self.params.session_id, self.params.file_key
        )
        if resumable is not None:
            return self._resume_session(resumable)
        # 5. Create new session
        return self._create_new_session()
    def _connect_kiosk(self) -> tuple[Session, ConnectionType]:
        """Connect to kiosk session.
        Raises:
            WebSocketDisconnect: If kiosk mode is not supported or session
            not found.
        """
        if self.manager.mode is not SessionMode.EDIT:
            LOGGER.debug("Kiosk mode is only supported in edit mode")
            raise WebSocketDisconnect(
                WebSocketCodes.FORBIDDEN, "MARIMO_KIOSK_NOT_ALLOWED"
            )
        # Try to find session by ID first
        kiosk_session = self.manager.get_session(self.params.session_id)
        if kiosk_session is None:
            LOGGER.debug(
                "Kiosk session not found for session id %s",
                self.params.session_id,
            )
            # Try to find by file key
            kiosk_session = self.manager.get_session_by_file_key(
                self.params.file_key
            )
        if kiosk_session is None:
            LOGGER.debug(
                "Kiosk session not found for file key %s",
                self.params.file_key,
            )
            raise WebSocketDisconnect(
                WebSocketCodes.NORMAL_CLOSE, "MARIMO_NO_SESSION"
            )
        LOGGER.debug("Connecting to kiosk session")
        self.handler._connect_kiosk(kiosk_session)
        return kiosk_session, ConnectionType.KIOSK
    def _reconnect_session(
        self, session: Session
    ) -> tuple[Session, ConnectionType]:
        """Reconnect to existing session.
        The session already exists, but it was disconnected. This can happen
        in local development when the client goes to sleep and wakes later.
        """
        LOGGER.debug("Reconnecting session %s", self.params.session_id)
        # In case there is a lingering connection, close it
        session.disconnect_main_consumer()
        # replay=False: the frontend already has state for this session id.
        self.handler._reconnect_session(session, replay=False)
        return session, ConnectionType.RECONNECT
    def _connect_rtc_session(
        self, session: Session
    ) -> tuple[Session, ConnectionType]:
        """Connect to RTC-enabled session."""
        LOGGER.debug(
            "Connecting to existing session for file %s", self.params.file_key
        )
        self.handler._connect_to_existing_session(session)
        return session, ConnectionType.RTC_EXISTING
    def _resume_session(
        self, session: Session
    ) -> tuple[Session, ConnectionType]:
        """Resume a previous session."""
        LOGGER.debug("Resuming session %s", self.params.session_id)
        # replay=True: the frontend is new and needs the session replayed.
        self.handler._reconnect_session(session, replay=True)
        return session, ConnectionType.RESUME
    @property
    def _is_run_mode(self) -> bool:
        """Check if we're in run mode (read-only app mode)."""
        return self.manager.mode == SessionMode.RUN
    def _create_new_session(self) -> tuple[Session, ConnectionType]:
        """Create a new session.
        Grabs query params from the websocket and creates a new session
        with the session manager.
        """
        # Note: if we resume a session, we don't pick up the new query
        # params, and instead use the query params from when the
        # session was created.
        query_params = self._extract_query_params()
        new_session = self.manager.create_session(
            query_params=query_params.to_dict(),
            session_id=self.params.session_id,
            session_consumer=self.handler,
            file_key=self.params.file_key,
            auto_instantiate=self.params.auto_instantiate,
        )
        self._notify_kernel_ready(new_session)
        # In run mode, auto-instantiate server-side (frontend won't call it)
        if self._is_run_mode:
            self._auto_instantiate(new_session)
        return new_session, ConnectionType.NEW
    def _extract_query_params(self) -> QueryParams:
        """Extract query params from the websocket, filtering ignored keys."""
        query_params = QueryParams({}, NoopStream())
        for key, value in self.websocket.query_params.multi_items():
            if key not in QueryParams.IGNORED_KEYS:
                query_params.append(key, value)
        return query_params
    def _notify_kernel_ready(self, session: Session) -> None:
        """Send kernel-ready notification to the frontend."""
        # Status transitions CONNECTING -> OPEN around the kernel-ready
        # write; the replay happens only once the connection is OPEN.
        self.handler.status = ConnectionState.CONNECTING
        self.handler._write_kernel_ready(
            session,
            resumed=False,
            ui_values={},
            last_executed_code={},
            last_execution_time={},
            kiosk=False,
            auto_instantiated=self._is_run_mode,
        )
        self.handler.status = ConnectionState.OPEN
        self.handler._replay_previous_session(session)
    def _auto_instantiate(self, session: Session) -> None:
        """Auto-instantiate the session (used in run mode)."""
        from marimo._runtime.commands import HTTPRequest
        session.instantiate(
            InstantiateNotebookRequest(
                object_ids=[],
                values=[],
                auto_run=True,
            ),
            http_request=HTTPRequest.from_request(self.websocket),
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws/ws_session_connector.py",
"license": "Apache License 2.0",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/api/endpoints/ws_endpoint.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import sys
from typing import Any, Callable, Optional
from starlette.websockets import WebSocket, WebSocketState
from marimo import _loggers
from marimo._cli.upgrade import check_for_updates
from marimo._config.cli_state import MarimoCLIState
from marimo._config.settings import GLOBAL_SETTINGS
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.notification import (
AlertNotification,
BannerNotification,
KernelStartupErrorNotification,
NotificationMessage,
ReconnectedNotification,
)
from marimo._messaging.serde import serialize_kernel_message
from marimo._messaging.types import KernelMessage
from marimo._plugins.core.web_component import JSONType
from marimo._server.api.deps import AppState
from marimo._server.api.endpoints.ws.ws_connection_validator import (
ConnectionParams,
WebSocketConnectionValidator,
)
from marimo._server.api.endpoints.ws.ws_formatter import (
serialize_notification_for_websocket,
)
from marimo._server.api.endpoints.ws.ws_kernel_ready import (
build_kernel_ready,
is_rtc_available,
)
from marimo._server.api.endpoints.ws.ws_message_loop import (
WebSocketMessageLoop,
)
from marimo._server.api.endpoints.ws.ws_rtc_handler import RTCWebSocketHandler
from marimo._server.api.endpoints.ws.ws_session_connector import (
SessionConnector,
)
from marimo._server.codes import WebSocketCodes
from marimo._server.router import APIRouter
from marimo._server.rtc.doc import LoroDocManager
from marimo._server.session_manager import SessionManager
from marimo._session import Session
from marimo._session.consumer import SessionConsumer
from marimo._session.events import SessionEventBus
from marimo._session.managers.ipc import KernelStartupError
from marimo._session.model import (
ConnectionState,
SessionMode,
)
from marimo._types.ids import CellId_t, ConsumerId
LOGGER = _loggers.marimo_logger()
# Loro (the RTC backend) is only usable on Python 3.11+.
LORO_ALLOWED = sys.version_info >= (3, 11)
router = APIRouter()
# Module-level LoroDoc manager shared by the /ws and /ws_sync endpoints so
# all clients of a file see the same document.
DOC_MANAGER = LoroDocManager()
@router.websocket("/ws")
async def websocket_endpoint(
    websocket: WebSocket,
) -> None:
    """Main WebSocket endpoint for marimo sessions.
    responses:
        200:
            description: Websocket endpoint
    """
    state = AppState(websocket)
    validator = WebSocketConnectionValidator(websocket, state)
    # Authentication first; the validator closes the socket on failure.
    if not await validator.validate_auth():
        return
    # Then connection parameters; the validator also closes on failure.
    params = await validator.extract_connection_params()
    if params is None:
        return
    # Hand off to the session handler for the rest of the lifecycle.
    handler = WebSocketHandler(
        websocket=websocket,
        manager=state.session_manager,
        params=params,
        mode=state.mode,
    )
    await handler.start()
# WebSocket endpoint for LoroDoc synchronization
@router.websocket("/ws_sync")
async def ws_sync(
    websocket: WebSocket,
) -> None:
    """WebSocket endpoint for LoroDoc synchronization (RTC).

    Validates auth and Loro availability, resolves the target file, and
    requires an existing session for that file before handing the socket
    to the RTC handler.
    """
    app_state = AppState(websocket)
    validator = WebSocketConnectionValidator(websocket, app_state)
    # Validate authentication before proceeding
    if not await validator.validate_auth():
        return
    # Check if Loro is available (same check as ws_kernel_ready's helper,
    # kept consistent by calling it instead of duplicating the condition)
    if not is_rtc_available():
        if not LORO_ALLOWED:
            LOGGER.warning(
                "RTC: Python version is not supported (requires 3.11+)"
            )
        else:
            LOGGER.warning("RTC: Loro is not installed, closing websocket")
        await websocket.close(
            WebSocketCodes.NORMAL_CLOSE, "MARIMO_LORO_NOT_INSTALLED"
        )
        return
    # Extract file key; the validator closes the socket when it is missing
    file_key = await validator.extract_file_key_only()
    if file_key is None:
        return
    # Verify a session already exists for this file
    manager = app_state.session_manager
    session = manager.get_session_by_file_key(file_key)
    if session is None:
        LOGGER.warning(
            f"RTC: Closing websocket - no session found for file key {file_key}"
        )
        await websocket.close(WebSocketCodes.FORBIDDEN, "MARIMO_NOT_ALLOWED")
        return
    # Handle RTC connection
    handler = RTCWebSocketHandler(websocket, file_key, DOC_MANAGER)
    await handler.handle()
class WebSocketHandler(SessionConsumer):
"""WebSocket that sessions use to send messages to frontends.
Each new socket gets a unique session. At most one session can exist when
in edit mode (unless RTC is enabled).
"""
def __init__(
self,
*,
websocket: WebSocket,
manager: SessionManager,
params: ConnectionParams,
mode: SessionMode,
):
self.websocket = websocket
self.manager = manager
self.params = params
self.mode = mode
self.status: ConnectionState
self.cancel_close_handle: Optional[asyncio.TimerHandle] = None
# Messages from the kernel are put in this queue
# to be sent to the frontend
self.message_queue: asyncio.Queue[KernelMessage]
self.ws_future: Optional[asyncio.Task[None]] = None
self._consumer_id = ConsumerId(params.session_id)
@property
def consumer_id(self) -> ConsumerId:
return self._consumer_id
def notify(self, notification: KernelMessage) -> None:
self.message_queue.put_nowait(notification)
def _serialize_and_notify(self, notification: NotificationMessage) -> None:
self.notify(serialize_kernel_message(notification))
def _write_kernel_ready_from_session_view(
self, session: Session, kiosk: bool
) -> None:
"""Write kernel ready message using current session view state."""
self._write_kernel_ready(
session=session,
resumed=True,
ui_values=session.session_view.ui_values,
last_executed_code=session.session_view.last_executed_code,
last_execution_time=session.session_view.last_execution_time,
kiosk=kiosk,
auto_instantiated=False,
)
def _write_kernel_ready(
self,
session: Session,
resumed: bool,
ui_values: dict[str, JSONType],
last_executed_code: dict[CellId_t, str],
last_execution_time: dict[CellId_t, float],
kiosk: bool,
auto_instantiated: bool,
) -> None:
"""Communicates to the client that the kernel is ready.
Sends cell code and other metadata to client.
Args:
session: Current session
resumed: Whether this is a resumed session
ui_values: UI element values
last_executed_code: Last executed code for each cell
last_execution_time: Last execution time for each cell
kiosk: Whether this is kiosk mode
auto_instantiated: Whether the kernel has already been instantiated
server-side (run mode). If True, the frontend does not need
to instantiate the app.
"""
# Only send execution data if sending code to frontend
should_send = self.manager.should_send_code_to_frontend()
# RTC is only enabled if configured AND Loro is available
effective_rtc_enabled = self.params.rtc_enabled and is_rtc_available()
# Build kernel ready message
kernel_ready_msg = build_kernel_ready(
session=session,
manager=self.manager,
resumed=resumed,
ui_values=ui_values,
last_executed_code=last_executed_code if should_send else {},
last_execution_time=last_execution_time if should_send else {},
kiosk=kiosk,
rtc_enabled=effective_rtc_enabled,
file_key=self.params.file_key,
mode=self.mode,
doc_manager=DOC_MANAGER,
auto_instantiated=auto_instantiated,
)
self._serialize_and_notify(kernel_ready_msg)
def _reconnect_session(self, session: Session, replay: bool) -> None:
"""Reconnect to an existing session (kernel).
A websocket can be closed when a user's computer goes to sleep,
spurious network issues, etc.
"""
# Cancel previous close handle
if self.cancel_close_handle is not None:
self.cancel_close_handle.cancel()
self.status = ConnectionState.OPEN
session.connect_consumer(self, main=True)
# Write reconnected message
self._serialize_and_notify(ReconnectedNotification())
# If not replaying, just send a toast
if not replay:
self._serialize_and_notify(
AlertNotification(
title="Reconnected",
description="You have reconnected to an existing session.",
)
)
return
self._write_kernel_ready_from_session_view(session, self.params.kiosk)
self._serialize_and_notify(
BannerNotification(
title="Reconnected",
description="You have reconnected to an existing session.",
action="restart",
)
)
self._replay_previous_session(session)
def _connect_kiosk(self, session: Session) -> None:
    """Connect to a kiosk session.

    A kiosk session is a write-ish session that is connected to a
    frontend. It can set UI elements and interact with the sidebar,
    but cannot change or execute code. This is not a permission limitation,
    but rather we don't have full multi-player support yet.

    Kiosk mode is useful when the user is using an editor (VSCode or VIM)
    that does not easily support our reactive frontend or our panels.
    The user uses VSCode or VIM to write code, and the
    marimo kiosk/frontend to visualize the output.
    """
    # Attach as a secondary (non-main) consumer; the editor keeps the
    # main connection.
    session.connect_consumer(self, main=False)
    # CONNECTING while the kernel-ready snapshot is sent, then OPEN.
    self.status = ConnectionState.CONNECTING
    self._write_kernel_ready_from_session_view(session, kiosk=True)
    self.status = ConnectionState.OPEN
    self._replay_previous_session(session)
def _replay_previous_session(self, session: Session) -> None:
    """Replay the previous session view.

    Re-sends every notification recorded in the session's view so a
    (re)connecting frontend can rebuild its state.

    Args:
        session: The session whose recorded notifications are replayed.
    """
    notifications = session.session_view.notifications
    if len(notifications) == 0:
        LOGGER.debug("No notifications to replay")
        return
    # Lazy %-style args so the message is only rendered when DEBUG logging
    # is enabled (the previous f-string was always evaluated), consistent
    # with every other log call in this handler.
    LOGGER.debug("Replaying %d notifications", len(notifications))
    for notif in notifications:
        LOGGER.debug("Replaying notification %s", notif)
        self._serialize_and_notify(notif)
def _on_disconnect(
    self,
    e: Exception,
    cleanup_fn: Callable[[], Any],
) -> None:
    """Handle a websocket disconnect, optionally scheduling a TTL close.

    Args:
        e: The exception that terminated the websocket.
        cleanup_fn: Callback invoked right before the session is closed
            (or immediately, when no TTL close applies).
    """
    LOGGER.debug(
        "Websocket disconnected for session %s with exception %s, type %s",
        self.params.session_id,
        str(e),
        type(e),
    )
    # Change the status
    self.status = ConnectionState.CLOSED
    # Disconnect the consumer
    session = self.manager.get_session(self.params.session_id)
    if session:
        session.disconnect_consumer(self)
    # When the websocket is closed, we wait session.ttl_seconds before
    # closing the session. This prevents the session from being closed
    # during intermittent network issues.
    # In RUN mode, this always applies (sessions always have a default
    # TTL even if the manager's ttl_seconds is None).
    # In EDIT mode, this only applies when --session-ttl is explicitly set.
    should_ttl_close = (
        self.manager.ttl_seconds is not None
        or self.mode == SessionMode.RUN
    )
    if should_ttl_close:

        def _close() -> None:
            # Only close if no consumer re-opened this handler meanwhile.
            if self.status != ConnectionState.OPEN:
                # Guard: if another consumer has taken over, the session
                # is alive.
                live = self.manager.get_session(self.params.session_id)
                if (
                    live is not None
                    and live.connection_state() == ConnectionState.OPEN
                ):
                    LOGGER.debug(
                        "Session %s has active consumer, skipping TTL close",
                        self.params.session_id,
                    )
                    return
                LOGGER.debug(
                    "Closing session %s (TTL EXPIRED)",
                    self.params.session_id,
                )
                # wait until TTL is expired before calling the cleanup
                # function
                cleanup_fn()
                self.manager.close_session(self.params.session_id)

        if session is not None:
            # Schedule the close after the session's TTL; the handle lets
            # _reconnect_session cancel it if the client comes back.
            cancellation_handle = asyncio.get_event_loop().call_later(
                session.ttl_seconds, _close
            )
            self.cancel_close_handle = cancellation_handle
        else:
            # No live session to wait on: close immediately.
            _close()
    else:
        # No TTL configured in edit mode: run cleanup right away without
        # closing the session.
        cleanup_fn()
async def start(self) -> None:
    """Start the WebSocket handler.

    Accepts the connection, establishes a session, and starts the
    message loop.
    """
    # Accept the websocket connection
    await self.websocket.accept()
    # Create a new queue for this session
    self.message_queue = asyncio.Queue()
    LOGGER.debug(
        "Websocket open request for session with id %s",
        self.params.session_id,
    )
    LOGGER.debug("Existing sessions: %s", self.manager.sessions)
    # Check if connection is allowed
    if not self._can_connect():
        await self._close_already_connected()
        return
    # Use SessionConnector to establish session connection
    connector = SessionConnector(
        manager=self.manager,
        handler=self,
        params=self.params,
        websocket=self.websocket,
    )
    try:
        session, connection_type = connector.connect()
    except KernelStartupError as e:
        LOGGER.error("Kernel startup failed: %s", e)
        # Surface the failure to the client, then close the socket.
        await self._close_kernel_startup_error(str(e))
        return
    LOGGER.debug(
        "Connected to session %s with type %s",
        session.initialization_id,
        connection_type,
    )
    # Start message loops
    message_loop = WebSocketMessageLoop(
        websocket=self.websocket,
        message_queue=self.message_queue,
        kiosk=self.params.kiosk,
        on_disconnect=self._on_disconnect,
        on_check_status_update=self._check_status_update,
    )
    try:
        self.ws_future = asyncio.create_task(message_loop.start())
        await self.ws_future
    except asyncio.CancelledError:
        # Expected when on_detach() cancels the loop; not an error.
        LOGGER.debug("Websocket terminated with CancelledError")
def _can_connect(self) -> bool:
    """Decide whether this websocket may attach.

    In edit mode without RTC, at most one non-kiosk frontend may be
    connected to a given file at a time; kiosk and RTC connections are
    always allowed.
    """
    # Short-circuits left-to-right: any_clients_connected is only queried
    # when the server runs in edit mode.
    refuse = (
        self.manager.mode == SessionMode.EDIT
        and self.manager.any_clients_connected(self.params.file_key)
        and not (self.params.kiosk or self.params.rtc_enabled)
    )
    if not refuse:
        return True
    LOGGER.debug(
        "Refusing connection; a frontend is already connected."
    )
    return False
async def _close_already_connected(self) -> None:
    """Reject this connection: another frontend already holds the file."""
    ws = self.websocket
    # Nothing to do if the socket never reached the CONNECTED state.
    if ws.application_state is not WebSocketState.CONNECTED:
        return
    await ws.close(
        WebSocketCodes.ALREADY_CONNECTED,
        "MARIMO_ALREADY_CONNECTED",
    )
async def _close_kernel_startup_error(self, error_message: str) -> None:
    """Deliver the full startup error to the client, then close.

    Args:
        error_message: Human-readable description of the kernel failure.
    """
    if self.websocket.application_state is not WebSocketState.CONNECTED:
        return
    # Ship the complete error payload first so the frontend can render it;
    # close reasons are length-limited and unsuitable for long messages.
    payload = serialize_notification_for_websocket(
        KernelStartupErrorNotification(error=error_message)
    )
    await self.websocket.send_text(payload)
    # Then close with simple reason
    await self.websocket.close(
        WebSocketCodes.UNEXPECTED_ERROR,
        "MARIMO_KERNEL_STARTUP_ERROR",
    )
def on_attach(self, session: Session, event_bus: SessionEventBus) -> None:
    """No-op hook: this consumer needs no setup when attached."""
    # Both arguments are intentionally unused.
    del session, event_bus
    return None
def on_detach(self) -> None:
    """Tear down the websocket when this consumer is detached."""
    # If the websocket is open, send a close message
    is_connected = (
        self.status == ConnectionState.OPEN
        or self.status == ConnectionState.CONNECTING
    ) and self.websocket.application_state is WebSocketState.CONNECTED
    if is_connected:
        # Fire-and-forget: we are detaching, so nothing awaits this close.
        asyncio.create_task(
            self.websocket.close(
                WebSocketCodes.NORMAL_CLOSE, "MARIMO_SHUTDOWN"
            )
        )
    if self.ws_future:
        # Stop the message loop task started in start().
        self.ws_future.cancel()
def connection_state(self) -> ConnectionState:
    """Report the consumer's current connection status."""
    current_state = self.status
    return current_state
def _check_status_update(self) -> None:
    """Check for a newer marimo release and alert the user at most once."""
    # Only check for updates if we're in edit mode
    if (
        not GLOBAL_SETTINGS.CHECK_STATUS_UPDATE
        or self.mode != SessionMode.EDIT
    ):
        return

    def on_update(current_version: str, state: MarimoCLIState) -> None:
        # Let's only toast once per marimo server
        # so we can just store this in memory.
        # We still want to check for updates (which are debounced 24 hours)
        # but don't keep toasting.
        global has_toasted
        if has_toasted:
            return
        has_toasted = True
        title = (
            f"Update available {current_version} → {state.latest_version}"
        )
        release_url = "https://github.com/marimo-team/marimo/releases"
        # Build description with notices if present
        description = f"Check out the <a class='underline' target='_blank' href='{release_url}'>latest release on GitHub.</a>"  # noqa: E501
        if state.notices:
            notices_text = (
                "<br><br><strong>Recent updates:</strong><br>"
                + "<br>".join(f"• {notice}" for notice in state.notices)
            )
            description += notices_text
        self._serialize_and_notify(
            AlertNotification(title=title, description=description)
        )

    check_for_updates(on_update)
def _connect_to_existing_session(self, session: Session) -> None:
    """Connect to an existing session and replay all messages."""
    self.status = ConnectionState.CONNECTING
    # Attach as a secondary (non-main) consumer of the session.
    session.connect_consumer(self, main=False)
    # Write kernel ready with current state
    self._write_kernel_ready_from_session_view(session, kiosk=False)
    self.status = ConnectionState.OPEN
    # Replay all operations
    self._replay_previous_session(session)
# Module-level flag: ensure the "update available" alert is shown at most
# once per server process (read/written via `global` in _check_status_update).
has_toasted = False
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/ws_endpoint.py",
"license": "Apache License 2.0",
"lines": 468,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_messaging/notification_utils.py | # Copyright 2026 Marimo. All rights reserved.
"""Notification utilities for kernel messages.
CellNotificationUtils for cell-related broadcasts.
"""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Optional
from uuid import uuid4
from marimo import _loggers as loggers
from marimo._messaging.cell_output import CellOutput
from marimo._messaging.errors import (
MarimoInternalError,
is_sensitive_error,
)
from marimo._messaging.serde import serialize_kernel_message
from marimo._messaging.streams import output_max_bytes
from marimo._runtime.context import get_context
from marimo._runtime.context.types import ContextNotInitializedError
from marimo._runtime.context.utils import get_mode
if TYPE_CHECKING:
from collections.abc import Sequence
from marimo._ast.cell import RuntimeStateType
from marimo._ast.toplevel import TopLevelHints, TopLevelStatus
from marimo._messaging.cell_output import CellChannel
from marimo._messaging.errors import Error
from marimo._messaging.mimetypes import KnownMimeType
from marimo._messaging.notification import NotificationMessage
from marimo._messaging.types import Stream
from marimo._types.ids import CellId_t
LOGGER = loggers.marimo_logger()
def broadcast_notification(
    notification: NotificationMessage, stream: Optional[Stream] = None
) -> None:
    """Serialize ``notification`` and write it to ``stream``.

    Falls back to the current runtime context's stream when no stream is
    given. The message is silently dropped when no context is initialized;
    serialization/write failures are logged and swallowed.
    """
    target = stream
    if target is None:
        try:
            target = get_context().stream
        except ContextNotInitializedError:
            LOGGER.debug("No context initialized.")
            return
    try:
        payload = serialize_kernel_message(notification)
        target.write(payload)
    except Exception as e:
        LOGGER.exception(
            "Error serializing notification %s: %s",
            notification.__class__.__name__,
            e,
        )
        return
class CellNotificationUtils:
    """Utilities for broadcasting cell notifications."""

    @staticmethod
    def maybe_truncate_output(
        mimetype: KnownMimeType, data: str
    ) -> tuple[KnownMimeType, str]:
        """Replace oversized output with a rendered warning callout.

        Returns the original (mimetype, data) pair unless the data's size
        exceeds the configured output_max_bytes limit, in which case the
        warning callout's (mimetype, data) is returned instead.
        """
        if (size := sys.getsizeof(data)) > output_max_bytes():
            # Imported lazily to avoid pulling in output/plugin modules on
            # the common (non-truncated) path.
            from marimo._output.md import md
            from marimo._plugins.stateless.callout import callout

            text = f"""
<span class="text-error">**Your output is too large**</span>
Your output is too large for marimo to show. It has a size
of {size} bytes. Did you output this object by accident?
If this limitation is a problem for you, you can configure
the max output size by adding (eg)
```
[tool.marimo.runtime]
output_max_bytes = 10_000_000
```
to your pyproject.toml, or with the environment variable
`MARIMO_OUTPUT_MAX_BYTES`:
```
export MARIMO_OUTPUT_MAX_BYTES=10_000_000
```
Increasing the max output size may cause performance issues.
If you run into problems, please reach out
to us on [Discord](https://marimo.io/discord?ref=app) or
[GitHub](https://github.com/marimo-team/marimo/issues).
"""
            warning = callout(
                md(text),
                kind="warn",
            )
            mimetype, data = warning._mime_()
        return mimetype, data

    @staticmethod
    def broadcast_output(
        channel: CellChannel,
        mimetype: KnownMimeType,
        data: str,
        cell_id: Optional[CellId_t],
        status: Optional[RuntimeStateType],
        stream: Stream | None = None,
    ) -> None:
        """Broadcast a cell's (possibly truncated) output.

        When ``cell_id`` is None, the id is taken from the current
        context's stream.
        """
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        mimetype, data = CellNotificationUtils.maybe_truncate_output(
            mimetype, data
        )
        cell_id = (
            cell_id if cell_id is not None else get_context().stream.cell_id
        )
        assert cell_id is not None
        broadcast_notification(
            CellNotification(
                cell_id=cell_id,
                output=CellOutput(
                    channel=channel,
                    mimetype=mimetype,
                    data=data,
                ),
                status=status,
            ),
            stream=stream,
        )

    @staticmethod
    def broadcast_empty_output(
        cell_id: Optional[CellId_t],
        status: Optional[RuntimeStateType],
        stream: Stream | None = None,
    ) -> None:
        """Broadcast an empty output for a cell (clears its output area)."""
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        cell_id = (
            cell_id if cell_id is not None else get_context().stream.cell_id
        )
        assert cell_id is not None
        broadcast_notification(
            CellNotification(
                cell_id=cell_id,
                output=CellOutput.empty(),
                status=status,
            ),
            stream=stream,
        )

    @staticmethod
    def broadcast_console_output(
        channel: CellChannel,
        mimetype: KnownMimeType,
        data: str,
        cell_id: Optional[CellId_t],
        status: Optional[RuntimeStateType],
        stream: Stream | None = None,
    ) -> None:
        """Broadcast console (stdout/stderr-style) output for a cell."""
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        mimetype, data = CellNotificationUtils.maybe_truncate_output(
            mimetype, data
        )
        cell_id = (
            cell_id if cell_id is not None else get_context().stream.cell_id
        )
        assert cell_id is not None
        broadcast_notification(
            CellNotification(
                cell_id=cell_id,
                console=CellOutput(
                    channel=channel,
                    mimetype=mimetype,
                    data=data,
                ),
                status=status,
            ),
            stream=stream,
        )

    @staticmethod
    def broadcast_status(
        cell_id: CellId_t,
        status: RuntimeStateType,
        stream: Stream | None = None,
    ) -> None:
        """Broadcast a cell's runtime status change."""
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        if status != "running":
            broadcast_notification(
                CellNotification(cell_id=cell_id, status=status), stream
            )
        else:
            # Console gets cleared on "running"
            broadcast_notification(
                CellNotification(cell_id=cell_id, console=[], status=status),
                stream=stream,
            )

    @staticmethod
    def broadcast_error(
        data: Sequence[Error],
        clear_console: bool,
        cell_id: CellId_t,
    ) -> None:
        """Broadcast errors for a cell, hiding sensitive details in run mode.

        Args:
            data: Errors to report.
            clear_console: Whether the cell's console should be cleared.
            cell_id: The cell the errors belong to.
        """
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        console: Optional[list[CellOutput]] = [] if clear_console else None
        # In run mode, we don't want to broadcast the error. Instead we want to print the error to the console
        # and then broadcast a new error such that the data is hidden.
        safe_errors: list[Error] = []
        if get_mode() == "run":
            for error in data:
                # Skip non-sensitive errors
                if not is_sensitive_error(error):
                    safe_errors.append(error)
                    continue
                # Log the real error server-side with a correlation id,
                # and send the client only an opaque internal error.
                error_id = uuid4()
                LOGGER.error(
                    f"(error_id={error_id}) {error.describe()}",
                    extra={"error_id": error_id},
                )
                safe_errors.append(MarimoInternalError(error_id=str(error_id)))
        else:
            safe_errors = list(data)
        broadcast_notification(
            CellNotification(
                cell_id=cell_id,
                output=CellOutput.errors(safe_errors),
                console=console,
                status=None,
            )
        )

    @staticmethod
    def broadcast_stale(
        cell_id: CellId_t, stale: bool, stream: Stream | None = None
    ) -> None:
        """Broadcast whether a cell's inputs have gone stale."""
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        broadcast_notification(
            CellNotification(cell_id=cell_id, stale_inputs=stale), stream
        )

    @staticmethod
    def broadcast_serialization(
        cell_id: CellId_t,
        serialization: TopLevelStatus,
        stream: Stream | None = None,
    ) -> None:
        """Broadcast a cell's top-level serialization hint (stringified)."""
        # Import here to avoid circular dependency
        from marimo._messaging.notification import CellNotification

        status: Optional[TopLevelHints] = serialization.hint
        broadcast_notification(
            CellNotification(cell_id=cell_id, serialization=str(status)),
            stream,
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_messaging/notification_utils.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_messaging/serde.py | # Copyright 2026 Marimo. All rights reserved.
"""Serialization and deserialization utilities for kernel messages."""
from __future__ import annotations
from typing import TYPE_CHECKING
import msgspec
from marimo._messaging.msgspec_encoder import encode_json_bytes
from marimo._messaging.types import KernelMessage
if TYPE_CHECKING:
from marimo._messaging.notification import NotificationMessage
def serialize_kernel_message(message: NotificationMessage) -> KernelMessage:
    """Encode ``message`` as JSON bytes wrapped in a KernelMessage."""
    encoded = encode_json_bytes(message)
    return KernelMessage(encoded)
def deserialize_kernel_message(message: KernelMessage) -> NotificationMessage:
    """Decode a KernelMessage back into a NotificationMessage."""
    # Import here to avoid circular dependency
    from marimo._messaging.notification import NotificationMessage

    decoded = msgspec.json.decode(
        message, strict=True, type=NotificationMessage
    )
    return decoded  # type: ignore[no-any-return]
class _NotificationName(msgspec.Struct):
    # Minimal struct used to peek at just the "op" field of a serialized
    # message without decoding the full NotificationMessage union.
    op: str
def deserialize_kernel_notification_name(message: KernelMessage) -> str:
    """Extract only the notification name ("op") from a serialized message.

    Decoding into the slim _NotificationName struct is cheaper than
    decoding the full NotificationMessage union.
    """
    decoded = msgspec.json.decode(message, strict=True, type=_NotificationName)
    return decoded.op
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_messaging/serde.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ai/tools/test_utils.py | # Copyright 2026 Marimo. All rights reserved.
"""Shared fixtures and mocks for AI tools tests."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
@dataclass
class MockSessionView:
    """Lightweight stand-in for a session view in tests."""

    cell_notifications: dict | None = None
    last_execution_time: dict | None = None
    variable_values: dict | None = None
    data_connectors: Any = None

    def __post_init__(self) -> None:
        # Replace unset mapping fields with fresh empty dicts so every
        # instance gets its own mutable container.
        for attr in (
            "cell_notifications",
            "last_execution_time",
            "variable_values",
        ):
            if getattr(self, attr) is None:
                setattr(self, attr, {})
@dataclass
class MockSession:
    """Lightweight stand-in for a session in tests."""

    _session_view: MockSessionView

    @property
    def session_view(self) -> MockSessionView:
        # Expose the wrapped view read-only, mirroring Session's API.
        view = self._session_view
        return view
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/test_utils.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_internal/templates.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for rendering marimo notebooks as HTML."""
from marimo._server.templates.api import (
render_notebook,
render_static_notebook,
)
__all__ = [
"render_notebook",
"render_static_notebook",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/templates.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/templates/api.py | # Copyright 2026 Marimo. All rights reserved.
"""Public API for rendering marimo notebooks as HTML.
This module provides a clean, public API for rendering marimo notebooks
to HTML without needing to use internal template functions directly.
"""
from __future__ import annotations
from pathlib import Path
from typing import Any, Literal, Optional, cast
from marimo._ast.app_config import _AppConfig
from marimo._config.config import MarimoConfig, PartialMarimoConfig
from marimo._convert.converters import MarimoConvert
from marimo._schemas.notebook import NotebookV1
from marimo._schemas.session import NotebookSessionV1
from marimo._server.templates.templates import (
_custom_css_block,
notebook_page_template,
static_notebook_template,
)
from marimo._server.tokens import SkewProtectionToken
from marimo._session.model import SessionMode
from marimo._utils.code import hash_code
def _get_html_template() -> str:
    """Read and return the bundled frontend index.html."""
    # Imported lazily: only needed when a template is actually rendered.
    from marimo._utils.paths import marimo_package_path

    template_path = Path(marimo_package_path()) / "_static" / "index.html"
    return template_path.read_text(encoding="utf-8")
def _parse_config(config: Optional[dict[str, Any]]) -> MarimoConfig:
    """Coerce an optional config dict into a MarimoConfig."""
    # cast() is a no-op at runtime; None becomes an empty config.
    return cast(MarimoConfig, {} if config is None else config)
def _parse_partial_config(
    config: Optional[dict[str, Any]],
) -> PartialMarimoConfig:
    """Coerce an optional config dict into a PartialMarimoConfig."""
    # cast() is a no-op at runtime; None becomes an empty config.
    return cast(PartialMarimoConfig, {} if config is None else config)
def _convert_code_to_notebook(
    code: str,
) -> tuple[NotebookV1, _AppConfig]:
    """Parse ``code`` into a notebook snapshot plus its app config."""
    try:
        ir = MarimoConvert.from_py(code)
    except Exception:
        # The source is not a marimo notebook: convert it as a plain
        # Python script instead.
        ir = MarimoConvert.from_non_marimo_python_script(code, aggressive=True)
    notebook = cast(NotebookV1, ir.to_notebook_v1())
    app_config = _AppConfig.from_untrusted_dict(ir.ir.app.options)
    return notebook, app_config
def render_notebook(
    *,
    code: str,
    mode: Literal["edit", "read"],
    filename: str | None = None,
    config: dict[str, Any] | None = None,
    config_overrides: dict[str, Any] | None = None,
    app_config: dict[str, Any] | None = None,
    runtime_config: list[dict[str, Any]] | None = None,
    server_token: str | None = None,
    session_snapshot: NotebookSessionV1 | None = None,
    notebook_snapshot: NotebookV1 | None = None,
    base_url: str = "",
    asset_url: str | None = None,
    custom_css: str | None = None,
) -> str:
    """Render a marimo notebook to HTML.

    Handles all the conversions internally.

    Args:
        code: Raw Python code as a string.
        filename: Display filename.
        mode: Rendering mode - "edit" for editable, "read" for read-only.
        config: User configuration overrides as a dict.
        config_overrides: Notebook-specific configuration overrides as a dict.
        app_config: Notebook-specific configuration as a dict.
        runtime_config: Remote kernel configuration, e.g.,
            [{"url": "wss://...", "authToken": "..."}]
        server_token: Skew protection token.
        session_snapshot: Pre-computed session state.
        notebook_snapshot: Notebook structure/metadata.
        base_url: Base URL for the application (for <base> tag).
        asset_url: CDN URL for static assets (supports {version} placeholder).
        custom_css: CSS string to inject directly into the HTML.

    Returns:
        HTML string of the rendered notebook.

    Example:
        >>> html = render_notebook(
        ...     code="import marimo as mo\\nmo.md('Hello')",
        ...     filename="notebook.py",
        ...     mode="edit",
        ...     runtime_config=[{"url": "wss://kernel.example.com"}],
        ... )
    """
    # Convert code to notebook format if not already provided
    if notebook_snapshot is None or app_config is None:
        converted_notebook, converted_app_config = _convert_code_to_notebook(
            code
        )
        # Only fill in the pieces the caller did not supply.
        if notebook_snapshot is None:
            notebook_snapshot = converted_notebook
        if app_config is None:
            app_config = converted_app_config.asdict()
    # Parse configs
    user_config = _parse_config(config)
    config_overrides_obj = _parse_partial_config(config_overrides)
    app_config_obj = _AppConfig.from_untrusted_dict(app_config)
    # Get HTML template
    html = _get_html_template()
    result = notebook_page_template(
        html=html,
        base_url=base_url,
        # An absent server_token becomes an empty token.
        user_config=user_config,
        config_overrides=config_overrides_obj,
        server_token=SkewProtectionToken(server_token or ""),
        app_config=app_config_obj,
        filename=filename,
        mode=_parse_session_mode(mode),
        session_snapshot=session_snapshot,
        notebook_snapshot=notebook_snapshot,
        runtime_config=runtime_config,
        asset_url=asset_url,
    )
    # Add custom CSS if provided
    if custom_css:
        result = _add_custom_css(result, custom_css)
    return result
def render_static_notebook(
    *,
    code: str,
    filename: str | None = None,
    include_code: bool = True,
    session_snapshot: NotebookSessionV1,
    notebook_snapshot: NotebookV1 | None = None,
    files: dict[str, str] | None = None,
    config: dict[str, Any] | None = None,
    app_config: dict[str, Any] | None = None,
    asset_url: str | None = None,
) -> str:
    """Render a static (pre-computed) marimo notebook to HTML.

    Creates a fully self-contained HTML file with pre-computed outputs.
    Ideal for sharing read-only versions of notebooks.

    Args:
        code: Raw Python code as a string.
        filename: Display filename.
        include_code: Whether to include source code in the export.
        session_snapshot: Pre-computed outputs for all cells (required).
        notebook_snapshot: Notebook structure/metadata.
        files: Files to embed (key=path, value=base64 content).
        config: User configuration overrides.
        app_config: Notebook-specific configuration.
        asset_url: CDN URL for assets (default: jsDelivr).

    Returns:
        HTML string of the static notebook.

    Example:
        >>> html = render_static_notebook(
        ...     code=Path("analysis.py").read_text(),
        ...     session_snapshot=precomputed_outputs,
        ...     config={"theme": "dark"},
        ... )
    """
    # Convert code to notebook format if not already provided
    if notebook_snapshot is None or app_config is None:
        converted_notebook, converted_app_config = _convert_code_to_notebook(
            code
        )
        if notebook_snapshot is None:
            notebook_snapshot = converted_notebook
        if app_config is None:
            app_config = converted_app_config.asdict()
    # Parse configs
    user_config = _parse_config(config)
    # NOTE(review): the user config is also passed as the config overrides
    # here (there is no separate config_overrides parameter) — confirm this
    # reuse is intentional rather than a copy/paste of render_notebook.
    config_overrides_obj = _parse_partial_config(config or {})
    app_config_obj = _AppConfig.from_untrusted_dict(app_config)
    # Get HTML template
    html = _get_html_template()
    return static_notebook_template(
        html=html,
        user_config=user_config,
        config_overrides=config_overrides_obj,
        server_token=SkewProtectionToken("static"),
        app_config=app_config_obj,
        filepath=filename,
        # When code is excluded, the hash still covers the original source.
        code=code if include_code else "",
        code_hash=hash_code(code),
        session_snapshot=session_snapshot,
        notebook_snapshot=notebook_snapshot,
        files=files or {},
        asset_url=asset_url,
    )
def _add_custom_css(html: str, custom_css: str) -> str:
    """Inject ``custom_css`` into ``html`` just before its ``</head>`` tag.

    Args:
        html: Rendered page markup, expected to contain a ``</head>`` tag.
        custom_css: Raw CSS to embed.

    Returns:
        The HTML with a style block inserted before the first ``</head>``.
    """
    css_block = _custom_css_block(custom_css)
    # Replace only the first occurrence: notebook output embedded in the
    # page may itself contain the literal text "</head>", and the CSS must
    # land in the document head, not be duplicated at every match.
    return html.replace("</head>", f"{css_block}</head>", 1)
def _parse_session_mode(mode: Literal["edit", "read", "run"]) -> SessionMode:
    """Map a public mode string to the internal SessionMode enum.

    Accepts "edit" (editable) plus "read"/"run" (both read-only).

    Raises:
        ValueError: If ``mode`` is not one of the accepted strings.
    """
    if mode == "edit":
        return SessionMode.EDIT
    elif mode in ["run", "read"]:
        return SessionMode.RUN
    else:
        # List every accepted value (the previous message omitted 'read',
        # which this function accepts; the annotation omitted 'run').
        raise ValueError(
            f"Invalid session mode: {mode}. "
            "Must be 'edit', 'run', or 'read'."
        )
__all__ = [
"render_notebook",
"render_static_notebook",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/templates/api.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_internal/test_internal_api.py | from __future__ import annotations
import importlib
from pathlib import Path
from tests.mocks import snapshotter
from tests.utils import explore_module
snapshot = snapshotter(__file__)
def test_internal_api():
    """Snapshot the public surface of every module under marimo/_internal."""
    internal_path = (
        Path(__file__).parent.parent.parent / "marimo" / "_internal"
    )
    # Collect public submodules: package directories and .py files,
    # visited in sorted (stable) order; private names are skipped.
    items = []
    for entry in sorted(internal_path.iterdir()):
        if entry.name.startswith("_"):
            continue
        if entry.is_file() and entry.suffix == ".py":
            items.append((entry.stem, False))
        elif entry.is_dir():
            items.append((entry.name, True))
    all_results = []
    for name, _is_dir in items:
        # Import the module dynamically and record its explored surface.
        module = importlib.import_module(f"marimo._internal.{name}")
        results = explore_module(module)
        if results:
            all_results.append(name)
            all_results.extend(f" {line}" for line in results)
    assert len(all_results) > 0
    snapshot("internal_api.txt", "\n".join(all_results))
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_internal/test_internal_api.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/templates/test_templates_api.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import unittest
from marimo._schemas.session import (
VERSION,
Cell,
DataOutput,
NotebookSessionMetadata,
NotebookSessionV1,
)
from marimo._server.templates.api import (
render_notebook,
render_static_notebook,
)
class TestRenderNotebook(unittest.TestCase):
    """Tests for render_notebook (interactive edit/read rendering)."""

    def setUp(self) -> None:
        # Minimal two-cell notebook source shared by every test.
        self.code = """
import marimo as mo
app = mo.App()
@app.cell
def __():
import marimo as mo
return mo,
@app.cell
def __(mo):
mo.md("Hello, World!")
return
if __name__ == "__main__":
app.run()
"""

    def test_render_notebook_edit_mode(self) -> None:
        # Smoke test: edit mode yields a complete HTML document.
        html = render_notebook(code=self.code, mode="edit")
        assert "<html" in html
        assert "</html>" in html

    def test_render_notebook_read_mode(self) -> None:
        # Read-only mode also renders a full page.
        html = render_notebook(code=self.code, mode="read")
        assert "<html" in html

    def test_render_notebook_with_filename(self) -> None:
        # The display filename is embedded in the page.
        html = render_notebook(
            code=self.code, mode="edit", filename="test_notebook.py"
        )
        assert "test_notebook.py" in html

    def test_render_notebook_with_config(self) -> None:
        # User config overrides are serialized into the page.
        html = render_notebook(
            code=self.code,
            mode="edit",
            config={"completion": {"activate_on_typing": False}},
        )
        assert "activate_on_typing" in html

    def test_render_notebook_with_runtime_config(self) -> None:
        # Remote-kernel runtime config (websocket URL) appears in the page.
        html = render_notebook(
            code=self.code,
            mode="edit",
            runtime_config=[{"url": "wss://example.com"}],
        )
        assert "wss://example.com" in html

    def test_render_notebook_with_custom_css(self) -> None:
        # Custom CSS is injected verbatim into the rendered HTML.
        custom_css = "body { background-color: red; }"
        html = render_notebook(
            code=self.code, mode="edit", custom_css=custom_css
        )
        assert custom_css in html
class TestRenderStaticNotebook(unittest.TestCase):
    """Tests for render_static_notebook (self-contained HTML export)."""

    def setUp(self) -> None:
        # Minimal two-cell notebook source shared by every test.
        self.code = """
import marimo as mo
app = mo.App()
@app.cell
def __():
import marimo as mo
return mo,
@app.cell
def __(mo):
mo.md("Hello, World!")
return
if __name__ == "__main__":
app.run()
"""
        # Pre-computed session snapshot with one cell's text output,
        # standing in for an actual notebook run.
        self.session_snapshot = NotebookSessionV1(
            version=VERSION,
            metadata=NotebookSessionMetadata(marimo_version="0.1.0"),
            cells=[
                Cell(
                    id="cell1",
                    code_hash="abc123",
                    outputs=[
                        DataOutput(
                            type="data",
                            data={"text/plain": "Hello, World!"},
                        )
                    ],
                    console=[],
                ),
            ],
        )

    def test_render_static_notebook(self) -> None:
        # Smoke test: a static export yields an HTML document.
        html = render_static_notebook(
            code=self.code, session_snapshot=self.session_snapshot
        )
        assert "<html" in html

    def test_render_static_notebook_with_filename(self) -> None:
        # The display filename is embedded in the page.
        html = render_static_notebook(
            code=self.code,
            filename="test_notebook.py",
            session_snapshot=self.session_snapshot,
        )
        assert "test_notebook.py" in html

    def test_render_static_notebook_include_code(self) -> None:
        # include_code toggles whether source appears in the export;
        # the excluded variant ships an empty <marimo-code> element.
        html_with_code = render_static_notebook(
            code=self.code,
            include_code=True,
            session_snapshot=self.session_snapshot,
        )
        html_without_code = render_static_notebook(
            code=self.code,
            include_code=False,
            session_snapshot=self.session_snapshot,
        )
        assert "import marimo as mo" in html_with_code
        assert '<marimo-code hidden=""></marimo-code>' in html_without_code
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/templates/test_templates_api.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_runtime/dataflow/cycles.py | # Copyright 2026 Marimo. All rights reserved.
"""Cycle detection and tracking for cell dependencies."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
from marimo._runtime.dataflow.types import Edge
if TYPE_CHECKING:
from collections.abc import Collection
from marimo._runtime.dataflow.topology import GraphTopology
from marimo._types.ids import CellId_t
@dataclass
class CycleTracker:
    """Detects and tracks cycles in the graph.

    Responsibilities:
    - Detect cycles when edges are added
    - Maintain cycle set
    - Remove broken cycles
    """

    # Known cycles; each cycle is stored as a tuple of (parent, child) edges.
    cycles: set[tuple[Edge, ...]] = field(default_factory=set)

    def detect_cycle_for_edge(
        self,
        edge: Edge,
        topology: GraphTopology,
    ) -> tuple[Edge, ...] | None:
        """Check whether adding ``edge`` closes a cycle.

        If a path already runs from the edge's child back to its parent,
        the new edge completes a cycle; the cycle is recorded and returned.

        Args:
            edge: The (parent, child) edge being added.
            topology: Graph topology used to search for a return path.

        Returns:
            The cycle as a tuple of edges (new edge first, then the
            child-to-parent path), or None when no cycle is created.
        """
        source, target = edge
        return_path = topology.get_path(target, source)
        if not return_path:
            return None
        found_cycle = (edge, *return_path)
        self.cycles.add(found_cycle)
        return found_cycle

    def remove_cycles_with_edge(self, edge: Edge) -> None:
        """Drop every recorded cycle that includes ``edge``.

        Call this when ``edge`` is removed from the graph.
        """
        # Snapshot first: mutating the set while iterating is not allowed.
        stale = [cycle for cycle in self.cycles if edge in cycle]
        for cycle in stale:
            self.cycles.discard(cycle)

    def get_cycles(
        self,
        cell_ids: Collection[CellId_t],
        topology: GraphTopology,
    ) -> list[tuple[Edge, ...]]:
        """Return the recorded cycles fully contained in ``cell_ids``.

        Args:
            cell_ids: The cells to consider.
            topology: The graph topology.

        Returns:
            Cycles whose every edge connects two cells in ``cell_ids``.
        """
        members = set(cell_ids)
        # Edges of the subgraph induced by the requested cells.
        induced = set()
        for parent in members:
            if parent not in topology.children:
                continue
            induced.update(
                (parent, child)
                for child in topology.children[parent]
                if child in members
            )
        return [
            cycle
            for cycle in self.cycles
            if all(e in induced for e in cycle)
        ]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/dataflow/cycles.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_runtime/dataflow/definitions.py | # Copyright 2026 Marimo. All rights reserved.
"""Variable definition tracking for cells."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from marimo._ast.sql_visitor import SQLRef
from marimo._ast.visitor import Name, VariableData
from marimo._types.ids import CellId_t
@dataclass
class DefinitionRegistry:
    """Tracks variable definitions across cells.

    Responsibilities:
    - Track which cells define which variables
    - Handle typed definitions (SQL vs Python)
    - Detect multiply-defined names
    - SQL hierarchical reference matching
    """

    # Maps each defined name to the set of cells defining it.
    definitions: dict[Name, set[CellId_t]] = field(default_factory=dict)

    # Typed definitions for SQL support: (name, type) -> cell_ids,
    # e.g. ("my_table", "table") -> {cell_id_1}
    typed_definitions: dict[tuple[Name, str], set[CellId_t]] = field(
        default_factory=dict
    )

    # All kinds recorded for a given name, e.g. "my_table" -> {"table", "view"}
    definition_types: dict[Name, set[str]] = field(default_factory=dict)

    def register_definition(
        self,
        cell_id: CellId_t,
        name: Name,
        variable_data: list[VariableData],
    ) -> set[CellId_t]:
        """Register a definition for a cell.

        Args:
            cell_id: The cell that defines the variable.
            name: The variable name.
            variable_data: Metadata entries for the variable
                (from ``cell.variable_data[name]``).

        Returns:
            The sibling cells, i.e. other cells that also define ``name``.
        """
        variable = variable_data[-1]  # the final definition wins
        typed_def = (name, variable.kind)

        name_exists = name in self.definitions
        kind_is_new = typed_def not in self.typed_definitions
        if name_exists and kind_is_new:
            # A SQL definition whose qualified name differs from the plain
            # name is allowed to coexist without counting as a duplicate;
            # everything else is a genuine re-definition of `name`.
            if variable.qualified_name == name or variable.language != "sql":
                self.definitions[name].add(cell_id)
        else:
            self.definitions.setdefault(name, set()).add(cell_id)

        self.typed_definitions.setdefault(typed_def, set()).add(cell_id)
        self.definition_types.setdefault(name, set()).add(variable.kind)

        # Siblings: every other cell that defines this name.
        return self.definitions[name] - {cell_id}

    def unregister_definitions(
        self,
        cell_id: CellId_t,
        defs: set[Name],
    ) -> None:
        """Unregister all definitions for a cell.

        Args:
            cell_id: The cell being removed.
            defs: The names defined by that cell.
        """
        for name in defs:
            definers = self.definitions.get(name)
            if definers is None:
                continue
            definers.discard(cell_id)
            if definers:
                continue
            # Last definer is gone: drop the name and its typed bookkeeping.
            del self.definitions[name]
            for kind in self.definition_types.pop(name, set()):
                self.typed_definitions.pop((name, kind), None)

    def get_defining_cells(self, name: Name) -> set[CellId_t]:
        """Get all cells that define a variable name.

        This is a singleton for well-formed graphs (no multiply-defined
        names).

        Args:
            name: The variable name.

        Returns:
            The set of cell IDs defining ``name`` (empty if none).
        """
        return self.definitions.get(name, set())

    def find_sql_hierarchical_matches(
        self, sql_ref: SQLRef
    ) -> list[tuple[set[CellId_t], Name]]:
        """Find cells that define components of a hierarchical SQL reference.

        Scans every typed definition and keeps those whose (name, kind)
        matches a component of the hierarchical reference — e.g. for
        "my_schema.my_table" this finds cells defining a table/view named
        "my_table" or a catalog named "my_schema".

        Args:
            sql_ref: A hierarchical SQL reference (e.g. "schema.table",
                "catalog.schema.table").

        Returns:
            A list of (cell_ids, matched_name) pairs, one per matching
            typed definition.
        """
        return [
            (cell_ids, def_name)
            for (def_name, kind), cell_ids in self.typed_definitions.items()
            if sql_ref.contains_hierarchical_ref(def_name, kind)
        ]

    def get_multiply_defined(self) -> list[Name]:
        """Return a list of names that are defined in multiple cells."""
        return [
            name
            for name, definers in self.definitions.items()
            if len(definers) > 1
        ]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/dataflow/definitions.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.