sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
locustio/locust:examples/markov_taskset.py | from locust import MarkovTaskSet, User, constant, transition, transitions
"""
This example demonstrates the different ways to specify transitions in a MarkovTaskSet.
The MarkovTaskSet class supports several ways to define transitions between tasks:
1. Using @transition decorator for a single transition
2. Stacking multiple @transition decorators
3. Using @transitions with a dictionary of task names and weights
4. Using @transitions with a list of task names (default weight 1)
5. Using @transitions with a list of tuples (task_name, weight)
6. Using @transitions with a mixed list of strings and tuples
"""
class TransitionsExample(MarkovTaskSet):
    """
    This MarkovTaskSet demonstrates all the different ways to specify transitions.
    """

    # Style 1: a single @transition decorator — one outgoing edge.
    @transition("method2")
    def method1(self):
        print("Method 1: Using a single @transition decorator")
        print(" Next: Will transition to method2")

    # Style 2: stacked @transition decorators, one per outgoing edge.
    @transition("method3", weight=2)
    @transition("method1")
    def method2(self):
        print("Method 2: Using multiple stacked @transition decorators")
        print(" Next: Will transition to method3 (weight 2) or method1 (weight 1)")

    # Style 3: @transitions with a {task_name: weight} dictionary.
    @transitions({"method4": 3, "method2": 1, "method1": 1})
    def method3(self):
        print("Method 3: Using @transitions with a dictionary")
        print(" Next: Will transition to method4 (weight 3), method2 (weight 1), or method1 (weight 1)")

    # Style 4: @transitions with a plain list; every entry gets weight 1.
    @transitions(["method5", "method3"])
    def method4(self):
        print("Method 4: Using @transitions with a list of task names")
        print(" Next: Will transition to method5 or method3 with equal probability (weight 1 each)")

    # Style 5: @transitions with explicit (task_name, weight) tuples.
    @transitions([("method6", 4), ("method4", 1)])
    def method5(self):
        print("Method 5: Using @transitions with a list of tuples")
        print(" Next: Will transition to method6 (weight 4) or method4 (weight 1)")

    # Style 6: @transitions mixing tuples and bare names (bare names weigh 1).
    @transitions([("method1", 2), "method5"])
    def method6(self):
        print("Method 6: Using @transitions with a mixed list")
        print(" Next: Will transition to method1 (weight 2) or method5 (weight 1)")
class TransitionsUser(User):
    # Runs the Markov chain above, waiting 1 second between tasks.
    tasks = [TransitionsExample]
    wait_time = constant(1)
if __name__ == "__main__":
    # Allows running/debugging this example directly, without the Locust CLI.
    from locust import run_single_user

    run_single_user(TransitionsUser)
| {
"repo_id": "locustio/locust",
"file_path": "examples/markov_taskset.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/test/test_markov_taskset.py | from locust import User, tag
from locust.exception import RescheduleTask
from locust.user.markov_taskset import (
InvalidTransitionError,
MarkovTaskSet,
MarkovTaskTagError,
NoMarkovTasksError,
NonMarkovTaskTransitionError,
transition,
transitions,
)
import random
from .testcases import LocustTestCase
class TestMarkovTaskSet(LocustTestCase):
    # Exercises MarkovTaskSet: the @transition(s) decorators, weighted edges,
    # and the validation that runs when a MarkovTaskSet class is created.

    def setUp(self):
        super().setUp()
        # One User instance shared by every TaskSet instantiated in the tests.
        self.locust = User(self.environment)

    def test_basic_markov_chain(self):
        """Test a simple markov chain with transitions between tasks"""
        log = []

        class MyMarkovTaskSet(MarkovTaskSet):
            @transition("t2")
            def t1(self):
                log.append(1)

            @transition("t3")
            def t2(self):
                log.append(2)

            @transition("t1")
            def t3(self):
                log.append(3)
                if len(log) >= 9:
                    self.interrupt(reschedule=False)

        ts = MyMarkovTaskSet(self.locust)
        # interrupt(reschedule=False) surfaces as RescheduleTask from run()
        self.assertRaises(RescheduleTask, lambda: ts.run())
        # Since transitions are deterministic in this test, we expect a repeating pattern
        self.assertEqual([1, 2, 3, 1, 2, 3, 1, 2, 3], log)

    def test_multiple_transitions(self):
        """Test multiple transitions"""
        log = []
        # Fixed seed so the random walk over the chain is reproducible.
        random.seed(12345)

        class MyMarkovTaskSet(MarkovTaskSet):
            @transition("t2")
            def t1(self):
                log.append(1)

            @transition("t1")
            @transition("t3")
            def t2(self):
                log.append(2)
                if len(log) >= 10:
                    self.interrupt(reschedule=False)

            @transition("t1")
            def t3(self):
                log.append(3)

        ts = MyMarkovTaskSet(self.locust)
        self.assertRaises(RescheduleTask, lambda: ts.run())
        # Check that we have at least one of each task type
        self.assertIn(1, log)
        self.assertIn(2, log)
        self.assertIn(3, log)

    def test_weighted_transitions(self):
        """Test transitions with different weights"""
        log = []
        random.seed(12345)

        class MyMarkovTaskSet(MarkovTaskSet):
            @transition("t2", weight=1)
            def t1(self):
                log.append(1)

            @transitions({"t1": 2, "t3": 1})
            def t2(self):
                log.append(2)
                if len(log) >= 10:
                    self.interrupt(reschedule=False)

            @transition("t1")
            def t3(self):
                log.append(3)

        ts = MyMarkovTaskSet(self.locust)
        self.assertRaises(RescheduleTask, lambda: ts.run())
        # Check that we have at least one of each task type
        self.assertIn(1, log)
        self.assertIn(2, log)
        self.assertIn(3, log)

    def test_transitions_list_format(self):
        """Test using the transitions decorator with a list format"""
        log = []
        random.seed(12345)

        class MyMarkovTaskSet(MarkovTaskSet):
            @transitions([("t2", 1), "t3"])  # t3 has default weight of 1
            def t1(self):
                log.append(1)

            @transition("t1")
            def t2(self):
                log.append(2)

            @transition("t1")
            def t3(self):
                log.append(3)
                if len(log) >= 10:
                    self.interrupt(reschedule=False)

        ts = MyMarkovTaskSet(self.locust)
        self.assertRaises(RescheduleTask, lambda: ts.run())
        # Check that we have at least one of each task type
        self.assertIn(1, log)
        self.assertIn(2, log)
        self.assertIn(3, log)

    def test_validation_no_markov_tasks(self):
        """Test that an exception is raised when no markov tasks are defined"""
        # Validation happens at class-creation time, so the error is raised by
        # the class statement itself.
        with self.assertRaises(NoMarkovTasksError) as context:

            class EmptyMarkovTaskSet(MarkovTaskSet): ...

        self.assertIn("No Markov tasks defined", str(context.exception))

    def test_validation_invalid_transition(self):
        """Test that an exception is raised when a transition points to a non-existent task"""
        with self.assertRaises(InvalidTransitionError) as context:

            class InvalidTransitionTaskSet(MarkovTaskSet):
                @transition("non_existent_task")
                def t1(self):
                    pass

        self.assertIn("invalid since no such element exists", str(context.exception))

    def test_validation_non_markov_transition(self):
        """Test that an exception is raised when a transition points to a non-markov task"""
        with self.assertRaises(NonMarkovTaskTransitionError) as context:

            class NonMarkovTransitionTaskSet(MarkovTaskSet):
                @transition("t2")
                def t1(self):
                    pass

                # t2 declares no transitions, so it is not a markov task.
                def t2(self):
                    pass

        self.assertIn("cannot be used as a target for a transition", str(context.exception))

    def test_validation_unreachable_tasks(self):
        """Test that a warning is logged when there are unreachable tasks"""

        class UnreachableTaskSet(MarkovTaskSet):
            @transition("t2")
            def t1(self):
                pass

            @transition("t1")
            def t2(self):
                pass

            @transition("t3")  # This task is unreachable from t1 and t2
            def t3(self):
                pass

        UnreachableTaskSet(self.locust)
        # Check that a warning was logged
        self.assertTrue(any("unreachable" in warning for warning in self.mocked_log.warning))

    def test_validation_unreachable_tasks_because_of_weights(self):
        """Test that a warning is logged when there are unreachable tasks"""

        class UnreachableTaskSet(MarkovTaskSet):
            # A zero-weight edge can never be taken, so t2 is unreachable.
            @transition("t2", 0)
            def t1(self):
                pass

            @transition("t1")
            def t2(self):
                pass

        UnreachableTaskSet(self.locust)
        # Check that a warning was logged
        self.assertTrue(any("unreachable" in warning for warning in self.mocked_log.warning))

    def test_validation_no_tags(self):
        """Test that an exception is raised when a task has tags"""
        with self.assertRaises(MarkovTaskTagError) as context:

            class TaggedTaskSet(MarkovTaskSet):
                @tag("tag1")
                @transition("t2")
                def t1(self):
                    pass

                @transition("t1")
                def t2(self):
                    pass

        self.assertIn("Tags are unsupported", str(context.exception))

    def test_abstract_markov_taskset(self):
        """Test that abstract MarkovTaskSets are not validated"""
        # Define a class with abstract=True explicitly
        class AbstractMarkovTaskSet(MarkovTaskSet):
            abstract = True

        # This should not raise an exception even though it has no tasks
        AbstractMarkovTaskSet(self.locust)
| {
"repo_id": "locustio/locust",
"file_path": "locust/test/test_markov_taskset.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
locustio/locust:locust/user/markov_taskset.py | from locust.exception import LocustError
from locust.user.task import TaskSetMeta
from locust.user.users import TaskSet
import logging
import random
from collections.abc import Callable
MarkovTaskT = Callable[..., None]
# Exception hierarchy for MarkovTaskSet validation failures; all derive from
# LocustError so callers can catch them with the framework's base exception.
class NoMarkovTasksError(LocustError):
    """Raised when a MarkovTaskSet class doesn't define any Markov tasks."""

    pass


class InvalidTransitionError(LocustError):
    """Raised when a transition in a MarkovTaskSet points to a non-existent task."""

    pass


class NonMarkovTaskTransitionError(LocustError):
    """Raised when a transition in a MarkovTaskSet points to a task that doesn't define transitions."""

    pass


class MarkovTaskTagError(LocustError):
    """Raised when tags are used with Markov tasks, which is unsupported."""

    pass
def is_markov_task(task: "MarkovTaskT") -> bool:
    """
    Determines if a task is a Markov task by checking if it has transitions defined.

    :param task: The task to check
    :return: True if the task is a Markov task, False otherwise
    """
    # hasattr matches how the @transition(s) decorators attach the attribute
    # (they use hasattr themselves) and avoids building the full dir() listing
    # on every call.
    return hasattr(task, "transitions")
def transition(func_name: str, weight: int = 1) -> "Callable[[MarkovTaskT], MarkovTaskT]":
    """
    Decorator for adding a single transition to a Markov task.

    Registers an edge from the decorated task to ``func_name`` in the task's
    ``transitions`` mapping; ``weight`` controls how likely the edge is to be
    taken relative to the task's other transitions. Decorators may be stacked
    to declare several outgoing edges.

    :param func_name: The name of the target task function
    :param weight: The weight of this transition (default: 1)
    :return: The decorated function with the transition added

    Example::

        class UserBehavior(MarkovTaskSet):
            @transition('browse_products')
            def index(self):
                self.client.get("/")

            @transition('index', weight=3)
            @transition('product_page', weight=1)
            def browse_products(self):
                self.client.get("/products/")
    """

    def wrap(task):
        # Create the mapping lazily so the first decorator applied can attach it.
        edges = getattr(task, "transitions", None)
        if edges is None:
            edges = {}
            task.transitions = edges
        edges[func_name] = weight
        return task

    return wrap
def transitions(weights: "dict[str, int] | list[tuple[str, int] | str]") -> "Callable[[MarkovTaskT], MarkovTaskT]":
    """
    Decorator for adding multiple transitions to a Markov task at once.

    Accepts either a ``{name: weight}`` dictionary, or a list whose items are
    bare task names (weight 1) and/or ``(name, weight)`` tuples. All entries
    are merged into the task's ``transitions`` mapping.

    :param weights: Either a dictionary mapping function names to weights, or a list of function names
        (with default weight 1) or (function_name, weight) tuples
    :return: The decorated function with the transitions added

    Example::

        class UserBehavior(MarkovTaskSet):
            @transitions({'checkout': 1, 'browse_products': 3, 'index': 2})
            def view_cart(self):
                self.client.get("/cart/")

            @transitions([
                ('index', 2),        # with weight 2
                'browse_products'    # with default weight 1
            ])
            def checkout(self):
                self.client.get("/checkout/")
    """

    def wrap(task):
        if not hasattr(task, "transitions"):
            task.transitions = {}
        if isinstance(weights, dict):
            entries = dict(weights)
        else:
            entries = {}
            for item in weights:
                if isinstance(item, tuple):
                    name, edge_weight = item
                else:
                    name, edge_weight = item, 1
                entries[name] = edge_weight
        task.transitions.update(entries)
        return task

    return wrap
def get_markov_tasks(class_dict: dict) -> list:
    """
    Extracts all Markov tasks from a class dictionary.

    Used internally by MarkovTaskSetMeta to collect every member that was
    decorated with @transition or @transitions.

    :param class_dict: Dictionary containing class attributes and methods
    :return: List of functions that are Markov tasks
    """
    markov_tasks = []
    for member in class_dict.values():
        if is_markov_task(member):
            markov_tasks.append(member)
    return markov_tasks
def to_weighted_list(transitions: dict):
    """Expand a {name: weight} mapping into a list repeating each name `weight` times."""
    weighted = []
    for name, weight in transitions.items():
        weighted.extend([name] * weight)
    return weighted
def validate_has_markov_tasks(tasks: list, classname: str):
    """
    Validates that a MarkovTaskSet has at least one Markov task.

    Used internally during MarkovTaskSet validation: the class must define at
    least one method decorated with @transition or @transitions.

    :param tasks: List of tasks to validate
    :param classname: Name of the class being validated (for error messages)
    :raises NoMarkovTasksError: If no Markov tasks are found
    """
    if len(tasks) == 0:
        raise NoMarkovTasksError(
            f"No Markov tasks defined in class {classname}. Use the @transition(s) decorators to define some."
        )
def validate_transitions(tasks: list, class_dict: dict, classname: str):
    """
    Validates that all transitions in Markov tasks point to existing Markov tasks.

    This function checks two conditions for each transition:
    1. The target task exists in the class
    2. The target task is also a Markov task (has transitions defined)

    :param tasks: List of Markov tasks to validate
    :param class_dict: Dictionary containing class attributes and methods
    :param classname: Name of the class being validated (for error messages)
    :raises InvalidTransitionError: If a transition points to a non-existent task
    :raises NonMarkovTaskTransitionError: If a transition points to a task that isn't a Markov task
    """
    for task in tasks:
        for dest in task.transitions.keys():
            dest_task = class_dict.get(dest)
            if not dest_task:
                raise InvalidTransitionError(
                    f"Transition to {dest} from {task.__name__} is invalid since no such element exists on class {classname}"
                )
            if not is_markov_task(dest_task):
                # Trailing space added so the two concatenated sentences do not
                # run together ("...of its own.Used as...").
                raise NonMarkovTaskTransitionError(
                    f"{classname}.{dest} cannot be used as a target for a transition since it does not define any transitions of its own. "
                    + f"Used as a transition from {task.__name__}."
                )
def validate_no_unreachable_tasks(tasks: list, class_dict: dict, classname: str):
    """
    Checks for and warns about unreachable Markov tasks in a MarkovTaskSet.

    Reachability is computed with an iterative depth-first search starting from
    the first task. An explicit stack is used instead of recursion so that very
    long chains cannot hit Python's recursion limit. An edge with weight <= 0
    would contribute no entries to the weighted transition list and can never
    be taken, so it does not make its target reachable.

    :param tasks: List of Markov tasks to validate
    :param class_dict: Dictionary containing class attributes and methods
    :param classname: Name of the class being validated (for warning messages)
    :return: The original list of tasks
    """
    visited = set()
    stack = [tasks[0].__name__]
    while stack:
        task_name = stack.pop()
        if task_name in visited:
            continue
        visited.add(task_name)
        for dest, weight in class_dict.get(task_name).transitions.items():
            # weight > 0 mirrors the weighted-list expansion used elsewhere.
            if weight > 0 and dest not in visited:
                stack.append(dest)
    unreachable = {task.__name__ for task in tasks} - visited
    if len(unreachable) > 0:
        logging.warning(f"The following markov tasks are unreachable in class {classname}: {unreachable}")
    return tasks
def validate_no_tags(task, classname: str):
    """
    Validates that Markov tasks don't have tags, which are unsupported.

    Tags are not supported for MarkovTaskSet because they can make the Markov chain invalid
    by potentially filtering out tasks that are part of the chain.

    :param task: The task to validate
    :param classname: Name of the class being validated (for error messages)
    :raises MarkovTaskTagError: If the task has tags
    """
    # hasattr matches how @tag attaches `locust_tag_set` and avoids building
    # the full dir() listing for every task.
    if hasattr(task, "locust_tag_set"):
        raise MarkovTaskTagError(
            "Tags are unsupported for MarkovTaskSet since they can make the markov chain invalid. "
            + f"Tags detected on {classname}.{task.__name__}: {task.locust_tag_set}"
        )
def validate_task_name(decorated_func):
    """
    Validates that certain method names aren't used as Markov tasks.

    Two special cases are checked:
    - "on_stop"/"on_start": marking these as Markov tasks makes them run both
      as chain steps AND as lifecycle hooks, which is usually unintended, so a
      warning is logged.
    - "run": used internally by Locust; overriding it or annotating it with
      transitions is an error.

    :param decorated_func: The function to validate
    :raises Exception: If the function name is "run"
    """
    name = decorated_func.__name__
    if name in ("on_stop", "on_start"):
        logging.warning(
            "You have tagged your on_stop/start function with @transition. This will make the method get called both as a step AND on stop/start."
        )  # this is usually not what the user intended
    if name == "run":
        raise Exception(
            "TaskSet.run() is a method used internally by Locust, and you must not override it or annotate it with transitions"
        )
def validate_markov_chain(tasks: list, class_dict: dict, classname: str):
    """
    Runs all validation functions on a Markov chain.

    The order matters: existence of tasks is checked first, then transition
    targets, then reachability, and finally per-task name/tag constraints.

    :param tasks: List of Markov tasks to validate
    :param class_dict: Dictionary containing class attributes and methods
    :param classname: Name of the class being validated (for error/warning messages)
    :raises: Various exceptions if validation fails
    """
    validate_has_markov_tasks(tasks, classname)
    validate_transitions(tasks, class_dict, classname)
    validate_no_unreachable_tasks(tasks, class_dict, classname)
    for markov_task in tasks:
        validate_task_name(markov_task)
        validate_no_tags(markov_task, classname)
class MarkovTaskSetMeta(TaskSetMeta):
    """
    Meta class for MarkovTaskSet. It's used to allow MarkovTaskSet classes to specify
    task execution using the @transition(s) decorators
    """

    def __new__(mcs, classname, bases, class_dict):
        # Classes explicitly marked abstract=True skip collection/validation;
        # they exist only to be subclassed.
        if not class_dict.get("abstract"):
            class_dict["abstract"] = False
            tasks = get_markov_tasks(class_dict)
            validate_markov_chain(tasks, class_dict, classname)
            # The chain starts at the first declared markov task.
            class_dict["current"] = tasks[0]
            # Pre-expand each {name: weight} dict into a weighted list so
            # get_next_task() can pick a successor with one random.choice().
            for task in tasks:
                task.transitions = to_weighted_list(task.transitions)
        return type.__new__(mcs, classname, bases, class_dict)
class MarkovTaskSet(TaskSet, metaclass=MarkovTaskSetMeta):
    """
    Class defining a probabilistic sequence of functions that a User will execute.

    The sequence is modeled as a Markov chain describing the user's load: a
    current state plus a set of possible transitions out of each state, where
    each transition's weight determines how likely it is to be taken.
    """

    current: Callable | TaskSet
    abstract: bool = True
    """If abstract is True, the class is meant to be subclassed, and the markov chain won't be validated"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get_next_task(self):
        """
        Return the task to execute now and advance the chain to a randomly
        chosen successor (weights are encoded by repetition in the
        pre-expanded transitions list).
        """
        task_to_run = self.current
        successor_name = random.choice(task_to_run.transitions)
        self.current = getattr(self, successor_name)
        return task_to_run
| {
"repo_id": "locustio/locust",
"file_path": "locust/user/markov_taskset.py",
"license": "MIT License",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
m-bain/whisperX:whisperx/log_utils.py | import logging
import sys
from typing import Optional
_LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
def setup_logging(
    level: str = "info",
    log_file: Optional[str] = None,
) -> None:
    """
    Configure logging for WhisperX.

    Args:
        level: Logging level name (debug, info, warning, error, critical).
            Unrecognized names fall back to WARNING. Default: info
        log_file: Optional path to log file. If None, logs only to console.
    """
    logger = logging.getLogger("whisperx")
    logger.handlers.clear()

    # Resolve the level name defensively: getattr on the logging module could
    # return a non-level attribute (e.g. a function) for an arbitrary string,
    # which would make setLevel raise — require an int, else fall back.
    try:
        log_level = getattr(logging, level.upper(), logging.WARNING)
    except AttributeError:
        # level was not a string at all
        log_level = logging.WARNING
    if not isinstance(log_level, int):
        log_level = logging.WARNING
    logger.setLevel(log_level)

    formatter = logging.Formatter(_LOG_FORMAT, datefmt=_DATE_FORMAT)

    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(log_level)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    if log_file:
        try:
            file_handler = logging.FileHandler(log_file)
            file_handler.setLevel(log_level)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
        except OSError as e:
            # Best-effort: fall back to console-only logging.
            logger.warning(f"Failed to create log file '{log_file}': {e}")
            logger.warning("Continuing with console logging only")

    # Don't propagate to root logger to avoid duplicate messages
    logger.propagate = False
def get_logger(name: str) -> logging.Logger:
    """
    Get a logger instance for the given module.

    Args:
        name: Logger name (typically __name__ from calling module)

    Returns:
        Logger instance configured with WhisperX settings
    """
    parent = logging.getLogger("whisperx")
    # Lazily apply the default configuration the first time a logger is asked
    # for before setup_logging() has run.
    if not parent.handlers:
        setup_logging()
    # Scripts executed as __main__ share the package-level logger.
    return logging.getLogger("whisperx" if name == "__main__" else name)
| {
"repo_id": "m-bain/whisperX",
"file_path": "whisperx/log_utils.py",
"license": "BSD 2-Clause \"Simplified\" License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/export/_session_cache.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
from collections import Counter
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from marimo._server.file_router import AppFileRouter
from marimo._session.state.serialize import (
get_session_cache_file,
serialize_session_view,
)
from marimo._utils.code import hash_code
from marimo._utils.inline_script_metadata import (
script_metadata_hash_from_filename,
)
from marimo._utils.marimo_path import MarimoPath
from marimo._utils.paths import maybe_make_dirs
if TYPE_CHECKING:
from collections.abc import Iterable
from marimo._schemas.session import NotebookSessionV1
from marimo._session.state.session_view import SessionView
from marimo._types.ids import CellId_t
def get_script_metadata_hash(path: str | Path | None) -> str | None:
if path is None:
return None
return script_metadata_hash_from_filename(str(path))
def _hash_code_for_session_compare(code: str | None) -> str | None:
if code is None or code == "":
return None
return hash_code(code)
def current_notebook_code_hashes(
    notebook: "MarimoPath",
) -> tuple[str | None, ...]:
    """Compute the per-cell code hashes for the notebook as it exists on disk.

    Raises RuntimeError when the file router cannot produce a unique key for
    the notebook.
    """
    router = AppFileRouter.from_filename(notebook)
    file_key = router.get_unique_file_key()
    if file_key is None:
        raise RuntimeError(
            "Expected a unique file key when checking staleness for "
            f"{notebook.absolute_name}"
        )
    manager = router.get_file_manager(file_key)
    hashes = [
        _hash_code_for_session_compare(data.code)
        for data in manager.app.cell_manager.cell_data()
    ]
    return tuple(hashes)
def serialize_session_snapshot(
    view: "SessionView",
    *,
    notebook_path: str | Path | None,
    cell_ids: "Iterable[CellId_t] | None" = None,
) -> "NotebookSessionV1":
    """Serialize *view* to the session schema, stamping the script metadata hash."""
    script_hash = get_script_metadata_hash(notebook_path)
    return serialize_session_view(
        view,
        cell_ids=cell_ids,
        script_metadata_hash=script_hash,
    )
def write_session_snapshot(
    *,
    notebook_path: str | Path,
    snapshot: "NotebookSessionV1",
) -> Path:
    """Write *snapshot* to the notebook's session cache file and return its path."""
    target = get_session_cache_file(Path(notebook_path))
    maybe_make_dirs(target)
    payload = json.dumps(snapshot, indent=2)
    target.write_text(payload, encoding="utf-8")
    return target
def persist_session_view_to_cache(
*,
view: SessionView,
notebook_path: str | Path | None,
cell_ids: Iterable[CellId_t] | None = None,
) -> Path | None:
if notebook_path is None:
return None
snapshot = serialize_session_snapshot(
view,
notebook_path=notebook_path,
cell_ids=cell_ids,
)
return write_session_snapshot(
notebook_path=notebook_path, snapshot=snapshot
)
def is_session_snapshot_stale(output: Path, notebook: "MarimoPath") -> bool:
    """Return True when a saved session should be regenerated.

    A snapshot is stale if it is unreadable, malformed, missing the
    script metadata hash, or if either the current code-hash multiset or the
    current script metadata hash differs from the snapshot.
    """
    try:
        raw = output.read_text(encoding="utf-8")
        snapshot = cast(dict[str, Any], json.loads(raw))
    except (OSError, json.JSONDecodeError):
        return True

    metadata = snapshot.get("metadata")
    if not isinstance(metadata, dict) or "script_metadata_hash" not in metadata:
        return True
    stored_hash = metadata["script_metadata_hash"]
    if not (stored_hash is None or isinstance(stored_hash, str)):
        return True
    if stored_hash != get_script_metadata_hash(notebook.absolute_name):
        return True

    cells = snapshot.get("cells")
    if not isinstance(cells, list):
        return True

    try:
        on_disk = Counter(current_notebook_code_hashes(notebook))
    except (RuntimeError, ValueError, OSError, SyntaxError):
        return True

    # Compare code hashes as multisets: order-independent, count-sensitive.
    cached: Counter[str | None] = Counter()
    for cell in cells:
        if not isinstance(cell, dict) or "code_hash" not in cell:
            return True
        cell_hash = cell["code_hash"]
        if not (cell_hash is None or isinstance(cell_hash, str)):
            return True
        cached[cell_hash] += 1
    return on_disk != cached
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/export/_session_cache.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_server/export/test_session_cache.py | from __future__ import annotations
import json
from typing import Any, cast
from unittest.mock import patch
from marimo._server.export import _session_cache
from marimo._session.state.session_view import SessionView
from marimo._utils.marimo_path import MarimoPath
from marimo._version import __version__
def _write_snapshot(path: str, payload: dict[str, object]) -> None:
with open(path, "w", encoding="utf-8") as f:
f.write(json.dumps(payload))
def _make_snapshot(
    *,
    code_hashes: list[str | None],
    script_metadata_hash: str | None = "meta-hash",
) -> dict[str, object]:
    """Build a minimal session-snapshot payload with the given cell code hashes.

    Passing script_metadata_hash=None omits the key entirely, emulating
    snapshots written before the hash was recorded.
    """
    metadata: dict[str, str | None] = {"marimo_version": __version__}
    if script_metadata_hash is not None:
        metadata["script_metadata_hash"] = script_metadata_hash
    cells = [
        {"code_hash": code_hash, "outputs": []} for code_hash in code_hashes
    ]
    return {"version": "1", "metadata": metadata, "cells": cells}
def test_is_session_snapshot_stale_false_when_snapshot_is_current(
    tmp_path,
) -> None:
    # Same code-hash multiset (order-independent) plus a matching script
    # metadata hash -> the snapshot is fresh.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(code_hashes=["hash-b", "hash-a", None, "hash-a"])
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a", None, "hash-a", "hash-b"),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="meta-hash",
        ),
    ):
        assert (
            _session_cache.is_session_snapshot_stale(output, notebook) is False
        )


def test_is_session_snapshot_stale_true_when_script_metadata_hash_missing(
    tmp_path,
) -> None:
    # Snapshots written without a script metadata hash must be regenerated.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(
        code_hashes=["hash-a"],
        script_metadata_hash=None,
    )
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a",),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value=None,
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)


def test_is_session_snapshot_stale_true_when_script_metadata_hash_mismatch(
    tmp_path,
) -> None:
    # Stored hash "old" vs current "new" -> stale.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(
        code_hashes=["hash-a"],
        script_metadata_hash="old",
    )
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a",),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="new",
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)
def test_is_session_snapshot_stale_true_when_code_hashes_do_not_match(
    tmp_path,
) -> None:
    # Snapshot contains a hash ("hash-b") the notebook no longer has.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(code_hashes=["hash-a", "hash-b"])
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a",),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="meta-hash",
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)


def test_is_session_snapshot_stale_true_when_hash_multiplicity_differs(
    tmp_path,
) -> None:
    # Multiset comparison: one cached "hash-a" vs two in the notebook.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(code_hashes=["hash-a"])
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a", "hash-a"),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="meta-hash",
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)


def test_is_session_snapshot_stale_true_when_cell_missing_code_hash(
    tmp_path,
) -> None:
    # A cell entry without a "code_hash" key marks the snapshot malformed.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(code_hashes=["hash-a"])
    snapshot["cells"] = [{"outputs": []}]
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a",),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="meta-hash",
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)
def test_is_session_snapshot_stale_true_when_code_hash_has_wrong_type(
    tmp_path,
) -> None:
    # code_hash must be a string or None; an int marks the snapshot malformed.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(code_hashes=["hash-a"])
    snapshot["cells"] = [{"code_hash": 1, "outputs": []}]
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            return_value=("hash-a",),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="meta-hash",
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)


def test_is_session_snapshot_stale_true_when_snapshot_is_unreadable(
    tmp_path,
) -> None:
    # Invalid JSON on disk -> stale; no mocking required.
    output = tmp_path / "session.json"
    output.write_text("{ not valid json", encoding="utf-8")
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    assert _session_cache.is_session_snapshot_stale(output, notebook)


def test_is_session_snapshot_stale_true_when_hash_lookup_fails(
    tmp_path,
) -> None:
    # Errors while re-hashing the notebook are treated as staleness.
    output = tmp_path / "session.json"
    snapshot = _make_snapshot(code_hashes=["hash-a"])
    _write_snapshot(str(output), snapshot)
    notebook = MarimoPath(str(tmp_path / "notebook.py"))
    with (
        patch.object(
            _session_cache,
            "current_notebook_code_hashes",
            side_effect=RuntimeError("failed to inspect notebook"),
        ),
        patch.object(
            _session_cache,
            "get_script_metadata_hash",
            return_value="meta-hash",
        ),
    ):
        assert _session_cache.is_session_snapshot_stale(output, notebook)
def test_serialize_session_snapshot_includes_script_metadata_hash() -> None:
    # The serialized metadata carries the script metadata hash used by the
    # staleness check.
    view = SessionView()
    with patch.object(
        _session_cache,
        "get_script_metadata_hash",
        return_value="meta-hash",
    ):
        snapshot = _session_cache.serialize_session_snapshot(
            view,
            notebook_path="notebook.py",
            cell_ids=(),
        )
    metadata = snapshot["metadata"]
    assert metadata["script_metadata_hash"] == "meta-hash"


def test_persist_session_view_to_cache_writes_under_marimo_dir(
    tmp_path,
) -> None:
    # The cache file lands at __marimo__/session/<notebook>.json next to the
    # notebook and round-trips the script metadata hash.
    notebook = tmp_path / "notebook.py"
    notebook.write_text("import marimo\n", encoding="utf-8")
    view = SessionView()
    with patch.object(
        _session_cache,
        "get_script_metadata_hash",
        return_value="meta-hash",
    ):
        output = _session_cache.persist_session_view_to_cache(
            view=view,
            notebook_path=notebook,
            cell_ids=(),
        )
    assert output is not None
    assert output == tmp_path / "__marimo__" / "session" / "notebook.py.json"
    data = cast(dict[str, Any], json.loads(output.read_text(encoding="utf-8")))
    assert data["metadata"]["script_metadata_hash"] == "meta-hash"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/test_session_cache.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_cli/export/_common.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
import subprocess
from pathlib import Path
from typing import TYPE_CHECKING, Any
import click
from marimo._server.file_router import flatten_files
from marimo._server.files.directory_scanner import DirectoryScanner
from marimo._utils.http import HTTPException, HTTPStatus
from marimo._utils.marimo_path import MarimoPath
if TYPE_CHECKING:
from collections.abc import Iterable
def is_multi_target(paths: list[Path]) -> bool:
    """Return True when the export targets more than one notebook.

    That is the case when several paths were given, or when any single
    path is a directory (which may expand to many notebooks).
    """
    if len(paths) > 1:
        return True
    return any(candidate.is_dir() for candidate in paths)
def collect_notebooks(paths: Iterable[Path]) -> list[MarimoPath]:
    """Expand files and directories into a de-duplicated, sorted notebook list.

    Plain files are taken as-is; directories are scanned recursively for
    marimo notebooks. Results are keyed by path string so duplicates
    collapse, and returned sorted by that key.
    """
    found: dict[str, MarimoPath] = {}
    for target in paths:
        if not target.is_dir():
            found[str(target)] = MarimoPath(str(target))
            continue
        scanner = DirectoryScanner(str(target), include_markdown=True)
        try:
            file_infos = scanner.scan()
        except HTTPException as exc:
            # A scan timeout still yields partial results; any other
            # HTTP error is fatal.
            if exc.status_code != HTTPStatus.REQUEST_TIMEOUT:
                raise
            file_infos = scanner.partial_results
        for info in flatten_files(file_infos):
            if info.is_directory or not info.is_marimo_file:
                continue
            abs_path = str(Path(target) / info.path)
            found[abs_path] = MarimoPath(abs_path)
    return [found[key] for key in sorted(found)]
class SandboxVenvPool:
    """Caches sandbox virtualenvs keyed by their resolved requirement set.

    Notebooks whose sandbox requirements are identical share one venv, so
    exporting many notebooks avoids rebuilding the same environment.
    Call close() to delete every sandbox directory the pool created.
    """

    def __init__(self) -> None:
        # Maps a tuple of requirement strings -> (sandbox_dir, venv_python).
        self._envs: dict[tuple[str, ...], tuple[str, str]] = {}

    def get_python(self, notebook_path: str) -> str:
        """Return a venv python for *notebook_path*, building one on first use."""
        # Imported lazily to keep `uv`-related machinery out of the
        # non-sandbox code path.
        from marimo._cli.sandbox import (
            build_sandbox_venv,
            get_sandbox_requirements,
        )

        requirements = tuple(get_sandbox_requirements(notebook_path))
        existing = self._envs.get(requirements)
        if existing is not None:
            # Reuse the venv already built for this exact requirement set.
            return existing[1]
        sandbox_dir, venv_python = build_sandbox_venv(notebook_path)
        self._envs[requirements] = (sandbox_dir, venv_python)
        return venv_python

    def close(self) -> None:
        """Delete all sandbox directories created by this pool."""
        from marimo._cli.sandbox import cleanup_sandbox_dir

        for sandbox_dir, _ in self._envs.values():
            cleanup_sandbox_dir(sandbox_dir)
        self._envs.clear()
def run_python_subprocess(
    *,
    venv_python: str,
    script: str,
    payload: dict[str, Any],
    action: str,
) -> str:
    """Run *script* with *venv_python*, passing *payload* as JSON in argv[1].

    Returns the subprocess stdout. Raises click.ClickException (including
    the captured stderr) when the subprocess exits non-zero; *action*
    names the operation in that error message.
    """
    command = [venv_python, "-c", script, json.dumps(payload)]
    completed = subprocess.run(
        command,
        check=False,
        capture_output=True,
        text=True,
    )
    if completed.returncode == 0:
        return completed.stdout
    stderr = completed.stderr.strip()
    raise click.ClickException(
        f"Failed to {action} in sandbox.\n\n"
        f"Command:\n\n {venv_python} -c <script>\n\n"
        f"Stderr:\n\n{stderr}"
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/export/_common.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_cli/export/session.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import json
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, cast
import click
from marimo._cli.errors import MarimoCLIMissingDependencyError
from marimo._cli.export._common import (
SandboxVenvPool,
collect_notebooks,
is_multi_target,
run_python_subprocess,
)
from marimo._cli.parse_args import parse_args
from marimo._cli.print import echo, green, red, yellow
from marimo._dependencies.dependencies import DependencyManager
from marimo._schemas.session import NotebookSessionV1
from marimo._server.export import run_app_until_completion
from marimo._server.export._session_cache import (
is_session_snapshot_stale,
serialize_session_snapshot,
write_session_snapshot,
)
from marimo._server.file_router import AppFileRouter
from marimo._server.utils import asyncio_run
from marimo._session.state.serialize import get_session_cache_file
from marimo._utils.marimo_path import MarimoPath
if TYPE_CHECKING:
from marimo._cli.sandbox import SandboxMode
# Help text shared by the --sandbox/--no-sandbox CLI option below.
_sandbox_message = (
    "Run the command in an isolated virtual environment using "
    "`uv run --isolated`. Requires `uv`."
)
def _resolve_session_sandbox_mode(
    *,
    sandbox: bool | None,
    path_targets: list[Path],
    first_target: str,
) -> SandboxMode | None:
    """Decide which sandbox mode (if any) this invocation should use.

    Single-notebook exports defer to the shared resolver. Multi-notebook
    exports never prompt: they use MULTI only when --sandbox was passed
    explicitly, and no sandbox otherwise.
    """
    from marimo._cli.sandbox import SandboxMode, resolve_sandbox_mode

    if not is_multi_target(path_targets):
        return resolve_sandbox_mode(sandbox=sandbox, name=first_target)
    if sandbox:
        return SandboxMode.MULTI
    return None
async def _export_session_snapshot(
    marimo_path: MarimoPath,
    *,
    notebook_args: tuple[str, ...],
    venv_python: str | None = None,
) -> tuple[NotebookSessionV1, bool]:
    """Run the notebook to completion and serialize its session view.

    Returns (snapshot, did_error), where did_error is True when some cell
    failed during execution. When *venv_python* is given, the notebook is
    executed by that interpreter in a subprocess (sandbox mode) instead of
    in-process.
    """
    if venv_python is None:
        # In-process path: run the app directly in this interpreter.
        cli_args = parse_args(notebook_args) if notebook_args else {}
        file_router = AppFileRouter.from_filename(marimo_path)
        file_key = file_router.get_unique_file_key()
        if file_key is None:
            raise RuntimeError(
                "Expected a unique file key when exporting a single "
                f"notebook: {marimo_path.absolute_name}"
            )
        file_manager = file_router.get_file_manager(file_key)
        session_view, did_error = await run_app_until_completion(
            file_manager,
            cli_args=cli_args,
            argv=list(notebook_args),
            quiet=True,
            persist_session=False,
        )
        session_snapshot = serialize_session_snapshot(
            session_view,
            notebook_path=marimo_path.absolute_name,
            cell_ids=list(file_manager.app.cell_manager.cell_ids()),
        )
        return session_snapshot, did_error

    # Sandbox path: execute in a worker thread so the blocking subprocess
    # call does not stall the event loop.
    payload = {
        "path": marimo_path.absolute_name,
        "args": list(notebook_args),
    }
    return await asyncio.to_thread(
        _export_session_snapshot_in_subprocess,
        venv_python,
        payload,
    )
def _export_session_snapshot_in_subprocess(
    venv_python: str, payload: dict[str, Any]
) -> tuple[NotebookSessionV1, bool]:
    """Execute the session export inside the sandbox interpreter.

    The helper script receives *payload* as a JSON argv and prints a JSON
    object with "session_snapshot" and "did_error" keys to stdout, which
    is parsed and validated here.
    """
    # The script body is left-aligned: it is executed via `python -c` in
    # the sandbox interpreter, so it must be valid top-level code.
    script = r"""
import asyncio
import json
import sys

from marimo._cli.parse_args import parse_args
from marimo._server.export import run_app_until_completion
from marimo._server.file_router import AppFileRouter
from marimo._server.export._session_cache import serialize_session_snapshot
from marimo._utils.marimo_path import MarimoPath

payload = json.loads(sys.argv[1])
path = MarimoPath(payload["path"])
args = payload.get("args") or []
file_router = AppFileRouter.from_filename(path)
file_key = file_router.get_unique_file_key()
if file_key is None:
    raise RuntimeError("Expected a unique file key for session export.")
file_manager = file_router.get_file_manager(file_key)
cli_args = parse_args(tuple(args)) if args else {}
session_view, did_error = asyncio.run(
    run_app_until_completion(
        file_manager,
        cli_args=cli_args,
        argv=list(args),
        quiet=True,
        persist_session=False,
    )
)
session_snapshot = serialize_session_snapshot(
    session_view,
    notebook_path=path.absolute_name,
    cell_ids=list(file_manager.app.cell_manager.cell_ids()),
)
sys.stdout.write(
    json.dumps(
        {
            "session_snapshot": session_snapshot,
            "did_error": did_error,
        }
    )
)
"""
    output = run_python_subprocess(
        venv_python=venv_python,
        script=script,
        payload=payload,
        action="export session",
    )
    try:
        data = cast(dict[str, Any], json.loads(output))
    except json.JSONDecodeError as e:
        raise click.ClickException(
            "Failed to parse sandbox session export output.\n\n"
            f"Stdout:\n\n{output.strip()}"
        ) from e
    session_snapshot = data.get("session_snapshot")
    did_error = bool(data.get("did_error", False))
    # The snapshot must be a JSON object; anything else means the sandbox
    # script misbehaved.
    if not isinstance(session_snapshot, dict):
        raise click.ClickException(
            "Sandbox session export returned an invalid payload."
        )
    return cast(NotebookSessionV1, session_snapshot), did_error
async def _export_session_for_notebook(
    notebook: MarimoPath,
    *,
    force_overwrite: bool,
    notebook_args: tuple[str, ...],
    sandbox_pool: SandboxVenvPool | None,
) -> None:
    """Export one notebook's session snapshot, unless the cache is fresh.

    Args:
        notebook: The notebook to run and snapshot.
        force_overwrite: Export even when the cached snapshot is current.
        notebook_args: Extra CLI args forwarded to the notebook.
        sandbox_pool: When set, the notebook runs in a pooled sandbox venv.

    Raises:
        click.ClickException: When the notebook ran but some cells errored.
    """
    # Fix: the original computed get_session_cache_file(notebook.path) here
    # into `output` and never read it before reassigning below; the dead
    # assignment is removed.
    if _maybe_skip_fresh_snapshot(notebook, force_overwrite=force_overwrite):
        return
    echo(f"Running {notebook.short_name}...")
    venv_python = (
        sandbox_pool.get_python(str(notebook.path))
        if sandbox_pool is not None
        else None
    )
    session_snapshot, did_error = await _export_session_snapshot(
        notebook,
        notebook_args=notebook_args,
        venv_python=venv_python,
    )
    output = write_session_snapshot(
        notebook_path=notebook.path,
        snapshot=session_snapshot,
    )
    if did_error:
        raise click.ClickException(
            "Session export succeeded, but some cells failed to execute."
        )
    echo(green("ok") + f": {output}")
def _maybe_skip_fresh_snapshot(
    notebook: MarimoPath, *, force_overwrite: bool
) -> bool:
    """Return True (and print a skip notice) when the cached snapshot is current.

    Returns False — meaning the caller should export — when overwriting is
    forced, no snapshot exists yet, or the existing snapshot is stale.
    """
    snapshot_file = get_session_cache_file(notebook.path)
    needs_export = (
        force_overwrite
        or not snapshot_file.exists()
        or is_session_snapshot_stale(snapshot_file, notebook)
    )
    if needs_export:
        return False
    echo(
        yellow("skip") + f": {notebook.short_name} "
        "(up-to-date, use --force-overwrite if you want to re-export anyway)"
    )
    return True
async def _export_sessions(
    *,
    notebooks: list[MarimoPath],
    force_overwrite: bool,
    notebook_args: tuple[str, ...],
    continue_on_error: bool,
    sandbox_mode: SandboxMode | None,
) -> None:
    """Export session snapshots for every notebook in *notebooks*.

    With continue_on_error, failures are collected and summarized in a
    single ClickException at the end; otherwise the first failure
    re-raises immediately. Sandbox venvs (MULTI mode) are always cleaned
    up via the finally block.
    """
    from marimo._cli.sandbox import SandboxMode

    failures: list[tuple[MarimoPath, Exception]] = []
    use_per_notebook_sandbox = sandbox_mode is SandboxMode.MULTI
    if use_per_notebook_sandbox and not DependencyManager.which("uv"):
        raise MarimoCLIMissingDependencyError(
            "uv is required for --sandbox session export.",
            "uv",
            additional_tip="Install uv from https://github.com/astral-sh/uv",
        )
    sandbox_pool: SandboxVenvPool | None = (
        SandboxVenvPool() if use_per_notebook_sandbox else None
    )
    try:
        for notebook in notebooks:
            try:
                await _export_session_for_notebook(
                    notebook,
                    force_overwrite=force_overwrite,
                    notebook_args=notebook_args,
                    sandbox_pool=sandbox_pool,
                )
            except Exception as error:
                failures.append((notebook, error))
                echo(red("error") + f": {notebook.short_name}: {error}")
                if not continue_on_error:
                    # Re-raise the original exception; the finally block
                    # still tears down the sandbox pool.
                    raise
    finally:
        if sandbox_pool is not None:
            sandbox_pool.close()
    if failures:
        raise click.ClickException(
            f"Failed to export sessions for {len(failures)} notebooks."
        )
@click.command(
    "session",
    help=(
        "Execute a notebook or directory of notebooks and export session snapshots."
    ),
)
@click.argument(
    "name",
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, path_type=Path
    ),
)
@click.option(
    "--sandbox/--no-sandbox",
    is_flag=True,
    default=None,
    type=bool,
    help=_sandbox_message,
)
@click.option(
    "--force-overwrite/--no-force-overwrite",
    default=False,
    help=(
        "Overwrite all existing session snapshots, even if they are "
        "already up-to-date."
    ),
)
@click.option(
    "--continue-on-error/--no-continue-on-error",
    default=True,
    help="Continue processing other notebooks if one notebook fails.",
)
@click.argument("args", nargs=-1, type=click.UNPROCESSED)
def session(
    name: Path,
    sandbox: Optional[bool],
    force_overwrite: bool,
    continue_on_error: bool,
    args: tuple[str, ...],
) -> None:
    """Execute notebooks and export their session snapshots.

    NAME may be a single notebook or a directory to scan recursively.
    Remaining ARGS are forwarded to the notebook(s) as CLI args.
    """
    path_targets = [name]
    notebooks = collect_notebooks(path_targets)
    if not notebooks:
        raise click.ClickException("No marimo notebooks found.")
    sandbox_mode = _resolve_session_sandbox_mode(
        sandbox=sandbox,
        path_targets=path_targets,
        first_target=str(name),
    )

    from marimo._cli.sandbox import SandboxMode, run_in_sandbox

    if sandbox_mode is SandboxMode.SINGLE:
        # Single-notebook sandbox: re-invoke this CLI inside the sandbox.
        # Check freshness first so an up-to-date snapshot skips the
        # sandbox bootstrap entirely.
        notebook = notebooks[0]
        if _maybe_skip_fresh_snapshot(
            notebook, force_overwrite=force_overwrite
        ):
            return
        run_in_sandbox(sys.argv[1:], name=str(name))
        return

    # In-process (or MULTI-sandbox) export of all collected notebooks.
    asyncio_run(
        _export_sessions(
            notebooks=notebooks,
            force_overwrite=force_overwrite,
            notebook_args=args,
            continue_on_error=continue_on_error,
            sandbox_mode=sandbox_mode,
        )
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/export/session.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_cli/test_cli_export_session.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from unittest.mock import AsyncMock, patch
import click
import pytest
from click.testing import CliRunner
import marimo._cli.export.session as session_module
from marimo._cli.sandbox import SandboxMode
from marimo._server.utils import asyncio_run
from marimo._session.state.serialize import get_session_cache_file
if TYPE_CHECKING:
from pathlib import Path
def _write_notebook(path: Path) -> None:
    """Write a minimal, valid marimo notebook (one empty cell) to *path*."""
    # NOTE(review): the notebook body's internal indentation was lost in
    # extraction and is reconstructed here — confirm against the repo.
    path.write_text(
        """
import marimo

app = marimo.App()


@app.cell
def _():
    return


if __name__ == "__main__":
    app.run()
""",
        encoding="utf-8",
    )
def test_session_sandbox_single_runs_in_sandbox(tmp_path: Path) -> None:
    """SINGLE sandbox mode delegates to run_in_sandbox, not in-process export."""
    notebook = tmp_path / "notebook.py"
    _write_notebook(notebook)
    runner = CliRunner()
    export_sessions = AsyncMock()
    with (
        patch(
            "marimo._cli.sandbox.run_in_sandbox",
            return_value=0,
        ) as run_in_sandbox,
        patch(
            "marimo._cli.sandbox.resolve_sandbox_mode",
            return_value=SandboxMode.SINGLE,
        ) as resolve_sandbox_mode,
        patch.object(session_module, "_export_sessions", new=export_sessions),
    ):
        result = runner.invoke(
            session_module.session,
            [str(notebook), "--sandbox"],
        )
    assert result.exit_code == 0, result.output
    run_in_sandbox.assert_called_once()
    resolve_sandbox_mode.assert_called_once()
    assert run_in_sandbox.call_args.kwargs["name"] == str(notebook)
    # The in-process export path must not run when delegating to the sandbox.
    export_sessions.assert_not_called()
def test_session_sandbox_single_skips_before_sandbox_when_fresh(
    tmp_path: Path,
) -> None:
    """A fresh snapshot short-circuits before the sandbox is ever started."""
    notebook = tmp_path / "notebook.py"
    _write_notebook(notebook)
    # Pre-create a cache file so the freshness check has something to find.
    session_file = get_session_cache_file(notebook)
    session_file.parent.mkdir(parents=True, exist_ok=True)
    session_file.write_text("{}", encoding="utf-8")
    runner = CliRunner()
    with (
        patch(
            "marimo._cli.sandbox.resolve_sandbox_mode",
            return_value=SandboxMode.SINGLE,
        ),
        patch.object(
            session_module,
            "is_session_snapshot_stale",
            return_value=False,
        ) as is_stale,
        patch("marimo._cli.sandbox.run_in_sandbox") as run_in_sandbox,
    ):
        result = runner.invoke(
            session_module.session,
            [str(notebook), "--sandbox"],
        )
    assert result.exit_code == 0, result.output
    is_stale.assert_called_once()
    # Skipping must happen before any sandbox bootstrap.
    run_in_sandbox.assert_not_called()
    assert "skip:" in result.output
def test_session_sandbox_multi_uses_in_process_export(tmp_path: Path) -> None:
    """Directory targets with --sandbox use MULTI mode and in-process export."""
    notebook_dir = tmp_path / "notebooks"
    notebook_dir.mkdir()
    first = notebook_dir / "first.py"
    second = notebook_dir / "second.py"
    _write_notebook(first)
    _write_notebook(second)
    runner = CliRunner()
    export_sessions = AsyncMock()
    with (
        patch("marimo._cli.sandbox.run_in_sandbox") as run_in_sandbox,
        patch("marimo._cli.sandbox.resolve_sandbox_mode") as resolve_mode,
        patch.object(session_module, "_export_sessions", new=export_sessions),
    ):
        result = runner.invoke(
            session_module.session,
            [str(notebook_dir), "--sandbox"],
        )
    assert result.exit_code == 0, result.output
    # Multi-notebook exports never re-invoke the CLI or consult the
    # single-notebook resolver.
    run_in_sandbox.assert_not_called()
    resolve_mode.assert_not_called()
    export_sessions.assert_called_once()
    assert (
        export_sessions.call_args.kwargs["sandbox_mode"] is SandboxMode.MULTI
    )
def test_session_no_sandbox_uses_in_process_export(tmp_path: Path) -> None:
    """--no-sandbox exports in-process with sandbox_mode=None."""
    notebook = tmp_path / "notebook.py"
    _write_notebook(notebook)
    runner = CliRunner()
    export_sessions = AsyncMock()
    with (
        patch("marimo._cli.sandbox.run_in_sandbox") as run_in_sandbox,
        patch(
            "marimo._cli.sandbox.resolve_sandbox_mode", return_value=None
        ) as resolve_mode,
        patch.object(session_module, "_export_sessions", new=export_sessions),
    ):
        result = runner.invoke(
            session_module.session,
            [str(notebook), "--no-sandbox"],
        )
    assert result.exit_code == 0, result.output
    run_in_sandbox.assert_not_called()
    # The single-notebook resolver is still consulted, but returns None.
    resolve_mode.assert_called_once()
    export_sessions.assert_called_once()
    assert export_sessions.call_args.kwargs["sandbox_mode"] is None
def test_export_sessions_continue_on_error_processes_remaining(
    tmp_path: Path,
) -> None:
    """With continue_on_error, later notebooks still export after a failure."""
    first = tmp_path / "first.py"
    second = tmp_path / "second.py"
    third = tmp_path / "third.py"
    _write_notebook(first)
    _write_notebook(second)
    _write_notebook(third)
    notebooks = [
        session_module.MarimoPath(str(p)) for p in [first, second, third]
    ]
    called: list[str] = []

    # Stub for _export_session_snapshot: fails only for second.py.
    async def fake_export(
        marimo_path: session_module.MarimoPath,
        *,
        notebook_args: tuple[str, ...],
        venv_python: str | None = None,
    ) -> tuple[dict[str, str], bool]:
        del notebook_args, venv_python
        called.append(marimo_path.short_name)
        if marimo_path.short_name == "second.py":
            raise click.ClickException("boom")
        return {"name": marimo_path.short_name}, False

    with (
        patch.object(
            session_module,
            "_export_session_snapshot",
            new=AsyncMock(side_effect=fake_export),
        ),
        pytest.raises(
            click.ClickException,
            match="Failed to export sessions for 1 notebooks",
        ),
    ):
        asyncio_run(
            session_module._export_sessions(
                notebooks=notebooks,
                force_overwrite=True,
                notebook_args=(),
                continue_on_error=True,
                sandbox_mode=None,
            )
        )
    # All three notebooks were attempted; only the failing one lacks a cache.
    assert called == ["first.py", "second.py", "third.py"]
    assert get_session_cache_file(first).exists()
    assert not get_session_cache_file(second).exists()
    assert get_session_cache_file(third).exists()
def test_export_sessions_no_continue_on_error_stops_at_first_error(
    tmp_path: Path,
) -> None:
    """Without continue_on_error, the first failure aborts the whole run."""
    first = tmp_path / "first.py"
    second = tmp_path / "second.py"
    third = tmp_path / "third.py"
    _write_notebook(first)
    _write_notebook(second)
    _write_notebook(third)
    notebooks = [
        session_module.MarimoPath(str(p)) for p in [first, second, third]
    ]
    called: list[str] = []

    # Stub for _export_session_snapshot: fails on the very first notebook.
    async def fake_export(
        marimo_path: session_module.MarimoPath,
        *,
        notebook_args: tuple[str, ...],
        venv_python: str | None = None,
    ) -> tuple[dict[str, str], bool]:
        del notebook_args, venv_python
        called.append(marimo_path.short_name)
        if marimo_path.short_name == "first.py":
            raise click.ClickException("first failed")
        return {"name": marimo_path.short_name}, False

    with (
        patch.object(
            session_module,
            "_export_session_snapshot",
            new=AsyncMock(side_effect=fake_export),
        ),
        pytest.raises(click.ClickException, match="first failed"),
    ):
        asyncio_run(
            session_module._export_sessions(
                notebooks=notebooks,
                force_overwrite=True,
                notebook_args=(),
                continue_on_error=False,
                sandbox_mode=None,
            )
        )
    # Only the first notebook was attempted; nothing was written.
    assert called == ["first.py"]
    assert not get_session_cache_file(first).exists()
    assert not get_session_cache_file(second).exists()
    assert not get_session_cache_file(third).exists()
def test_export_session_snapshot_subprocess_invalid_json() -> None:
    """Non-JSON subprocess output surfaces as a parse ClickException."""
    stubbed_subprocess = patch.object(
        session_module, "run_python_subprocess", return_value="not json"
    )
    expected_error = pytest.raises(
        click.ClickException,
        match="Failed to parse sandbox session export output",
    )
    with stubbed_subprocess, expected_error:
        session_module._export_session_snapshot_in_subprocess(
            "python",
            {"path": "notebook.py", "args": []},
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_cli_export_session.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/issues/8527-holoviews-dynamicmap.py | # /// script
# dependencies = [
# "holoviews==1.22.1",
# "marimo",
# "numpy",
# ]
# requires-python = ">=3.12"
# ///
import marimo
__generated_with = "0.20.2"
app = marimo.App(width="medium")
@app.cell
def _():
    # Taken directly from https://holoviews.org/reference/streams/bokeh/Selection1D_points.html#selection1d-points
    import numpy as np
    import holoviews as hv
    from holoviews import opts, streams

    hv.extension("bokeh")

    opts.defaults(opts.Points(tools=["box_select", "lasso_select"]))

    # Declare some points
    points = hv.Points(np.random.randn(1000, 2))

    # Declare points as source of selection stream
    selection = streams.Selection1D(source=points)

    # Write function that uses the selection indices to slice points and compute stats
    def selected_info(index):
        selected = points.iloc[index]
        if index:
            label = "Mean x, y: {:.3f}, {:.3f}".format(
                *tuple(selected.array().mean(axis=0))
            )
        else:
            label = "No selection"
        return selected.relabel(label).opts(color="red")

    # Combine points and DynamicMap; the layout is the cell's output.
    points + hv.DynamicMap(selected_info, streams=[selection])
    return
@app.cell
def _():
    # Intentionally empty cell.
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/8527-holoviews-dynamicmap.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_mcp/code_server/lifespan.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import contextlib
from typing import TYPE_CHECKING
from marimo._loggers import marimo_logger
LOGGER = marimo_logger()
if TYPE_CHECKING:
from collections.abc import AsyncIterator
from starlette.applications import Starlette
@contextlib.asynccontextmanager
async def code_mcp_server_lifespan(app: Starlette) -> AsyncIterator[None]:
    """Lifespan for Code Mode MCP server functionality.

    Starts the MCP session manager for the app's configured Code MCP
    server; degrades gracefully (logs and still yields) when the server is
    absent or fails to start, so the host app keeps running.
    """
    try:
        code_mcp_app = getattr(app.state, "code_mcp", None)
        if code_mcp_app is None:
            # Server was never set up (e.g. feature disabled) — run the
            # app without it rather than failing startup.
            LOGGER.warning("Code MCP server not found in app state")
            yield
            return
        async with code_mcp_app.session_manager.run():
            LOGGER.info("Code MCP server session manager started")
            yield
    except ImportError as e:
        # MCP dependencies missing: warn and keep serving without MCP.
        LOGGER.warning(f"Code MCP server dependencies not available: {e}")
        yield
        return
    except Exception as e:
        # NOTE(review): because the try wraps the first yield, these
        # handlers also catch exceptions thrown into the generator from
        # the app body; yielding again at that point makes
        # @asynccontextmanager raise "generator didn't stop after
        # athrow()". They behave as intended only for startup failures —
        # confirm this is acceptable.
        LOGGER.error(f"Failed to start Code MCP server: {e}")
        yield
        return
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_mcp/code_server/lifespan.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_mcp/code_server/main.py | # Copyright 2026 Marimo. All rights reserved.
"""
Code Mode MCP Server for Marimo
A minimal MCP server that lets external AI agents execute Python code
in a running marimo kernel via the scratchpad.
"""
from __future__ import annotations
import asyncio
import os
from typing import TYPE_CHECKING, Any
from marimo._ai._tools.types import (
CodeExecutionResult,
ListSessionsResult,
MarimoNotebookInfo,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._loggers import marimo_logger
from marimo._messaging.cell_output import CellChannel
from marimo._messaging.notification import CellNotification
from marimo._messaging.serde import deserialize_kernel_message
from marimo._runtime.scratch import SCRATCH_CELL_ID
from marimo._session.events import SessionEventBus, SessionEventListener
from marimo._types.ids import SessionId
LOGGER = marimo_logger()
if TYPE_CHECKING:
from starlette.applications import Starlette
from starlette.types import Receive, Scope, Send
from marimo._messaging.types import KernelMessage
from marimo._session.session import Session
_EXECUTION_TIMEOUT = 30.0  # seconds; max wait for a scratchpad run to go idle
class _ScratchCellListener(SessionEventListener):
    """Listens for scratch cell idle notifications and signals waiters.

    Implements SessionExtension so it can be dynamically attached to a
    session via session.attach_extension / session.detach_extension.
    """

    def __init__(self) -> None:
        # session_id -> event set when the scratch cell goes idle.
        self._waiters: dict[str, asyncio.Event] = {}

    def wait_for(self, session_id: str) -> asyncio.Event:
        """Register (replacing any existing) waiter event for *session_id*."""
        event = asyncio.Event()
        self._waiters[session_id] = event
        return event

    # SessionExtension protocol

    def on_attach(self, session: Session, event_bus: SessionEventBus) -> None:
        del session
        event_bus.subscribe(self)

    def on_detach(self) -> None:
        pass

    def on_notification_sent(
        self, session: Session, notification: KernelMessage
    ) -> None:
        del session
        msg = deserialize_kernel_message(notification)
        # Only idle-status notifications for the scratch cell matter.
        if not isinstance(msg, CellNotification):
            return
        if msg.cell_id != SCRATCH_CELL_ID:
            return
        if msg.status != "idle":
            return
        # Signal any waiter for this session
        # NOTE(review): this loop sets and removes *every* pending waiter,
        # not just the one for the session that emitted the notification;
        # with concurrent executions across sessions a waiter could be
        # released early. Confirm whether cross-session signaling is
        # intended.
        for sid, event in list(self._waiters.items()):
            if not event.is_set():
                event.set()
                del self._waiters[sid]
def setup_code_mcp_server(
    app: Starlette, *, allow_remote: bool = False
) -> None:
    """Create and configure the Code Mode MCP server.

    Mounts at /mcp with a single /server streamable HTTP endpoint.
    Exposes two tools: `list_sessions` and `execute_code`.

    Args:
        app: Starlette application instance for accessing marimo state
        allow_remote: If True, disable DNS rebinding protection to allow remote access behind proxies.
    """
    if not DependencyManager.mcp.has():
        from click import ClickException

        msg = "MCP dependencies not available. Install with `pip install marimo[mcp]` or `uv add marimo[mcp]`"
        raise ClickException(msg)

    # Imported lazily: these are only available with the [mcp] extra.
    from mcp.server.fastmcp import FastMCP
    from starlette.middleware.base import BaseHTTPMiddleware
    from starlette.responses import JSONResponse
    from starlette.routing import Mount

    from marimo._runtime.commands import ExecuteScratchpadCommand
    from marimo._server.api.deps import AppStateBase
    from marimo._session.model import ConnectionState

    transport_security = None
    if allow_remote:
        from mcp.server.transport_security import TransportSecuritySettings

        # Behind a proxy the Host header won't match, so turn the DNS
        # rebinding check off when the caller explicitly allows it.
        transport_security = TransportSecuritySettings(
            enable_dns_rebinding_protection=False,
        )

    mcp = FastMCP(
        "marimo-code-mcp",
        stateless_http=True,
        log_level="WARNING",
        streamable_http_path="/server",
        transport_security=transport_security,
    )

    # Per-session locks to prevent overlapping scratchpad executions
    session_locks: dict[str, asyncio.Lock] = {}
    listener = _ScratchCellListener()

    @mcp.tool()
    async def list_sessions() -> ListSessionsResult:
        """List active marimo sessions.

        Returns a list of active sessions, each with 'name', 'path',
        and 'session_id' fields.
        Use the session_id with execute_code to run code in that session.
        """
        state = AppStateBase.from_app(app)
        session_manager = state.session_manager
        sessions: list[MarimoNotebookInfo] = []
        for session_id, session in session_manager.sessions.items():
            conn_state = session.connection_state()
            # Only sessions with a live or recoverable connection are listed.
            if conn_state in (ConnectionState.OPEN, ConnectionState.ORPHANED):
                full_path = session.app_file_manager.path
                filename = session.app_file_manager.filename
                basename = os.path.basename(filename) if filename else None
                sessions.append(
                    MarimoNotebookInfo(
                        name=basename or "new notebook",
                        path=full_path or "(unsaved notebook)",
                        session_id=SessionId(session_id),
                    )
                )
        # Returned in reverse registration order.
        return ListSessionsResult(sessions=sessions[::-1])

    @mcp.tool()
    async def execute_code(session_id: str, code: str) -> CodeExecutionResult:
        """Execute Python code in a notebook's kernel scratchpad.

        The code runs in the scratchpad — a temporary execution environment
        that has access to all variables defined in the notebook but does not
        affect the notebook's cells or dependency graph.

        Args:
            session_id: The session ID of the notebook (from list_sessions).
            code: Python code to execute.
        """
        state = AppStateBase.from_app(app)
        session = state.session_manager.get_session(SessionId(session_id))
        if session is None:
            return CodeExecutionResult(
                success=False,
                error=f"Session '{session_id}' not found. "
                "Use list_sessions to find valid session IDs.",
            )

        # Attach listener as a session extension
        session.attach_extension(listener)
        lock = session_locks.setdefault(session_id, asyncio.Lock())
        async with lock:
            try:
                # Set up event before sending command
                done = listener.wait_for(session_id)
                # Send the scratchpad execution command
                session.put_control_request(
                    ExecuteScratchpadCommand(code=code),
                    from_consumer_id=None,
                )
                # Wait for the scratch cell to become idle
                await asyncio.wait_for(done.wait(), timeout=_EXECUTION_TIMEOUT)
                # FIXME: stdout/stderr are flushed every 10ms by the buffered
                # writer thread. Wait 50ms so trailing console output arrives
                # before we read cell_notifications.
                # See: marimo-team/marimo-lsp ExecutionRegistry.ts
                await asyncio.sleep(0.05)
            except asyncio.TimeoutError:
                return CodeExecutionResult(
                    success=False,
                    error=f"Execution timed out after {_EXECUTION_TIMEOUT}s",
                )
            finally:
                session.detach_extension(listener)

        return _extract_result(session)

    # Build the streamable HTTP app
    mcp_app = mcp.streamable_http_app()

    class RequiresEditMiddleware(BaseHTTPMiddleware):
        # Reject any request whose auth scopes lack "edit" with 403.
        async def __call__(
            self, scope: Scope, receive: Receive, send: Send
        ) -> None:
            auth = scope.get("auth")
            if auth is None or "edit" not in auth.scopes:
                response = JSONResponse(
                    {"detail": "Forbidden"}, status_code=403
                )
                return await response(scope, receive, send)
            return await self.app(scope, receive, send)

    mcp_app.add_middleware(RequiresEditMiddleware)
    # Insert at the front so /mcp matches before catch-all routes.
    app.routes.insert(0, Mount("/mcp", mcp_app))
    app.state.code_mcp = mcp
def _extract_result(session: Any) -> CodeExecutionResult:
    """Read the scratch cell's final state from the session view.

    Translates the scratch cell's notification (output payload, console
    stream, and any error list) into a CodeExecutionResult.
    """
    notif = session.session_view.cell_notifications.get(SCRATCH_CELL_ID)
    if notif is None:
        # Nothing recorded for the scratch cell: treat as a clean run.
        return CodeExecutionResult(success=True)

    cell_output = notif.output

    # Output payload: plain strings pass through; dict payloads prefer
    # text/plain, then text/html, then a stringified dump. List payloads
    # are errors, handled below.
    output_data = None
    if cell_output is not None:
        payload = cell_output.data
        if isinstance(payload, str):
            output_data = payload
        elif isinstance(payload, dict):
            output_data = payload.get(
                "text/plain", payload.get("text/html", str(payload))
            )

    # Console stream, split by channel.
    stdout: list[str] = []
    stderr: list[str] = []
    console = notif.console if isinstance(notif.console, list) else []
    for entry in console:
        if entry is None:
            continue
        if entry.channel == CellChannel.STDOUT:
            stdout.append(str(entry.data))
        elif entry.channel == CellChannel.STDERR:
            stderr.append(str(entry.data))

    # A list-typed output payload is a list of error objects.
    errors: list[str] = []
    if cell_output is not None and isinstance(cell_output.data, list):
        errors = [
            str(getattr(err, "msg", None) or err) for err in cell_output.data
        ]

    return CodeExecutionResult(
        success=not errors,
        output=output_data,
        stdout=stdout,
        stderr=stderr,
        errors=errors,
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_mcp/code_server/main.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_mcp/code_server/test_code_server.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from unittest.mock import MagicMock
import pytest
from marimo._mcp.code_server.lifespan import code_mcp_server_lifespan
pytest.importorskip("mcp", reason="MCP requires Python 3.10+")
from typing import TYPE_CHECKING
from starlette.applications import Starlette
from starlette.authentication import AuthCredentials, SimpleUser
from starlette.middleware import Middleware
from starlette.middleware.authentication import AuthenticationMiddleware
from starlette.testclient import TestClient
from marimo._mcp.code_server.main import setup_code_mcp_server
from marimo._messaging.cell_output import CellChannel, CellOutput
from marimo._messaging.notification import CellNotification
from marimo._runtime.scratch import SCRATCH_CELL_ID
from marimo._server.api.middleware import AuthBackend
from marimo._session.model import ConnectionState
from tests._server.mocks import get_mock_session_manager
if TYPE_CHECKING:
from starlette.requests import HTTPConnection
def create_test_app() -> Starlette:
    """Create a test Starlette app with Code MCP server.

    Auth is disabled so every request is treated as fully authorized,
    and the session manager is a mock.
    """
    app = Starlette(
        middleware=[
            Middleware(
                AuthenticationMiddleware,
                backend=AuthBackend(should_authenticate=False),
            ),
        ],
    )
    app.state.session_manager = get_mock_session_manager()
    setup_code_mcp_server(app)
    return app
def test_code_mcp_server_starts_up():
    """Test that Code MCP server can be set up and routes are registered."""
    test_app = create_test_app()
    assert hasattr(test_app.state, "code_mcp")
    mounted_paths = [str(route.path) for route in test_app.routes]
    assert any("/mcp" in path for path in mounted_paths)
async def test_code_mcp_server_requires_edit_scope():
    """Test that Code MCP server validates 'edit' scope is present."""

    # Backend granting only "read": all /mcp requests must be rejected.
    class MockAuthBackendNoEdit:
        async def authenticate(self, conn: HTTPConnection):
            del conn
            return AuthCredentials(scopes=["read"]), SimpleUser("test_user")

    app_no_edit = Starlette(
        middleware=[
            Middleware(
                AuthenticationMiddleware,
                backend=MockAuthBackendNoEdit(),
            ),
        ],
    )
    app_no_edit.state.session_manager = get_mock_session_manager()
    setup_code_mcp_server(app_no_edit)
    client = TestClient(app_no_edit, raise_server_exceptions=False)
    response = client.get("/mcp/server")
    assert response.status_code == 403

    # Backend granting "edit": the same request must get past the guard.
    class MockAuthBackendWithEdit:
        async def authenticate(self, conn: HTTPConnection):
            del conn
            return AuthCredentials(scopes=["edit"]), SimpleUser("test_user")

    app_with_edit = Starlette(
        middleware=[
            Middleware(
                AuthenticationMiddleware,
                backend=MockAuthBackendWithEdit(),
            ),
        ],
    )
    setup_code_mcp_server(app_with_edit)
    async with code_mcp_server_lifespan(app_with_edit):
        app_with_edit.state.session_manager = get_mock_session_manager()
        client_with_edit = TestClient(app_with_edit)
        response = client_with_edit.get("/mcp/server")
        # Any status except 403 means the scope check passed.
        assert response.status_code != 403
def _make_mock_session(
*,
filename: str = "/path/to/notebook.py",
connection_state: ConnectionState = ConnectionState.OPEN,
) -> MagicMock:
"""Create a mock session for testing."""
session = MagicMock()
session.app_file_manager.path = filename
session.app_file_manager.filename = filename
session.connection_state.return_value = connection_state
session.session_view = MagicMock()
session.session_view.cell_notifications = {}
return session
def _make_idle_scratch_notification(
*,
output_data: str | None = None,
stdout: list[str] | None = None,
stderr: list[str] | None = None,
errors: list[object] | None = None,
) -> CellNotification:
"""Create a CellNotification for the scratch cell in idle state."""
output = None
if output_data is not None:
output = CellOutput(
channel=CellChannel.OUTPUT,
mimetype="text/plain",
data=output_data,
)
elif errors is not None:
output = CellOutput(
channel=CellChannel.MARIMO_ERROR,
mimetype="application/vnd.marimo+error",
data=errors,
)
console: list[CellOutput] = []
for msg in stdout or []:
console.append(
CellOutput(
channel=CellChannel.STDOUT, mimetype="text/plain", data=msg
)
)
for msg in stderr or []:
console.append(
CellOutput(
channel=CellChannel.STDERR, mimetype="text/plain", data=msg
)
)
return CellNotification(
cell_id=SCRATCH_CELL_ID,
output=output,
console=console if console else None,
status="idle",
)
class TestGetActiveNotebooks:
def test_no_sessions_returns_empty(self):
"""get_active_notebooks returns empty list with no sessions."""
app = create_test_app()
assert app.state.session_manager.sessions == {}
def test_session_lookup(self):
"""Sessions injected into the repository can be looked up."""
app = create_test_app()
sm = app.state.session_manager
mock_session = _make_mock_session(filename="/path/to/nb.py")
sm._repository._sessions["s1"] = mock_session
assert sm.get_session("s1") is mock_session
class TestExecuteCode:
async def test_session_not_found(self):
"""Session lookup returns None for missing sessions."""
app = create_test_app()
from marimo._server.api.deps import AppStateBase
state = AppStateBase.from_app(app)
assert state.session_manager.get_session("nonexistent") is None
def test_scratchpad_command_dispatch(self):
"""put_control_request is called with ExecuteScratchpadCommand."""
app = create_test_app()
sm = app.state.session_manager
mock_session = _make_mock_session()
sm._repository._sessions["s1"] = mock_session
from marimo._runtime.commands import ExecuteScratchpadCommand
command = ExecuteScratchpadCommand(code="2 + 2")
mock_session.put_control_request(command, from_consumer_id=None)
mock_session.put_control_request.assert_called_once_with(
command, from_consumer_id=None
)
def test_idle_notification_output_extraction(self):
"""Output data is extracted correctly from idle scratch notification."""
notif = _make_idle_scratch_notification(output_data="4")
assert notif.status == "idle"
assert notif.output is not None
assert notif.output.data == "4"
def test_stdout_extraction(self):
"""Stdout messages are extracted from console outputs."""
notif = _make_idle_scratch_notification(stdout=["hello\n"])
assert notif.console is not None
console_list = (
notif.console
if isinstance(notif.console, list)
else [notif.console]
)
stdout_msgs = [
str(o.data)
for o in console_list
if o.channel == CellChannel.STDOUT
]
assert stdout_msgs == ["hello\n"]
def test_stderr_extraction(self):
"""Stderr messages are extracted from console outputs."""
notif = _make_idle_scratch_notification(
output_data="ok", stderr=["warning: something\n"]
)
assert notif.console is not None
console_list = (
notif.console
if isinstance(notif.console, list)
else [notif.console]
)
stderr_msgs = [
str(o.data)
for o in console_list
if o.channel == CellChannel.STDERR
]
assert stderr_msgs == ["warning: something\n"]
def test_error_extraction(self):
"""Errors are extracted from output data list."""
mock_error = MagicMock()
mock_error.msg = "NameError: name 'x' is not defined"
notif = _make_idle_scratch_notification(errors=[mock_error])
assert notif.output is not None
assert isinstance(notif.output.data, list)
assert len(notif.output.data) == 1
def test_notification_set_after_command(self):
"""Simulates the full flow: command -> notification -> output."""
app = create_test_app()
sm = app.state.session_manager
mock_session = _make_mock_session()
sm._repository._sessions["s1"] = mock_session
notif = _make_idle_scratch_notification(
output_data="4", stdout=["debug\n"]
)
def set_notification(*args: object, **kwargs: object):
del args
del kwargs
mock_session.session_view.cell_notifications = {
SCRATCH_CELL_ID: notif
}
mock_session.put_control_request.side_effect = set_notification
from marimo._runtime.commands import ExecuteScratchpadCommand
command = ExecuteScratchpadCommand(code="2 + 2")
mock_session.put_control_request(command, from_consumer_id=None)
cell_notif = mock_session.session_view.cell_notifications.get(
SCRATCH_CELL_ID
)
assert cell_notif is not None
assert cell_notif.status == "idle"
assert cell_notif.output is not None
assert cell_notif.output.data == "4"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_mcp/code_server/test_code_server.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/markdown/latex_outline.py | import marimo
__generated_with = "0.20.2"
app = marimo.App()
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
# Introduction
This notebook tests that LaTeX renders correctly in the outline panel.
""")
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## The equation $E = mc^2$
Einstein's famous mass-energy equivalence.
""")
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Inline math: $\alpha + \beta = \gamma$
Greek letters in a heading.
""")
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Display math
$$\int_0^\infty e^{-x^2} dx = \frac{\sqrt{\pi}}{2}$$
""")
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Mixed: text and $\sum_{i=1}^{n} x_i$
A heading with both plain text and a summation.
""")
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Plain heading (no LaTeX)
This heading has no math for comparison.
""")
return
@app.cell
def _():
import marimo as mo
return (mo,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/markdown/latex_outline.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:tests/_save/test_cache_invalidation.py | # Copyright 2026 Marimo. All rights reserved.
"""Tests for cache invalidation when function body changes."""
from __future__ import annotations
import textwrap
from marimo._runtime.runtime import Kernel
from tests.conftest import ExecReqProvider
class TestCacheInvalidation:
async def test_numeric_return_invalidation(
self, k: Kernel, exec_req: ExecReqProvider
) -> None:
"""@mo.cache with numeric return types must invalidate on body change."""
# First run: return 11 + 19 = 30
await k.run(
[
exec_req.get_with_id(
cell_id="0",
code=textwrap.dedent("""
import marimo as mo
@mo.cache
def query():
return 11 + 19
result = query()
"""),
),
]
)
assert k.globals["result"] == 30
first_hash = k.globals["query"]._last_hash
# Second run: return 5 + 3 = 8 (same cell, different code)
await k.run(
[
exec_req.get_with_id(
cell_id="0",
code=textwrap.dedent("""
import marimo as mo
@mo.cache
def query():
return 5 + 3
result = query()
"""),
),
]
)
second_hash = k.globals["query"]._last_hash
# Hashes should be different
assert first_hash != second_hash, (
"Hash should change when function body changes"
)
# Should get 8, not stale 30
assert k.globals["result"] == 8, (
f"Expected 8, got {k.globals['result']} (stale cache)"
)
async def test_string_return_invalidation(
self, k: Kernel, exec_req: ExecReqProvider
) -> None:
"""@mo.cache with string return types must invalidate on body change."""
# First run
await k.run(
[
exec_req.get_with_id(
cell_id="0",
code=textwrap.dedent("""
import marimo as mo
@mo.cache
def query():
return "hello"
result = query()
"""),
),
]
)
assert k.globals["result"] == "hello"
first_hash = k.globals["query"]._last_hash
# Second run: different function body
await k.run(
[
exec_req.get_with_id(
cell_id="0",
code=textwrap.dedent("""
import marimo as mo
@mo.cache
def query():
return "world"
result = query()
"""),
),
]
)
second_hash = k.globals["query"]._last_hash
# Hashes should be different
assert first_hash != second_hash, (
"Hash should change when function body changes"
)
assert k.globals["result"] == "world", (
f"Expected 'world', got {k.globals['result']} (stale cache)"
)
async def test_float_return_invalidation(
self, k: Kernel, exec_req: ExecReqProvider
) -> None:
"""@mo.cache with float return types must invalidate on body change."""
# First run: return 1.5 + 2.5 = 4.0
await k.run(
[
exec_req.get_with_id(
cell_id="0",
code=textwrap.dedent("""
import marimo as mo
@mo.cache
def query():
return 1.5 + 2.5
result = query()
"""),
),
]
)
assert k.globals["result"] == 4.0
# Second run: return 0.1 + 0.2 ≈ 0.3
await k.run(
[
exec_req.get_with_id(
cell_id="0",
code=textwrap.dedent("""
import marimo as mo
@mo.cache
def query():
return 0.1 + 0.2
result = query()
"""),
),
]
)
# Should get approximately 0.3, not stale 4.0
assert abs(k.globals["result"] - 0.3) < 0.0001, (
f"Expected ~0.3, got {k.globals['result']} (stale cache)"
)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/test_cache_invalidation.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/outputs/live_raster.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "anywidget==0.9.21",
# "marimo>=0.20.2",
# "traitlets==5.14.3",
# ]
# ///
import marimo
__generated_with = "unknown"
app = marimo.App(width="medium")
@app.cell(hide_code=True)
def _(CounterWidget):
CounterWidget(count=42)
return
@app.cell
def _(anywidget, os, traitlets):
class CounterWidget(anywidget.AnyWidget):
_esm = """
export default async () => {
let hostName = null;
return {
initialize({ model }) {
// This message gets handled by _handle_custom_msg on the Python side
model.send({ event: "requestHostName" });
},
render({ model, el }) {
let count = () => model.get("count");
let btn = document.createElement("button");
btn.classList.add("counter-button");
btn.innerHTML = `Initializing...`;
// Set proper HTML content once message arrives from Python connection
model.on("msg:custom", (msg, buffers) => {
hostName = msg.response;
btn.innerHTML = `count is ${count()} from ${hostName} host`;
});
btn.addEventListener("click", () => {
model.set("count", count() + 1);
model.save_changes();
});
model.on("change:count", () => {
btn.innerHTML =
hostName
? `count is ${count()} from ${hostName} host`
: `Initializing...`;
});
el.appendChild(btn);
},
};
};
"""
_css = """
.counter-button {
background: #387262;
border: 0;
border-radius: 10px;
padding: 10px 50px;
color: white;
}
"""
count = traitlets.Int(0).tag(sync=True)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.on_msg(self._handle_custom_msg)
def _handle_custom_msg(self, *args, **kwargs):
self.send({"response": os.name})
return (CounterWidget,)
@app.cell(hide_code=True)
def _():
import marimo as mo
import anywidget
import traitlets
import os
return anywidget, os, traitlets
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/outputs/live_raster.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_server/export/_html_asset_server.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import threading
from contextlib import AbstractContextManager
from functools import partial
from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
from typing import TYPE_CHECKING, Any, cast
if TYPE_CHECKING:
from pathlib import Path
from types import TracebackType
class _HtmlAssetRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self) -> None:
server = cast(_HtmlAssetHTTPServer, self.server)
route = self.path.split("?", 1)[0]
dynamic_route = server.dynamic_route
if route == dynamic_route:
with server.dynamic_html_lock:
html = server.dynamic_html
self.send_response(200)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Cache-Control", "no-store")
self.end_headers()
self.wfile.write(html.encode("utf-8"))
return
return super().do_GET()
def log_message(self, format: str, *args: Any) -> None: # noqa: A002
del format
del args
class _HtmlAssetHTTPServer(ThreadingHTTPServer):
dynamic_route: str
dynamic_html: str
dynamic_html_lock: threading.Lock
class HtmlAssetServer(AbstractContextManager["HtmlAssetServer"]):
def __init__(self, *, directory: Path, route: str) -> None:
self._directory = directory
self._route = route if route.startswith("/") else f"/{route}"
self._server: _HtmlAssetHTTPServer | None = None
self._thread: threading.Thread | None = None
@property
def base_url(self) -> str:
if self._server is None:
raise RuntimeError("HTML asset server is not running")
host, port = self._server.server_address[:2]
if isinstance(host, bytes):
host = host.decode("utf-8")
return f"http://{host}:{port}"
@property
def page_url(self) -> str:
return f"{self.base_url}{self._route}"
def set_html(self, html: str) -> None:
if self._server is None:
raise RuntimeError("HTML asset server is not running")
with self._server.dynamic_html_lock:
self._server.dynamic_html = html
def __enter__(self) -> HtmlAssetServer: # noqa: PYI034
if not self._directory.is_dir():
raise RuntimeError(f"Static assets not found at {self._directory}")
handler = partial(
_HtmlAssetRequestHandler,
directory=str(self._directory),
)
self._server = _HtmlAssetHTTPServer(("127.0.0.1", 0), handler)
self._server.dynamic_route = self._route
self._server.dynamic_html = ""
self._server.dynamic_html_lock = threading.Lock()
self._thread = threading.Thread(
target=self._server.serve_forever,
daemon=True,
)
self._thread.start()
return self
def __exit__(
self,
_exc_type: type[BaseException] | None,
_exc: BaseException | None,
_tb: TracebackType | None,
) -> None:
# Cleanup only. We intentionally do not suppress exceptions raised inside the with-block.
if self._server is not None:
self._server.shutdown()
self._server.server_close()
self._server = None
if self._thread is not None:
self._thread.join(timeout=1)
self._thread = None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/export/_html_asset_server.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/export/_live_notebook_server.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import subprocess
import sys
import tempfile
import time
from contextlib import AbstractContextManager
from pathlib import Path
from urllib.error import URLError
from urllib.request import urlopen
from marimo import _loggers
from marimo._utils.net import find_free_port
LOGGER = _loggers.marimo_logger()
_LIVE_SERVER_START_TIMEOUT_S = 90.0
_LIVE_SERVER_POLL_INTERVAL_S = 0.2
_LIVE_SERVER_SHUTDOWN_TIMEOUT_S = 5.0
_LIVE_SERVER_DEFAULT_PORT = 2719
class LiveNotebookServer(AbstractContextManager["LiveNotebookServer"]):
"""Run a temporary headless marimo server for live output capture."""
def __init__(self, *, filepath: str, argv: list[str] | None) -> None:
self._filepath = filepath
self._argv = list(argv or [])
self._port = find_free_port(
_LIVE_SERVER_DEFAULT_PORT,
addr="127.0.0.1",
)
self._process: subprocess.Popen[str] | None = None
self._log_file: tempfile._TemporaryFileWrapper[str] | None = None
@property
def page_url(self) -> str:
return f"http://127.0.0.1:{self._port}"
@property
def health_url(self) -> str:
return f"{self.page_url}/health"
def __enter__(self) -> LiveNotebookServer:
"""Start the server process and block until the health endpoint is ready."""
self._log_file = tempfile.NamedTemporaryFile(
mode="w+",
encoding="utf-8",
delete=False,
)
self._process = subprocess.Popen(
self._build_command(),
stdout=self._log_file,
stderr=subprocess.STDOUT,
text=True,
)
self._wait_until_ready()
return self
def __exit__(
self,
_exc_type: type[BaseException] | None,
_exc: BaseException | None,
_tb: object,
) -> None:
"""Terminate the server and clean up temporary log resources."""
# Cleanup only. We intentionally do not suppress exceptions raised
# inside the with-block.
if self._process is not None:
self._terminate_process(self._process)
self._process = None
if self._log_file is not None:
log_name = self._log_file.name
self._log_file.close()
try:
Path(log_name).unlink(missing_ok=True)
except OSError:
LOGGER.debug(
"Failed to clean up live capture server log file: %s",
log_name,
)
self._log_file = None
def _build_command(self) -> list[str]:
"""Build the marimo CLI command used to launch the live notebook."""
command = [
sys.executable,
"-m",
"marimo",
# We don't want to prompt for anything.
"-y",
"run",
self._filepath,
"--headless",
"--no-token",
"--no-skew-protection",
"--no-check",
# We don't need a sandbox because we are going to use the same marimo process.
"--no-sandbox",
"--host",
"127.0.0.1",
"--port",
str(self._port),
]
if self._argv:
command.extend(["--", *self._argv])
return command
def _wait_until_ready(self) -> None:
"""Poll process and health endpoint until server is ready or times out."""
start = time.monotonic()
while time.monotonic() - start < _LIVE_SERVER_START_TIMEOUT_S:
process = self._process
if process is None:
raise RuntimeError("Live notebook server process is missing")
if process.poll() is not None:
logs = self._read_logs()
raise RuntimeError(
"Live notebook server exited before becoming ready."
+ (f"\n\n{logs}" if logs else "")
)
try:
with urlopen(self.health_url, timeout=1) as response:
if response.status == 200:
return
except URLError:
pass
time.sleep(_LIVE_SERVER_POLL_INTERVAL_S)
logs = self._read_logs()
raise RuntimeError(
"Timed out waiting for live notebook server to become ready."
+ (f"\n\n{logs}" if logs else "")
)
def _read_logs(self) -> str:
"""Return recent process logs for startup/shutdown error reporting."""
log_file = self._log_file
if log_file is None:
return ""
log_file.flush()
log_file.seek(0)
logs = log_file.read()
if len(logs) > 4_000:
return logs[-4_000:]
return logs
def _terminate_process(self, process: subprocess.Popen[str]) -> None:
"""Gracefully terminate the server process and force-kill if needed."""
if process.poll() is not None:
return
process.terminate()
try:
process.wait(timeout=_LIVE_SERVER_SHUTDOWN_TIMEOUT_S)
except subprocess.TimeoutExpired:
process.kill()
process.wait(timeout=_LIVE_SERVER_SHUTDOWN_TIMEOUT_S)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/export/_live_notebook_server.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/export/_nbformat_png_fallbacks.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
from marimo._server.export._raster_mime import MIME_TYPES_REPLACED_BY_PNG
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from collections.abc import Mapping
from nbformat.notebooknode import NotebookNode # type: ignore
_DISPLAY_OUTPUT_TYPES = {"display_data", "execute_result"}
_PNG_DATA_URL_PREFIX = "data:image/png;base64,"
def _to_png_payload(data_url_or_payload: str) -> str | None:
if data_url_or_payload.startswith(_PNG_DATA_URL_PREFIX):
return data_url_or_payload.removeprefix(_PNG_DATA_URL_PREFIX)
if data_url_or_payload.startswith("data:"):
return None
return data_url_or_payload
def inject_png_fallbacks_into_notebook(
notebook: NotebookNode,
png_fallbacks: Mapping[CellId_t, str],
) -> int:
"""Inject image/png fallbacks into a notebook node by code-cell id."""
injected_count = 0
cells = notebook.get("cells", [])
if not isinstance(cells, list):
return 0
for cell in cells:
if not isinstance(cell, dict):
continue
if cell.get("cell_type") != "code":
continue
raw_cell_id = cell.get("id")
if not isinstance(raw_cell_id, str):
continue
cell_id = cast(CellId_t, raw_cell_id)
data_url = png_fallbacks.get(cell_id)
if not data_url:
continue
png_payload = _to_png_payload(data_url)
if png_payload is None:
continue
outputs = cell.get("outputs")
if not isinstance(outputs, list):
continue
display_output: dict[str, Any] | None = None
for output in outputs:
if not isinstance(output, dict):
continue
output_type = output.get("output_type")
if output_type in _DISPLAY_OUTPUT_TYPES:
display_output = output
break
if display_output is None:
display_output = {
"output_type": "display_data",
"data": {},
"metadata": {},
}
outputs.append(display_output)
data = display_output.get("data")
if not isinstance(data, dict):
display_output["data"] = {}
data = cast(dict[str, Any], display_output["data"])
for mime_type in MIME_TYPES_REPLACED_BY_PNG:
data.pop(mime_type, None)
data["image/png"] = png_payload
injected_count += 1
return injected_count
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/export/_nbformat_png_fallbacks.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/export/_pdf_raster.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import base64
import json
from copy import deepcopy
from dataclasses import dataclass
from html import unescape
from typing import TYPE_CHECKING, Any, Literal, cast
from marimo import _loggers
from marimo._config.config import DisplayConfig
from marimo._config.manager import get_default_config_manager
from marimo._server.export._html_asset_server import HtmlAssetServer
from marimo._server.export._live_notebook_server import LiveNotebookServer
from marimo._server.export._raster_mime import (
TEXT_HTML,
TEXT_MARKDOWN,
TEXT_PLAIN,
VEGA_MIME_TYPES,
)
from marimo._server.models.export import ExportAsHTMLRequest
from marimo._types.ids import CellId_t
from marimo._utils.paths import marimo_package_path
if TYPE_CHECKING:
from collections.abc import Sequence
from marimo._ast.app import InternalApp
from marimo._session.state.session_view import SessionView
LOGGER = _loggers.marimo_logger()
MIMEBUNDLE_TYPE = "application/vnd.marimo+mimebundle"
MARIMO_COMPONENT_MARKERS: tuple[str, ...] = (
"<marimo-",
"<marimo-",
)
ANYWIDGET_COMPONENT_MARKERS: tuple[str, ...] = (
"<marimo-anywidget",
"<marimo-anywidget",
)
EMBEDDED_VEGA_MARKERS: tuple[str, ...] = tuple(VEGA_MIME_TYPES)
_READINESS_TIMEOUT_MS = 90_000
_NETWORK_IDLE_TIMEOUT_MS = 10_000
_NAVIGATION_SETTLE_TIMEOUT_MS = 30_000
_DYNAMIC_OUTPUT_EXTRA_WAIT_MS = 10_000
_VIEWPORT_WIDTH = 1440
_VIEWPORT_HEIGHT = 1000
WAIT_FOR_PAGE_READY = r"""
() => {
const root = document.getElementById("root");
if (!root) return false;
return root.childElementCount > 0;
}
"""
GO_TO_NEXT_SLIDE = r"""
() => {
const swiper = document.querySelector('.swiper')?.swiper;
if (swiper) {
swiper.slideNext();
}
}
"""
WAIT_FOR_NEXT_PAINT = r"""
() => new Promise((resolve) => {
requestAnimationFrame(() => requestAnimationFrame(resolve));
})
"""
@dataclass(frozen=True)
class PDFRasterizationOptions:
enabled: bool = True
scale: float = 4.0
server_mode: str = "static"
CaptureExpectation = Literal["anywidget", "vega"]
@dataclass(frozen=True)
class _RasterTarget:
cell_id: CellId_t
expects: tuple[CaptureExpectation, ...]
def _format_target_expects(expects: tuple[CaptureExpectation, ...]) -> str:
if not expects:
return "generic"
return ",".join(expects)
def _contains_marker(content: Any, markers: tuple[str, ...]) -> bool:
"""Return whether any string payload contains one of the markers."""
def _contains(value: str) -> bool:
return any(marker in value for marker in markers)
if isinstance(content, list):
return any(
isinstance(item, str) and _contains(item) for item in content
)
return isinstance(content, str) and _contains(content)
def _dedupe_strings(values: Sequence[str]) -> tuple[str, ...]:
return tuple(dict.fromkeys(values))
def _load_mimebundle(data: Any) -> dict[str, Any] | None:
if isinstance(data, dict):
return data
if isinstance(data, str):
try:
loaded = json.loads(data)
except json.JSONDecodeError:
return None
if isinstance(loaded, dict):
return loaded
return None
def _should_rasterize_output(mimetype: str, data: Any) -> bool:
"""Return whether this output is eligible for PNG fallback capture."""
if mimetype in VEGA_MIME_TYPES:
return True
if mimetype in {TEXT_HTML, TEXT_PLAIN, TEXT_MARKDOWN}:
return _contains_marker(data, MARIMO_COMPONENT_MARKERS)
return False
def _build_target_from_mimebundle(
cell_id: CellId_t,
mimebundle: dict[str, Any],
) -> _RasterTarget | None:
"""Build capture metadata for a cell emitting a MIME bundle output."""
should_capture = False
expects: list[CaptureExpectation] = []
for mimetype, content in mimebundle.items():
if _should_rasterize_output(mimetype, content):
should_capture = True
if _contains_marker(content, ANYWIDGET_COMPONENT_MARKERS):
expects.append("anywidget")
if mimetype in VEGA_MIME_TYPES or _contains_marker(
content, EMBEDDED_VEGA_MARKERS
):
expects.append("vega")
if not should_capture:
return None
return _RasterTarget(
cell_id=cell_id,
expects=cast(tuple[CaptureExpectation, ...], _dedupe_strings(expects)),
)
def _build_target_from_output(
cell_id: CellId_t,
*,
mimetype: str,
data: Any,
) -> _RasterTarget | None:
"""Build capture metadata for a non-mimebundle output."""
if not _should_rasterize_output(mimetype, data):
return None
expects: list[CaptureExpectation] = []
if _contains_marker(data, ANYWIDGET_COMPONENT_MARKERS):
expects.append("anywidget")
if mimetype in VEGA_MIME_TYPES or _contains_marker(
data, EMBEDDED_VEGA_MARKERS
):
expects.append("vega")
return _RasterTarget(
cell_id=cell_id,
expects=cast(tuple[CaptureExpectation, ...], _dedupe_strings(expects)),
)
def _collect_raster_targets(session_view: SessionView) -> list[_RasterTarget]:
"""Collect capture targets from the current executed session outputs."""
targets: list[_RasterTarget] = []
for cell_id, cell_notification in session_view.cell_notifications.items():
output = cell_notification.output
if output is None or output.data is None:
continue
if output.mimetype == MIMEBUNDLE_TYPE:
mimebundle = _load_mimebundle(output.data)
if mimebundle is None:
continue
target = _build_target_from_mimebundle(cell_id, mimebundle)
if target is not None:
targets.append(target)
continue
target = _build_target_from_output(
cell_id,
mimetype=output.mimetype,
data=output.data,
)
if target is not None:
targets.append(target)
return targets
def _sort_targets_by_notebook_order(
session_view: SessionView,
targets: list[_RasterTarget],
) -> list[_RasterTarget]:
"""Sort targets in notebook order so capture sequencing is deterministic."""
if session_view.cell_ids is None:
return targets
order = {
cell_id: index
for index, cell_id in enumerate(session_view.cell_ids.cell_ids)
}
fallback_index = len(order)
return sorted(
targets,
key=lambda target: order.get(target.cell_id, fallback_index),
)
def _promote_text_output_for_capture(output: Any) -> None:
if not _contains_marker(output.data, MARIMO_COMPONENT_MARKERS):
return
output.mimetype = TEXT_HTML
output.data = _unescape_component_markup(output.data)
def _promote_mimebundle_output_for_capture(output: Any) -> None:
"""Ensure mimebundle component markup is available as unescaped text/html."""
mimebundle = _load_mimebundle(output.data)
if mimebundle is None:
return
html_data = mimebundle.get(TEXT_HTML)
if html_data is not None and _contains_marker(
html_data, MARIMO_COMPONENT_MARKERS
):
mimebundle[TEXT_HTML] = _unescape_component_markup(html_data)
output.data = mimebundle
return
plain_data = mimebundle.get(TEXT_PLAIN)
if plain_data is None or not _contains_marker(
plain_data, MARIMO_COMPONENT_MARKERS
):
markdown_data = mimebundle.get(TEXT_MARKDOWN)
if markdown_data is None or not _contains_marker(
markdown_data, MARIMO_COMPONENT_MARKERS
):
return
mimebundle[TEXT_HTML] = _unescape_component_markup(markdown_data)
output.data = mimebundle
return
mimebundle[TEXT_HTML] = _unescape_component_markup(plain_data)
output.data = mimebundle
def _promote_component_markup_for_capture(
session_view: SessionView,
targets: list[_RasterTarget],
) -> SessionView:
"""Return a copied session view with target component outputs normalized."""
capture_view = deepcopy(session_view)
for target in targets:
cell_notification = capture_view.cell_notifications.get(target.cell_id)
if cell_notification is None or cell_notification.output is None:
continue
output = cell_notification.output
if output.mimetype in {TEXT_HTML, TEXT_PLAIN, TEXT_MARKDOWN}:
_promote_text_output_for_capture(output)
continue
if output.mimetype != MIMEBUNDLE_TYPE:
continue
_promote_mimebundle_output_for_capture(output)
return capture_view
def _unescape_component_markup(data: Any) -> Any:
if isinstance(data, str):
return unescape(data)
if isinstance(data, list):
return [
unescape(item) if isinstance(item, str) else item for item in data
]
return data
def _to_display_config(filepath: str | None) -> DisplayConfig:
"""Resolve display config for exporter rendering settings."""
config = get_default_config_manager(current_path=filepath).get_config()
return cast(DisplayConfig, config["display"])
def _to_data_url(image: bytes) -> str:
encoded = base64.b64encode(image).decode("ascii")
return f"data:image/png;base64,{encoded}"
async def _collect_static_captures(
*,
app: InternalApp,
session_view: SessionView,
filename: str | None,
filepath: str | None,
options: PDFRasterizationOptions,
static_targets: list[_RasterTarget],
) -> dict[CellId_t, str]:
"""Capture PNG fallbacks from exported static HTML for non-live targets."""
from marimo._server.export.exporter import Exporter
LOGGER.debug(
"Raster capture static phase: %s target(s), scale=%s",
len(static_targets),
options.scale,
)
static_dir = marimo_package_path() / "_static"
with HtmlAssetServer(
directory=static_dir,
route="/__marimo_pdf_raster__.html",
) as server:
capture_view = _promote_component_markup_for_capture(
session_view,
static_targets,
)
html, _download_filename = Exporter().export_as_html(
filename=filename,
app=app,
session_view=capture_view,
display_config=_to_display_config(filepath),
request=ExportAsHTMLRequest(
download=False,
files=[],
include_code=True,
asset_url=server.base_url,
),
)
server.set_html(html)
captures = await _capture_pngs_from_page(
page_url=server.page_url,
targets=static_targets,
scale=options.scale,
)
LOGGER.debug(
"Raster capture static phase complete: %s/%s captured",
len(captures),
len(static_targets),
)
return captures
async def _collect_live_captures(
*,
filepath: str | None,
argv: list[str] | None,
options: PDFRasterizationOptions,
live_targets: list[_RasterTarget],
) -> dict[CellId_t, str]:
"""Capture PNG fallbacks from a live notebook runtime when required."""
if not filepath:
LOGGER.debug(
"Raster capture live phase skipped: no filepath provided."
)
return {}
LOGGER.debug(
"Raster capture live phase: %s target(s), scale=%s",
len(live_targets),
options.scale,
)
captures = await _capture_pngs_from_live_page(
filepath=filepath,
targets=live_targets,
scale=options.scale,
argv=argv,
)
LOGGER.debug(
"Raster capture live phase complete: %s/%s captured",
len(captures),
len(live_targets),
)
return captures
async def collect_pdf_png_fallbacks(
    *,
    app: InternalApp,
    session_view: SessionView,
    filename: str | None,
    filepath: str | None,
    argv: list[str] | None = None,
    options: PDFRasterizationOptions,
) -> dict[CellId_t, str]:
    """Collect per-cell PNG fallbacks to inject before nbconvert PDF export.

    Returns cell id -> PNG data URL. Returns an empty mapping when
    rasterization is disabled or no eligible outputs are found.
    """
    if not options.enabled:
        LOGGER.debug("Raster capture disabled by options.")
        return {}
    targets = _collect_raster_targets(session_view)
    if not targets:
        LOGGER.debug("Raster capture skipped: no eligible outputs found.")
        return {}
    # Sort targets into notebook order before capturing.
    targets = _sort_targets_by_notebook_order(session_view, targets)
    server_mode = options.server_mode.lower()
    # Unknown modes degrade to the static path rather than failing export.
    if server_mode not in {"static", "live"}:
        LOGGER.warning(
            "Unknown raster server mode '%s'; defaulting to static.",
            options.server_mode,
        )
        server_mode = "static"
    LOGGER.debug(
        "Raster capture planning: total=%s server_mode=%s",
        len(targets),
        server_mode,
    )
    LOGGER.info(
        "Rasterizing %s component(s) for PDF [mode=%s, scale=%s].",
        len(targets),
        server_mode,
        options.scale,
    )
    # All targets go through exactly one strategy; there is no mixed mode.
    if server_mode == "live":
        LOGGER.debug("Raster capture strategy: live-only.")
        captures = await _collect_live_captures(
            filepath=filepath,
            argv=argv,
            options=options,
            live_targets=targets,
        )
    else:
        LOGGER.debug("Raster capture strategy: static-only.")
        captures = await _collect_static_captures(
            app=app,
            session_view=session_view,
            filename=filename,
            filepath=filepath,
            options=options,
            static_targets=targets,
        )
    LOGGER.debug(
        "Raster capture complete: %s/%s outputs captured.",
        len(captures),
        len(targets),
    )
    return captures
async def _wait_for_network_idle(
page: Any,
*,
timeout_ms: int,
timeout_error: type[Exception],
) -> None:
"""Best-effort network-idle wait that never fails capture flow."""
try:
await page.wait_for_load_state("networkidle", timeout=timeout_ms)
except timeout_error:
return
async def _wait_for_navigation_settled(
    *,
    page: Any,
    timeout_error: type[Exception],
) -> None:
    """Settle the page by waiting for load states and a fixed quiet period."""
    # Each load state is waited on independently; a timeout on one state
    # does not prevent trying the next.
    load_states = ("domcontentloaded", "load", "networkidle")
    for load_state in load_states:
        try:
            await page.wait_for_load_state(
                load_state,
                timeout=_NAVIGATION_SETTLE_TIMEOUT_MS,
            )
        except timeout_error:
            continue
    # Fixed extra quiet period after the load-state waits.
    await page.wait_for_timeout(_DYNAMIC_OUTPUT_EXTRA_WAIT_MS)
async def _wait_for_target_ready(
    *,
    page: Any,
    target: _RasterTarget,
    locator: Any,
    timeout_error: type[Exception],
) -> bool:
    """Wait for a target output to become visible and then stabilize.

    Returns True when the locator became visible and the page settled;
    False when any readiness step timed out (caller then skips the target).
    """
    try:
        await locator.wait_for(
            state="visible",
            timeout=_READINESS_TIMEOUT_MS,
        )
        await locator.scroll_into_view_if_needed(
            timeout=_READINESS_TIMEOUT_MS,
        )
        # Let the browser paint once before checking network quiescence.
        await page.evaluate(WAIT_FOR_NEXT_PAINT)
        await _wait_for_network_idle(
            page,
            timeout_ms=_NETWORK_IDLE_TIMEOUT_MS,
            timeout_error=timeout_error,
        )
        # Targets expecting dynamic content (e.g. "vega"/"anywidget") get an
        # extra settle pass: load states + quiet period, network idle, paint.
        if target.expects:
            await _wait_for_navigation_settled(
                page=page,
                timeout_error=timeout_error,
            )
            await _wait_for_network_idle(
                page,
                timeout_ms=_NETWORK_IDLE_TIMEOUT_MS,
                timeout_error=timeout_error,
            )
            await page.evaluate(WAIT_FOR_NEXT_PAINT)
    except timeout_error:
        LOGGER.debug(
            "Raster target %s timed out while waiting for visibility/readiness.",
            target.cell_id,
        )
        return False
    else:
        return True
async def _capture_pngs_from_page(
    *,
    page_url: str,
    targets: list[_RasterTarget],
    scale: float,
) -> dict[CellId_t, str]:
    """Capture PNG screenshots for target outputs from a single page URL.

    Returns cell id -> PNG data URL. Targets that never become ready, or
    whose screenshot times out, are skipped rather than failing the run.
    """
    # Local import; presumably playwright is optional at module import — confirm.
    from playwright.async_api import (  # type: ignore[import-not-found]
        TimeoutError as PlaywrightTimeoutError,
        async_playwright,
    )
    captures: dict[CellId_t, str] = {}
    # Never scale below 1.0, so screenshots are at least native resolution.
    device_scale_factor = max(1.0, scale)
    LOGGER.debug(
        "Raster page capture start: url=%s targets=%s scale=%s",
        page_url,
        len(targets),
        scale,
    )
    async with async_playwright() as playwright:
        browser = await playwright.chromium.launch()
        context = await browser.new_context(
            viewport={
                "width": _VIEWPORT_WIDTH,
                "height": _VIEWPORT_HEIGHT,
            },
            device_scale_factor=device_scale_factor,
        )
        page = await context.new_page()
        await page.goto(
            page_url,
            wait_until="domcontentloaded",
            timeout=_READINESS_TIMEOUT_MS,
        )
        LOGGER.debug("Page loaded, waiting for readiness...")
        # Readiness sequence: network idle -> page-ready JS -> network idle.
        await _wait_for_network_idle(
            page,
            timeout_ms=_NETWORK_IDLE_TIMEOUT_MS,
            timeout_error=PlaywrightTimeoutError,
        )
        LOGGER.debug(
            "Initial network idle achieved, waiting for page ready..."
        )
        await page.wait_for_function(
            WAIT_FOR_PAGE_READY,
            timeout=_READINESS_TIMEOUT_MS,
        )
        LOGGER.debug("Page ready, waiting for final network idle...")
        await _wait_for_network_idle(
            page,
            timeout_ms=_NETWORK_IDLE_TIMEOUT_MS,
            timeout_error=PlaywrightTimeoutError,
        )
        LOGGER.debug("Page network idle, starting target captures...")
        for index, target in enumerate(targets, start=1):
            LOGGER.info(
                "Rasterizing [%s/%s] cell=%s (%s)",
                index,
                len(targets),
                target.cell_id,
                _format_target_expects(target.expects),
            )
            LOGGER.debug(
                "Processing raster target: cell_id=%s expects=%s",
                target.cell_id,
                target.expects,
            )
            # In live notebook mode, output wrappers often use `display: contents`,
            # which cannot be directly screenshotted, so we target a concrete child node.
            locator = page.locator(f"#output-{target.cell_id} > .output").first
            if not await _wait_for_target_ready(
                page=page,
                target=target,
                locator=locator,
                timeout_error=PlaywrightTimeoutError,
            ):
                LOGGER.debug(
                    "Raster target skipped: cell_id=%s",
                    target.cell_id,
                )
                continue
            try:
                # GO_TO_NEXT_SLIDE presumably advances slide layouts — confirm
                # against the JS snippet it names.
                await page.evaluate(GO_TO_NEXT_SLIDE)
                await page.evaluate(WAIT_FOR_NEXT_PAINT)
                image = await locator.screenshot(
                    type="png",
                    animations="disabled",
                    timeout=_READINESS_TIMEOUT_MS,
                )
            except PlaywrightTimeoutError:
                # Skip this target only; remaining targets are still attempted.
                LOGGER.debug(
                    "Raster screenshot timed out: cell_id=%s",
                    target.cell_id,
                )
                continue
            captures[target.cell_id] = _to_data_url(image)
            LOGGER.debug(
                "Raster target captured: cell_id=%s",
                target.cell_id,
            )
        await context.close()
        await browser.close()
    LOGGER.debug(
        "Raster page capture complete: %s/%s captured",
        len(captures),
        len(targets),
    )
    LOGGER.info(
        "Rasterization complete: captured %s/%s component(s).",
        len(captures),
        len(targets),
    )
    return captures
async def _capture_pngs_from_live_page(
    *,
    filepath: str,
    targets: list[_RasterTarget],
    scale: float,
    argv: list[str] | None,
) -> dict[CellId_t, str]:
    """Capture outputs by running notebook in a live marimo server process."""
    # The context manager owns the server process lifetime; the actual
    # screenshotting is shared with the static capture path.
    with LiveNotebookServer(filepath=filepath, argv=argv) as live_server:
        live_url = live_server.page_url
        return await _capture_pngs_from_page(
            page_url=live_url,
            targets=targets,
            scale=scale,
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/export/_pdf_raster.py",
"license": "Apache License 2.0",
"lines": 584,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/export/_raster_mime.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Final
from marimo._messaging.mimetypes import KnownMimeType
# Common text mimetypes, aliased for the set below.
TEXT_HTML: Final[KnownMimeType] = "text/html"
TEXT_PLAIN: Final[KnownMimeType] = "text/plain"
TEXT_MARKDOWN: Final[KnownMimeType] = "text/markdown"
# Vega / Vega-Lite JSON spec mimetypes (schema versions 5 and 6).
VEGA_MIME_TYPES: Final[set[KnownMimeType]] = {
    "application/vnd.vegalite.v5+json",
    "application/vnd.vega.v5+json",
    "application/vnd.vegalite.v6+json",
    "application/vnd.vega.v6+json",
}
# Mimetypes whose payload is dropped when a PNG fallback replaces the output.
MIME_TYPES_REPLACED_BY_PNG: Final[set[KnownMimeType]] = {
    TEXT_HTML,
    TEXT_PLAIN,
    TEXT_MARKDOWN,
    *VEGA_MIME_TYPES,
}
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/export/_raster_mime.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_server/export/test_nbformat_png_fallbacks.py | from __future__ import annotations
import pytest
from marimo._server.export._nbformat_png_fallbacks import (
inject_png_fallbacks_into_notebook,
)
# Skip the whole module at collection time when nbformat is unavailable.
nbformat = pytest.importorskip("nbformat")
def test_inject_png_fallbacks_replaces_rasterized_mimetypes() -> None:
    """Rasterized mimetypes are removed and replaced by the PNG payload."""
    notebook = nbformat.v4.new_notebook()
    cell = nbformat.v4.new_code_cell("print('x')", id="cell-1")
    cell.outputs = [
        nbformat.v4.new_output(
            "display_data",
            data={
                "text/html": "<div>hello</div>",
                "text/plain": "hello",
                "application/vnd.vega.v5+json": {"mark": "point"},
            },
            metadata={},
        )
    ]
    notebook.cells = [cell]
    injected = inject_png_fallbacks_into_notebook(
        notebook,
        png_fallbacks={"cell-1": "data:image/png;base64,ZmFrZQ=="},
    )
    assert injected == 1
    data = notebook.cells[0].outputs[0]["data"]
    assert "text/html" not in data
    assert "text/plain" not in data
    assert "application/vnd.vega.v5+json" not in data
    # The data-URL prefix is stripped; only the raw base64 payload is stored.
    assert data["image/png"] == "ZmFrZQ=="
def test_inject_png_fallbacks_appends_display_output_when_missing() -> None:
    """A cell with no outputs gains a new display_data output for the PNG."""
    notebook = nbformat.v4.new_notebook()
    cell = nbformat.v4.new_code_cell("print('x')", id="cell-2")
    cell.outputs = []
    notebook.cells = [cell]
    injected = inject_png_fallbacks_into_notebook(
        notebook,
        png_fallbacks={"cell-2": "data:image/png;base64,YWJj"},
    )
    assert injected == 1
    assert len(notebook.cells[0].outputs) == 1
    output = notebook.cells[0].outputs[0]
    assert output["output_type"] == "display_data"
    assert output["data"]["image/png"] == "YWJj"
def test_inject_png_fallbacks_keeps_existing_plain_payload() -> None:
    """A bare base64 payload (no data-URL prefix) is stored unchanged."""
    notebook = nbformat.v4.new_notebook()
    cell = nbformat.v4.new_code_cell("print('x')", id="cell-3")
    cell.outputs = [
        nbformat.v4.new_output("display_data", data={}, metadata={})
    ]
    notebook.cells = [cell]
    injected = inject_png_fallbacks_into_notebook(
        notebook,
        png_fallbacks={"cell-3": "YWJj"},
    )
    assert injected == 1
    assert notebook.cells[0].outputs[0]["data"]["image/png"] == "YWJj"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/test_nbformat_png_fallbacks.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/export/test_pdf_raster.py | from __future__ import annotations
import asyncio
from typing import Any
from unittest.mock import AsyncMock, patch
from marimo._ast.app import App, InternalApp
from marimo._messaging.cell_output import CellChannel, CellOutput
from marimo._messaging.notification import (
CellNotification,
UpdateCellIdsNotification,
)
from marimo._server.export import _pdf_raster
from marimo._session.state.session_view import SessionView
def _cell_notification(
    *,
    cell_id: str,
    mimetype: str,
    data: object,
) -> CellNotification:
    """Build an idle CellNotification carrying one OUTPUT-channel payload."""
    return CellNotification(
        cell_id=cell_id,
        status="idle",
        output=CellOutput(
            channel=CellChannel.OUTPUT,
            mimetype=mimetype,
            data=data,
        ),
        timestamp=0,
    )
def test_collect_raster_targets_detects_html_vega_and_anywidget() -> None:
    """Vega specs and anywidget markup become targets; plain HTML/text do not."""
    session_view = SessionView()
    session_view.cell_notifications["1"] = _cell_notification(
        cell_id="1",
        mimetype="text/html",
        data="<div>hello</div>",
    )
    session_view.cell_notifications["2"] = _cell_notification(
        cell_id="2",
        mimetype="application/vnd.marimo+mimebundle",
        data={
            "application/vnd.vegalite.v5+json": {"mark": "point"},
            "text/plain": "vega",
        },
    )
    session_view.cell_notifications["3"] = _cell_notification(
        cell_id="3",
        mimetype="text/markdown",
        data=(
            "<marimo-anywidget "
            'data-initial-value=\'{"model_id":"model-1"}\''
            "></marimo-anywidget>"
        ),
    )
    session_view.cell_notifications["4"] = _cell_notification(
        cell_id="4",
        mimetype="text/plain",
        data="plain text",
    )
    session_view.cell_notifications["5"] = _cell_notification(
        cell_id="5",
        mimetype="text/html",
        data=("<marimo-stack>application/vnd.vegalite.v6+json</marimo-stack>"),
    )
    targets = _pdf_raster._collect_raster_targets(session_view)
    by_id = {target.cell_id: target for target in targets}
    assert set(by_id) == {"2", "3", "5"}
    assert by_id["2"].expects == ("vega",)
    assert by_id["3"].expects == ("anywidget",)
    assert by_id["5"].expects == ("vega",)
def test_collect_pdf_png_fallbacks_mixed_targets_use_static_by_default() -> (
    None
):
    """Default (static) mode captures every target via exported HTML and
    never runs the live-server capture; original outputs stay untouched."""
    session_view = SessionView()
    original_anywidget = (
        '<marimo-anywidget data-initial-value=\'{"model_id":"m-live"}\''
        "></marimo-anywidget>"
    )
    session_view.cell_notifications["1"] = _cell_notification(
        cell_id="1",
        mimetype="text/plain",
        data=original_anywidget,
    )
    session_view.cell_notifications["2"] = _cell_notification(
        cell_id="2",
        mimetype="text/html",
        data="<marimo-slider></marimo-slider>",
    )
    session_view.cell_notifications["3"] = _cell_notification(
        cell_id="3",
        mimetype="application/vnd.marimo+mimebundle",
        data={
            "application/vnd.vegalite.v6+json": {"mark": "point"},
            "text/plain": "vega",
        },
    )
    session_view.cell_ids = UpdateCellIdsNotification(cell_ids=["3", "2", "1"])
    app = InternalApp(App())
    live_capture_mock = AsyncMock(
        side_effect=AssertionError(
            "live capture should not run for default static mode"
        )
    )
    class _FakeServer:
        base_url = "http://127.0.0.1:1234"
        page_url = "http://127.0.0.1:1234/__marimo_pdf_raster__.html"
        def __enter__(self) -> Any:
            return self
        def __exit__(self, *args: object) -> None:
            del args
        def set_html(self, html: str) -> None:
            assert "<html" in html
    async def _capture_static(
        *,
        page_url: str,
        targets: list[_pdf_raster._RasterTarget],
        scale: float,
    ) -> dict[str, str]:
        assert page_url.endswith("__marimo_pdf_raster__.html")
        assert scale == 4.0
        # Targets arrive in notebook order ("3", "2", "1").
        assert [target.cell_id for target in targets] == ["3", "2", "1"]
        return {
            "3": "data:image/png;base64,c3RhdGljMw==",
            "2": "data:image/png;base64,c3RhdGljMg==",
            "1": "data:image/png;base64,c3RhdGljMQ==",
        }
    def _fake_export_as_html(*args: Any, **kwargs: Any) -> tuple[str, str]:
        del args
        del kwargs
        return "<html></html>", "demo.html"
    async def _run() -> dict[str, str]:
        with (
            patch.object(
                _pdf_raster,
                "HtmlAssetServer",
                return_value=_FakeServer(),
            ),
            patch.object(
                _pdf_raster,
                "_capture_pngs_from_page",
                side_effect=_capture_static,
            ),
            patch.object(
                _pdf_raster,
                "_capture_pngs_from_live_page",
                live_capture_mock,
            ),
            patch(
                "marimo._server.export.exporter.Exporter.export_as_html",
                side_effect=_fake_export_as_html,
            ),
        ):
            return await _pdf_raster.collect_pdf_png_fallbacks(
                app=app,
                session_view=session_view,
                filename="demo.py",
                filepath="demo.py",
                argv=["--arg", "value"],
                options=_pdf_raster.PDFRasterizationOptions(),
            )
    captures = asyncio.run(_run())
    assert captures == {
        "1": "data:image/png;base64,c3RhdGljMQ==",
        "2": "data:image/png;base64,c3RhdGljMg==",
        "3": "data:image/png;base64,c3RhdGljMw==",
    }
    live_capture_mock.assert_not_awaited()
    # The session view itself must not be mutated by the capture flow.
    output = session_view.cell_notifications["1"].output
    assert output is not None
    assert output.mimetype == "text/plain"
    assert output.data == original_anywidget
def test_collect_pdf_png_fallbacks_static_only_uses_static_capture() -> None:
    """A single static-capable target is captured via the static HTML path."""
    session_view = SessionView()
    session_view.cell_notifications["1"] = _cell_notification(
        cell_id="1",
        mimetype="application/vnd.marimo+mimebundle",
        data={
            "application/vnd.vegalite.v6+json": {"mark": "point"},
            "text/plain": "vega",
        },
    )
    session_view.cell_ids = UpdateCellIdsNotification(cell_ids=["1"])
    app = InternalApp(App())
    class _FakeServer:
        base_url = "http://127.0.0.1:1234"
        page_url = "http://127.0.0.1:1234/__marimo_pdf_raster__.html"
        def __enter__(self) -> Any:
            return self
        def __exit__(self, *args: object) -> None:
            del args
        def set_html(self, html: str) -> None:
            assert "<html" in html
    async def _capture_static(
        *,
        page_url: str,
        targets: list[_pdf_raster._RasterTarget],
        scale: float,
    ) -> dict[str, str]:
        assert page_url.endswith("__marimo_pdf_raster__.html")
        assert scale == 4.0
        assert [target.cell_id for target in targets] == ["1"]
        return {"1": "data:image/png;base64,c3RhdGlj"}
    live_capture_mock = AsyncMock(
        side_effect=AssertionError(
            "live capture should not run for static-only targets"
        )
    )
    def _fake_export_as_html(*args: Any, **kwargs: Any) -> tuple[str, str]:
        del args
        del kwargs
        return "<html></html>", "demo.html"
    async def _run() -> dict[str, str]:
        with (
            patch.object(
                _pdf_raster,
                "HtmlAssetServer",
                return_value=_FakeServer(),
            ),
            patch.object(
                _pdf_raster,
                "_capture_pngs_from_page",
                side_effect=_capture_static,
            ),
            patch.object(
                _pdf_raster,
                "_capture_pngs_from_live_page",
                live_capture_mock,
            ),
            patch(
                "marimo._server.export.exporter.Exporter.export_as_html",
                side_effect=_fake_export_as_html,
            ),
        ):
            return await _pdf_raster.collect_pdf_png_fallbacks(
                app=app,
                session_view=session_view,
                filename="demo.py",
                filepath="demo.py",
                options=_pdf_raster.PDFRasterizationOptions(),
            )
    captures = asyncio.run(_run())
    assert captures == {"1": "data:image/png;base64,c3RhdGlj"}
    live_capture_mock.assert_not_awaited()
def test_collect_pdf_png_fallbacks_live_mode_uses_live_capture() -> None:
    """Explicit live mode routes all targets through live-server capture."""
    session_view = SessionView()
    session_view.cell_notifications["1"] = _cell_notification(
        cell_id="1",
        mimetype="application/vnd.marimo+mimebundle",
        data={
            "application/vnd.vegalite.v6+json": {"mark": "point"},
            "text/plain": "vega",
        },
    )
    session_view.cell_notifications["2"] = _cell_notification(
        cell_id="2",
        mimetype="text/html",
        data="<marimo-slider></marimo-slider>",
    )
    session_view.cell_ids = UpdateCellIdsNotification(cell_ids=["2", "1"])
    app = InternalApp(App())
    static_capture_mock = AsyncMock(
        side_effect=AssertionError(
            "static capture should not run for explicit live mode"
        )
    )
    async def _capture_live(
        *,
        filepath: str,
        targets: list[_pdf_raster._RasterTarget],
        scale: float,
        argv: list[str] | None,
    ) -> dict[str, str]:
        del filepath
        del scale
        del argv
        # Targets arrive in notebook order ("2", "1").
        assert [target.cell_id for target in targets] == ["2", "1"]
        return {
            "2": "data:image/png;base64,bGl2ZTI=",
            "1": "data:image/png;base64,bGl2ZTE=",
        }
    async def _run() -> dict[str, str]:
        with (
            patch.object(
                _pdf_raster,
                "_capture_pngs_from_page",
                static_capture_mock,
            ),
            patch.object(
                _pdf_raster,
                "_capture_pngs_from_live_page",
                side_effect=_capture_live,
            ),
        ):
            return await _pdf_raster.collect_pdf_png_fallbacks(
                app=app,
                session_view=session_view,
                filename="demo.py",
                filepath="demo.py",
                options=_pdf_raster.PDFRasterizationOptions(
                    server_mode="live"
                ),
            )
    captures = asyncio.run(_run())
    assert captures == {
        "1": "data:image/png;base64,bGl2ZTE=",
        "2": "data:image/png;base64,bGl2ZTI=",
    }
    static_capture_mock.assert_not_awaited()
def test_wait_for_target_ready_uses_settle_wait_for_dynamic_targets() -> None:
    """Targets with non-empty `expects` trigger the navigation-settle wait."""
    class _TimeoutError(Exception):
        pass
    page = AsyncMock()
    locator = AsyncMock()
    target = _pdf_raster._RasterTarget(
        cell_id="dynamic-cell",
        expects=("vega",),
    )
    settle_wait = AsyncMock()
    with patch.object(
        _pdf_raster,
        "_wait_for_navigation_settled",
        settle_wait,
    ):
        ready = asyncio.run(
            _pdf_raster._wait_for_target_ready(
                page=page,
                target=target,
                locator=locator,
                timeout_error=_TimeoutError,
            )
        )
    assert ready is True
    settle_wait.assert_awaited_once()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/export/test_pdf_raster.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_snippets/data/multi_markdown.py | # Copyright 2026 Marimo. All rights reserved.
import marimo
# Version of marimo that generated this notebook file.
__generated_with = "0.3.9"
app = marimo.App()
@app.cell
def __(mo):
    # Title cell rendered as markdown.
    mo.md(
        r"""
    # Multi Markdown Snippet
    This is the title cell with a description.
    """
    )
    return
@app.cell
def __(mo):
    # Second markdown cell; the snippet loader should render both.
    mo.md(
        r"""
    This is a second markdown cell that should also be rendered.
    """
    )
    return
@app.cell
def __():
    # Simple computation cell exporting x.
    x = 1 + 1
    return (x,)
@app.cell
def __():
    import marimo as mo
    return (mo,)
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_snippets/data/multi_markdown.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/frameworks/fastapi-auth/main.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "fastapi",
# "marimo",
# "starlette",
# "uvicorn",
# "itsdangerous",
# "python-multipart",
# ]
# ///
"""Example: Authentication middleware that passes user info into marimo notebooks.
This shows the recommended pattern for authentication with marimo when using
FastAPI. It uses a pure ASGI middleware (not BaseHTTPMiddleware) so that
scope["user"] and scope["meta"] are set for both HTTP *and* WebSocket
connections.
The user info is then available in notebooks via:
request = mo.app_meta().request
username = request.user["username"]
Run with:
uv run --no-project main.py
Then open http://localhost:8000/ and log in with admin / password123.
"""
import hmac
import logging
import os

import marimo
import uvicorn
from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, RedirectResponse
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import Response
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Resolve the notebook relative to this file so the example runs from any cwd.
notebook_dir = os.path.dirname(__file__)
notebook_path = os.path.join(notebook_dir, "notebook.py")
# Simulated user database (replace with a real database in production)
users_db = {"admin": "password123"}
app = FastAPI()
# Pure ASGI middleware — runs for both HTTP and WebSocket requests.
# This is the recommended pattern for passing user/meta into marimo.
#
# Important: Do NOT use Starlette's BaseHTTPMiddleware here.
# BaseHTTPMiddleware only processes HTTP requests, not WebSocket
# connections. marimo uses WebSocket for real-time communication,
# so scope["user"] and scope["meta"] would be lost.
#
# This is a simplified version of authentication and you may want to use
# starlette.middleware.authentication.AuthenticationMiddleware instead.
class AuthMiddleware:
    """Pure ASGI auth middleware that populates scope["user"]/scope["meta"].

    Runs for both HTTP and WebSocket connections (unlike
    BaseHTTPMiddleware, which only sees HTTP), so marimo's WebSocket
    traffic also carries the authenticated user.
    """

    # Paths that don't require authentication
    PUBLIC_PATHS = {"/login"}

    def __init__(self, app):
        self.app = app

    async def __call__(self, scope, receive, send):
        scope_type = scope["type"]
        if scope_type not in ("http", "websocket"):
            # Lifespan and other non-connection events pass straight through.
            await self.app(scope, receive, send)
            return
        # SessionMiddleware (the outer middleware) has already populated
        # scope["session"] by the time we run.
        username = scope.get("session", {}).get("username")
        if username:
            # Expose user/meta so marimo can read them via mo.app_meta().request
            scope["user"] = {
                "is_authenticated": True,
                "username": username,
            }
            scope["meta"] = {"role": "admin"}
            await self.app(scope, receive, send)
            return
        # Anonymous request: allow public paths through, otherwise block.
        if scope.get("path", "") in self.PUBLIC_PATHS:
            await self.app(scope, receive, send)
            return
        if scope_type == "websocket":
            # Reject unauthenticated WebSocket connections.
            from starlette.websockets import WebSocket
            websocket = WebSocket(scope, receive, send)
            await websocket.close(code=4003)
            return
        # Redirect unauthenticated HTTP requests to /login.
        redirect = Response(
            status_code=302, headers={"location": "/login"}
        )
        await redirect(scope, receive, send)
# Middleware ordering: In Starlette, the LAST added middleware is the
# OUTERMOST (runs first). We need SessionMiddleware to run before
# AuthMiddleware so that scope["session"] is populated. So we add
# AuthMiddleware first (innermost) and SessionMiddleware last (outermost).
app.add_middleware(AuthMiddleware)
app.add_middleware(
    SessionMiddleware,
    # NOTE: set SECRET_KEY in the environment for any real deployment; the
    # fallback value below is intentionally obvious and not secret.
    secret_key=os.getenv("SECRET_KEY", "change-me-in-production"),
)
LOGIN_PAGE = """\
<!DOCTYPE html>
<html>
<head><title>Login</title></head>
<body style="display:flex;justify-content:center;align-items:center;height:100vh;font-family:sans-serif">
<form method="post" action="/login" style="width:300px">
<h2>Login</h2>
{error}
<div style="margin-bottom:8px">
<label>Username</label><br>
<input name="username" required style="width:100%;padding:6px">
</div>
<div style="margin-bottom:8px">
<label>Password</label><br>
<input name="password" type="password" required style="width:100%;padding:6px">
</div>
<button type="submit" style="width:100%;padding:8px">Log in</button>
</form>
</body>
</html>
"""
@app.get("/login")
async def get_login():
    """Render the login form with no error message."""
    return HTMLResponse(LOGIN_PAGE.format(error=""))
@app.post("/login")
async def post_login(
    request: Request,
    username: str = Form(...),
    password: str = Form(...),
):
    """Validate credentials; start a session and redirect home on success.

    On failure, re-render the login page with an inline error.
    """
    stored_password = users_db.get(username)
    # hmac.compare_digest runs in constant time, avoiding leaking password
    # length/prefix information through response timing.
    if stored_password is not None and hmac.compare_digest(
        password, stored_password
    ):
        request.session["username"] = username
        logger.info("User %s logged in", username)
        return RedirectResponse(url="/", status_code=302)
    logger.warning("Failed login attempt for %s", username)
    return HTMLResponse(
        LOGIN_PAGE.format(
            error='<p style="color:red">Invalid credentials</p>'
        )
    )
@app.get("/logout")
async def logout(request: Request):
    """Clear the session and send the user back to the login page."""
    request.session.clear()
    return RedirectResponse(url="/login")
# Mount marimo
# Mounted last at "/" so routes defined above (login/logout) take precedence
# and the middleware added above wraps the marimo app as well.
marimo_app = (
    marimo.create_asgi_app(include_code=True)
    .with_app(path="/", root=notebook_path)
    .build()
)
app.mount("/", marimo_app)
if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/frameworks/fastapi-auth/main.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:examples/frameworks/fastapi-auth/notebook.py | import marimo
__generated_with = "0.20.0"
app = marimo.App(width="medium")
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _(mo):
req = mo.app_meta().request
user = req.user if req else None
meta = req.meta if req else None
mo.md(f"""
## User info from `mo.app_meta().request`
- **user**: `{user}`
- **username**: `{user['username'] if isinstance(user, dict) else 'N/A'}`
- **meta**: `{meta}`
""")
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/frameworks/fastapi-auth/notebook.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/sql/explain.py | import marimo
# Smoke-test notebook: exercises DuckDB EXPLAIN / EXPLAIN ANALYZE rendering.
__generated_with = "0.20.2"
app = marimo.App(width="medium", auto_download=["html"], sql_output="native")
@app.cell
def _():
    import marimo as mo
    import duckdb as db
    return db, mo
@app.cell
def _(db):
    # In-memory database seeded with a small table for the EXPLAIN cells.
    connection = db.connect(":memory:")
    connection.sql(f"""
    CREATE OR REPLACE TABLE fake_data (
        id INTEGER,
        name VARCHAR,
        value INTEGER
    );
    INSERT INTO fake_data (id, name, value) VALUES
        (1, 'Alice', 100),
        (2, 'Bob', 150),
        (3, 'Charlie', 200),
        (4, 'David', 120),
        (5, 'Eve', 180);
    """)
    return (connection,)
@app.cell
def _(connection, mo):
    _df = mo.sql(
        f"""
        CREATE OR REPLACE TABLE students (name VARCHAR, sid INTEGER);
        CREATE OR REPLACE TABLE exams (eid INTEGER, subject VARCHAR, sid INTEGER);
        INSERT INTO students VALUES ('Mark', 1), ('Joe', 2), ('Matthew', 3);
        INSERT INTO exams VALUES (10, 'Physics', 1), (20, 'Chemistry', 2), (30, 'Literature', 3);
        """,
        engine=connection,
    )
    return
@app.cell
def _(connection, mo):
    _df = mo.sql(
        f"""
        EXPLAIN ANALYZE
        SELECT name
        FROM students
        JOIN exams USING (sid)
        WHERE name LIKE 'Ma%';
        """,
        engine=connection,
    )
    return
@app.cell
def _(mo):
    import duckdb
    mo.plain_text(repr(duckdb.sql("explain select 1").pl()))
    return
@app.cell
def _(connection):
    connection.sql("explain select * from fake_data")
    return
@app.cell
def _(connection, mo):
    mo.md(f"""
    {connection.sql("explain select * from fake_data")}
    """)
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/sql/explain.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_cli/errors.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
import click
from marimo._cli.install_hints import get_install_commands
from marimo._cli.print import bold, green, muted, red
class MarimoCLIError(click.ClickException):
    """Base class for marimo CLI errors."""

    def show(self, file: Any = None) -> None:
        """Print the formatted error, defaulting to stderr."""
        out = click.get_text_stream("stderr") if file is None else file
        ctx = getattr(self, "ctx", None)
        click.echo(
            f"{red('Error', bold=True)}: {self.format_message()}",
            file=out,
            color=None if ctx is None else ctx.color,
        )
class MarimoCLIRuntimeError(MarimoCLIError):
    """Raised for runtime or environment setup failures.

    Carries only a message; rendering is inherited from MarimoCLIError.
    """
class MarimoCLIMissingDependencyError(MarimoCLIError):
    """Raised when a required dependency is unavailable."""
    def __init__(
        self,
        message: str,
        packages: str | list[str] | tuple[str, ...],
        *,
        additional_tip: str | None = None,
        followup_commands: str | list[str] | tuple[str, ...] | None = None,
        followup_label: str = "Then run:",
    ) -> None:
        """Build a dependency error message with install and follow-up hints.

        Args:
            message: Leading human-readable description of what is missing.
            packages: Package name(s) used to render install commands.
            additional_tip: Optional trailing free-form tip.
            followup_commands: Command(s) to suggest after installation.
            followup_label: Label shown above the follow-up command.
        """
        # Normalize to the str-or-list shape get_install_commands accepts.
        package_spec: str | list[str]
        if isinstance(packages, str):
            package_spec = packages
        else:
            package_spec = list(packages)
        commands = get_install_commands(package_spec)
        lines = [message]
        # First command is the preferred installer; the second, when present,
        # is the pip fallback.
        if commands:
            primary_install = commands[0]
            lines.extend(
                [
                    "",
                    f"  {green('Tip:')} Install with:",
                    "",
                    f"    {primary_install}",
                ]
            )
            if len(commands) > 1:
                pip_install = commands[1]
                lines.extend(
                    [
                        "",
                        f"  {muted('Or with pip:')}",
                        "",
                        f"    {pip_install}",
                    ]
                )
        # Normalize follow-up commands, dropping empty/whitespace entries.
        followup: list[str]
        if followup_commands is None:
            followup = []
        elif isinstance(followup_commands, str):
            command = followup_commands.strip()
            followup = [command] if command else []
        else:
            followup = [
                command.strip()
                for command in followup_commands
                if command.strip()
            ]
        if followup:
            primary_followup = followup[0]
            lines.extend(
                [
                    "",
                    f"  {green('Tip:')} {followup_label.strip()}",
                    "",
                    f"    {bold(primary_followup)}",
                ]
            )
            if len(followup) > 1:
                fallback_followup = followup[1]
                lines.extend(
                    [
                        "",
                        f"  {muted('Or with fallback:')}",
                        "",
                        f"    {bold(fallback_followup)}",
                    ]
                )
        if additional_tip:
            lines.extend(["", f"  {additional_tip.strip()}"])
        super().__init__("\n".join(lines))
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/errors.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_cli/install_hints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Final
from marimo._config.packages import PackageManagerKind, infer_package_manager
from marimo._utils.platform import is_windows
@dataclass(frozen=True)
class CommandRule:
    """One declarative command template for a package manager.

    Rules are scanned in order; see `_resolve_template` for selection.
    """

    # Which package manager this rule applies to (e.g. "uv", "pip").
    manager: PackageManagerKind
    # Required context: "any" matches everywhere; "project" only in uv projects.
    context: str
    # Command template with placeholders such as {packages} or {command}.
    template: str
# Ordered install-command rules; the first matching rule wins, and the "pip"
# entry doubles as the universal fallback.
_INSTALL_COMMAND_RULES: Final[tuple[CommandRule, ...]] = (
    CommandRule("uv", "project", "uv add {packages}"),
    CommandRule("uv", "any", "uv pip install {packages}"),
    CommandRule("pixi", "any", "pixi add {packages}"),
    CommandRule("poetry", "any", "poetry add {packages}"),
    CommandRule("rye", "any", "rye add {packages}"),
    CommandRule("pip", "any", "python -m pip install {packages}"),
)
# Templates for running a command after installation (e.g. via "uv run").
_POST_INSTALL_COMMAND_RULES: Final[tuple[CommandRule, ...]] = (
    CommandRule("uv", "project", "uv run {command}"),
    CommandRule("pixi", "any", "pixi run {command}"),
    CommandRule("poetry", "any", "poetry run {command}"),
    CommandRule("rye", "any", "rye run {command}"),
    CommandRule("pip", "any", "{module_fallback}"),
)
# Templates for upgrading already-installed packages.
_UPGRADE_COMMAND_RULES: Final[tuple[CommandRule, ...]] = (
    CommandRule("uv", "project", "uv add --upgrade {packages}"),
    CommandRule("uv", "any", "uv pip install --upgrade {packages}"),
    CommandRule("pixi", "any", "pixi upgrade {packages}"),
    CommandRule("poetry", "any", "poetry update --no-interaction {packages}"),
    CommandRule("rye", "any", "rye sync --update {packages}"),
    CommandRule("pip", "any", "python -m pip install --upgrade {packages}"),
)
def _is_uv_project_context() -> bool:
    """Return whether the current environment matches a uv project venv."""
    # A UV_PROJECT_ENVIRONMENT that matches the active VIRTUAL_ENV is taken
    # as proof of a uv project environment.
    project_env = os.environ.get("UV_PROJECT_ENVIRONMENT")
    if project_env and project_env == os.environ.get("VIRTUAL_ENV"):
        return True
    # Otherwise look for a pyproject.toml + uv.lock pair up the directory tree.
    here = Path.cwd()
    return any(
        (candidate / "pyproject.toml").exists()
        and (candidate / "uv.lock").exists()
        for candidate in (here, *here.parents)
    )
def _normalize_packages(packages: str | list[str] | tuple[str, ...]) -> str:
    """Collapse a package spec (string or sequence) into one shell-ready string."""
    if isinstance(packages, str):
        tokens = packages.split()
    else:
        # Drop empty/whitespace-only entries from sequences.
        tokens = [entry.strip() for entry in packages if entry.strip()]
    quoted = (_quote_package_token(token) for token in tokens)
    return " ".join(quoted)
def _quote_package_token(package: str) -> str:
    """Single-quote a package spec containing extras brackets (POSIX shells only)."""
    has_brackets = "[" in package or "]" in package
    # Windows shells don't need (or want) POSIX quoting.
    if not has_brackets or is_windows():
        return package
    # POSIX-safe single quoting: close the quote, emit an escaped quote, reopen.
    return "'" + package.replace("'", "'\"'\"'") + "'"
def _normalize_command(command: str) -> str:
return command.strip()
def _resolve_manager_context() -> tuple[PackageManagerKind, str]:
    """Infer package manager and whether uv should use project-local commands."""
    manager = infer_package_manager()
    # Only uv distinguishes project-local commands from global ones.
    if manager == "uv" and _is_uv_project_context():
        return manager, "project"
    return manager, "any"
def _resolve_template(
rules: tuple[CommandRule, ...],
manager: PackageManagerKind,
*,
context: str,
) -> str | None:
"""Select the first command template matching manager and context."""
for rule in rules:
if rule.manager != manager:
continue
if rule.context not in ("any", context):
continue
return rule.template
return None
def _build_primary_and_fallback(
    *,
    rules: tuple[CommandRule, ...],
    format_args: dict[str, str],
) -> list[str]:
    """Build primary and pip-fallback commands from declarative templates."""
    manager, context = _resolve_manager_context()
    primary_template = _resolve_template(rules, manager, context=context)
    pip_template = _resolve_template(rules, "pip", context="any")
    # Every rule table declares a pip rule; it doubles as the fallback.
    assert pip_template is not None
    fallback_command = pip_template.format(**format_args)
    if primary_template is None:
        primary_command = fallback_command
    else:
        primary_command = primary_template.format(**format_args)
    # Collapse duplicates so pip-only environments show a single command.
    if primary_command == fallback_command:
        return [primary_command]
    return [primary_command, fallback_command]
def get_install_commands(
    packages: str | list[str] | tuple[str, ...],
) -> list[str]:
    """Return install-command suggestions for *packages* (primary + pip fallback)."""
    normalized = _normalize_packages(packages)
    # Nothing to install: no suggestions.
    if not normalized:
        return []
    return _build_primary_and_fallback(
        rules=_INSTALL_COMMAND_RULES,
        format_args={"packages": normalized},
    )
def get_post_install_commands(
    command: str, *, module_fallback: str | None = None
) -> list[str]:
    """Return run-command suggestions for a post-install step.

    ``module_fallback`` is the plain ``python -m ...`` form used when no
    package-manager runner applies; it defaults to the command itself.
    """
    run_command = _normalize_command(command)
    if not run_command:
        return []
    fallback_command = _normalize_command(module_fallback or run_command)
    if not fallback_command:
        return []
    format_args = {
        "command": run_command,
        "module_fallback": fallback_command,
    }
    return _build_primary_and_fallback(
        rules=_POST_INSTALL_COMMAND_RULES,
        format_args=format_args,
    )
def get_upgrade_commands(
    packages: str | list[str] | tuple[str, ...],
) -> list[str]:
    """Return upgrade-command suggestions for *packages* (primary + pip fallback)."""
    normalized = _normalize_packages(packages)
    # Nothing to upgrade: no suggestions.
    if not normalized:
        return []
    return _build_primary_and_fallback(
        rules=_UPGRADE_COMMAND_RULES,
        format_args={"packages": normalized},
    )
def get_playwright_chromium_setup_commands() -> list[str]:
    """Return commands for installing Playwright's Chromium browser post-install."""
    return get_post_install_commands(
        "playwright install chromium",
        module_fallback="python -m playwright install chromium",
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/install_hints.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_cli/parser_ux.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
import click
from marimo._cli.print import red
def _normalize_usage_message(message: str) -> str:
"""Normalize usage text for concise lowercase error output."""
message = message.strip()
if message.endswith("."):
message = message[:-1]
if message and message[0].isupper():
return message[0].lower() + message[1:]
return message
def _format_suggestion_tip(suggestions: list[str]) -> str:
suggestions = sorted(suggestions)
if len(suggestions) == 1:
return f" tip: a similar argument exists: {suggestions[0]!r}"
joined = ", ".join(repr(item) for item in suggestions)
return f" tip: some similar arguments exist: {joined}"
def _format_no_such_option(error: click.NoSuchOption) -> list[str]:
    """Format an unknown-option error, with suggestions when click has them."""
    header = f"{red('error')}: unexpected argument {error.option_name!r} found"
    lines = [header]
    if error.possibilities:
        # Blank line separates the error from the suggestion tip.
        lines.append("")
        lines.append(_format_suggestion_tip(list(error.possibilities)))
    return lines
def _format_generic_usage_error(error: click.UsageError) -> list[str]:
    """Format any other usage error, lowercasing the first message line."""
    message_lines = error.format_message().splitlines()
    if not message_lines:
        return [f"{red('error')}: usage error"]
    head, *rest = message_lines
    return [f"{red('error')}: {_normalize_usage_message(head)}", *rest]
def _format_usage_error(error: click.UsageError) -> list[str]:
    """Dispatch to the specialized formatter for the error's type."""
    if not isinstance(error, click.NoSuchOption):
        return _format_generic_usage_error(error)
    return _format_no_such_option(error)
def show_compact_usage_error(
    error: click.UsageError, file: Any = None
) -> None:
    """Print compact parser errors without the full command help block."""
    stream = file if file is not None else click.get_text_stream("stderr")
    ctx = error.ctx
    color = None if ctx is None else ctx.color
    for line in _format_usage_error(error):
        click.echo(line, file=stream, color=color)
    if ctx is None:
        return
    # Blank line, usage synopsis, blank line, then the --help pointer.
    click.echo(file=stream, color=color)
    click.echo(ctx.get_usage(), file=stream, color=color)
    click.echo(file=stream, color=color)
    click.echo(
        "For more information, try '--help'.",
        file=stream,
        color=color,
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/parser_ux.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_cli/suggestions.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from difflib import get_close_matches
from typing import TYPE_CHECKING
from marimo._utils.edit_distance import edit_distance
if TYPE_CHECKING:
from collections.abc import Iterable
def _dedupe(items: Iterable[str]) -> list[str]:
return list(dict.fromkeys(items))
def _lower_map(candidates: Iterable[str]) -> dict[str, str]:
lowered: dict[str, str] = {}
for candidate in candidates:
lowered.setdefault(candidate.lower(), candidate)
return lowered
def suggest_commands(token: str, candidates: Iterable[str]) -> list[str]:
    """Suggest close command names using difflib then Levenshtein fallback."""
    lowered = _lower_map(candidates)
    if not lowered:
        return []
    needle = token.lower()
    # Case-insensitive exact hit: the user typed a known command.
    if needle in lowered:
        return [lowered[needle]]
    # First pass: difflib's fuzzy matcher.
    fuzzy = get_close_matches(
        needle,
        list(lowered.keys()),
        n=3,
        cutoff=0.6,
    )
    if fuzzy:
        return [lowered[match] for match in fuzzy]
    # Second pass: rank by Levenshtein distance, ties broken alphabetically.
    ranked = sorted(
        (
            (edit_distance(needle, key), value)
            for key, value in lowered.items()
        ),
        key=lambda pair: (pair[0], pair[1]),
    )
    best_distance = ranked[0][0]
    # Distances beyond 2 are too far to be useful suggestions.
    if best_distance > 2:
        return []
    best = [value for distance, value in ranked if distance == best_distance]
    return best[:3]
def suggest_short_options(token: str, candidates: Iterable[str]) -> list[str]:
    """Suggest a single short-flag correction when the typo is unambiguous."""
    is_short_flag = token.startswith("-") and not token.startswith("--")
    if not is_short_flag:
        return []
    short_flags = _dedupe(
        option
        for option in candidates
        if option.startswith("-") and not option.startswith("--")
    )
    if not short_flags:
        return []
    # Case-insensitive exact hit, e.g. "-P" for "-p".
    needle = token.lower()
    for flag in short_flags:
        if flag.lower() == needle:
            return [flag]
    ranked = sorted(
        (edit_distance(token, flag), flag) for flag in short_flags
    )
    best_distance = ranked[0][0]
    if best_distance > 1:
        return []
    closest = [
        flag for distance, flag in ranked if distance == best_distance
    ]
    # Only suggest when the correction is unambiguous.
    if len(closest) != 1:
        return []
    return closest
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/suggestions.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_utils/edit_distance.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
def edit_distance(left: str, right: str) -> int:
    """Return the Levenshtein edit distance between two strings."""
    if left == right:
        return 0
    # Distance to/from the empty string is the other string's length.
    if not left:
        return len(right)
    if not right:
        return len(left)
    # Classic dynamic program keeping only the previous row.
    previous = list(range(len(right) + 1))
    for row, left_char in enumerate(left, start=1):
        current = [row]
        for col, right_char in enumerate(right, start=1):
            # Substitution is free when the characters already match.
            substitute = previous[col - 1] + (left_char != right_char)
            delete = previous[col] + 1
            insert = current[col - 1] + 1
            current.append(min(substitute, delete, insert))
        previous = current
    return previous[-1]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/edit_distance.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_cli/test_cli_errors.py | # Copyright 2026 Marimo. All rights reserved.
# NB. test_cli_errors.py because test_errors.py name causes pycache errors
# with pytest.
from __future__ import annotations
from io import StringIO
from pathlib import Path
from unittest.mock import patch
from marimo._cli.errors import MarimoCLIError, MarimoCLIMissingDependencyError
def test_missing_dependency_error_renders_followup_commands() -> None:
    """Follow-up commands render alongside the install tip."""
    with patch(
        "marimo._cli.errors.get_install_commands",
        return_value=[
            "poetry add nbconvert[webpdf]",
            "python -m pip install nbconvert[webpdf]",
        ],
    ):
        error = MarimoCLIMissingDependencyError(
            "Playwright is required for WebPDF export.",
            "nbconvert[webpdf]",
            followup_commands=[
                "poetry run playwright install chromium",
                "python -m playwright install chromium",
            ],
        )
        message = str(error)
    # Every section of the rendered tip should be present.
    for expected in (
        "Tip: Install with:",
        "poetry add nbconvert[webpdf]",
        "Or with pip:",
        "Then run:",
        "poetry run playwright install chromium",
        "Or with fallback:",
        "python -m playwright install chromium",
    ):
        assert expected in message
def test_missing_dependency_error_preserves_additional_tip() -> None:
    """A caller-provided extra tip survives message rendering."""
    with patch(
        "marimo._cli.errors.get_install_commands",
        return_value=["python -m pip install uv"],
    ):
        message = str(
            MarimoCLIMissingDependencyError(
                "uv must be installed to use --sandbox.",
                "uv",
                additional_tip="Install uv from https://github.com/astral-sh/uv",
            )
        )
    assert "python -m pip install uv" in message
    assert "Install uv from https://github.com/astral-sh/uv" in message
def test_chromium_setup_command_not_hardcoded_in_export_callsites() -> None:
    """Export call sites must derive the chromium setup command dynamically."""
    repo_root = Path(__file__).resolve().parents[2]
    export_dir = repo_root / "marimo" / "_cli" / "export"
    hardcoded = "python -m playwright install chromium"
    for filename in ("commands.py", "thumbnail.py"):
        contents = (export_dir / filename).read_text(encoding="utf-8")
        assert hardcoded not in contents
def test_marimo_cli_error_show_formats_error_prefix() -> None:
    """show() prefixes the message with the red 'Error' marker."""
    output = StringIO()
    with patch("marimo._cli.errors.red", return_value="<error>") as mock_red:
        MarimoCLIError("boom").show(file=output)
    mock_red.assert_called_once_with("Error", bold=True)
    assert output.getvalue() == "<error>: boom\n"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_cli_errors.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_cli/test_install_hints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from unittest.mock import patch
from marimo._cli.install_hints import (
get_install_commands,
get_playwright_chromium_setup_commands,
get_post_install_commands,
get_upgrade_commands,
)
def _patch_manager(kind: str):
    """Patch the inferred package manager for the duration of a test."""
    return patch(
        "marimo._cli.install_hints.infer_package_manager",
        return_value=kind,
    )
def _patch_uv_project(active: bool):
    """Patch uv project-context detection for the duration of a test."""
    return patch(
        "marimo._cli.install_hints._is_uv_project_context",
        return_value=active,
    )
def _patch_windows(value: bool):
    """Patch platform detection for the duration of a test."""
    return patch("marimo._cli.install_hints.is_windows", return_value=value)
def test_install_commands_uv_project() -> None:
    with _patch_manager("uv"), _patch_uv_project(True):
        assert get_install_commands("rich") == [
            "uv add rich",
            "python -m pip install rich",
        ]
def test_install_commands_uv_non_project() -> None:
    with _patch_manager("uv"), _patch_uv_project(False):
        assert get_install_commands("rich") == [
            "uv pip install rich",
            "python -m pip install rich",
        ]
def test_install_commands_for_pixi() -> None:
    with _patch_manager("pixi"):
        assert get_install_commands("nbconvert playwright") == [
            "pixi add nbconvert playwright",
            "python -m pip install nbconvert playwright",
        ]
def test_install_commands_for_pip_only() -> None:
    # Primary and fallback coincide, so only one command is suggested.
    with _patch_manager("pip"):
        assert get_install_commands(["nbformat"]) == [
            "python -m pip install nbformat"
        ]
def test_post_install_commands_uv_project() -> None:
    with _patch_manager("uv"), _patch_uv_project(True):
        assert get_post_install_commands(
            "playwright install chromium",
            module_fallback="python -m playwright install chromium",
        ) == [
            "uv run playwright install chromium",
            "python -m playwright install chromium",
        ]
def test_post_install_commands_uv_non_project() -> None:
    # Outside a uv project there is no runner, so only the fallback remains.
    with _patch_manager("uv"), _patch_uv_project(False):
        assert get_post_install_commands(
            "playwright install chromium",
            module_fallback="python -m playwright install chromium",
        ) == ["python -m playwright install chromium"]
def test_post_install_commands_poetry() -> None:
    with _patch_manager("poetry"):
        assert get_post_install_commands(
            "playwright install chromium",
            module_fallback="python -m playwright install chromium",
        ) == [
            "poetry run playwright install chromium",
            "python -m playwright install chromium",
        ]
def test_upgrade_commands_uv_project() -> None:
    with _patch_manager("uv"), _patch_uv_project(True):
        assert get_upgrade_commands("marimo") == [
            "uv add --upgrade marimo",
            "python -m pip install --upgrade marimo",
        ]
def test_upgrade_commands_uv_non_project() -> None:
    with _patch_manager("uv"), _patch_uv_project(False):
        assert get_upgrade_commands("marimo") == [
            "uv pip install --upgrade marimo",
            "python -m pip install --upgrade marimo",
        ]
def test_upgrade_commands_for_pixi() -> None:
    with _patch_manager("pixi"):
        assert get_upgrade_commands("marimo") == [
            "pixi upgrade marimo",
            "python -m pip install --upgrade marimo",
        ]
def test_upgrade_commands_for_pip_only() -> None:
    with _patch_manager("pip"):
        assert get_upgrade_commands(["marimo"]) == [
            "python -m pip install --upgrade marimo"
        ]
def test_playwright_chromium_setup_commands() -> None:
    with _patch_manager("pip"):
        assert get_playwright_chromium_setup_commands() == [
            "python -m playwright install chromium"
        ]
def test_install_commands_quotes_extras_on_posix() -> None:
    # Extras brackets are glob characters in POSIX shells and must be quoted.
    with _patch_manager("uv"), _patch_uv_project(True), _patch_windows(False):
        assert get_install_commands(["marimo[sandbox]", "pyzmq"]) == [
            "uv add 'marimo[sandbox]' pyzmq",
            "python -m pip install 'marimo[sandbox]' pyzmq",
        ]
def test_install_commands_does_not_quote_extras_on_windows() -> None:
    with _patch_manager("uv"), _patch_uv_project(True), _patch_windows(True):
        assert get_install_commands(["marimo[sandbox]", "pyzmq"]) == [
            "uv add marimo[sandbox] pyzmq",
            "python -m pip install marimo[sandbox] pyzmq",
        ]
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_install_hints.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_cli/test_suggestions.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._cli.suggestions import suggest_commands, suggest_short_options
def test_suggest_commands_close_match() -> None:
    # A dropped leading letter should still resolve via fuzzy matching.
    candidates = ["edit", "export", "run", "tutorial"]
    assert suggest_commands("xport", candidates) == ["export"]
def test_suggest_commands_case_insensitive_exact() -> None:
    # Exact matches resolve regardless of case and are returned alone.
    candidates = ["edit", "export", "run"]
    assert suggest_commands("EXPORT", candidates) == ["export"]
def test_suggest_short_option_case_variant() -> None:
    # A wrong-case short flag maps to its canonical spelling.
    candidates = ["-p", "-h", "-q"]
    assert suggest_short_options("-P", candidates) == ["-p"]
def test_suggest_short_option_ambiguous_returns_empty() -> None:
    # Several equally-close flags: suggest nothing rather than guess.
    candidates = ["-p", "-q", "-d", "-h"]
    assert suggest_short_options("-Z", candidates) == []
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_suggestions.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_edit_distance.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._utils.edit_distance import edit_distance
def test_edit_distance() -> None:
    # Classic textbook pair: kitten -> sitting requires 3 edits.
    assert edit_distance("kitten", "sitting") == 3
    # Identical strings short-circuit to zero.
    assert edit_distance("export", "export") == 0
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_edit_distance.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_ai/_tools/tools/dependency_graph.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from collections import deque
from dataclasses import dataclass, field
from typing import Optional
from marimo._ai._tools.base import ToolBase
from marimo._ai._tools.types import SuccessResult, ToolGuidelines
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._ast.errors import (
CycleError,
MultipleDefinitionError,
UnparsableError,
)
from marimo._runtime.dataflow.graph import DirectedGraph
from marimo._types.ids import CellId_t, SessionId
@dataclass
class GetCellDependencyGraphArgs:
    """Arguments for the get-cell-dependency-graph tool."""
    # Session to inspect (from get_active_notebooks).
    session_id: SessionId
    # Optional cell to center the graph on; None returns the full graph.
    cell_id: Optional[CellId_t] = None
    # Hop limit around cell_id; None means the full transitive closure.
    depth: Optional[int] = None
@dataclass
class VariableInfo:
    """A variable a cell defines, with its kind (e.g. "variable")."""
    name: str
    kind: str
@dataclass
class CellDependencyInfo:
    """One cell's defs, refs, and direct graph neighbors."""
    cell_id: str
    cell_name: str
    defs: list[VariableInfo]
    refs: list[str]
    parent_cell_ids: list[str]
    child_cell_ids: list[str]
@dataclass
class CycleInfo:
    """A dependency cycle: the cells involved and its directed edges."""
    cell_ids: list[str]
    edges: list[list[str]]
@dataclass
class GetCellDependencyGraphOutput(SuccessResult):
    """Tool result: per-cell info plus global ownership/cycle diagnostics."""
    cells: list[CellDependencyInfo] = field(default_factory=list)
    variable_owners: dict[str, list[str]] = field(default_factory=dict)
    multiply_defined: list[str] = field(default_factory=list)
    cycles: list[CycleInfo] = field(default_factory=list)
class GetCellDependencyGraph(
    ToolBase[GetCellDependencyGraphArgs, GetCellDependencyGraphOutput]
):
    """Get the cell dependency graph showing how cells relate through shared variables.
    This tool reveals which variables each cell defines and references, parent/child
    relationships between cells, variable ownership, and any dependency issues like
    multiply-defined variables or cycles.
    Use this tool to understand the dataflow structure of a notebook before making
    changes that involve shared variables.
    Args:
        session_id: The session ID of the notebook from get_active_notebooks.
        cell_id: Optional cell ID to center the graph on. If provided, only cells
            within the specified depth are included. If omitted, the full graph is returned.
        depth: Number of hops from the center cell to include (1 = direct parents/children,
            2 = two hops, etc.). Only used when cell_id is provided.
            Defaults to None (full transitive closure).
    Returns:
        A success result containing cell dependency info, variable ownership map,
        multiply-defined variables, and cycle information.
    """
    guidelines = ToolGuidelines(
        when_to_use=[
            "Before editing cells that define or reference shared variables",
            "When diagnosing MB002 (variable defined in multiple cells) errors",
            "To understand the dataflow structure and execution order of a notebook",
            "When you need to know which cell owns a particular variable",
        ],
        avoid_if=[
            "You only need cell code or outputs - use get_cell_runtime_data or get_cell_outputs instead",
        ],
        prerequisites=[
            "You must have a valid session id from an active notebook",
        ],
    )
    def handle(
        self, args: GetCellDependencyGraphArgs
    ) -> GetCellDependencyGraphOutput:
        """Build the dependency-graph report for the requested session."""
        session = self.context.get_session(args.session_id)
        app = session.app_file_manager.app
        cell_manager = app.cell_manager
        # app.graph calls _maybe_initialize() which raises on cycles or
        # multiply-defined variables. Those are exactly the issues this
        # tool is designed to *report*, so we catch and continue — the
        # graph is still usable after the exception (the finally block
        # in _maybe_initialize sets _initialized = True).
        try:
            graph = app.graph
        except (CycleError, MultipleDefinitionError):
            graph = app._app._graph
        except UnparsableError as e:
            raise ToolExecutionError(
                str(e),
                code="UNPARSABLE_NOTEBOOK",
                is_retryable=False,
                suggested_fix="Fix the syntax errors in the notebook cells first",
            ) from e
        # Validate depth parameter
        if args.depth is not None:
            if args.depth < 0:
                raise ToolExecutionError(
                    f"depth must be non-negative, got {args.depth}",
                    code="BAD_ARGUMENTS",
                    is_retryable=False,
                    suggested_fix="Use depth >= 0 (1 = direct parents/children, 2 = two hops, etc.)",
                )
            if args.cell_id is None:
                raise ToolExecutionError(
                    "depth requires a cell_id to center the graph on",
                    code="BAD_ARGUMENTS",
                    is_retryable=False,
                    suggested_fix="Provide a cell_id when using depth, or omit depth for the full graph",
                )
        # Determine which cells to include
        if args.cell_id is not None:
            if args.cell_id not in graph.cells:
                raise ToolExecutionError(
                    f"Cell {args.cell_id} not found in the dependency graph",
                    code="CELL_NOT_FOUND",
                    is_retryable=False,
                    suggested_fix="Use get_lightweight_cell_map to find valid cell IDs",
                )
            included_cell_ids = _get_cells_within_depth(
                graph, args.cell_id, args.depth
            )
        else:
            included_cell_ids = set(graph.cells.keys())
        # Build cell dependency info (in notebook order)
        cells: list[CellDependencyInfo] = []
        for cell_data in cell_manager.cell_data():
            cid = cell_data.cell_id
            if cid not in included_cell_ids or cid not in graph.cells:
                continue
            cell_impl = graph.cells[cid]
            defs: list[VariableInfo] = []
            for var_name in sorted(cell_impl.defs):
                # Default the kind to "variable"; refine it from the most
                # recent VariableData entry when one exists.
                kind = "variable"
                if var_name in cell_impl.variable_data:
                    vd_list = cell_impl.variable_data[var_name]
                    if vd_list:
                        kind = vd_list[-1].kind
                defs.append(VariableInfo(name=var_name, kind=kind))
            cells.append(
                CellDependencyInfo(
                    cell_id=cid,
                    cell_name=cell_data.name,
                    defs=defs,
                    refs=sorted(cell_impl.refs),
                    parent_cell_ids=sorted(graph.parents.get(cid, set())),
                    child_cell_ids=sorted(graph.children.get(cid, set())),
                )
            )
        # Variable owners (always global)
        variable_owners: dict[str, list[str]] = {}
        for var_name, defining_cells in graph.definitions.items():
            variable_owners[var_name] = sorted(defining_cells)
        multiply_defined = sorted(graph.get_multiply_defined())
        # Cycles (sorted for deterministic output)
        cycles: list[CycleInfo] = []
        for cycle_edges in sorted(
            graph.cycles,
            key=lambda edges: sorted({cid for e in edges for cid in e}),
        ):
            cycle_cell_ids: set[str] = set()
            edges_list: list[list[str]] = []
            for parent_id, child_id in cycle_edges:
                cycle_cell_ids.add(parent_id)
                cycle_cell_ids.add(child_id)
                edges_list.append([parent_id, child_id])
            cycles.append(
                CycleInfo(
                    cell_ids=sorted(cycle_cell_ids),
                    edges=edges_list,
                )
            )
        return GetCellDependencyGraphOutput(
            cells=cells,
            variable_owners=variable_owners,
            multiply_defined=multiply_defined,
            cycles=cycles,
            next_steps=_build_next_steps(
                multiply_defined, cycles, args.cell_id
            ),
        )
def _get_cells_within_depth(
graph: DirectedGraph,
center_cell_id: CellId_t,
depth: Optional[int],
) -> set[CellId_t]:
if depth is None:
all_related = graph.ancestors(center_cell_id) | graph.descendants(
center_cell_id
)
all_related.add(center_cell_id)
return all_related
result: set[CellId_t] = {center_cell_id}
queue: deque[tuple[CellId_t, int]] = deque([(center_cell_id, 0)])
while queue:
current, current_depth = queue.popleft()
if current_depth >= depth:
continue
neighbors: set[CellId_t] = set()
neighbors.update(graph.parents.get(current, set()))
neighbors.update(graph.children.get(current, set()))
for neighbor in neighbors:
if neighbor not in result:
result.add(neighbor)
queue.append((neighbor, current_depth + 1))
return result
def _build_next_steps(
multiply_defined: list[str],
cycles: list[CycleInfo],
cell_id: Optional[CellId_t],
) -> list[str]:
next_steps: list[str] = []
if multiply_defined:
names = ", ".join(multiply_defined[:5])
suffix = "..." if len(multiply_defined) > 5 else ""
next_steps.append(
f"Fix {len(multiply_defined)} multiply-defined variable(s): {names}{suffix}"
)
if cycles:
next_steps.append(
f"Resolve {len(cycles)} dependency cycle(s) to ensure correct execution order"
)
if cell_id is not None:
next_steps.append(
"Use get_cell_runtime_data to inspect the code of related cells"
)
else:
next_steps.append(
"Use cell_id parameter to focus on a specific cell's dependencies"
)
return next_steps
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ai/_tools/tools/dependency_graph.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_ai/tools/tools/test_dependency_graph.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from unittest.mock import Mock
import pytest
from marimo._ai._tools.base import ToolContext
from marimo._ai._tools.tools.dependency_graph import (
GetCellDependencyGraph,
GetCellDependencyGraphArgs,
VariableInfo,
)
from marimo._ai._tools.utils.exceptions import ToolExecutionError
from marimo._types.ids import CellId_t, SessionId
def _make_cell_impl(
defs: set[str],
refs: set[str],
variable_data: dict[str, list[Mock]] | None = None,
) -> Mock:
cell = Mock()
cell.defs = defs
cell.refs = refs
if variable_data is None:
variable_data = {name: [Mock(kind="variable")] for name in defs}
cell.variable_data = variable_data
return cell
def _make_cell_data(cell_id: str, name: str = "") -> Mock:
    """Build a mock CellData carrying just a typed id and a name."""
    data = Mock()
    data.cell_id = CellId_t(cell_id)
    data.name = name
    return data
def _make_tool_with_graph(
    cell_impls: dict[str, Mock],
    parents: dict[str, set[str]],
    children: dict[str, set[str]],
    definitions: dict[str, set[str]],
    cell_data_list: list[Mock],
    cycles: set | None = None,
    ancestors_map: dict[str, set[str]] | None = None,
    descendants_map: dict[str, set[str]] | None = None,
) -> GetCellDependencyGraph:
    """Assemble a GetCellDependencyGraph tool over a fully mocked session and graph."""
    graph = Mock()
    # Re-key every mapping with CellId_t so lookups match production types.
    graph.cells = {CellId_t(k): v for k, v in cell_impls.items()}
    graph.parents = {
        CellId_t(k): {CellId_t(p) for p in v} for k, v in parents.items()
    }
    graph.children = {
        CellId_t(k): {CellId_t(c) for c in v} for k, v in children.items()
    }
    graph.definitions = {
        k: {CellId_t(c) for c in v} for k, v in definitions.items()
    }
    graph.cycles = cycles or set()
    # A variable is multiply defined when more than one cell defines it.
    graph.get_multiply_defined.return_value = [
        name for name, defs in definitions.items() if len(defs) > 1
    ]
    if ancestors_map:
        graph.ancestors = lambda cid: {
            CellId_t(a) for a in ancestors_map.get(cid, set())
        }
    else:
        graph.ancestors = lambda _cid: set()
    if descendants_map:
        graph.descendants = lambda cid: {
            CellId_t(d) for d in descendants_map.get(cid, set())
        }
    else:
        graph.descendants = lambda _cid: set()
    cell_manager = Mock()
    cell_manager.cell_data.return_value = cell_data_list
    mock_app = Mock()
    mock_app.graph = graph
    mock_app.cell_manager = cell_manager
    mock_session = Mock()
    mock_session.app_file_manager.app = mock_app
    # The tool resolves everything through context.get_session(...).
    context = Mock(spec=ToolContext)
    context.get_session.return_value = mock_session
    tool = GetCellDependencyGraph(ToolContext())
    tool.context = context
    return tool
def test_full_graph_basic():
"""Three cells in a chain: c1 defines x, c2 refs x defines y, c3 refs y defines z."""
tool = _make_tool_with_graph(
cell_impls={
"c1": _make_cell_impl(defs={"x"}, refs=set()),
"c2": _make_cell_impl(defs={"y"}, refs={"x"}),
"c3": _make_cell_impl(defs={"z"}, refs={"y"}),
},
parents={"c1": set(), "c2": {"c1"}, "c3": {"c2"}},
children={"c1": {"c2"}, "c2": {"c3"}, "c3": set()},
definitions={"x": {"c1"}, "y": {"c2"}, "z": {"c3"}},
cell_data_list=[
_make_cell_data("c1", "imports"),
_make_cell_data("c2", "transform"),
_make_cell_data("c3", "output"),
],
)
result = tool.handle(
GetCellDependencyGraphArgs(session_id=SessionId("s1"))
)
assert result.status == "success"
assert len(result.cells) == 3
# Check cell ordering matches cell_data order
assert result.cells[0].cell_id == "c1"
assert result.cells[0].cell_name == "imports"
assert result.cells[1].cell_id == "c2"
assert result.cells[2].cell_id == "c3"
# Check defs/refs
assert result.cells[0].defs == [VariableInfo(name="x", kind="variable")]
assert result.cells[0].refs == []
assert result.cells[1].refs == ["x"]
assert result.cells[2].refs == ["y"]
# Check parent/child relationships
assert result.cells[0].parent_cell_ids == []
assert result.cells[0].child_cell_ids == ["c2"]
assert result.cells[1].parent_cell_ids == ["c1"]
assert result.cells[1].child_cell_ids == ["c3"]
assert result.cells[2].parent_cell_ids == ["c2"]
assert result.cells[2].child_cell_ids == []
# Check variable owners
assert result.variable_owners == {"x": ["c1"], "y": ["c2"], "z": ["c3"]}
assert result.multiply_defined == []
assert result.cycles == []
def test_cell_id_with_depth_1():
"""Centered on c2 with depth=1 should include c1, c2, c3 but not c0."""
tool = _make_tool_with_graph(
cell_impls={
"c0": _make_cell_impl(defs={"w"}, refs=set()),
"c1": _make_cell_impl(defs={"x"}, refs={"w"}),
"c2": _make_cell_impl(defs={"y"}, refs={"x"}),
"c3": _make_cell_impl(defs={"z"}, refs={"y"}),
},
parents={"c0": set(), "c1": {"c0"}, "c2": {"c1"}, "c3": {"c2"}},
children={"c0": {"c1"}, "c1": {"c2"}, "c2": {"c3"}, "c3": set()},
definitions={"w": {"c0"}, "x": {"c1"}, "y": {"c2"}, "z": {"c3"}},
cell_data_list=[
_make_cell_data("c0"),
_make_cell_data("c1"),
_make_cell_data("c2"),
_make_cell_data("c3"),
],
)
result = tool.handle(
GetCellDependencyGraphArgs(
session_id=SessionId("s1"),
cell_id=CellId_t("c2"),
depth=1,
)
)
cell_ids = [c.cell_id for c in result.cells]
assert "c0" not in cell_ids
assert "c1" in cell_ids
assert "c2" in cell_ids
assert "c3" in cell_ids
# variable_owners is still global
assert "w" in result.variable_owners
def test_cell_id_with_depth_none():
    """Centered on c2 with depth=None should return full transitive closure."""
    chain = ("c0", "c1", "c2", "c3")
    graph_tool = _make_tool_with_graph(
        cell_impls={
            "c0": _make_cell_impl(defs={"w"}, refs=set()),
            "c1": _make_cell_impl(defs={"x"}, refs={"w"}),
            "c2": _make_cell_impl(defs={"y"}, refs={"x"}),
            "c3": _make_cell_impl(defs={"z"}, refs={"y"}),
        },
        parents={"c0": set(), "c1": {"c0"}, "c2": {"c1"}, "c3": {"c2"}},
        children={"c0": {"c1"}, "c1": {"c2"}, "c2": {"c3"}, "c3": set()},
        definitions={"w": {"c0"}, "x": {"c1"}, "y": {"c2"}, "z": {"c3"}},
        cell_data_list=[_make_cell_data(cid) for cid in chain],
        # Precomputed closure maps consulted when depth is unbounded.
        ancestors_map={"c2": {"c0", "c1"}},
        descendants_map={"c2": {"c3"}},
    )
    response = graph_tool.handle(
        GetCellDependencyGraphArgs(
            session_id=SessionId("s1"),
            cell_id=CellId_t("c2"),
            depth=None,
        )
    )
    # Every cell in the chain is part of the transitive closure of c2.
    assert {cell.cell_id for cell in response.cells} == set(chain)
def test_cell_id_not_found():
    """Invalid cell_id should raise ToolExecutionError."""
    empty_tool = _make_tool_with_graph(
        cell_impls={},
        parents={},
        children={},
        definitions={},
        cell_data_list=[],
    )
    args = GetCellDependencyGraphArgs(
        session_id=SessionId("s1"),
        cell_id=CellId_t("invalid"),
    )
    with pytest.raises(ToolExecutionError) as exc_info:
        empty_tool.handle(args)
    # The tool reports a structured error code, not just a message.
    assert exc_info.value.code == "CELL_NOT_FOUND"
def test_multiply_defined_variables():
    """Two cells defining the same variable should be reported."""
    graph_tool = _make_tool_with_graph(
        cell_impls={
            "c1": _make_cell_impl(defs={"x"}, refs=set()),
            "c2": _make_cell_impl(defs={"x"}, refs=set()),
        },
        parents={"c1": set(), "c2": set()},
        children={"c1": set(), "c2": set()},
        definitions={"x": {"c1", "c2"}},
        cell_data_list=[_make_cell_data("c1"), _make_cell_data("c2")],
    )
    response = graph_tool.handle(
        GetCellDependencyGraphArgs(session_id=SessionId("s1"))
    )
    # "x" is defined by both cells: flagged as multiply defined and
    # attributed to both owners.
    assert response.multiply_defined == ["x"]
    assert sorted(response.variable_owners["x"]) == ["c1", "c2"]
def test_variable_kind_info():
    """Variable kind should be extracted from variable_data."""
    graph_tool = _make_tool_with_graph(
        cell_impls={
            "c1": _make_cell_impl(
                defs={"bar", "foo"},
                refs=set(),
                variable_data={
                    "foo": [Mock(kind="function")],
                    "bar": [Mock(kind="variable")],
                },
            ),
        },
        parents={"c1": set()},
        children={"c1": set()},
        definitions={"foo": {"c1"}, "bar": {"c1"}},
        cell_data_list=[_make_cell_data("c1")],
    )
    response = graph_tool.handle(
        GetCellDependencyGraphArgs(session_id=SessionId("s1"))
    )
    cell_defs = response.cells[0].defs
    # defs are sorted by name, each carrying the kind from variable_data.
    assert cell_defs[0] == VariableInfo(name="bar", kind="variable")
    assert cell_defs[1] == VariableInfo(name="foo", kind="function")
def test_variable_owners_always_global():
    """variable_owners should include all variables even when depth-filtered."""
    graph_tool = _make_tool_with_graph(
        cell_impls={
            "c1": _make_cell_impl(defs={"x"}, refs=set()),
            "c2": _make_cell_impl(defs={"y"}, refs={"x"}),
            "c3": _make_cell_impl(defs={"z"}, refs=set()),
        },
        parents={"c1": set(), "c2": {"c1"}, "c3": set()},
        children={"c1": {"c2"}, "c2": set(), "c3": set()},
        definitions={"x": {"c1"}, "y": {"c2"}, "z": {"c3"}},
        cell_data_list=[
            _make_cell_data(cid) for cid in ("c1", "c2", "c3")
        ],
    )
    response = graph_tool.handle(
        GetCellDependencyGraphArgs(
            session_id=SessionId("s1"),
            cell_id=CellId_t("c1"),
            depth=1,
        )
    )
    # c3 is disconnected from c1, so depth filtering excludes it from cells...
    assert all(cell.cell_id != "c3" for cell in response.cells)
    # ...but its variable still appears in the global owners map.
    assert "z" in response.variable_owners
    assert response.variable_owners["z"] == ["c3"]
def test_empty_graph():
    """Empty graph should return empty results."""
    graph_tool = _make_tool_with_graph(
        cell_impls={},
        parents={},
        children={},
        definitions={},
        cell_data_list=[],
    )
    response = graph_tool.handle(
        GetCellDependencyGraphArgs(session_id=SessionId("s1"))
    )
    # Every result field degrades to its empty counterpart.
    assert response.cells == []
    assert response.variable_owners == {}
    assert response.multiply_defined == []
    assert response.cycles == []
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ai/tools/tools/test_dependency_graph.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_sql/sql_quoting.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import re
def quote_sql_identifier(identifier: str, *, dialect: str = "duckdb") -> str:
    """
    Quote a SQL identifier for the given dialect, escaping special characters.

    Args:
        identifier: The raw identifier string (database, schema, or table name).
        dialect: The SQL dialect.
            Double-quote style: "duckdb", "redshift", "postgresql"/"postgres".
            Backtick style: "clickhouse", "mysql", "bigquery".
            Unknown dialects return the identifier unquoted.

    Returns:
        The properly quoted identifier string.
    """
    double_quote_dialects = ("duckdb", "redshift", "postgresql", "postgres")
    backtick_dialects = ("clickhouse", "mysql", "bigquery")
    if dialect in double_quote_dialects:
        # Escape an embedded quote character by doubling it.
        return '"' + identifier.replace('"', '""') + '"'
    if dialect in backtick_dialects:
        return "`" + identifier.replace("`", "``") + "`"
    # Unknown dialect: return unquoted to avoid breaking databases
    # that treat quoted identifiers differently.
    return identifier
def quote_qualified_name(*parts: str, dialect: str = "duckdb") -> str:
    """
    Build a fully qualified name from parts, quoting each one.

    Example:
        quote_qualified_name("my db", "public", "my.table", dialect="duckdb")
        # => '"my db"."public"."my.table"'
    """
    # Quote every component independently, then join with the dot separator.
    quoted_parts = [quote_sql_identifier(part, dialect=dialect) for part in parts]
    return ".".join(quoted_parts)
def parse_fully_qualified_table_name(
    fully_qualified_table_name: str,
) -> tuple[str, str, str]:
    """
    Parse a fully qualified table name into (database, schema, table).

    Handles both quoted and unquoted identifiers:
    - "my.db"."schema"."table" => ("my.db", "schema", "table")
    - db.schema.table => ("db", "schema", "table")

    Raises ValueError for malformed input (unterminated quotes, stray quotes,
    empty unquoted segments, wrong number of parts).
    """
    # Fast path for simple unquoted identifiers (no quotes)
    if '"' not in fully_qualified_table_name:
        parts = fully_qualified_table_name.split(".")
        # Reject empty segments (e.g. "db..table") so the fast path agrees
        # with the regex below, which requires unquoted identifiers to be
        # non-empty ([^."]+). Previously "db..table" parsed as ("db","","table")
        # while '"db"..table' raised — an inconsistent contract.
        if len(parts) != 3 or not all(parts):
            raise ValueError(
                f"Invalid fully qualified table name: {fully_qualified_table_name}"
            )
        return parts[0], parts[1], parts[2]

    # Each identifier is either:
    # - a quoted identifier: "..." with escaped "" inside
    # - an unquoted identifier: no dots or quotes
    _ident = r'(?:"([^"]*(?:""[^"]*)*)"|([^."]+))'
    pattern = re.compile(rf"^{_ident}\.{_ident}\.{_ident}$")
    match = pattern.fullmatch(fully_qualified_table_name)
    if not match:
        raise ValueError(
            f"Invalid fully qualified table name: {fully_qualified_table_name}"
        )

    def _segment(quoted_group: int, unquoted_group: int) -> str:
        # Quoted segments need their escaped "" collapsed back to a single ".
        value = match.group(quoted_group)
        if value is not None:
            return value.replace('""', '"')
        return match.group(unquoted_group)

    return (
        _segment(1, 2),
        _segment(3, 4),
        _segment(5, 6),
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_sql/sql_quoting.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_sql/test_sql_quoting.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._sql.sql_quoting import (
parse_fully_qualified_table_name,
quote_qualified_name,
quote_sql_identifier,
)
class TestQuoteSqlIdentifier:
    """Table-driven tests for quote_sql_identifier across SQL dialects."""

    @pytest.mark.parametrize(
        ("identifier", "dialect", "expected"),
        [
            # DuckDB / Redshift / Postgres (double-quote style)
            ("table", "duckdb", '"table"'),
            ("my table", "duckdb", '"my table"'),
            ("nested.namespace", "duckdb", '"nested.namespace"'),
            ('has"quotes', "duckdb", '"has""quotes"'),
            ('double""already', "duckdb", '"double""""already"'),
            ("", "duckdb", '""'),
            ("unicode_ñoño", "duckdb", '"unicode_ñoño"'),
            ("back`ticks", "duckdb", '"back`ticks"'),
            # Redshift uses same double-quote style
            ("table", "redshift", '"table"'),
            ("my table", "redshift", '"my table"'),
            ('has"quotes', "redshift", '"has""quotes"'),
            # PostgreSQL aliases
            ("table", "postgresql", '"table"'),
            ("table", "postgres", '"table"'),
            # ClickHouse / MySQL (backtick style)
            ("table", "clickhouse", "`table`"),
            ("my table", "clickhouse", "`my table`"),
            ("nested.namespace", "clickhouse", "`nested.namespace`"),
            ("has`backtick", "clickhouse", "`has``backtick`"),
            ("double``already", "clickhouse", "`double````already`"),
            ("", "clickhouse", "``"),
            ("unicode_ñoño", "clickhouse", "`unicode_ñoño`"),
            ('has"quotes', "clickhouse", '`has"quotes`'),
            # MySQL same as clickhouse
            ("table", "mysql", "`table`"),
            ("has`backtick", "mysql", "`has``backtick`"),
            # BigQuery uses backtick style
            ("table", "bigquery", "`table`"),
            ("my table", "bigquery", "`my table`"),
            ("has`backtick", "bigquery", "`has``backtick`"),
            # Unknown dialect returns unquoted
            ("table", "sqlite", "table"),
            ("my table", "unknown", "my table"),
        ],
    )
    def test_quote_identifier(
        self, identifier: str, dialect: str, expected: str
    ) -> None:
        """Each (identifier, dialect) pair quotes to the expected literal."""
        assert quote_sql_identifier(identifier, dialect=dialect) == expected

    @pytest.mark.parametrize(
        "identifier",
        [
            "simple",
            "with spaces",
            "with.dots",
            'with"quotes',
            "with`backticks",
            "with'single'quotes",
            "mixed.dots and spaces",
            "slashes/and/paths",
        ],
    )
    def test_duckdb_roundtrip_safe(self, identifier: str) -> None:
        """Verify that quoting an identifier produces valid DuckDB syntax."""
        quoted = quote_sql_identifier(identifier, dialect="duckdb")
        # Must start and end with double quotes
        assert quoted.startswith('"')
        assert quoted.endswith('"')
        # Inner content should not have unescaped double quotes
        inner = quoted[1:-1]
        # After un-escaping "", we should get back the original
        assert inner.replace('""', '"') == identifier

    @pytest.mark.parametrize(
        "identifier",
        [
            "simple",
            "with spaces",
            "with.dots",
            "with`backticks",
            'with"quotes',
        ],
    )
    def test_clickhouse_roundtrip_safe(self, identifier: str) -> None:
        """Verify that quoting an identifier produces valid ClickHouse syntax."""
        quoted = quote_sql_identifier(identifier, dialect="clickhouse")
        # Same roundtrip property as DuckDB, but with backtick escaping.
        assert quoted.startswith("`")
        assert quoted.endswith("`")
        inner = quoted[1:-1]
        assert inner.replace("``", "`") == identifier
class TestQuoteQualifiedName:
    """Tests for joining and quoting multi-part qualified names."""

    @pytest.mark.parametrize(
        ("parts", "dialect", "expected"),
        [
            # Simple 3-part name
            (
                ("mydb", "public", "users"),
                "duckdb",
                '"mydb"."public"."users"',
            ),
            # Parts with special characters
            (
                ("my.db", "my schema", "my.table"),
                "duckdb",
                '"my.db"."my schema"."my.table"',
            ),
            # ClickHouse 2-part name
            (
                ("default", "events"),
                "clickhouse",
                "`default`.`events`",
            ),
            # Redshift 3-part name
            (
                ("catalog", "schema", "table"),
                "redshift",
                '"catalog"."schema"."table"',
            ),
            # Single part
            (
                ("just_table",),
                "duckdb",
                '"just_table"',
            ),
            # Parts with quotes in them
            (
                ('db"name', "schema", "table"),
                "duckdb",
                '"db""name"."schema"."table"',
            ),
            (
                ("db`name", "table"),
                "clickhouse",
                "`db``name`.`table`",
            ),
        ],
    )
    def test_quote_qualified_name(
        self, parts: tuple[str, ...], dialect: str, expected: str
    ) -> None:
        """Each part is quoted independently, then joined with dots."""
        assert quote_qualified_name(*parts, dialect=dialect) == expected
class TestParseFullyQualifiedTableName:
    """Tests for splitting a fully qualified name into (database, schema, table)."""

    @pytest.mark.parametrize(
        ("fqn", "expected"),
        [
            # Simple unquoted
            ("db.schema.table", ("db", "schema", "table")),
            # All quoted
            ('"db"."schema"."table"', ("db", "schema", "table")),
            # Quoted with dots inside
            (
                '"my.db"."my.schema"."my.table"',
                ("my.db", "my.schema", "my.table"),
            ),
            # Quoted with spaces
            (
                '"my db"."my schema"."my table"',
                ("my db", "my schema", "my table"),
            ),
            # Quoted with escaped double quotes
            ('"my""db"."schema"."table"', ('my"db', "schema", "table")),
            # Mixed quoted and unquoted
            ('"my.db".public.users', ("my.db", "public", "users")),
            # All parts have special chars
            (
                '"db.with.dots"."schema with spaces"."table""quoted"',
                ("db.with.dots", "schema with spaces", 'table"quoted'),
            ),
        ],
    )
    def test_parse_valid(
        self, fqn: str, expected: tuple[str, str, str]
    ) -> None:
        """Well-formed names parse into their three unescaped components."""
        assert parse_fully_qualified_table_name(fqn) == expected

    @pytest.mark.parametrize(
        "fqn",
        [
            "just_a_table",
            "two.parts",
            "four.parts.here.extra",
            "",
            '"single_quoted"',
            '"two"."parts"',
            # Malformed quoted FQNs: unterminated or stray quotes
            '"unterminated',
            '"db"."schema"."table',
            'db"."schema"."table"',
            '"db".schema"."table',
            'db.sch"ema.table',
        ],
    )
    def test_parse_invalid(self, fqn: str) -> None:
        """Malformed names raise ValueError with a descriptive message."""
        with pytest.raises(
            ValueError, match="Invalid fully qualified table name"
        ):
            parse_fully_qualified_table_name(fqn)

    def test_roundtrip_with_quote_qualified_name(self) -> None:
        """quote_qualified_name output should be parseable by parse_fully_qualified_table_name."""
        db, schema, table = "my.db", "my schema", 'table"name'
        fqn = quote_qualified_name(db, schema, table, dialect="duckdb")
        parsed = parse_fully_qualified_table_name(fqn)
        assert parsed == (db, schema, table)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_sql_quoting.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/storage/general.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "fsspec==2026.2.0",
# "marimo>=0.19.9",
# "obstore==0.8.2",
# "python-dotenv==1.2.1",
# "requests==2.32.5",
# "s3fs==2026.2.0",
# ]
# ///
import marimo

# Version of marimo that generated this notebook file.
__generated_with = "0.19.11"
# Notebook app object; the cells below register themselves on it.
app = marimo.App(width="medium")
@app.cell
def _():
    # Setup cell: import the filesystem backends used by the other cells.
    import marimo as mo
    from fsspec.implementations.local import LocalFileSystem
    from fsspec.implementations.github import GithubFileSystem
    import s3fs
    import obstore
    import os
    from dotenv import load_dotenv

    # Populate os.environ with CLOUDFLARE_* variables from a local .env file.
    load_dotenv()
    return GithubFileSystem, LocalFileSystem, os, s3fs
@app.cell
def _(LocalFileSystem):
    # Local filesystem handle in async mode; not returned, so not visible
    # to other cells.
    local = LocalFileSystem(asynchronous=True)
    return
@app.cell
def _(os):
    # Connect to an S3-compatible bucket (Cloudflare credentials from the
    # environment) using obstore's S3Store.
    from obstore.store import S3Store

    access_key_id = os.environ.get("CLOUDFLARE_ACCESS_KEY_ID")
    secret_access_key = os.environ.get("CLOUDFLARE_SECRET_ACCESS_KEY")
    url = os.environ.get("CLOUDFLARE_MARIMO_URL")
    store = S3Store.from_url(
        f"{url}/marimo-artifacts",
        access_key_id=access_key_id,
        secret_access_key=secret_access_key,
    )
    return (url,)
@app.cell
def _(GithubFileSystem):
    # fsspec filesystem rooted at the marimo GitHub repository.
    marimo_repo = GithubFileSystem(org="marimo-team", repo="marimo")
    return (marimo_repo,)
@app.cell
def _(marimo_repo):
    # Read a file straight from the repository via the github:// protocol.
    print(marimo_repo.read_text("github://AGENTS.md"))
    return
@app.cell
def _(os, s3fs, url):
    # Same bucket endpoint, accessed through s3fs instead of obstore.
    s3_client = s3fs.S3FileSystem(
        endpoint_url=url,
        key=os.getenv("CLOUDFLARE_ACCESS_KEY_ID"),
        secret=os.getenv("CLOUDFLARE_SECRET_ACCESS_KEY"),
    )
    return
if __name__ == "__main__":
    # Running the file directly executes the notebook as a script.
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/storage/general.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/api/endpoints/storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from starlette.authentication import requires
from marimo import _loggers
from marimo._server.api.utils import dispatch_control_request
from marimo._server.models.models import (
BaseResponse,
StorageDownloadRequest,
StorageListEntriesRequest,
)
from marimo._server.router import APIRouter
if TYPE_CHECKING:
from starlette.requests import Request
# Module-level logger and router shared by the storage endpoints below.
LOGGER = _loggers.marimo_logger()
router = APIRouter()
@router.post("/list_entries")
@requires("edit")
async def list_entries(request: Request) -> BaseResponse:
    """
    parameters:
        - in: header
          name: Marimo-Session-Id
          schema:
            type: string
          required: true
    requestBody:
        content:
            application/json:
                schema:
                    $ref: "#/components/schemas/StorageListEntriesRequest"
    responses:
        200:
            description: List storage entries at a prefix
            content:
                application/json:
                    schema:
                        $ref: "#/components/schemas/SuccessResponse"
    """
    # NOTE: the docstring above is an OpenAPI spec consumed by the docs
    # generator — keep its YAML structure intact.
    # Deserialize the request body into a StorageListEntriesRequest and
    # forward it to the session's kernel as a control request.
    return await dispatch_control_request(request, StorageListEntriesRequest)
@router.post("/download")
@requires("edit")
async def download(request: Request) -> BaseResponse:
    """
    parameters:
        - in: header
          name: Marimo-Session-Id
          schema:
            type: string
          required: true
    requestBody:
        content:
            application/json:
                schema:
                    $ref: "#/components/schemas/StorageDownloadRequest"
    responses:
        200:
            description: Download a storage entry
            content:
                application/json:
                    schema:
                        $ref: "#/components/schemas/SuccessResponse"
    """
    # NOTE: the docstring above is an OpenAPI spec consumed by the docs
    # generator — keep its YAML structure intact.
    # Deserialize the request body into a StorageDownloadRequest and forward
    # it to the session's kernel as a control request.
    return await dispatch_control_request(request, StorageDownloadRequest)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/storage.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:tests/_runtime/test_runtime_external_storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import threading
from unittest.mock import AsyncMock, MagicMock
import pytest
from dirty_equals import IsPositiveFloat, IsStr
from marimo._data._external_storage.models import StorageEntry
from marimo._data._external_storage.storage import Obstore
from marimo._dependencies.dependencies import DependencyManager
from marimo._messaging.notification import (
StorageDownloadReadyNotification,
StorageEntriesNotification,
)
from marimo._runtime.commands import (
ExecuteCellCommand,
StorageDownloadCommand,
StorageListEntriesCommand,
)
from marimo._types.ids import CellId_t, RequestId, VariableName
from tests.conftest import MockedKernel
# True when the optional obstore package is importable in this environment;
# gates the integration tests below.
HAS_OBSTORE = DependencyManager.obstore.has()
# Name of the notebook global that holds the storage backend in these tests.
STORAGE_VAR = "my_store"
class TestExternalStorageErrors:
    """Error-handling tests that don't require a real storage backend."""

    async def test_list_entries_variable_not_found(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Listing entries for an unknown variable emits an error notification."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        request = StorageListEntriesCommand(
            request_id=RequestId("req-1"),
            namespace="nonexistent_var",
            limit=100,
            prefix=None,
        )
        await k.handle_message(request)
        # Collect only the storage-entries notifications emitted on the stream.
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageEntriesNotification)
        ]
        assert results == [
            StorageEntriesNotification(
                request_id=RequestId("req-1"),
                entries=[],
                namespace="nonexistent_var",
                prefix=None,
                error="Variable 'nonexistent_var' not found",
            )
        ]

    async def test_list_entries_incompatible_backend(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """A plain (non-storage) variable yields a compatibility error."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code='not_storage = "just a string"',
                ),
            ]
        )
        request = StorageListEntriesCommand(
            request_id=RequestId("req-2"),
            namespace="not_storage",
            limit=100,
            prefix=None,
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageEntriesNotification)
        ]
        assert results == [
            StorageEntriesNotification(
                request_id=RequestId("req-2"),
                entries=[],
                namespace="not_storage",
                prefix=None,
                error=(
                    "Variable 'not_storage' is not a compatible "
                    "storage backend (expected obstore or fsspec)"
                ),
            )
        ]

    async def test_download_variable_not_found(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Downloading from an unknown variable emits an error notification."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        request = StorageDownloadCommand(
            request_id=RequestId("req-3"),
            namespace="nonexistent_var",
            path="data/file.csv",
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageDownloadReadyNotification)
        ]
        assert results == [
            StorageDownloadReadyNotification(
                request_id=RequestId("req-3"),
                url=None,
                filename=None,
                error="Variable 'nonexistent_var' not found",
            )
        ]

    async def test_download_incompatible_backend(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Downloading via a non-storage variable yields a compatibility error."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code="not_storage = 42",
                ),
            ]
        )
        request = StorageDownloadCommand(
            request_id=RequestId("req-4"),
            namespace="not_storage",
            path="data/file.csv",
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageDownloadReadyNotification)
        ]
        assert results == [
            StorageDownloadReadyNotification(
                request_id=RequestId("req-4"),
                url=None,
                filename=None,
                error=(
                    "Variable 'not_storage' is not a compatible "
                    "storage backend (expected obstore or fsspec)"
                ),
            )
        ]
@pytest.mark.skipif(not HAS_OBSTORE, reason="obstore not installed")
class TestExternalStorageCallbacks:
"""Integration tests using a real obstore MemoryStore."""
    async def test_list_entries(self, mocked_kernel: MockedKernel) -> None:
        """Objects under a prefix are listed with size, metadata and mime type."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        await store.put_async("data/file1.csv", b"a,b,c")
        await store.put_async("data/file2.txt", b"hello")
        request = StorageListEntriesCommand(
            request_id=RequestId("req-10"),
            namespace=STORAGE_VAR,
            limit=100,
            prefix="data/",
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageEntriesNotification)
        ]
        assert results == [
            StorageEntriesNotification(
                request_id=RequestId("req-10"),
                entries=[
                    StorageEntry(
                        path="data/file1.csv",
                        kind="object",
                        size=5,
                        last_modified=IsPositiveFloat(),  # pyright: ignore[reportArgumentType]
                        metadata={"e_tag": IsStr()},
                        mime_type="text/csv",
                    ),
                    StorageEntry(
                        path="data/file2.txt",
                        kind="object",
                        size=5,
                        last_modified=IsPositiveFloat(),  # pyright: ignore[reportArgumentType]
                        metadata={"e_tag": IsStr()},
                        mime_type="text/plain",
                    ),
                ],
                namespace=STORAGE_VAR,
                prefix="data/",
            )
        ]
    async def test_list_entries_with_limit(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """limit caps the number of entries returned (2 of the 3 objects)."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        await store.put_async("a.txt", b"1")
        await store.put_async("b.txt", b"2")
        await store.put_async("c.txt", b"3")
        request = StorageListEntriesCommand(
            request_id=RequestId("req-11"),
            namespace=STORAGE_VAR,
            limit=2,
            prefix=None,
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageEntriesNotification)
        ]
        # Only a.txt and b.txt come back; c.txt is cut off by limit=2.
        assert results == [
            StorageEntriesNotification(
                request_id=RequestId("req-11"),
                entries=[
                    StorageEntry(
                        path="a.txt",
                        kind="object",
                        size=1,
                        last_modified=IsPositiveFloat(),  # pyright: ignore[reportArgumentType]
                        metadata={"e_tag": IsStr()},
                        mime_type="text/plain",
                    ),
                    StorageEntry(
                        path="b.txt",
                        kind="object",
                        size=1,
                        last_modified=IsPositiveFloat(),  # pyright: ignore[reportArgumentType]
                        metadata={"e_tag": IsStr()},
                        mime_type="text/plain",
                    ),
                ],
                namespace=STORAGE_VAR,
                prefix=None,
            )
        ]
    async def test_list_entries_empty(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """An empty store lists no entries and reports no error."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        request = StorageListEntriesCommand(
            request_id=RequestId("req-12"),
            namespace=STORAGE_VAR,
            limit=100,
            prefix=None,
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageEntriesNotification)
        ]
        assert results == [
            StorageEntriesNotification(
                request_id=RequestId("req-12"),
                entries=[],
                namespace=STORAGE_VAR,
                prefix=None,
            )
        ]
    async def test_list_entries_backend_exception(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Backend exceptions surface as an error string, not a crash."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        # Use a real Obstore backend with a broken list_entries
        store = k.globals[VariableName(STORAGE_VAR)]
        broken_backend = Obstore(store, VariableName(STORAGE_VAR))
        broken_backend.list_entries = MagicMock(  # type: ignore[method-assign]
            side_effect=RuntimeError("connection timeout"),
        )
        with pytest.MonkeyPatch.context() as mp:
            # Force the kernel to resolve to our broken backend.
            mp.setattr(
                k.external_storage_callbacks,
                "_get_storage_backend",
                lambda _: (broken_backend, None),
            )
            request = StorageListEntriesCommand(
                request_id=RequestId("req-13"),
                namespace=STORAGE_VAR,
                limit=100,
                prefix=None,
            )
            await k.handle_message(request)
            results = [
                op
                for op in stream.operations
                if isinstance(op, StorageEntriesNotification)
            ]
            assert results == [
                StorageEntriesNotification(
                    request_id=RequestId("req-13"),
                    entries=[],
                    namespace=STORAGE_VAR,
                    prefix=None,
                    error="Failed to list entries: connection timeout",
                )
            ]
    async def test_list_entries_runs_in_background_thread(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """list_entries dispatches to a thread via asyncio.to_thread."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        real_backend = Obstore(store, VariableName(STORAGE_VAR))
        call_thread_name: str | None = None
        original_list = real_backend.list_entries

        # Spy wrapper: records which thread the backend call actually runs on.
        def spy_list_entries(
            prefix: str | None, *, limit: int = 100
        ) -> list[StorageEntry]:
            nonlocal call_thread_name
            call_thread_name = threading.current_thread().name
            return original_list(prefix, limit=limit)

        real_backend.list_entries = spy_list_entries  # type: ignore[method-assign]
        with pytest.MonkeyPatch.context() as mp:
            mp.setattr(
                k.external_storage_callbacks,
                "_get_storage_backend",
                lambda _: (real_backend, None),
            )
            request = StorageListEntriesCommand(
                request_id=RequestId("req-14"),
                namespace=STORAGE_VAR,
                limit=100,
                prefix=None,
            )
            await k.handle_message(request)
            # asyncio.to_thread runs the callable in a worker thread,
            # not the main thread
            assert call_thread_name is not None
            assert call_thread_name != threading.main_thread().name
            results = [
                op
                for op in stream.operations
                if isinstance(op, StorageEntriesNotification)
            ]
            assert results == [
                StorageEntriesNotification(
                    request_id=RequestId("req-14"),
                    entries=[],
                    namespace=STORAGE_VAR,
                    prefix=None,
                )
            ]
    async def test_download(self, mocked_kernel: MockedKernel) -> None:
        """Downloading a stored object reports a URL and the file's basename."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        await store.put_async("reports/data.csv", b"a,b,c\n1,2,3")
        request = StorageDownloadCommand(
            request_id=RequestId("req-20"),
            namespace=STORAGE_VAR,
            path="reports/data.csv",
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageDownloadReadyNotification)
        ]
        # The filename is the path's basename; the URL shape is backend-dependent.
        assert results == [
            StorageDownloadReadyNotification(
                request_id=RequestId("req-20"),
                url=IsStr(),  # pyright: ignore[reportArgumentType]
                filename="data.csv",
            )
        ]
    async def test_download_uses_signed_url_when_available(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """When the backend can sign URLs, the signed URL is returned directly."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        backend = Obstore(store, VariableName(STORAGE_VAR))
        backend.sign_download_url = AsyncMock(  # type: ignore[method-assign]
            return_value="https://signed.example.com/data.csv?token=abc",
        )
        # download_file should NOT be called when signing succeeds
        backend.download_file = AsyncMock(  # type: ignore[method-assign]
            side_effect=AssertionError("should not be called"),
        )
        with pytest.MonkeyPatch.context() as mp:
            mp.setattr(
                k.external_storage_callbacks,
                "_get_storage_backend",
                lambda _: (backend, None),
            )
            request = StorageDownloadCommand(
                request_id=RequestId("req-22"),
                namespace=STORAGE_VAR,
                path="reports/data.csv",
            )
            await k.handle_message(request)
            results = [
                op
                for op in stream.operations
                if isinstance(op, StorageDownloadReadyNotification)
            ]
            assert results == [
                StorageDownloadReadyNotification(
                    request_id=RequestId("req-22"),
                    url="https://signed.example.com/data.csv?token=abc",
                    filename="data.csv",
                )
            ]
    async def test_download_falls_back_when_signing_returns_none(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """A backend that cannot sign URLs falls back to a virtual file."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        await store.put_async("local/readme.txt", b"hello world")
        # MemoryStore can't sign, so sign_download_url returns None
        # and the handler falls back to virtual file
        request = StorageDownloadCommand(
            request_id=RequestId("req-23"),
            namespace=STORAGE_VAR,
            path="local/readme.txt",
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageDownloadReadyNotification)
        ]
        assert results == [
            StorageDownloadReadyNotification(
                request_id=RequestId("req-23"),
                url=IsStr(),  # pyright: ignore[reportArgumentType]
                filename="readme.txt",
            )
        ]
    async def test_download_fallback_schedules_cleanup(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """The virtual-file fallback schedules cleanup via loop.call_later(60, ...)."""
        import asyncio

        k = mocked_kernel.k
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        await store.put_async("tmp/file.bin", b"data")
        request = StorageDownloadCommand(
            request_id=RequestId("req-24"),
            namespace=STORAGE_VAR,
            path="tmp/file.bin",
        )
        loop = asyncio.get_running_loop()
        scheduled: list[tuple[float, object]] = []
        original_call_later = loop.call_later

        # Spy on call_later to capture the cleanup delay without blocking it.
        def spy_call_later(
            delay: float, callback: object, *args: object
        ) -> asyncio.TimerHandle:
            scheduled.append((delay, callback))
            return original_call_later(delay, callback, *args)

        with pytest.MonkeyPatch.context() as mp:
            mp.setattr(loop, "call_later", spy_call_later)
            await k.handle_message(request)
            assert len(scheduled) == 1
            delay, _callback = scheduled[0]
            assert delay == 60
    async def test_download_backend_exception(
        self, mocked_kernel: MockedKernel
    ) -> None:
        """Download failures surface as an error notification, not a crash."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        broken_backend = Obstore(store, VariableName(STORAGE_VAR))
        # Simulate a permissions failure inside the backend's download path.
        broken_backend.download_file = AsyncMock(  # type: ignore[method-assign]
            side_effect=PermissionError("access denied"),
        )
        with pytest.MonkeyPatch.context() as mp:
            mp.setattr(
                k.external_storage_callbacks,
                "_get_storage_backend",
                lambda _: (broken_backend, None),
            )
            request = StorageDownloadCommand(
                request_id=RequestId("req-21"),
                namespace=STORAGE_VAR,
                path="secret/file.bin",
            )
            await k.handle_message(request)
            results = [
                op
                for op in stream.operations
                if isinstance(op, StorageDownloadReadyNotification)
            ]
            assert results == [
                StorageDownloadReadyNotification(
                    request_id=RequestId("req-21"),
                    url=None,
                    filename=None,
                    error="Failed to download: access denied",
                )
            ]
    async def test_download_preview(self, mocked_kernel: MockedKernel) -> None:
        """preview=True serves the object through a virtual-file URL."""
        k = mocked_kernel.k
        stream = mocked_kernel.stream
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id=CellId_t("0"),
                    code=(
                        "from obstore.store import MemoryStore\n"
                        f"{STORAGE_VAR} = MemoryStore()"
                    ),
                ),
            ]
        )
        store = k.globals[VariableName(STORAGE_VAR)]
        await store.put_async("docs/readme.txt", b"hello world")
        request = StorageDownloadCommand(
            request_id=RequestId("req-30"),
            namespace=STORAGE_VAR,
            path="docs/readme.txt",
            preview=True,
        )
        await k.handle_message(request)
        results = [
            op
            for op in stream.operations
            if isinstance(op, StorageDownloadReadyNotification)
        ]
        assert results == [
            StorageDownloadReadyNotification(
                request_id=RequestId("req-30"),
                url=IsStr(),  # pyright: ignore[reportArgumentType]
                filename="readme.txt",
                error=None,
            )
        ]
        notif = results[0]
        assert notif.url is not None
        # Preview returns a virtual file URL, not a signed URL
        assert "@file/" in notif.url
async def test_download_preview_skips_signed_url(
    self, mocked_kernel: MockedKernel
) -> None:
    """Preview mode should use read_range, never sign_download_url."""
    kernel = mocked_kernel.k
    cell_code = (
        "from obstore.store import MemoryStore\n"
        f"{STORAGE_VAR} = MemoryStore()"
    )
    await kernel.run(
        [ExecuteCellCommand(cell_id=CellId_t("0"), code=cell_code)]
    )
    store = kernel.globals[VariableName(STORAGE_VAR)]
    await store.put_async("file.py", b"print('hello')")
    guarded_backend = Obstore(store, VariableName(STORAGE_VAR))
    # Signing must not happen in preview mode; fail loudly if it does.
    guarded_backend.sign_download_url = AsyncMock(  # type: ignore[method-assign]
        side_effect=AssertionError("should not be called"),
    )
    with pytest.MonkeyPatch.context() as mp:
        mp.setattr(
            kernel.external_storage_callbacks,
            "_get_storage_backend",
            lambda _: (guarded_backend, None),
        )
        await kernel.handle_message(
            StorageDownloadCommand(
                request_id=RequestId("req-31"),
                namespace=STORAGE_VAR,
                path="file.py",
                preview=True,
            )
        )
    notifications = [
        msg
        for msg in mocked_kernel.stream.operations
        if isinstance(msg, StorageDownloadReadyNotification)
    ]
    assert notifications == [
        StorageDownloadReadyNotification(
            request_id=RequestId("req-31"),
            url=IsStr(),  # pyright: ignore[reportArgumentType]
            filename="file.py",
            error=None,
        )
    ]
async def test_download_preview_schedules_cleanup(
    self, mocked_kernel: MockedKernel
) -> None:
    """A preview download schedules exactly one delayed cleanup callback."""
    import asyncio

    kernel = mocked_kernel.k
    cell_code = (
        "from obstore.store import MemoryStore\n"
        f"{STORAGE_VAR} = MemoryStore()"
    )
    await kernel.run(
        [ExecuteCellCommand(cell_id=CellId_t("0"), code=cell_code)]
    )
    store = kernel.globals[VariableName(STORAGE_VAR)]
    await store.put_async("tmp/preview.txt", b"data")
    command = StorageDownloadCommand(
        request_id=RequestId("req-32"),
        namespace=STORAGE_VAR,
        path="tmp/preview.txt",
        preview=True,
    )
    loop = asyncio.get_running_loop()
    recorded: list[tuple[float, object]] = []
    real_call_later = loop.call_later

    def spy_call_later(
        delay: float, callback: object, *args: object
    ) -> asyncio.TimerHandle:
        # Record the scheduling request, then delegate to the real loop.
        recorded.append((delay, callback))
        return real_call_later(delay, callback, *args)  # pyright: ignore[reportArgumentType]

    with pytest.MonkeyPatch.context() as mp:
        mp.setattr(loop, "call_later", spy_call_later)
        await kernel.handle_message(command)
    assert len(recorded) == 1
    delay, _ = recorded[0]
    assert delay == 60
async def test_download_preview_backend_exception(
    self, mocked_kernel: MockedKernel
) -> None:
    """A backend error during preview surfaces as an error notification."""
    kernel = mocked_kernel.k
    cell_code = (
        "from obstore.store import MemoryStore\n"
        f"{STORAGE_VAR} = MemoryStore()"
    )
    await kernel.run(
        [ExecuteCellCommand(cell_id=CellId_t("0"), code=cell_code)]
    )
    store = kernel.globals[VariableName(STORAGE_VAR)]
    failing_backend = Obstore(store, VariableName(STORAGE_VAR))
    # Preview reads through read_range; make that step fail.
    failing_backend.read_range = AsyncMock(  # type: ignore[method-assign]
        side_effect=OSError("read failed"),
    )
    with pytest.MonkeyPatch.context() as mp:
        mp.setattr(
            kernel.external_storage_callbacks,
            "_get_storage_backend",
            lambda _: (failing_backend, None),
        )
        await kernel.handle_message(
            StorageDownloadCommand(
                request_id=RequestId("req-33"),
                namespace=STORAGE_VAR,
                path="broken/file.txt",
                preview=True,
            )
        )
    notifications = [
        msg
        for msg in mocked_kernel.stream.operations
        if isinstance(msg, StorageDownloadReadyNotification)
    ]
    assert notifications == [
        StorageDownloadReadyNotification(
            request_id=RequestId("req-33"),
            url=None,
            filename=None,
            error="Failed to download: read failed",
        )
    ]
async def test_download_preview_variable_not_found(
    self, mocked_kernel: MockedKernel
) -> None:
    """Previewing through an unknown namespace reports a lookup error."""
    kernel = mocked_kernel.k
    await kernel.handle_message(
        StorageDownloadCommand(
            request_id=RequestId("req-34"),
            namespace="nonexistent_var",
            path="file.txt",
            preview=True,
        )
    )
    notifications = [
        msg
        for msg in mocked_kernel.stream.operations
        if isinstance(msg, StorageDownloadReadyNotification)
    ]
    assert notifications == [
        StorageDownloadReadyNotification(
            request_id=RequestId("req-34"),
            url=None,
            filename=None,
            error="Variable 'nonexistent_var' not found",
        )
    ]
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_runtime_external_storage.py",
"license": "Apache License 2.0",
"lines": 806,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/api/endpoints/test_storage_endpoints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from tests._server.mocks import token_header, with_read_session, with_session
if TYPE_CHECKING:
from starlette.testclient import TestClient
SESSION_ID = "session-123"
HEADERS = {
"Marimo-Session-Id": SESSION_ID,
**token_header("fake-token"),
}
@with_session(SESSION_ID)
def test_list_entries(client: TestClient) -> None:
    """Listing entries succeeds for an interactive (edit-mode) session."""
    payload = {
        "requestId": "test_request_id",
        "namespace": "my_store",
        "limit": 100,
    }
    response = client.post(
        "/api/storage/list_entries", headers=HEADERS, json=payload
    )
    assert response.status_code == 200, response.text
    assert response.json()["success"] is True
@with_session(SESSION_ID)
def test_list_entries_with_prefix(client: TestClient) -> None:
    """Listing entries honors an optional path-prefix filter."""
    payload = {
        "requestId": "test_request_id",
        "namespace": "my_store",
        "limit": 50,
        "prefix": "data/images/",
    }
    response = client.post(
        "/api/storage/list_entries", headers=HEADERS, json=payload
    )
    assert response.status_code == 200, response.text
    assert response.json()["success"] is True
@with_session(SESSION_ID)
def test_download(client: TestClient) -> None:
    """Requesting a download succeeds for an edit-mode session."""
    payload = {
        "requestId": "test_request_id",
        "namespace": "my_store",
        "path": "data/file.csv",
    }
    response = client.post(
        "/api/storage/download", headers=HEADERS, json=payload
    )
    assert response.status_code == 200, response.text
    assert response.json()["success"] is True
@with_read_session(SESSION_ID)
def test_list_entries_fails_in_read_mode(client: TestClient) -> None:
    """Read-only sessions may not list storage entries."""
    payload = {
        "requestId": "test_request_id",
        "namespace": "my_store",
        "limit": 100,
    }
    response = client.post(
        "/api/storage/list_entries", headers=HEADERS, json=payload
    )
    # Unauthorized: storage browsing requires an edit-mode session.
    assert response.status_code == 401
@with_read_session(SESSION_ID)
def test_download_fails_in_read_mode(client: TestClient) -> None:
    """Read-only sessions may not trigger storage downloads."""
    payload = {
        "requestId": "test_request_id",
        "namespace": "my_store",
        "path": "data/file.csv",
    }
    response = client.post(
        "/api/storage/download", headers=HEADERS, json=payload
    )
    # Unauthorized: downloads require an edit-mode session.
    assert response.status_code == 401
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_storage_endpoints.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_session/state/test_session_external_storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from inline_snapshot import snapshot
from marimo._data._external_storage.models import StorageNamespace
from marimo._messaging.notification import (
StorageNamespacesNotification,
VariableDeclarationNotification,
VariablesNotification,
)
from marimo._messaging.serde import serialize_kernel_message
from marimo._session.state.session_view import SessionView
from marimo._types.ids import CellId_t, VariableName
cell_id = CellId_t("cell_1")
def test_add_storage_namespaces(session_view: SessionView) -> None:
    """Namespace notifications merge by name: updates overwrite, new names append."""
    # Add initial namespaces
    session_view.add_raw_notification(
        serialize_kernel_message(
            StorageNamespacesNotification(
                namespaces=[
                    StorageNamespace(
                        name=VariableName("s3_store"),
                        display_name="s3_store",
                        protocol="s3",
                        root_path="my-bucket",
                        storage_entries=[],
                    ),
                    StorageNamespace(
                        name=VariableName("local_fs"),
                        display_name="local_fs",
                        protocol="file",
                        root_path="/data",
                        storage_entries=[],
                    ),
                ]
            )
        )
    )
    assert session_view.external_storage_namespaces.namespaces == snapshot(
        [
            StorageNamespace(
                name=VariableName("s3_store"),
                display_name="s3_store",
                protocol="s3",
                root_path="my-bucket",
                storage_entries=[],
            ),
            StorageNamespace(
                name=VariableName("local_fs"),
                display_name="local_fs",
                protocol="file",
                root_path="/data",
                storage_entries=[],
            ),
        ]
    )
    # Add a new namespace and update an existing one
    session_view.add_raw_notification(
        serialize_kernel_message(
            StorageNamespacesNotification(
                namespaces=[
                    StorageNamespace(
                        name=VariableName("s3_store"),
                        display_name="s3_store_updated",
                        protocol="s3",
                        root_path="new-bucket",
                        storage_entries=[],
                    ),
                    StorageNamespace(
                        name=VariableName("gcs_store"),
                        display_name="gcs_store",
                        protocol="gcs",
                        root_path="gcs-bucket",
                        storage_entries=[],
                    ),
                ]
            )
        )
    )
    # s3_store is replaced in place; local_fs is untouched; gcs_store appended.
    assert session_view.external_storage_namespaces.namespaces == snapshot(
        [
            StorageNamespace(
                name=VariableName("s3_store"),
                display_name="s3_store_updated",
                protocol="s3",
                root_path="new-bucket",
                storage_entries=[],
            ),
            StorageNamespace(
                name=VariableName("local_fs"),
                display_name="local_fs",
                protocol="file",
                root_path="/data",
                storage_entries=[],
            ),
            StorageNamespace(
                name=VariableName("gcs_store"),
                display_name="gcs_store",
                protocol="gcs",
                root_path="gcs-bucket",
                storage_entries=[],
            ),
        ]
    )
    ns_by_name = {
        ns.name: ns
        for ns in session_view.external_storage_namespaces.namespaces
    }
    # Updated
    assert (
        ns_by_name[VariableName("s3_store")].display_name == "s3_store_updated"
    )
    assert ns_by_name[VariableName("s3_store")].root_path == "new-bucket"
    # Check namespaces appear in notifications
    assert (
        session_view.external_storage_namespaces in session_view.notifications
    )
def test_storage_namespaces_filtered_by_variables(
    session_view: SessionView,
) -> None:
    """Namespaces are scoped to variables currently declared in the notebook."""
    # Add storage namespaces
    session_view.add_notification(
        StorageNamespacesNotification(
            namespaces=[
                StorageNamespace(
                    name=VariableName("s3_store"),
                    display_name="s3_store",
                    protocol="s3",
                    root_path="bucket-1",
                    storage_entries=[],
                ),
                StorageNamespace(
                    name=VariableName("gcs_store"),
                    display_name="gcs_store",
                    protocol="gcs",
                    root_path="bucket-2",
                    storage_entries=[],
                ),
            ]
        )
    )
    assert len(session_view.external_storage_namespaces.namespaces) == 2
    # Declare both variables in scope
    session_view.add_notification(
        VariablesNotification(
            variables=[
                VariableDeclarationNotification(
                    name="s3_store", declared_by=[cell_id], used_by=[]
                ),
                VariableDeclarationNotification(
                    name="gcs_store", declared_by=[cell_id], used_by=[]
                ),
            ]
        )
    )
    assert len(session_view.external_storage_namespaces.namespaces) == 2
    # Remove gcs_store from variables => only s3_store remains
    session_view.add_notification(
        VariablesNotification(
            variables=[
                VariableDeclarationNotification(
                    name="s3_store", declared_by=[cell_id], used_by=[]
                ),
            ]
        )
    )
    assert session_view.external_storage_namespaces.namespaces == snapshot(
        [
            StorageNamespace(
                name=VariableName("s3_store"),
                display_name="s3_store",
                protocol="s3",
                root_path="bucket-1",
                storage_entries=[],
            ),
        ]
    )
    # Remove all variables => no namespaces remain
    session_view.add_notification(VariablesNotification(variables=[]))
    assert session_view.external_storage_namespaces.namespaces == []
def test_storage_namespaces_empty_not_in_notifications(
    session_view: SessionView,
) -> None:
    """Empty storage namespaces should not appear in notifications."""
    assert session_view.external_storage_namespaces.namespaces == []
    # With no namespaces registered, no namespaces message is replayed.
    assert not any(
        isinstance(op, StorageNamespacesNotification)
        for op in session_view.notifications
    )
def test_storage_namespaces_in_notifications(
    session_view: SessionView,
) -> None:
    """Non-empty storage namespaces should appear in notifications."""
    session_view.add_notification(
        StorageNamespacesNotification(
            namespaces=[
                StorageNamespace(
                    name=VariableName("store"),
                    display_name="store",
                    protocol="s3",
                    root_path="bucket",
                    storage_entries=[],
                ),
            ]
        )
    )
    # The replayed notification stream must include the namespaces message.
    storage_ops = [
        op
        for op in session_view.notifications
        if isinstance(op, StorageNamespacesNotification)
    ]
    assert storage_ops == snapshot(
        [
            StorageNamespacesNotification(
                namespaces=[
                    StorageNamespace(
                        name=VariableName("store"),
                        display_name="store",
                        protocol="s3",
                        root_path="bucket",
                        storage_entries=[],
                    ),
                ]
            )
        ]
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_session/state/test_session_external_storage.py",
"license": "Apache License 2.0",
"lines": 227,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_runtime/cell_output_list.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import threading
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from marimo._output.hypertext import Html
class CellOutputList:
    """Thread-safe container for a cell's imperatively constructed output.

    Every read and mutation is guarded by a reentrant lock so the list can
    be modified from `mo.Thread`s while being rendered elsewhere.
    """

    def __init__(self) -> None:
        # Outputs in arrival order.
        self._items: list[Html] = []
        # RLock so a thread holding the lock can re-enter (e.g. repr during
        # debugging inside a locked section).
        self._lock = threading.RLock()

    def append(self, item: Html) -> None:
        """Append `item` to the end of the output list."""
        with self._lock:
            self._items.append(item)

    def clear(self) -> None:
        """Remove all items."""
        with self._lock:
            self._items.clear()

    def replace_at_index(self, item: Html, idx: int) -> None:
        """Replace the item at `idx`; `idx == len(self)` appends instead.

        Raises:
            IndexError: if `idx` is negative or greater than the current
                length. Negative indices are rejected explicitly — the
                previous check (`idx > len`) let them slip through and
                silently overwrite items counted from the end.
        """
        with self._lock:
            if idx < 0 or idx > len(self._items):
                raise IndexError(
                    f"idx is {idx}, must be >= 0 and <= {len(self._items)}"
                )
            if idx == len(self._items):
                self._items.append(item)
            else:
                self._items[idx] = item

    def remove(self, value: object) -> None:
        """Remove every item that is `value` (identity, not equality)."""
        with self._lock:
            self._items[:] = [
                item for item in self._items if item is not value
            ]

    def stack(self) -> Html | None:
        """Return `vstack` of the items, or `None` if empty."""
        with self._lock:
            if self._items:
                # Imported lazily to avoid a circular import at module load.
                from marimo._plugins.stateless.flex import vstack

                return vstack(self._items)
            return None

    def __bool__(self) -> bool:
        with self._lock:
            return bool(self._items)

    def __len__(self) -> int:
        with self._lock:
            return len(self._items)

    def __repr__(self) -> str:
        with self._lock:
            return f"CellOutputList({self._items!r})"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/cell_output_list.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_smoke_tests/threads/multiple_appends.py | import marimo
__generated_with = "0.20.1"
app = marimo.App()
@app.cell
def _():
    import marimo as mo
    import time
    import threading

    return mo, threading, time


@app.cell
def _(mo, threading, time):
    def append():
        # Each call adds three lines to the cell's output, tagged with the
        # calling thread's id; concurrent threads interleave their appends.
        for i in range(3):
            mo.output.append(f"{i}: Hello from {threading.get_ident()}")
            time.sleep(1)

    return (append,)


@app.cell
def _(mo, threading, time):
    def replace():
        # Unlike append(), each iteration overwrites the cell's output.
        for i in range(3):
            mo.output.replace(f"{i}: Hello from {threading.get_ident()}")
            time.sleep(1)

    return (replace,)


@app.cell
def _(mo):
    def run_threads(fn):
        # Run fn on three mo.Threads and block until all finish.
        _threads = [mo.Thread(target=fn) for _ in range(3)]
        for _t in _threads:
            _t.start()
        for _t in _threads:
            _t.join()

    return (run_threads,)


@app.cell
def _(append, run_threads):
    run_threads(append)
    return


@app.cell
def _(replace, run_threads):
    run_threads(replace)
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/threads/multiple_appends.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/threads/progress_bar_threads.py | import marimo
__generated_with = "0.20.1"
app = marimo.App()
@app.cell
def _():
    import marimo as mo
    import random
    import time
    import threading

    return mo, random, threading, time


@app.cell
def _(mo, random, threading, time):
    def step(pbar: mo.status.progress_bar, work: int):
        # Advance the shared progress bar `work` times from this thread.
        for _ in range(work):
            # Sleep... or anything else that releases GIL
            time.sleep(random.uniform(0.5, 1))
            pbar.update(
                subtitle=f"work completed by thread {threading.get_ident()}"
            )

    return (step,)


@app.cell
def _(mo, random, step, time):
    total = 30
    with mo.status.progress_bar(total=total) as pbar:
        # Split the work across four mo.Threads; the main thread handles
        # whatever does not divide evenly.
        n_threads = 4
        work = total // n_threads
        remainder = total % n_threads
        threads = [
            mo.Thread(target=step, args=(pbar, work))
            for _ in range(n_threads)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for _ in range(remainder):
            time.sleep(random.uniform(0.5, 1))
            pbar.update(subtitle="work completed by main thread")
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/threads/progress_bar_threads.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_runtime/test_cell_output_list.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from unittest.mock import MagicMock
import pytest
from marimo._runtime.cell_output_list import CellOutputList
def _html(text: str) -> MagicMock:
m = MagicMock()
m.__repr__ = lambda _: text
return m
class TestCellOutputList:
    """Behavioral tests for the thread-safe CellOutputList container."""

    def test_append_and_len(self) -> None:
        outputs = CellOutputList()
        assert len(outputs) == 0
        outputs.append(_html("a"))
        outputs.append(_html("b"))
        assert len(outputs) == 2

    def test_bool(self) -> None:
        outputs = CellOutputList()
        assert not outputs
        outputs.append(_html("a"))
        assert outputs

    def test_clear(self) -> None:
        outputs = CellOutputList()
        outputs.append(_html("a"))
        outputs.clear()
        assert len(outputs) == 0

    def test_replace_at_index(self) -> None:
        outputs = CellOutputList()
        outputs.append(_html("a"))
        outputs.append(_html("b"))
        new_item = _html("c")
        outputs.replace_at_index(new_item, 0)
        # Replacement happens in place; the length is unchanged.
        assert outputs._items[0] is new_item
        assert len(outputs) == 2

    def test_replace_at_index_appends_at_end(self) -> None:
        outputs = CellOutputList()
        outputs.append(_html("a"))
        tail_item = _html("b")
        # idx == len() is the append case, not an error.
        outputs.replace_at_index(tail_item, 1)
        assert len(outputs) == 2
        assert outputs._items[1] is tail_item

    def test_replace_at_index_out_of_range(self) -> None:
        outputs = CellOutputList()
        with pytest.raises(IndexError):
            outputs.replace_at_index(_html("a"), 5)

    def test_remove_by_identity(self) -> None:
        outputs = CellOutputList()
        first = _html("a")
        second = _html("b")
        outputs.append(first)
        outputs.append(second)
        outputs.remove(first)
        # Removal compares by identity, not equality.
        assert len(outputs) == 1
        assert outputs._items[0] is second

    def test_stack_empty_returns_none(self) -> None:
        assert CellOutputList().stack() is None
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_cell_output_list.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_plugins/ui/_impl/mpl.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import base64
import io
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Final,
Protocol,
cast,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._output.rich_help import mddoc
from marimo._plugins.core.web_component import JSONType
from marimo._plugins.ui._core.ui_element import UIElement
if TYPE_CHECKING:
import numpy as np
from matplotlib.axes import Axes # type: ignore[import-untyped]
from matplotlib.figure import Figure # type: ignore[import-untyped]
from numpy.typing import ArrayLike, NDArray
class MatplotlibSelection(Protocol):
    """Structural interface shared by box, lasso, and empty selections."""

    def get_mask(self, x: ArrayLike, y: ArrayLike) -> NDArray[np.bool_]: ...
@dataclass(frozen=True)
class BoxSelection:
    """A rectangular box selection on a matplotlib plot.

    Attributes:
        x_min: Left boundary of the selection.
        x_max: Right boundary of the selection.
        y_min: Bottom boundary of the selection.
        y_max: Top boundary of the selection.
    """

    x_min: float
    x_max: float
    y_min: float
    y_max: float

    def get_mask(self, x: ArrayLike, y: ArrayLike) -> NDArray[np.bool_]:
        """Get a boolean mask for points within this selection.

        Args:
            x: Array-like of x-coordinates.
            y: Array-like of y-coordinates.

        Returns:
            A boolean numpy array where `True` indicates the point is
            within the selection (boundaries inclusive).
        """
        import numpy as np

        xs = np.asarray(x)
        ys = np.asarray(y)
        # A point is selected when it lies inside both coordinate ranges.
        in_x = (self.x_min <= xs) & (xs <= self.x_max)
        in_y = (self.y_min <= ys) & (ys <= self.y_max)
        return in_x & in_y
@dataclass(frozen=True)
class LassoSelection:
    """A freehand polygon (lasso) selection on a matplotlib plot.

    Attributes:
        vertices: The polygon vertices as a tuple of (x, y) pairs.
    """

    vertices: tuple[tuple[float, float], ...]

    def get_mask(self, x: ArrayLike, y: ArrayLike) -> NDArray[np.bool_]:
        """Get a boolean mask for points within this selection.

        Args:
            x: Array-like of x-coordinates.
            y: Array-like of y-coordinates.

        Returns:
            A boolean numpy array where `True` indicates the point is
            within the lasso polygon.
        """
        import numpy as np
        from matplotlib.path import Path  # type: ignore[import-untyped]

        # Test each (x, y) pair against the polygon formed by the vertices.
        coords = np.column_stack([np.asarray(x), np.asarray(y)])
        return Path(self.vertices).contains_points(coords)
@dataclass(frozen=True)
class EmptySelection:
    """Sentinel representing no selection.

    Returned by `mo.ui.matplotlib.value` when nothing is selected. It acts
    like a selection containing no points and is falsy in boolean contexts.
    """

    def get_mask(self, x: ArrayLike, y: ArrayLike) -> NDArray[np.bool_]:  # noqa: ARG002
        """Return an all-``False`` mask.

        Args:
            x: Array-like of x-coordinates.
            y: Array-like of y-coordinates.

        Returns:
            A boolean numpy array of all ``False``.
        """
        import numpy as np

        n_points = len(np.asarray(x))
        return np.full(n_points, False)

    def __bool__(self) -> bool:
        # Falsy so `if selection:` reads naturally.
        return False
def _figure_pixel_size(figure: Figure) -> tuple[float, float]:
"""Get figure dimensions in pixels."""
dpi = figure.get_dpi()
width_in, height_in = figure.get_size_inches()
return width_in * dpi, height_in * dpi
def _figure_to_base64(figure: Figure) -> str:
"""Render a matplotlib figure to a base64-encoded PNG data URL."""
buf = io.BytesIO()
figure.savefig(buf, format="png", dpi=figure.get_dpi(), bbox_inches=None)
buf.seek(0)
encoded = base64.b64encode(buf.read()).decode("utf-8")
buf.close()
return f"data:image/png;base64,{encoded}"
@mddoc
class matplotlib(UIElement[dict[str, JSONType], MatplotlibSelection]):
    """Make reactive selections on matplotlib plots.

    Use `mo.ui.matplotlib` to make matplotlib plots interactive: draw a box
    selection or a freehand lasso selection on the frontend, then use the
    selection geometry in Python to filter your data.

    The figure is rendered as a static image with an interactive selection
    overlay:

    - click and drag for box selection;
    - hold the `Shift` key and drag for lasso selection.

    Example:
        ```python
        import matplotlib.pyplot as plt
        import marimo as mo
        import numpy as np

        x = np.arange(5)
        y = x**2
        plt.scatter(x=x, y=y)
        ax = mo.ui.matplotlib(plt.gca())
        ax
        ```

        ```python
        # Filter data using the selection
        mask = ax.value.get_mask(x, y)
        selected_x, selected_y = x[mask], y[mask]
        ```

        ```python
        # Check if anything is selected
        if ax.value:
            print("Data has been selected")
        ```

    Attributes:
        value: The selected data, with `get_mask(x, y)` returning a
            mask array corresponding to the selection.

    Args:
        axes: A matplotlib `Axes` object. The full figure is rendered,
            but selections map to this axes' coordinate space.
        debounce: If `True`, the selection is only sent to Python on
            mouse-up. If `False` (the default), it streams while dragging.
    """

    # Custom-element tag rendered on the frontend.
    name: Final[str] = "marimo-matplotlib"

    def __init__(self, axes: Axes, *, debounce: bool = False) -> None:
        DependencyManager.matplotlib.require("for `mo.ui.matplotlib`")

        from matplotlib.figure import Figure  # type: ignore[import-untyped]

        figure = axes.get_figure()
        if not isinstance(figure, Figure):
            raise ValueError("Axes must be attached to a figure.")
        self._ax: Axes = axes

        # Validate scales first (fail fast, no side effects)
        _SUPPORTED_SCALES = ("linear", "log")
        x_scale = axes.get_xscale()
        y_scale = axes.get_yscale()
        if x_scale not in _SUPPORTED_SCALES:
            raise ValueError(
                f"Unsupported x-axis scale {x_scale!r}. "
                f"mo.ui.matplotlib supports: {', '.join(_SUPPORTED_SCALES)}."
            )
        if y_scale not in _SUPPORTED_SCALES:
            raise ValueError(
                f"Unsupported y-axis scale {y_scale!r}. "
                f"mo.ui.matplotlib supports: {', '.join(_SUPPORTED_SCALES)}."
            )

        # Render the figure first — savefig triggers the draw which
        # finalizes layout (tight_layout, constrained_layout, etc.)
        chart_base64 = _figure_to_base64(figure)

        # Now capture axes position — reflects post-layout bounds
        fig_width_px, fig_height_px = _figure_pixel_size(figure)
        bbox = axes.get_position()
        # Convert the axes' figure-fraction bbox (origin bottom-left) to
        # pixel bounds with a top-left origin, which the frontend overlay
        # expects — hence the (1 - y) flips.
        axes_pixel_bounds: list[float] = [
            bbox.x0 * fig_width_px,  # left
            (1 - bbox.y1) * fig_height_px,  # top
            bbox.x1 * fig_width_px,  # right
            (1 - bbox.y0) * fig_height_px,  # bottom
        ]

        super().__init__(
            component_name=matplotlib.name,
            initial_value={},
            label="",
            args={
                "chart-base64": chart_base64,
                "x-bounds": list(axes.get_xlim()),
                "y-bounds": list(axes.get_ylim()),
                "axes-pixel-bounds": axes_pixel_bounds,
                "width": fig_width_px,
                "height": fig_height_px,
                "debounce": debounce,
                "x-scale": x_scale,
                "y-scale": y_scale,
            },
            on_change=None,
        )

    @property
    def axes(self) -> Axes:
        """The associated matplotlib Axes object."""
        return self._ax

    def _convert_value(
        self, value: dict[str, JSONType]
    ) -> MatplotlibSelection:
        # Translate the frontend's JSON payload into a typed selection.
        if not value or not value.get("has_selection"):
            return EmptySelection()
        sel_type = value.get("type")
        data = value.get("data")
        if sel_type == "box":
            data = cast(dict[str, float], data)
            return BoxSelection(
                x_min=float(data["x_min"]),
                x_max=float(data["x_max"]),
                y_min=float(data["y_min"]),
                y_max=float(data["y_max"]),
            )
        if sel_type == "lasso":
            data = cast(list[list[float]], data)
            return LassoSelection(
                vertices=tuple((float(v[0]), float(v[1])) for v in data),
            )
        # Unknown payloads degrade to "no selection" rather than raising.
        return EmptySelection()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_plugins/ui/_impl/mpl.py",
"license": "Apache License 2.0",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_smoke_tests/ui_matplotlib.py | import marimo
__generated_with = "0.20.1"
app = marimo.App()
@app.cell
def _():
    # Smoke test: MNIST embedding scatter with reactive selection.
    import marimo as mo
    import matplotlib.pyplot as plt
    import pymde
    import numpy as np

    return mo, np, plt, pymde


@app.cell
def _(pymde):
    mnist = pymde.datasets.MNIST()
    return (mnist,)


@app.cell
def _(mnist, mo, pymde):
    @mo.persistent_cache
    def compute_embedding():
        # Cached on disk — the embedding is expensive to compute.
        return pymde.preserve_neighbors(
            mnist.data, constraint=pymde.Standardized(), verbose=True
        ).embed(verbose=True)

    return (compute_embedding,)


@app.cell
def _(compute_embedding):
    embedding = compute_embedding()
    return (embedding,)


@app.cell
def _(embedding, mnist, plt, pymde):
    x = embedding[:, 0]
    y = embedding[:, 1]
    ax = pymde.plot(X=embedding, color_by=mnist.attributes["digits"])
    plt.tight_layout()
    return ax, x, y


@app.cell
def _(ax, mo):
    fig = mo.ui.matplotlib(ax)
    fig
    return (fig,)


@app.cell
def _(embedding, fig, x, y):
    # Rows of the embedding covered by the current selection.
    embedding[fig.value.get_mask(x, y)]
    return


@app.cell
def _(fig):
    fig.value if fig.value else "No selection!"
    return
@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Edge-data test

    Points are clustered right at the axes edges. Clicking on tick labels,
    axis labels, or the title should **not** start a selection. Previously
    the click was clamped to the nearest edge, which would select these
    edge points via `get_mask()`.
    """)
    return


@app.cell
def _(np, plt):
    rng = np.random.default_rng(99)
    # Points hugging the four edges of [0, 10] x [0, 10]
    edge_n = 30
    edge_x = np.concatenate([
        rng.uniform(0, 0.3, edge_n),  # left edge
        rng.uniform(9.7, 10, edge_n),  # right edge
        rng.uniform(0, 10, edge_n),  # bottom edge
        rng.uniform(0, 10, edge_n),  # top edge
        rng.uniform(3, 7, edge_n),  # centre (control)
    ])
    edge_y = np.concatenate([
        rng.uniform(0, 10, edge_n),  # left edge
        rng.uniform(0, 10, edge_n),  # right edge
        rng.uniform(0, 0.3, edge_n),  # bottom edge
        rng.uniform(9.7, 10, edge_n),  # top edge
        rng.uniform(3, 7, edge_n),  # centre (control)
    ])
    plt.figure()
    plt.scatter(edge_x, edge_y, s=20, c=edge_y, cmap="viridis")
    plt.colorbar(label="y value")
    plt.xlim(0, 10)
    plt.ylim(0, 10)
    plt.xlabel("X axis (click here should NOT select)")
    plt.ylabel("Y axis (click here should NOT select)")
    plt.title("Title area (click here should NOT select)")
    edge_ax = plt.gca()
    return edge_ax, edge_x, edge_y


@app.cell
def _(edge_ax, mo):
    edge_fig = mo.ui.matplotlib(edge_ax)
    edge_fig
    return (edge_fig,)


@app.cell
def _(edge_fig, edge_x, edge_y):
    # Count how many edge points the current selection covers.
    _m = edge_fig.value.get_mask(edge_x, edge_y)
    f"Selected {_m.sum()} / {len(edge_x)} points"
    return


@app.cell
def _(edge_fig):
    edge_fig.value if edge_fig.value else "No selection!"
    return
@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Log-scale axes test
    """)
    return


@app.cell
def _(np, plt):
    # Exponentially distributed data for log-scale testing
    np.random.seed(42)
    log_x = np.random.lognormal(mean=2, sigma=1, size=500)
    log_y = np.random.lognormal(mean=3, sigma=0.8, size=500)
    plt.figure()
    plt.scatter(log_x, log_y, s=10, alpha=0.6)
    plt.yscale("log")
    plt.xlabel("X (linear scale)")
    plt.ylabel("Y (log scale)")
    plt.title("Log-scale scatter")
    log_ax = plt.gca()
    return log_ax, log_x, log_y


@app.cell
def _(log_ax, mo):
    log_fig = mo.ui.matplotlib(log_ax)
    log_fig
    return (log_fig,)


@app.cell
def _(log_fig, log_x, log_y, np):
    # Selected points as an (n, 2) array.
    _m = log_fig.value.get_mask(log_x, log_y)
    log_x_sel, log_y_sel = log_x[_m], log_y[_m]
    np.column_stack([log_x_sel, log_y_sel])
    return


@app.cell
def _(log_fig):
    log_fig.value if log_fig.value else "No selection!"
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ui_matplotlib.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_plugins/ui/_impl/test_ui_mpl.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations

import io
from typing import Any

import pytest

# Skip cleanly when matplotlib/numpy are missing, and select the headless
# Agg backend BEFORE pyplot is imported. Importing pyplot first both defeats
# importorskip (the hard import would raise at collection time) and lets
# matplotlib auto-select a GUI backend on some platforms.
mpl = pytest.importorskip("matplotlib")
mpl.use("Agg")  # Non-interactive backend for testing
np = pytest.importorskip("numpy")

import matplotlib.pyplot as plt  # noqa: E402

from marimo._plugins.ui._impl.mpl import (  # noqa: E402
    BoxSelection,
    EmptySelection,
    LassoSelection,
    _figure_pixel_size,
    _figure_to_base64,
    matplotlib,
)

# ============================================================================
# Constructor tests
# ============================================================================
def _make_scatter_ax() -> Any:
    """Build a small scatter plot and return its Axes.

    The figure is closed immediately to avoid accumulating open figures
    across tests; the returned Axes still references it via get_figure().
    """
    fig, ax = plt.subplots()
    ax.scatter([1, 2, 3, 4, 5], [2, 4, 1, 5, 3])
    plt.close(fig)
    return ax
def test_basic_construction() -> None:
    """A fresh element starts with an empty (falsy) selection."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    assert fig is not None
    assert isinstance(fig.value, EmptySelection)
    assert not fig.value


def test_construction_no_figure_raises() -> None:
    """Construction still works for axes whose figure has been closed."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter([1, 2, 3], [4, 5, 6])
    # Remove ax from figure to simulate detached axes
    # Actually, axes are always attached. Test empty figure differently.
    plt.close(fig)
    # We can still construct since ax.get_figure() returns the figure
    fig = matplotlib(ax)
    assert fig is not None


def test_unsupported_scale_raises() -> None:
    """Scales other than linear/log on x are rejected with a clear error."""
    fig, ax = plt.subplots()
    ax.scatter([1, 2, 3], [4, 5, 6])
    ax.set_xscale("symlog")
    plt.close(fig)
    with pytest.raises(ValueError, match="Unsupported x-axis scale"):
        matplotlib(ax)


def test_unsupported_yscale_raises() -> None:
    """Scales other than linear/log on y are rejected with a clear error."""
    fig, ax = plt.subplots()
    ax.scatter([1, 2, 3], [4, 5, 6])
    ax.set_yscale("logit")
    plt.close(fig)
    with pytest.raises(ValueError, match="Unsupported y-axis scale"):
        matplotlib(ax)


def test_construction_args() -> None:
    """The frontend receives the rendered chart plus geometry metadata."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    args = fig._component_args
    assert "chart-base64" in args
    assert "x-bounds" in args
    assert "y-bounds" in args
    assert "axes-pixel-bounds" in args
    assert "width" in args
    assert "height" in args
    assert isinstance(args["chart-base64"], str)
    assert args["chart-base64"].startswith("data:image/png;base64,")
    assert len(args["x-bounds"]) == 2
    assert len(args["y-bounds"]) == 2
    assert len(args["axes-pixel-bounds"]) == 4
    assert args["width"] > 0
    assert args["height"] > 0
    # Style args should NOT be present (removed from public API)
    assert "selection-color" not in args
    assert "selection-opacity" not in args
    assert "stroke-width" not in args
# ============================================================================
# _convert_value tests
# ============================================================================
def test_convert_value_empty() -> None:
    """An empty payload maps to EmptySelection."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    result = fig._convert_value({})
    assert isinstance(result, EmptySelection)
    assert not result


def test_convert_value_no_selection() -> None:
    """`has_selection: False` maps to EmptySelection."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    result = fig._convert_value({"has_selection": False})
    assert isinstance(result, EmptySelection)
    assert not result


def test_convert_value_box() -> None:
    """A box payload maps onto a BoxSelection with matching bounds."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    value = {
        "type": "box",
        "has_selection": True,
        "data": {
            "x_min": 1.0,
            "x_max": 3.0,
            "y_min": 2.0,
            "y_max": 4.0,
        },
    }
    result = fig._convert_value(value)
    assert isinstance(result, BoxSelection)
    assert result.x_min == 1.0
    assert result.x_max == 3.0
    assert result.y_min == 2.0
    assert result.y_max == 4.0


def test_convert_value_lasso() -> None:
    """A lasso payload maps onto a LassoSelection with tuple vertices."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    value = {
        "type": "lasso",
        "has_selection": True,
        "data": [[1.0, 2.0], [3.0, 4.0], [5.0, 2.0]],
    }
    result = fig._convert_value(value)
    assert isinstance(result, LassoSelection)
    assert result.vertices == ((1.0, 2.0), (3.0, 4.0), (5.0, 2.0))
# ============================================================================
# EmptySelection tests
# ============================================================================
def test_empty_selection_is_falsy() -> None:
    sel = EmptySelection()
    assert not sel
    assert bool(sel) is False
def test_empty_selection_get_mask() -> None:
    """An empty selection selects no points but keeps the input length."""
    sel = EmptySelection()
    x = np.array([1, 2, 3, 4, 5])
    y = np.array([2, 4, 1, 5, 3])
    mask = sel.get_mask(x, y)
    assert mask.sum() == 0
    assert len(mask) == 5
def test_empty_selection_frozen() -> None:
    sel = EmptySelection()
    with pytest.raises(AttributeError):
        sel.foo = 1  # type: ignore[attr-defined]
def test_value_get_mask_before_selection() -> None:
    """fig.value.get_mask() should work even with no selection."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    x = np.array([1, 2, 3])
    y = np.array([4, 5, 6])
    mask = fig.value.get_mask(x, y)
    assert mask.sum() == 0
# ============================================================================
# BoxSelection dataclass tests
# ============================================================================
def test_box_selection_frozen() -> None:
    sel = BoxSelection(x_min=1.0, x_max=3.0, y_min=2.0, y_max=4.0)
    with pytest.raises(AttributeError):
        sel.x_min = 5.0  # type: ignore[misc]
def test_box_selection_get_mask() -> None:
    """Points strictly inside the box are selected; others are not.

    Uses truthiness rather than ``mask[1] is np.True_``: identity of
    numpy bool scalars is an implementation detail, not a contract.
    """
    sel = BoxSelection(x_min=1.5, x_max=3.5, y_min=1.5, y_max=4.5)
    x = np.array([1, 2, 3, 4, 5])
    y = np.array([2, 4, 1, 5, 3])
    mask = sel.get_mask(x, y)
    assert bool(mask[1])  # (2, 4) in range
    assert not mask[2]  # (3, 1) y out of range
    assert not mask[0]  # (1, 2) x out of range
# ============================================================================
# LassoSelection dataclass tests
# ============================================================================
def test_lasso_selection_frozen() -> None:
    sel = LassoSelection(vertices=((0.0, 0.0), (4.0, 0.0), (2.0, 3.0)))
    with pytest.raises(AttributeError):
        sel.vertices = ()  # type: ignore[misc]
def test_lasso_selection_get_mask() -> None:
    # Triangle with corners (0,0), (10,0), (5,10).
    sel = LassoSelection(vertices=((0.0, 0.0), (10.0, 0.0), (5.0, 10.0)))
    x = np.array([5.0, 0.0, 10.0, 5.0])
    y = np.array([1.0, 5.0, 5.0, 5.0])
    mask = sel.get_mask(x, y)
    assert mask[0]  # (5, 1) inside
    assert not mask[1]  # (0, 5) outside
    assert not mask[2]  # (10, 5) outside
    assert mask[3]  # (5, 5) inside
# ============================================================================
# Helper function tests
# ============================================================================
def test_figure_pixel_size() -> None:
    """Pixel size of a default figure is strictly positive."""
    fig, ax = plt.subplots()
    ax.scatter([1, 2, 3], [4, 5, 6])
    plt.close(fig)
    w, h = _figure_pixel_size(fig)
    assert w > 0
    assert h > 0
def test_figure_to_base64() -> None:
    """Rendering yields a non-trivial PNG data URI."""
    fig, ax = plt.subplots()
    ax.scatter([1, 2, 3], [4, 5, 6])
    plt.close(fig)
    result = _figure_to_base64(fig)
    assert result.startswith("data:image/png;base64,")
    assert len(result) > 50
# ============================================================================
# Layout tests (axes pixel bounds under constrained/tight layout)
# ============================================================================
def test_axes_bounds_match_constrained_layout() -> None:
    """Axes bounds should reflect post-layout position with constrained_layout."""
    fig, ax = plt.subplots(constrained_layout=True)
    ax.scatter([1, 2, 3], [4, 5, 6])
    ax.set_xlabel("X label")
    ax.set_ylabel("Y label")
    ax.set_title("Title")
    plt.close(fig)
    widget = matplotlib(ax)
    args = widget._component_args
    # Render independently with the same settings to get expected bounds
    fig2 = ax.get_figure()
    # savefig into a throwaway buffer forces the layout engine to run,
    # so ax.get_position() below reports the final (post-layout) bbox.
    fig2.savefig(
        io.BytesIO(), format="png", dpi=fig2.get_dpi(), bbox_inches=None
    )
    bbox = ax.get_position()
    w, h = _figure_pixel_size(fig2)
    # bbox is in figure fractions with origin at bottom-left; the
    # (1 - y) terms flip to pixel coordinates (origin top-left).
    expected = [
        bbox.x0 * w,
        (1 - bbox.y1) * h,
        bbox.x1 * w,
        (1 - bbox.y0) * h,
    ]
    for actual, exp in zip(args["axes-pixel-bounds"], expected):
        assert abs(actual - exp) < 1e-6
def test_axes_bounds_match_tight_layout() -> None:
    """Axes bounds should reflect post-layout position with tight_layout."""
    fig, ax = plt.subplots()
    ax.scatter([1, 2, 3], [4, 5, 6])
    ax.set_xlabel("X label")
    ax.set_ylabel("Y label")
    ax.set_title("Title")
    fig.tight_layout()
    plt.close(fig)
    widget = matplotlib(ax)
    args = widget._component_args
    # Render independently with the same settings to get expected bounds
    fig2 = ax.get_figure()
    fig2.savefig(
        io.BytesIO(), format="png", dpi=fig2.get_dpi(), bbox_inches=None
    )
    bbox = ax.get_position()
    w, h = _figure_pixel_size(fig2)
    # Same pixel-space conversion as the constrained_layout test above.
    expected = [
        bbox.x0 * w,
        (1 - bbox.y1) * h,
        bbox.x1 * w,
        (1 - bbox.y0) * h,
    ]
    for actual, exp in zip(args["axes-pixel-bounds"], expected):
        assert abs(actual - exp) < 1e-6
# ============================================================================
# HTML output tests
# ============================================================================
def test_html_contains_tag() -> None:
    """The rendered HTML embeds the custom element tag."""
    ax = _make_scatter_ax()
    fig = matplotlib(ax)
    html = fig.text
    assert "marimo-matplotlib" in html
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/ui/_impl/test_ui_mpl.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/ui/matrix.py | import marimo
__generated_with = "0.19.11"
app = marimo.App(width="medium")
@app.cell
def _():
import marimo as mo
import numpy as np
return mo, np
@app.cell
def _(mo):
matrix = mo.ui.matrix(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
min_value=-5,
max_value=10,
step=0.001,
precision=3,
scientific=True,
label="$I$",
)
matrix
return (matrix,)
@app.cell
def _(matrix):
matrix.value
return
@app.cell
def _(mo, np):
mo.hstack(
[mo.ui.matrix(np.ones(3)), mo.ui.matrix(np.ones((1, 3)))],
justify="start",
gap=2,
)
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/ui/matrix.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_plugins/ui/_impl/matrix.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Final,
)
from marimo._output.rich_help import mddoc
from marimo._plugins.ui._core.ui_element import UIElement
if TYPE_CHECKING:
from numpy.typing import ArrayLike
Numeric = int | float
def _broadcast(
name: str,
param: Any,
rows: int,
cols: int,
) -> list[list[Any]]:
"""Broadcast a scalar, nested list, or array-like to a rows x cols matrix.
*convert* is applied to every cell (e.g. `float` or `bool`).
"""
if hasattr(param, "tolist"):
param = param.tolist()
if not isinstance(param, list):
return [[param] * cols for _ in range(rows)]
if len(param) != rows:
raise ValueError(f"`{name}` has {len(param)} rows but expected {rows}")
for i, row in enumerate(param):
if not isinstance(row, (list, tuple)):
raise ValueError(
f"`{name}` row {i} must be a list, got {type(row)}"
)
if len(row) != cols:
raise ValueError(
f"`{name}` row {i} has {len(row)} columns but expected {cols}"
)
for j, cell in enumerate(row):
if isinstance(cell, (list, tuple)):
raise ValueError(
f"`{name}` must be 2D, but found a nested "
f"sequence at position [{i}][{j}]"
)
return param
def _to_flat_list(
value: list[Numeric] | ArrayLike,
) -> list[Numeric]:
"""Validate and convert input to a flat list of numbers.
Accepts a flat list of numbers or a 1D array-like with `.tolist()`.
Rejects empty, non-1D, or nested inputs.
"""
if hasattr(value, "tolist"):
value = value.tolist()
if not isinstance(value, list):
raise ValueError(
f"`value` must be a list or array-like, got {type(value)}"
)
if not value:
raise ValueError("`value` must be non-empty")
for i, v in enumerate(value):
if isinstance(v, (list, tuple)):
raise ValueError(
f"`value` must be 1D, but element {i} is a {type(v).__name__}"
)
return value
def _1d_to_2d(
name: str,
param: Any,
) -> Any:
"""Convert a 1D list param to a column-vector 2D layout.
Scalars pass through unchanged (they'll be broadcast by `_broadcast`).
A 1D list `[a, b, c]` becomes `[[a], [b], [c]]`.
"""
if hasattr(param, "tolist"):
param = param.tolist()
if not isinstance(param, list):
# Scalar — let _broadcast handle it
return param
# Must be a flat list at this point
for i, v in enumerate(param):
if isinstance(v, (list, tuple)):
raise ValueError(
f"`{name}` must be scalar or 1D, "
f"but element {i} is a {type(v).__name__}"
)
return [[v] for v in param]
def _parse_value(
    value: list[list[Numeric]] | list[Numeric] | ArrayLike,
) -> tuple[list[list[Numeric]], bool]:
    """Parse and validate the initial matrix/vector data.

    Returns ``(data, is_vector)``: *is_vector* is True when the input
    was flat (1D), in which case *data* is the flat list itself.
    """
    if hasattr(value, "tolist"):
        value = value.tolist()
    if not isinstance(value, list):
        raise ValueError(
            f"`value` must be a list of lists or array-like, got {type(value)}"
        )
    if not value:
        raise ValueError("`value` must be non-empty")
    head = value[0]
    if not isinstance(head, (list, tuple)):
        # Flat input: treat the whole thing as a 1D vector.
        return _to_flat_list(value), True  # type: ignore[return-value]
    if not head:
        raise ValueError(
            f"`value` must contain non-empty lists, but row 0 is {head!r}"
        )
    # 2D input: _broadcast performs the full shape validation.
    return _broadcast("value", value, len(value), len(head)), False
def _decimal_places(x: Numeric) -> int:
"""Count decimal places needed in fixed notation."""
if isinstance(x, int) or x == int(x):
return 0
s = repr(x)
if "e" in s or "E" in s:
parts = s.lower().split("e")
exp = int(parts[1])
mantissa_dec = len(parts[0].split(".")[1]) if "." in parts[0] else 0
return max(0, mantissa_dec - exp)
if "." in s:
return len(s.split(".")[1])
return 0
def _mantissa_decimal_places(x: Numeric) -> int:
"""Count decimal places needed in the mantissa for scientific notation.
For example, `0.00153` -> `1.53e-3` -> 2 mantissa places,
while `1e-8` -> `1e-8` -> 0 mantissa places.
"""
if isinstance(x, int):
# Strip trailing zeros: 1234000 -> 1.234e6 -> 3 places
if x == 0:
return 0
s = str(abs(x)).rstrip("0")
return max(0, len(s) - 1)
if x == 0.0:
return 0
# Format with enough mantissa digits, then strip trailing zeros
s = f"{x:.15e}" # e.g. "1.530000000000000e-03"
mantissa = s.split("e")[0].rstrip("0").rstrip(".")
if "." in mantissa:
return len(mantissa.split(".")[1])
return 0
def _infer_precision(
    data: list[list[Numeric]],
    step_val: list[list[Any]],
    scientific: bool,
) -> int:
    """Pick a display precision from the data values and step sizes.

    Counts mantissa decimal places when *scientific* (``0.00153`` needs
    2 for ``1.53e-3``); otherwise counts fixed-notation places
    (``0.00153`` needs 5). Capped at 8 to keep cells readable.
    """
    count = _mantissa_decimal_places if scientific else _decimal_places
    needed = max(
        (
            count(cell)
            for grid in (data, step_val)
            for row in grid
            for cell in row
        ),
        default=0,
    )
    return min(needed, 8)
def _validate_and_build_args(
data: list[list[Numeric]],
*,
min_val: list[list[Numeric]] | None,
max_val: list[list[Numeric]] | None,
step_val: list[list[Any]],
disabled_val: list[list[Any]],
symmetric: bool,
scientific: bool,
precision: int | None,
row_labels: list[str] | None,
column_labels: list[str] | None,
debounce: bool,
) -> dict[str, Any]:
"""Validate matrix parameters and return the args dict for UIElement."""
rows = len(data)
cols = len(data[0])
if precision is None:
precision = _infer_precision(data, step_val, scientific)
# Validate per-cell constraints in a single pass
for i in range(rows):
for j in range(cols):
if step_val[i][j] <= 0:
raise ValueError(
f"`step` must be positive, got {step_val[i][j]} "
f"at position [{i}][{j}]"
)
if min_val is not None and max_val is not None:
if min_val[i][j] >= max_val[i][j]:
raise ValueError(
f"`min_value` ({min_val[i][j]}) must be less "
f"than `max_value` ({max_val[i][j]}) at "
f"position [{i}][{j}]"
)
if min_val is not None and data[i][j] < min_val[i][j]:
raise ValueError(
f"Initial value {data[i][j]} at [{i}][{j}] is "
f"less than min_value {min_val[i][j]}"
)
if max_val is not None and data[i][j] > max_val[i][j]:
raise ValueError(
f"Initial value {data[i][j]} at [{i}][{j}] is "
f"greater than max_value {max_val[i][j]}"
)
# Validate label lengths
if row_labels is not None and len(row_labels) != rows:
raise ValueError(
f"`row_labels` has {len(row_labels)} entries "
f"but matrix has {rows} rows"
)
if column_labels is not None and len(column_labels) != cols:
raise ValueError(
f"`column_labels` has {len(column_labels)} entries "
f"but matrix has {cols} columns"
)
# Validate symmetric requires square matrix with symmetric data
if symmetric:
if rows != cols:
raise ValueError(
f"`symmetric` requires a square matrix, but got {rows}x{cols}"
)
for i in range(rows):
for j in range(i + 1, cols):
if data[i][j] != data[j][i]:
raise ValueError(
f"`symmetric` is True but initial data is not "
f"symmetric: value[{i}][{j}]={data[i][j]} != "
f"value[{j}][{i}]={data[j][i]}"
)
if not isinstance(precision, int) or precision < 0:
raise ValueError(
f"`precision` must be a non-negative integer, got {precision}"
)
return {
"min-value": min_val,
"max-value": max_val,
"step": step_val,
"precision": precision,
"row-labels": row_labels,
"column-labels": column_labels,
"symmetric": symmetric,
"debounce": debounce,
"scientific": scientific,
"disabled": disabled_val,
}
@mddoc
class matrix(
    UIElement[list[list[Numeric]], list[list[Numeric]] | list[Numeric]]
):
    """An interactive matrix/vector editor.

    A matrix UI element in which each entry is a slider: click and drag
    horizontally on an entry to increment or decrement its value. The matrix
    can be configured in many ways, including element-wise bounds, element-wise
    steps, an element-wise disable mask, and symmetry enforcement. These
    configuration values may be any array-like object, including lists,
    NumPy arrays, or torch Tensors.

    Supports both 2D (matrix) and 1D (vector) inputs. When a flat list is
    passed, the element displays as a column vector and `.value` returns a flat
    list.

    Examples:
        Basic 2D matrix:

        ```python
        mat = mo.ui.matrix([[1, 0], [0, 1]])
        mat
        ```

        Access the value in another cell with

        ```python
        mat.value  # [[1, 0], [0, 1]]
        ```

        Column vector (1D input):

        ```python
        v = mo.ui.matrix([1, 2, 3])
        v.value  # [1, 2, 3]
        ```

        Row vector:

        ```python
        v = mo.ui.matrix([[1, 2, 3]])
        v.value  # [[1, 2, 3]]
        ```

        You can specify bounds and a step size as well:

        ```python
        mat = mo.ui.matrix(
            [[1, 0], [0, 1]],
            min_value=-10,
            max_value=10,
            step=0.5,
        )
        ```

        To disable editing of some or all entries, use the disabled argument:

        ```python
        mat = mo.ui.matrix(
            [[1, 0], [0, 1]],
            # Disable editing the diagonal values
            disabled=[[True, False], [False, True]],
        )
        ```

        The value, bounds, step, and disabled arguments can optionally be NumPy
        arrays, interpreted elementwise.

        ```python
        import numpy as np

        mat = mo.ui.matrix(np.eye(2))
        mat
        ```

        ```python
        np.asarray(mat.value)
        ```

    Attributes:
        value (list[list[Numeric]] | list[Numeric]): The current matrix
            as a nested list, or a flat list when created with 1D input.
            Use `np.asarray(matrix.value)` to convert to a numpy array.

    Args:
        value (list[list[Numeric]] | list[Numeric] | ArrayLike): Initial
            data. A nested list or 2D array creates a matrix; a flat list
            or 1D array creates a column vector. Rows and columns are
            inferred from the shape.
        min_value (Numeric | list[list[Numeric]] | list[Numeric] | ArrayLike | None, optional):
            Minimum allowed value. A scalar is broadcast to all cells; a
            list or numpy array sets per-element bounds. For 1D input,
            accepts a flat list. None means unbounded. Defaults to None.
        max_value (Numeric | list[list[Numeric]] | list[Numeric] | ArrayLike | None, optional):
            Maximum allowed value. A scalar is broadcast to all cells; a
            list or numpy array sets per-element bounds. For 1D input,
            accepts a flat list. None means unbounded. Defaults to None.
        step (Numeric | list[list[Numeric]] | list[Numeric] | ArrayLike, optional):
            Drag increment. A scalar is broadcast to all cells; a list
            or numpy array sets per-element step sizes. For 1D input,
            accepts a flat list. Defaults to 1.0.
        disabled (bool | list[list[bool]] | list[bool] | ArrayLike, optional):
            Whether cells are disabled. A scalar bool is broadcast to all
            cells; a list or numpy bool array sets a per-element mask.
            For 1D input, accepts a flat list. Defaults to False.
        symmetric (bool, optional): If True, editing cell `[i][j]` also
            updates cell `[j][i]`. Requires a square matrix. Defaults to
            False.
        scientific (bool, optional): If True, display values in scientific
            notation (e.g., `1.0e-4`). Defaults to False.
        precision (int | None, optional): Number of decimal places
            displayed. When None, inferred from the data values and step
            sizes. Defaults to None.
        row_labels (list[str] | None, optional): Labels for each row.
            Defaults to None.
        column_labels (list[str] | None, optional): Labels for each column.
            Defaults to None.
        debounce (bool, optional): If True, value updates are only sent
            to the backend on mouse-up (pointer release) instead of on
            every drag movement. Useful when the matrix drives expensive
            downstream computations. Defaults to False.
        label (str, optional): Markdown/LaTeX label for the element.
            Defaults to "".
    """
    # Tag name of the frontend custom element.
    _name: Final[str] = "marimo-matrix"
    def __init__(
        self,
        value: list[list[Numeric]] | list[Numeric] | ArrayLike,
        *,
        min_value: (
            Numeric | list[list[Numeric]] | list[Numeric] | ArrayLike | None
        ) = None,
        max_value: (
            Numeric | list[list[Numeric]] | list[Numeric] | ArrayLike | None
        ) = None,
        step: (
            Numeric | list[list[Numeric]] | list[Numeric] | ArrayLike
        ) = 1.0,
        disabled: bool | list[list[bool]] | list[bool] | ArrayLike = False,
        symmetric: bool = False,
        scientific: bool = False,
        precision: int | None = None,
        row_labels: list[str] | None = None,
        column_labels: list[str] | None = None,
        debounce: bool = False,
        label: str = "",
    ) -> None:
        parsed, is_vector = _parse_value(value)
        if is_vector:
            # --- 1D (vector) path: always a column vector ---
            flat: list[Numeric] = parsed  # type: ignore[assignment]
            if symmetric:
                raise ValueError(
                    "`symmetric` is not supported for 1D (vector) input"
                )
            # Internally everything is stored as an Nx1 matrix; the flat
            # shape is restored on output by _convert_value.
            data: list[list[Numeric]] = [[v] for v in flat]
            rows = len(data)
            cols = 1
            # Convert 1D params -> 2D column layout
            min_2d = _1d_to_2d("min_value", min_value)
            max_2d = _1d_to_2d("max_value", max_value)
            step_2d = _1d_to_2d("step", step)
            disabled_2d = _1d_to_2d("disabled", disabled)
            min_val = (
                _broadcast("min_value", min_2d, rows, cols)
                if min_value is not None
                else None
            )
            max_val = (
                _broadcast("max_value", max_2d, rows, cols)
                if max_value is not None
                else None
            )
            step_val = _broadcast("step", step_2d, rows, cols)
            disabled_val = _broadcast("disabled", disabled_2d, rows, cols)
        else:
            # --- 2D (matrix) path ---
            data = parsed  # type: ignore[assignment]
            rows = len(data)
            cols = len(data[0])
            min_val = (
                _broadcast("min_value", min_value, rows, cols)
                if min_value is not None
                else None
            )
            max_val = (
                _broadcast("max_value", max_value, rows, cols)
                if max_value is not None
                else None
            )
            step_val = _broadcast("step", step, rows, cols)
            disabled_val = _broadcast("disabled", disabled, rows, cols)
        # Remembered so _convert_value can flatten Nx1 output back to 1D.
        self._is_vector = is_vector
        args = _validate_and_build_args(
            data,
            min_val=min_val,
            max_val=max_val,
            step_val=step_val,
            disabled_val=disabled_val,
            symmetric=symmetric,
            scientific=scientific,
            precision=precision,
            row_labels=row_labels,
            column_labels=column_labels,
            debounce=debounce,
        )
        super().__init__(
            component_name=matrix._name,
            initial_value=data,
            label=label,
            args=args,
            on_change=None,
        )
    def _convert_value(
        self, value: list[list[Numeric]]
    ) -> list[list[Numeric]] | list[Numeric]:
        """Flatten the internal Nx1 grid back to 1D for vector input."""
        if self._is_vector:
            return [cell for row in value for cell in row]
        return value
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_plugins/ui/_impl/matrix.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_smoke_tests/matrix.py | # Copyright 2026 Marimo. All rights reserved.
import marimo
__generated_with = "0.19.11"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Basic matrix (2x2 identity)
""")
return
@app.cell
def _(mo):
identity = mo.ui.matrix([[1, 0], [0, 1]], label="Identity")
identity
return (identity,)
@app.cell
def _(identity):
identity.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## With bounds, step, and precision
""")
return
@app.cell
def _(mo):
bounded = mo.ui.matrix(
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
min_value=-5,
max_value=5,
step=0.25,
precision=2,
label="Bounded ([-5, 5], step=0.25)",
)
bounded
return (bounded,)
@app.cell
def _(bounded):
bounded.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## With row and column labels
""")
return
@app.cell
def _(mo):
labeled = mo.ui.matrix(
[[1, 2, 3], [4, 5, 6]],
row_labels=["x", "y"],
column_labels=["a", "b", "c"],
label="Labeled matrix",
)
labeled
return (labeled,)
@app.cell
def _(labeled):
labeled.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Symmetric mode (square matrix)
""")
return
@app.cell
def _(mo):
sym = mo.ui.matrix(
[[1, 0.5, 0], [0.5, 1, 0.3], [0, 0.3, 1]],
min_value=-1,
max_value=1,
step=0.1,
precision=1,
symmetric=True,
label="Symmetric (drag [i,j] to also update [j,i])",
)
sym
return (sym,)
@app.cell
def _(sym):
sym.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Per-element disabled mask
""")
return
@app.cell
def _(mo):
masked = mo.ui.matrix(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
disabled=[
[True, False, False],
[False, True, False],
[False, False, True],
],
label="Diagonal locked",
)
masked
return (masked,)
@app.cell
def _(masked):
masked.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## NumPy interop
""")
return
@app.cell
def _(mo):
import numpy as np
np_mat = mo.ui.matrix(
np.eye(3),
step=0.1,
precision=1,
label="$I$",
)
np_mat
return np, np_mat
@app.cell
def _(np, np_mat):
arr = np.asarray(np_mat.value)
arr
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## NumPy arrays for bounds and step
""")
return
@app.cell
def _(mo, np):
np_bounds = mo.ui.matrix(
np.zeros((3, 3)),
min_value=np.full((3, 3), -10.0),
max_value=np.full((3, 3), 10.0),
step=np.full((3, 3), 0.5),
precision=1,
label="np bounds and step",
)
np_bounds
return (np_bounds,)
@app.cell
def _(np, np_bounds):
np.asarray(np_bounds.value)
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## NumPy per-element step (varies by cell)
""")
return
@app.cell
def _(mo, np):
step_matrix = np.array([[0.1, 0.5, 1.0], [1.0, 0.5, 0.1], [0.01, 0.01, 0.01]])
varying_step = mo.ui.matrix(
np.zeros((3, 3)),
step=step_matrix,
precision=2,
label="Per-element step",
column_labels=["step=col", "step=mid", "step=fine"],
)
varying_step
return (varying_step,)
@app.cell
def _(np, varying_step):
np.asarray(varying_step.value)
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## NumPy disabled mask
""")
return
@app.cell
def _(mo, np):
mask = np.eye(4, dtype=bool)
np_disabled = mo.ui.matrix(
np.arange(16, dtype=float).reshape(4, 4),
disabled=mask,
precision=0,
label=r"$\text{diag}(0, 5, 10, 15) + X$",
)
np_disabled
return (np_disabled,)
@app.cell
def _(np, np_disabled):
np.asarray(np_disabled.value)
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## NumPy: asymmetric bounds (different per cell)
""")
return
@app.cell
def _(mo, np):
mins = np.array([[0, -5], [-10, -1]])
maxs = np.array([[10, 5], [10, 1]])
asym = mo.ui.matrix(
np.array([[5.0, 0.0], [0.0, 0.0]]),
min_value=mins,
max_value=maxs,
step=0.5,
precision=1,
row_labels=["row0", "row1"],
column_labels=["[0,10]|[-5,5]", "[-10,10]|[-1,1]"],
label="Per-cell bounds from numpy",
)
asym
return (asym,)
@app.cell
def _(asym, np):
np.asarray(asym.value)
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Round-trip: numpy in, numpy out
""")
return
@app.cell
def _(mo, np):
original = np.random.default_rng(42).uniform(-1, 1, size=(3, 3)).round(2)
roundtrip = mo.ui.matrix(
original,
precision=2,
step=0.01,
label="Random matrix (edit and check below)",
)
roundtrip
return original, roundtrip
@app.cell
def _(np, original, roundtrip):
current = np.asarray(roundtrip.value)
mo_result = {
"original": original,
"current": current,
"changed": not np.array_equal(original, current),
"dtype": current.dtype,
"shape": current.shape,
}
mo_result
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Scientific notation
""")
return
@app.cell
def _(mo):
sci = mo.ui.matrix(
[[0.00153, 1234567], [1e-8, -0.042]],
scientific=True,
precision=2,
step=1e-4,
label="Scientific notation",
)
sci
return (sci,)
@app.cell
def _(sci):
sci.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Debounce
""")
return
@app.cell
def _(mo, np):
db = mo.ui.matrix(
np.eye(3),
precision=2,
debounce=True,
)
db
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Column vector
""")
return
@app.cell
def _(mo):
    # Display the column-vector widget itself. (A paste artifact,
    # `;vecnp`, previously made this cell display the unrelated NumPy
    # vector element from the cell below instead of `vec`.)
    vec = mo.ui.matrix([1, 2, 3])
    vec
    return (vec,)
@app.cell
def _(vec):
vec.value
return
@app.cell
def _(mo, np):
vecnp = mo.ui.matrix(np.ones(3)); vecnp
return (vecnp,)
@app.cell
def _(vecnp):
vecnp.value
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/matrix.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/vector.py | # Copyright 2026 Marimo. All rights reserved.
import marimo
__generated_with = "0.19.11"
app = marimo.App()
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Basic column vector (1D matrix)
""")
return
@app.cell
def _(mo):
col = mo.ui.matrix([1, 2, 3], precision=2, label="Column vector")
col
return (col,)
@app.cell
def _(col):
col.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## With bounds and step
""")
return
@app.cell
def _(mo):
bounded = mo.ui.matrix(
[0, 0, 0, 0],
min_value=-5,
max_value=5,
step=0.25,
precision=2,
label="Bounded ([-5, 5], step=0.25)",
)
bounded
return (bounded,)
@app.cell
def _(bounded):
bounded.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## With row labels
""")
return
@app.cell
def _(mo):
labeled = mo.ui.matrix(
[1.0, 0.5, 0.0],
row_labels=["x", "y", "z"],
step=0.1,
precision=1,
label="$v$",
)
labeled
return (labeled,)
@app.cell
def _(labeled):
labeled.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## NumPy interop
""")
return
@app.cell
def _(mo):
import numpy as np
np_vec = mo.ui.matrix(
np.zeros(5),
step=0.1,
precision=1,
label=r"$\vec{0}$",
)
np_vec
return np, np_vec
@app.cell
def _(np, np_vec):
np.asarray(np_vec.value)
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Scientific notation
""")
return
@app.cell
def _(mo):
sci = mo.ui.matrix(
[0.00153, 1234567, 1e-8, -0.042],
scientific=True,
precision=2,
step=1e-4,
label="Scientific notation",
)
sci
return (sci,)
@app.cell
def _(sci):
sci.value
return
@app.cell(hide_code=True)
def _(mo):
mo.md("""
## Debounce
""")
return
@app.cell
def _(mo):
db = mo.ui.matrix(
[1, 2, 3],
precision=2,
debounce=True,
label="Debounced",
)
db
return (db,)
@app.cell
def _(db):
db.value
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/vector.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_plugins/ui/_impl/test_matrix.py | from __future__ import annotations
import pytest
from marimo._plugins import ui
# =========================================================================
# 2D matrix tests (original)
# =========================================================================
def test_matrix_basic():
    # NOTE(review): 1 == 1.0 in Python, so these asserts pass whether or
    # not the widget coerces ints to floats — the widget stores the data
    # as given (no conversion happens in _parse_value/_broadcast).
    m = ui.matrix([[1, 2], [3, 4]])
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
def test_matrix_debounce():
    m = ui.matrix([[1, 2], [3, 4]], debounce=True)
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
def test_matrix_single_cell():
    m = ui.matrix([[5]])
    assert m.value == [[5.0]]
def test_matrix_with_bounds():
    m = ui.matrix(
        [[0, 0], [0, 0]],
        min_value=-10,
        max_value=10,
        step=0.5,
        precision=2,
    )
    assert m.value == [[0.0, 0.0], [0.0, 0.0]]
def test_matrix_with_per_element_bounds():
    m = ui.matrix(
        [[1, 2], [3, 4]],
        min_value=[[0, 1], [2, 3]],
        max_value=[[5, 6], [7, 8]],
    )
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
def test_matrix_with_labels():
    m = ui.matrix(
        [[1, 2], [3, 4]],
        row_labels=["r1", "r2"],
        column_labels=["c1", "c2"],
    )
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
def test_matrix_symmetric():
    m = ui.matrix([[1, 2], [2, 1]], symmetric=True)
    assert m.value == [[1.0, 2.0], [2.0, 1.0]]
def test_matrix_convert_value():
    # _convert_value is a passthrough for 2D (matrix) input.
    m = ui.matrix([[1, 2], [3, 4]])
    result = m._convert_value([[5, 6], [7, 8]])
    assert result == [[5, 6], [7, 8]]
def test_matrix_update():
    m = ui.matrix([[1, 2], [3, 4]])
    m._update([[10, 20], [30, 40]])
    assert m.value == [[10, 20], [30, 40]]
def test_matrix_disabled_scalar():
    m = ui.matrix([[1, 2], [3, 4]], disabled=True)
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
def test_matrix_disabled_per_element():
    m = ui.matrix(
        [[1, 2], [3, 4]],
        disabled=[[True, False], [False, True]],
    )
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
# --- 2D validation error tests ---
# Each test pins a substring of the ValueError message; keep the match
# patterns in sync with the messages in _impl/matrix.py.
def test_matrix_empty_raises():
    with pytest.raises(ValueError, match="non-empty"):
        ui.matrix([])
def test_matrix_non_list_raises():
    with pytest.raises(ValueError, match="list of lists"):
        ui.matrix("not a matrix")
def test_matrix_inconsistent_rows_raises():
    with pytest.raises(ValueError, match="columns but expected"):
        ui.matrix([[1, 2], [3]])
def test_matrix_min_ge_max_raises():
    with pytest.raises(ValueError, match="less than"):
        ui.matrix([[5]], min_value=10, max_value=5)
def test_matrix_min_eq_max_raises():
    with pytest.raises(ValueError, match="less than"):
        ui.matrix([[5]], min_value=5, max_value=5)
def test_matrix_value_below_min_raises():
    with pytest.raises(ValueError, match="less than min_value"):
        ui.matrix([[0]], min_value=1)
def test_matrix_value_above_max_raises():
    with pytest.raises(ValueError, match="greater than max_value"):
        ui.matrix([[10]], max_value=5)
def test_matrix_row_labels_mismatch_raises():
    with pytest.raises(ValueError, match="row_labels"):
        ui.matrix([[1, 2], [3, 4]], row_labels=["a"])
def test_matrix_column_labels_mismatch_raises():
    with pytest.raises(ValueError, match="column_labels"):
        ui.matrix([[1, 2], [3, 4]], column_labels=["a", "b", "c"])
def test_matrix_symmetric_non_square_raises():
    with pytest.raises(ValueError, match="square"):
        ui.matrix([[1, 2, 3], [4, 5, 6]], symmetric=True)
def test_matrix_bounds_shape_mismatch_raises():
    with pytest.raises(ValueError, match="rows"):
        ui.matrix([[1, 2]], min_value=[[0, 0], [0, 0]])
def test_matrix_negative_precision_raises():
    with pytest.raises(ValueError, match="precision"):
        ui.matrix([[1]], precision=-1)
def test_matrix_scientific():
    m = ui.matrix([[0.001, 1000]], scientific=True, precision=2)
    assert m.value == [[0.001, 1000.0]]
def test_matrix_numpy_array_like():
    """Test that array-like objects with .tolist() are accepted."""
    class FakeArray:
        def tolist(self):
            return [[1.0, 2.0], [3.0, 4.0]]
    m = ui.matrix(FakeArray())
    assert m.value == [[1.0, 2.0], [3.0, 4.0]]
def test_matrix_empty_row_raises():
    with pytest.raises(ValueError, match="non-empty"):
        ui.matrix([[]])
def test_matrix_step_zero_raises():
    with pytest.raises(ValueError, match="step.*positive"):
        ui.matrix([[1]], step=0)
def test_matrix_step_negative_raises():
    with pytest.raises(ValueError, match="step.*positive"):
        ui.matrix([[1]], step=-0.5)
def test_matrix_step_per_element_zero_raises():
    with pytest.raises(ValueError, match="step.*positive"):
        ui.matrix([[1, 2]], step=[[1, 0]])
def test_matrix_3d_value_raises():
    """Test that 3D input produces a clear error, not a confusing TypeError."""
    class Fake3D:
        def tolist(self):
            return [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
    with pytest.raises(ValueError, match="2D"):
        ui.matrix(Fake3D())
def test_matrix_3d_param_raises():
    """Test that 3D input for a broadcast param raises clearly."""
    class Fake3D:
        def tolist(self):
            return [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
    with pytest.raises(ValueError, match="2D"):
        ui.matrix([[1, 2], [3, 4]], min_value=Fake3D())
def test_matrix_symmetric_asymmetric_data_raises():
    with pytest.raises(ValueError, match="not symmetric"):
        ui.matrix([[1, 2], [999, 1]], symmetric=True)
def test_matrix_symmetric_valid():
    m = ui.matrix([[1, 0.5], [0.5, 1]], symmetric=True)
    assert m.value == [[1.0, 0.5], [0.5, 1.0]]
# --- precision inference tests ---
# These inspect the internal _component_args to verify how the display
# precision is inferred from data, step, and scientific mode.


def test_matrix_precision_default_integers():
    """Integer data with default step=1 -> precision 0."""
    m = ui.matrix([[1, 2], [3, 4]])
    assert m._component_args["precision"] == 0


def test_matrix_precision_default_float_step():
    """Float step should drive inferred precision."""
    m = ui.matrix([[0, 0]], step=0.25)
    assert m._component_args["precision"] == 2


def test_matrix_precision_default_float_data():
    """Float data values should drive inferred precision."""
    m = ui.matrix([[1.5, 2.33]])
    assert m._component_args["precision"] == 2


def test_matrix_precision_explicit():
    """Explicit precision should override auto-inference."""
    m = ui.matrix([[1, 2]], precision=5)
    assert m._component_args["precision"] == 5


def test_matrix_precision_scientific_small():
    """Scientific notation: 1e-8 needs 0 mantissa places, not 8."""
    m = ui.matrix([[1e-8]], scientific=True)
    assert m._component_args["precision"] == 0


def test_matrix_precision_scientific_mixed():
    """Scientific notation: 0.00153 -> 1.53e-3 -> 2 mantissa places."""
    m = ui.matrix([[0.00153, 1e-8]], scientific=True)
    assert m._component_args["precision"] == 2


def test_matrix_precision_scientific_step():
    """Scientific notation: step mantissa drives precision."""
    m = ui.matrix([[0]], step=2.5e-3, scientific=True)
    # 2.5e-3 -> mantissa 2.5 -> 1 decimal place
    assert m._component_args["precision"] == 1
# =========================================================================
# 1D (vector) tests
# =========================================================================
# A 1D input is rendered as a column vector; the public value stays a
# flat list even though the frontend works with a 2D representation.


def test_matrix_1d_column():
    """1D input creates a column vector; value is a flat list."""
    m = ui.matrix([1, 2, 3])
    assert m.value == [1, 2, 3]


def test_matrix_1d_single_element():
    m = ui.matrix([5])
    assert m.value == [5]


def test_matrix_1d_scalar_bounds():
    m = ui.matrix([0, 0, 0], min_value=-10, max_value=10, step=0.5)
    assert m.value == [0, 0, 0]


def test_matrix_1d_per_element_bounds():
    m = ui.matrix([1, 2, 3], min_value=[0, 1, 2], max_value=[5, 6, 7])
    assert m.value == [1, 2, 3]


def test_matrix_1d_per_element_step():
    m = ui.matrix([0, 0], step=[0.1, 0.5])
    assert m.value == [0, 0]


def test_matrix_1d_per_element_disabled():
    m = ui.matrix([1, 2, 3], disabled=[True, False, True])
    assert m.value == [1, 2, 3]


def test_matrix_1d_scalar_disabled():
    m = ui.matrix([1, 2], disabled=True)
    assert m.value == [1, 2]


def test_matrix_1d_row_labels():
    """For column vector, row_labels are set."""
    m = ui.matrix([1, 2, 3], row_labels=["x", "y", "z"])
    assert m._component_args["row-labels"] == ["x", "y", "z"]
    assert m._component_args["column-labels"] is None


def test_matrix_1d_convert_value():
    """_convert_value flattens 2D back to 1D for column vector."""
    m = ui.matrix([1, 2, 3])
    result = m._convert_value([[10], [20], [30]])
    assert result == [10, 20, 30]


def test_matrix_1d_update():
    # Frontend sends a 2D column; value should come back flat.
    m = ui.matrix([1, 2, 3])
    m._update([[10], [20], [30]])
    assert m.value == [10, 20, 30]


def test_matrix_1d_scientific():
    m = ui.matrix([0.001, 1000], scientific=True, precision=2)
    assert m.value == [0.001, 1000]


def test_matrix_1d_debounce():
    m = ui.matrix([1, 2], debounce=True)
    assert m._component_args["debounce"] is True


def test_matrix_1d_precision_explicit():
    m = ui.matrix([1.0], precision=5)
    assert m._component_args["precision"] == 5


def test_matrix_1d_precision_default_integers():
    """Integer data with default step=1 -> precision 0."""
    m = ui.matrix([1, 2, 3])
    assert m._component_args["precision"] == 0


def test_matrix_1d_precision_default_float_step():
    """Float step should drive inferred precision."""
    m = ui.matrix([0, 0], step=0.1)
    assert m._component_args["precision"] == 1


def test_matrix_1d_precision_default_float_data():
    """Float data values should drive inferred precision."""
    m = ui.matrix([1.5, 2.333])
    assert m._component_args["precision"] == 3


def test_matrix_1d_precision_scientific():
    """Scientific notation: 1e-8 needs 0 mantissa places, not 8."""
    m = ui.matrix([1e-8, 0.00153], scientific=True)
    assert m._component_args["precision"] == 2


def test_matrix_1d_label():
    m = ui.matrix([1, 2], label="test label")
    assert m.value == [1, 2]


def test_matrix_1d_numpy_array():
    """Test that 1D array-like objects with .tolist() are accepted."""

    class FakeArray1D:
        def tolist(self):
            return [1.0, 2.0, 3.0]

    m = ui.matrix(FakeArray1D())
    assert m.value == [1.0, 2.0, 3.0]


def test_matrix_1d_numpy_bounds():
    """Test that 1D array-like bounds are accepted."""

    class FakeArray1D:
        def tolist(self):
            return [0.0, 0.0]

    class FakeMins:
        def tolist(self):
            return [-5.0, -10.0]

    class FakeMaxs:
        def tolist(self):
            return [5.0, 10.0]

    m = ui.matrix(FakeArray1D(), min_value=FakeMins(), max_value=FakeMaxs())
    assert m.value == [0.0, 0.0]
# --- 1D validation error tests ---
# Mirror of the 2D validation suite for vector (1D) inputs.


def test_matrix_1d_empty_raises():
    with pytest.raises(ValueError, match="non-empty"):
        ui.matrix([])


def test_matrix_1d_min_ge_max_raises():
    with pytest.raises(ValueError, match="less than"):
        ui.matrix([5], min_value=10, max_value=5)


def test_matrix_1d_value_below_min_raises():
    with pytest.raises(ValueError, match="less than min_value"):
        ui.matrix([0], min_value=1)


def test_matrix_1d_value_above_max_raises():
    with pytest.raises(ValueError, match="greater than max_value"):
        ui.matrix([10], max_value=5)


def test_matrix_1d_step_zero_raises():
    with pytest.raises(ValueError, match="step.*positive"):
        ui.matrix([1], step=0)


def test_matrix_1d_step_negative_raises():
    with pytest.raises(ValueError, match="step.*positive"):
        ui.matrix([1], step=-0.5)


def test_matrix_1d_bounds_length_mismatch_raises():
    # Per-element bounds must match the vector length.
    with pytest.raises(ValueError, match="rows"):
        ui.matrix([1, 2], min_value=[0, 0, 0])


def test_matrix_1d_negative_precision_raises():
    with pytest.raises(ValueError, match="precision"):
        ui.matrix([1], precision=-1)


def test_matrix_1d_2d_param_raises():
    """1D params that are actually 2D should be rejected."""
    with pytest.raises(ValueError, match="1D"):
        ui.matrix([1, 2], step=[[0.1, 0.2], [0.3, 0.4]])


def test_matrix_1d_symmetric_raises():
    """symmetric=True with 1D input should raise."""
    with pytest.raises(ValueError, match="symmetric.*not supported.*1D"):
        ui.matrix([1, 2], symmetric=True)


def test_matrix_1d_uses_matrix_plugin():
    """1D input should use the marimo-matrix frontend component."""
    m = ui.matrix([1, 2])
    assert m._name == "marimo-matrix"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/ui/_impl/test_matrix.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_cli/test_cli_export_thumbnail.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from unittest.mock import AsyncMock, patch
from click.testing import CliRunner
import marimo._cli.export.thumbnail as thumbnail_module
from marimo._cli.sandbox import SandboxMode
if TYPE_CHECKING:
from pathlib import Path
def _write_notebook(path: Path) -> None:
path.write_text(
"""
import marimo
app = marimo.App()
@app.cell
def _():
return
if __name__ == "__main__":
app.run()
""",
encoding="utf-8",
)
def test_thumbnail_sandbox_single_bootstrap_adds_playwright(
    tmp_path: Path,
) -> None:
    """Single-notebook --sandbox bootstraps a sandbox with playwright added.

    Verifies that the CLI delegates to run_in_sandbox (instead of requiring
    playwright locally) and passes the SINGLE mode plus the bootstrapped
    marker through the sandbox environment.
    """
    notebook = tmp_path / "notebook.py"
    _write_notebook(notebook)
    runner = CliRunner()
    captured: dict[str, str | list[str] | None] = {}

    def _fake_run_in_sandbox(
        args: list[str],
        *,
        name: str | None = None,
        additional_features: list[str] | None = None,
        additional_deps: list[str] | None = None,
        extra_env: dict[str, str] | None = None,
    ) -> int:
        # Record the arguments the CLI forwarded; return 0 (success).
        del args
        del additional_features
        captured["name"] = name
        captured["deps"] = additional_deps
        captured["mode"] = (extra_env or {}).get(
            thumbnail_module._sandbox_mode_env
        )
        captured["bootstrapped"] = (extra_env or {}).get(
            thumbnail_module._sandbox_bootstrapped_env
        )
        return 0

    with (
        patch(
            "marimo._cli.sandbox.run_in_sandbox",
            side_effect=_fake_run_in_sandbox,
        ) as run_in_sandbox,
        patch(
            "marimo._cli.sandbox.resolve_sandbox_mode",
            return_value=SandboxMode.SINGLE,
        ),
        patch.object(
            thumbnail_module.DependencyManager.playwright, "require"
        ) as playwright_require,
    ):
        result = runner.invoke(
            thumbnail_module.thumbnail,
            [str(notebook), "--execute", "--sandbox"],
        )
    assert result.exit_code == 0, result.output
    run_in_sandbox.assert_called_once()
    # Bootstrap path must NOT require playwright in the parent process.
    playwright_require.assert_not_called()
    assert captured["name"] == str(notebook)
    assert captured["deps"] == ["playwright"]
    assert captured["mode"] == SandboxMode.SINGLE.value
    assert captured["bootstrapped"] == "1"
    # Env vars are passed to the child only; parent env stays clean.
    assert thumbnail_module._sandbox_bootstrapped_env not in os.environ
    assert thumbnail_module._sandbox_mode_env not in os.environ
def test_thumbnail_sandbox_multi_bootstrap_sets_multi_mode(
    tmp_path: Path,
) -> None:
    """Multiple notebooks with --sandbox force MULTI mode directly.

    With more than one notebook, resolve_sandbox_mode must not be consulted;
    the CLI hard-codes MULTI and still adds playwright to the sandbox deps.
    """
    first = tmp_path / "first.py"
    second = tmp_path / "second.py"
    _write_notebook(first)
    _write_notebook(second)
    runner = CliRunner()
    captured: dict[str, str | list[str] | None] = {}

    def _fake_run_in_sandbox(
        args: list[str],
        *,
        name: str | None = None,
        additional_features: list[str] | None = None,
        additional_deps: list[str] | None = None,
        extra_env: dict[str, str] | None = None,
    ) -> int:
        del args
        del additional_features
        captured["name"] = name
        captured["deps"] = additional_deps
        captured["mode"] = (extra_env or {}).get(
            thumbnail_module._sandbox_mode_env
        )
        return 0

    with (
        patch(
            "marimo._cli.sandbox.run_in_sandbox",
            side_effect=_fake_run_in_sandbox,
        ) as run_in_sandbox,
        patch("marimo._cli.sandbox.resolve_sandbox_mode") as resolve_mode,
        patch.object(
            thumbnail_module.DependencyManager.playwright, "require"
        ) as playwright_require,
    ):
        result = runner.invoke(
            thumbnail_module.thumbnail,
            [str(first), str(second), "--execute", "--sandbox"],
        )
    assert result.exit_code == 0, result.output
    run_in_sandbox.assert_called_once()
    # MULTI is implied by multiple notebooks; no mode resolution needed.
    resolve_mode.assert_not_called()
    playwright_require.assert_not_called()
    # The sandbox is named after the first notebook.
    assert captured["name"] == str(first)
    assert captured["deps"] == ["playwright"]
    assert captured["mode"] == SandboxMode.MULTI.value
def test_thumbnail_sandbox_requires_execute(tmp_path: Path) -> None:
    """Passing --sandbox without --execute is rejected with a clear error."""
    nb_path = tmp_path / "notebook.py"
    _write_notebook(nb_path)
    outcome = CliRunner().invoke(
        thumbnail_module.thumbnail,
        [str(nb_path), "--sandbox"],
    )
    assert outcome.exit_code != 0
    assert "--sandbox requires --execute" in outcome.output
def test_thumbnail_reentry_single_skips_bootstrap(
    tmp_path: Path,
    monkeypatch,
) -> None:
    """Re-entry inside a bootstrapped sandbox skips re-bootstrapping.

    Simulates the child process (bootstrapped env var set, SINGLE mode):
    the CLI must not call run_in_sandbox again, must require playwright
    locally, and must pass the recovered mode to _generate_thumbnails.
    """
    notebook = tmp_path / "notebook.py"
    _write_notebook(notebook)
    runner = CliRunner()
    generate = AsyncMock()
    # Simulate the environment the bootstrap parent sets for the child.
    monkeypatch.setenv(thumbnail_module._sandbox_bootstrapped_env, "1")
    monkeypatch.setenv(
        thumbnail_module._sandbox_mode_env,
        SandboxMode.SINGLE.value,
    )
    with (
        patch("marimo._cli.sandbox.run_in_sandbox") as run_in_sandbox,
        patch("marimo._cli.sandbox.resolve_sandbox_mode") as resolve_mode,
        patch.object(
            thumbnail_module.DependencyManager.playwright, "require"
        ) as playwright_require,
        patch.object(thumbnail_module, "_generate_thumbnails", new=generate),
    ):
        result = runner.invoke(
            thumbnail_module.thumbnail,
            [str(notebook), "--execute"],
        )
    assert result.exit_code == 0, result.output
    run_in_sandbox.assert_not_called()
    resolve_mode.assert_not_called()
    playwright_require.assert_called_once_with("for thumbnail generation")
    generate.assert_called_once()
    assert generate.call_args.kwargs["sandbox_mode"] is SandboxMode.SINGLE
def test_thumbnail_reentry_multi_skips_bootstrap(
    tmp_path: Path,
    monkeypatch,
) -> None:
    """Re-entry in MULTI mode also skips bootstrap and keeps the mode."""
    first = tmp_path / "first.py"
    second = tmp_path / "second.py"
    _write_notebook(first)
    _write_notebook(second)
    runner = CliRunner()
    generate = AsyncMock()
    # Simulate the environment the bootstrap parent sets for the child.
    monkeypatch.setenv(thumbnail_module._sandbox_bootstrapped_env, "1")
    monkeypatch.setenv(
        thumbnail_module._sandbox_mode_env,
        SandboxMode.MULTI.value,
    )
    with (
        patch("marimo._cli.sandbox.run_in_sandbox") as run_in_sandbox,
        patch("marimo._cli.sandbox.resolve_sandbox_mode") as resolve_mode,
        patch.object(
            thumbnail_module.DependencyManager.playwright, "require"
        ) as playwright_require,
        patch.object(thumbnail_module, "_generate_thumbnails", new=generate),
    ):
        result = runner.invoke(
            thumbnail_module.thumbnail,
            [str(first), str(second), "--execute"],
        )
    assert result.exit_code == 0, result.output
    run_in_sandbox.assert_not_called()
    resolve_mode.assert_not_called()
    playwright_require.assert_called_once_with("for thumbnail generation")
    generate.assert_called_once()
    assert generate.call_args.kwargs["sandbox_mode"] is SandboxMode.MULTI
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_cli_export_thumbnail.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_messaging/thread_local_streams.py | # Copyright 2026 Marimo. All rights reserved.
"""Thread-local stream proxy for run mode.
In run mode, multiple sessions share the same process. Each session runs in its
own thread and needs sys.stdout / sys.stderr to route output to that session's
frontend connection.
ThreadLocalStreamProxy wraps the real sys.stdout / sys.stderr once at process
level and dispatches writes to a per-thread stream registered via
set_thread_local_streams().
"""
from __future__ import annotations
import io
import sys
import threading
from typing import TYPE_CHECKING, TextIO, Union
from marimo._messaging.types import Stderr, Stdout
if TYPE_CHECKING:
from collections.abc import Iterable
_proxies_installed = False
_install_lock = threading.Lock()
_original_stdout: TextIO | None = None
_original_stderr: TextIO | None = None
class ThreadLocalStreamProxy(io.TextIOBase):
    """Write-only text stream that routes output per thread.

    A thread that has registered a stream via set_thread_local_streams()
    gets its writes delivered there; every other thread falls through to
    the wrapped original stream (the real sys.stdout / sys.stderr).
    """

    def __init__(
        self, original: Union[io.TextIOBase, TextIO], name: str
    ) -> None:
        self._original = original
        self._local = threading.local()
        self._name = name
        # Expose the underlying binary buffer so that code writing to
        # sys.stdout.buffer (e.g. package installation logging) keeps working.
        self.buffer: io.BufferedIOBase | None = getattr(
            original, "buffer", None
        )

    # -- per-thread registration -------------------------------------------

    def _set_stream(self, stream: io.TextIOBase) -> None:
        self._local.stream = stream

    def _clear_stream(self) -> None:
        self._local.stream = None

    def _get_stream(self) -> Union[io.TextIOBase, TextIO]:
        # Fall back to the original stream for unregistered threads.
        current = getattr(self._local, "stream", None)
        if current is None:
            return self._original
        return current

    # -- TextIOBase interface ----------------------------------------------

    @property
    def name(self) -> str:
        return self._name

    @property
    def encoding(self) -> str:  # type: ignore[override]
        return getattr(self._get_stream(), "encoding", "utf-8")

    @property
    def errors(self) -> str | None:  # type: ignore[override]
        return getattr(self._get_stream(), "errors", None)

    def write(self, data: str) -> int:
        return self._get_stream().write(data)

    def writelines(self, lines: Iterable[str]) -> None:  # type: ignore[override]
        target = self._get_stream()
        target.writelines(lines)

    def flush(self) -> None:
        self._get_stream().flush()

    def fileno(self) -> int:
        # File descriptors always come from the real stream.
        return self._original.fileno()

    def isatty(self) -> bool:
        return self._original.isatty()

    def readable(self) -> bool:
        return False

    def writable(self) -> bool:
        return True

    def seekable(self) -> bool:
        return False
def install_thread_local_proxies() -> None:
    """Install thread-local proxies as sys.stdout / sys.stderr (idempotent).

    Called once from the main server thread before kernel threads are spawned.
    """
    global _proxies_installed, _original_stdout, _original_stderr
    with _install_lock:
        # Idempotent: a second call must not double-wrap the streams.
        if _proxies_installed:
            return
        # Remember the real streams so uninstall can restore them even if
        # sys.stdout / sys.stderr are replaced again after install.
        _original_stdout = sys.stdout
        _original_stderr = sys.stderr
        sys.stdout = ThreadLocalStreamProxy(sys.stdout, "<stdout>")  # type: ignore[assignment]
        sys.stderr = ThreadLocalStreamProxy(sys.stderr, "<stderr>")  # type: ignore[assignment]
        _proxies_installed = True
def uninstall_thread_local_proxies() -> None:
    """Remove thread-local proxies, restoring the original streams."""
    global _proxies_installed, _original_stdout, _original_stderr
    with _install_lock:
        if not _proxies_installed:
            return
        # Restore the saved originals rather than unwrapping the current
        # sys streams, which may have been replaced after install.
        if _original_stdout is not None:
            sys.stdout = _original_stdout  # type: ignore[assignment]
        if _original_stderr is not None:
            sys.stderr = _original_stderr  # type: ignore[assignment]
        _original_stdout = None
        _original_stderr = None
        _proxies_installed = False
def set_thread_local_streams(
    stdout: Stdout | None, stderr: Stderr | None
) -> None:
    """Register per-thread streams (call from each session thread).

    A no-op for either stream when the proxies are not installed or the
    corresponding argument is None.
    """
    out = sys.stdout
    if stdout is not None and isinstance(out, ThreadLocalStreamProxy):
        out._set_stream(stdout)  # type: ignore[union-attr]
    err = sys.stderr
    if stderr is not None and isinstance(err, ThreadLocalStreamProxy):
        err._set_stream(stderr)  # type: ignore[union-attr]
def clear_thread_local_streams() -> None:
    """Clear per-thread streams (call when a session thread exits).

    Safe to call even when the proxies are not installed.
    """
    for stream in (sys.stdout, sys.stderr):
        if isinstance(stream, ThreadLocalStreamProxy):
            stream._clear_stream()  # type: ignore[union-attr]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_messaging/thread_local_streams.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_messaging/test_thread_local_proxy.py | # Copyright 2026 Marimo. All rights reserved.
"""Tests for ThreadLocalStreamProxy and thread-local stream helpers."""
from __future__ import annotations
import io
import sys
import threading
from unittest.mock import MagicMock
from marimo._messaging.thread_local_streams import (
ThreadLocalStreamProxy,
clear_thread_local_streams,
install_thread_local_proxies,
set_thread_local_streams,
uninstall_thread_local_proxies,
)
# ---------------------------------------------------------------------------
# ThreadLocalStreamProxy unit tests
# ---------------------------------------------------------------------------
class TestThreadLocalStreamProxy:
    """Unit tests for ThreadLocalStreamProxy routing and stream API."""

    def test_write_to_original_when_no_thread_local(self) -> None:
        # No registered stream: writes fall through to the original.
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        proxy.write("hello")
        assert original.getvalue() == "hello"

    def test_write_to_thread_local_stream(self) -> None:
        # A registered stream captures writes; the original sees nothing.
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        thread_stream = io.StringIO()
        proxy._set_stream(thread_stream)
        proxy.write("hello")
        assert thread_stream.getvalue() == "hello"
        assert original.getvalue() == ""

    def test_clear_falls_back_to_original(self) -> None:
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        thread_stream = io.StringIO()
        proxy._set_stream(thread_stream)
        proxy.write("a")
        proxy._clear_stream()
        proxy.write("b")
        assert thread_stream.getvalue() == "a"
        assert original.getvalue() == "b"

    def test_thread_isolation(self) -> None:
        """Writes from different threads go to different streams."""
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        stream_a = io.StringIO()
        stream_b = io.StringIO()
        barrier = threading.Barrier(2)

        def thread_fn(stream: io.StringIO, label: str) -> None:
            proxy._set_stream(stream)
            barrier.wait()  # synchronise so both threads are active
            proxy.write(label)
            proxy.flush()

        t1 = threading.Thread(target=thread_fn, args=(stream_a, "A"))
        t2 = threading.Thread(target=thread_fn, args=(stream_b, "B"))
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        assert stream_a.getvalue() == "A"
        assert stream_b.getvalue() == "B"
        assert original.getvalue() == ""

    def test_unregistered_thread_uses_original(self) -> None:
        """Threads that never call _set_stream write to original."""
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")

        def thread_fn() -> None:
            proxy.write("unregistered")

        t = threading.Thread(target=thread_fn)
        t.start()
        t.join()
        assert original.getvalue() == "unregistered"

    def test_writelines(self) -> None:
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        proxy.writelines(["hello", " ", "world"])
        assert original.getvalue() == "hello world"

    def test_flush_delegates(self) -> None:
        original = MagicMock(spec=io.StringIO)
        original.flush = MagicMock()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        proxy.flush()
        original.flush.assert_called_once()

    def test_name_property(self) -> None:
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        assert proxy.name == "<stdout>"

    def test_writable(self) -> None:
        # The proxy is strictly write-only.
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        assert proxy.writable()
        assert not proxy.readable()
        assert not proxy.seekable()

    def test_buffer_attribute_from_real_stdout(self) -> None:
        """Proxy exposes .buffer from the original stream."""
        proxy = ThreadLocalStreamProxy(sys.__stdout__, "<stdout>")
        assert proxy.buffer is sys.__stdout__.buffer  # type: ignore[union-attr]

    def test_buffer_attribute_none_for_stringio(self) -> None:
        """StringIO has no buffer, so proxy.buffer should be None."""
        original = io.StringIO()
        proxy = ThreadLocalStreamProxy(original, "<stdout>")
        assert proxy.buffer is None
# ---------------------------------------------------------------------------
# install / set / clear helper tests
# ---------------------------------------------------------------------------
class TestHelperFunctions:
    """Tests for the module-level install / set / clear / uninstall helpers."""

    def test_install_thread_local_proxies_is_idempotent(self) -> None:
        """install_thread_local_proxies installs proxies and is idempotent."""
        try:
            install_thread_local_proxies()
            first_stdout = sys.stdout
            first_stderr = sys.stderr
            assert isinstance(first_stdout, ThreadLocalStreamProxy)
            assert isinstance(first_stderr, ThreadLocalStreamProxy)
            # Second call should be a no-op
            install_thread_local_proxies()
            assert sys.stdout is first_stdout
            assert sys.stderr is first_stderr
        finally:
            uninstall_thread_local_proxies()

    def test_set_and_clear_thread_local_streams(self) -> None:
        """set/clear thread local streams operate on proxies."""
        saved_stdout = sys.stdout
        saved_stderr = sys.stderr
        try:
            install_thread_local_proxies()
            mock_out = MagicMock()
            mock_err = MagicMock()
            set_thread_local_streams(mock_out, mock_err)
            proxy_out: ThreadLocalStreamProxy = sys.stdout  # type: ignore
            proxy_err: ThreadLocalStreamProxy = sys.stderr  # type: ignore
            assert proxy_out._get_stream() is mock_out
            assert proxy_err._get_stream() is mock_err
            clear_thread_local_streams()
            # After clear, should fall back to original
            assert proxy_out._get_stream() is saved_stdout
            assert proxy_err._get_stream() is saved_stderr
        finally:
            uninstall_thread_local_proxies()

    def test_uninstall_restores_originals_even_if_stdout_replaced(
        self,
    ) -> None:
        """uninstall restores originals even if sys.stdout was overwritten."""
        saved_stdout = sys.stdout
        saved_stderr = sys.stderr
        try:
            install_thread_local_proxies()
            # Something replaces sys.stdout after install
            sys.stdout = io.StringIO()  # type: ignore
            sys.stderr = io.StringIO()  # type: ignore
            # uninstall should still restore the real originals
            uninstall_thread_local_proxies()
            assert sys.stdout is saved_stdout
            assert sys.stderr is saved_stderr
        finally:
            sys.stdout = saved_stdout
            sys.stderr = saved_stderr

    def test_set_noop_without_proxy(self) -> None:
        """set/clear are safe to call when proxies are not installed."""
        saved_stdout = sys.stdout
        saved_stderr = sys.stderr
        try:
            sys.stdout = io.StringIO()  # type: ignore
            sys.stderr = io.StringIO()  # type: ignore
            # Should not raise
            set_thread_local_streams(MagicMock(), MagicMock())
            clear_thread_local_streams()
        finally:
            sys.stdout = saved_stdout
            sys.stderr = saved_stderr
# ---------------------------------------------------------------------------
# capture / redirect with proxies installed
# ---------------------------------------------------------------------------
class TestCaptureWithProxy:
    """Tests for capture_stdout / capture_stderr when proxies are active."""

    def test_capture_stdout_with_proxy(self) -> None:
        from marimo._runtime.capture import capture_stdout

        saved_stdout = sys.stdout
        try:
            install_thread_local_proxies()
            proxy: ThreadLocalStreamProxy = sys.stdout  # type: ignore
            session_stream = io.StringIO()
            proxy._set_stream(session_stream)
            with capture_stdout() as buf:
                sys.stdout.write("captured")
            # Writes inside the context should go to the buffer
            assert buf.getvalue() == "captured"
            # The session stream should not see captured output
            assert session_stream.getvalue() == ""
            # After exiting, writes should resume going to session stream
            sys.stdout.write("after")
            assert session_stream.getvalue() == "after"
        finally:
            sys.stdout = saved_stdout
            uninstall_thread_local_proxies()

    def test_capture_stderr_with_proxy(self) -> None:
        from marimo._runtime.capture import capture_stderr

        saved_stderr = sys.stderr
        try:
            install_thread_local_proxies()
            proxy: ThreadLocalStreamProxy = sys.stderr  # type: ignore
            session_stream = io.StringIO()
            proxy._set_stream(session_stream)
            with capture_stderr() as buf:
                sys.stderr.write("captured")
            assert buf.getvalue() == "captured"
            assert session_stream.getvalue() == ""
            sys.stderr.write("after")
            assert session_stream.getvalue() == "after"
        finally:
            sys.stderr = saved_stderr
            uninstall_thread_local_proxies()

    def test_redirect_stdout_with_proxy(self) -> None:
        from marimo._runtime.capture import redirect_stdout

        saved_stdout = sys.stdout
        try:
            install_thread_local_proxies()
            proxy: ThreadLocalStreamProxy = sys.stdout  # type: ignore
            session_stream = io.StringIO()
            proxy._set_stream(session_stream)
            with redirect_stdout():
                # Writes should go to _output (via _redirect), not session
                sys.stdout.write("redirected")
            # Session stream should not see redirected writes
            assert session_stream.getvalue() == ""
            # After exiting, writes should resume going to session stream
            sys.stdout.write("after")
            assert session_stream.getvalue() == "after"
        finally:
            sys.stdout = saved_stdout
            uninstall_thread_local_proxies()

    def test_redirect_stderr_with_proxy(self) -> None:
        from marimo._runtime.capture import redirect_stderr

        saved_stderr = sys.stderr
        try:
            install_thread_local_proxies()
            proxy: ThreadLocalStreamProxy = sys.stderr  # type: ignore
            session_stream = io.StringIO()
            proxy._set_stream(session_stream)
            with redirect_stderr():
                sys.stderr.write("redirected")
            assert session_stream.getvalue() == ""
            sys.stderr.write("after")
            assert session_stream.getvalue() == "after"
        finally:
            sys.stderr = saved_stderr
            uninstall_thread_local_proxies()

    def test_capture_does_not_affect_other_threads(self) -> None:
        """capture_stdout in one thread should not affect another thread."""
        from marimo._runtime.capture import capture_stdout

        saved_stdout = sys.stdout
        try:
            install_thread_local_proxies()
            proxy: ThreadLocalStreamProxy = sys.stdout  # type: ignore
            other_stream = io.StringIO()
            barrier = threading.Barrier(2)
            captured: list[str] = []

            def other_thread() -> None:
                proxy._set_stream(other_stream)
                barrier.wait()
                proxy.write("other")

            session_stream = io.StringIO()
            proxy._set_stream(session_stream)
            t = threading.Thread(target=other_thread)
            t.start()
            with capture_stdout() as buf:
                barrier.wait()
                sys.stdout.write("main")
                t.join()
                captured.append(buf.getvalue())
            assert captured[0] == "main"
            assert other_stream.getvalue() == "other"
        finally:
            sys.stdout = saved_stdout
            uninstall_thread_local_proxies()
# ---------------------------------------------------------------------------
# Integration: multiple "sessions" in threads
# ---------------------------------------------------------------------------
def test_multi_session_no_deadlock() -> None:
    """Simulate two sessions in threads — no deadlock, correct routing."""
    fallback = io.StringIO()
    proxy = ThreadLocalStreamProxy(fallback, "<stdout>")
    outputs: dict[str, str] = {}
    start = threading.Barrier(2)

    def session(name: str) -> None:
        # Each "session" registers its own buffer, writes, then unregisters.
        local_buf = io.StringIO()
        proxy._set_stream(local_buf)
        start.wait()
        for _ in range(100):
            proxy.write(name)
        proxy.flush()
        outputs[name] = local_buf.getvalue()
        proxy._clear_stream()

    labels = ("A", "B")
    workers = [
        threading.Thread(target=session, args=(label,)) for label in labels
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join(timeout=5)
    for worker, label in zip(workers, labels):
        assert not worker.is_alive(), f"Thread {label} deadlocked"
    assert outputs["A"] == "A" * 100
    assert outputs["B"] == "B" * 100
    # Nothing leaked through to the fallback stream.
    assert fallback.getvalue() == ""
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_messaging/test_thread_local_proxy.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/test_project_dependencies.py | from __future__ import annotations
from pathlib import Path
import pytest
from tests.mocks import snapshotter
snapshot = snapshotter(__file__)
def _load_pyproject():
    """Parse the repository's top-level pyproject.toml with tomlkit."""
    import tomlkit

    pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
    return tomlkit.loads(pyproject_path.read_text())


def test_required_dependencies():
    # Snapshot the sorted required-dependency list so changes are reviewed.
    pyproject = _load_pyproject()
    deps = sorted(pyproject["project"]["dependencies"])
    snapshot("dependencies.txt", "\n".join(deps))


@pytest.mark.parametrize(
    "extra",
    ["sql", "sandbox", "recommended", "lsp", "mcp"],
)
def test_optional_dependencies(extra: str):
    # One snapshot file per optional-dependency extra.
    pyproject = _load_pyproject()
    deps = sorted(pyproject["project"]["optional-dependencies"][extra])
    snapshot(f"optional-dependencies-{extra}.txt", "\n".join(deps))
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/test_project_dependencies.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/markdown/latex_display_math.py | import marimo
__generated_with = "0.19.10"
app = marimo.App()
@app.cell(hide_code=True)
def _(mo):
    # Display math delimited by $$ ... $$ on a single line inside prose.
    mo.md(r"""
hello $$f(x) = y$$ world
""")
    return


@app.cell(hide_code=True)
def _(mo):
    # Display math on its own line between prose lines.
    mo.md(r"""
hello
$$f(x) = y$$
world
""")
    return


@app.cell(hide_code=True)
def _(mo):
    # Unbalanced $$ markers: the text must NOT be rendered as LaTeX.
    mo.md(r"""
hello $$this is not latex
$$ this is still not latex
""")
    return


@app.cell
def _():
    import marimo as mo
    return (mo,)


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/markdown/latex_display_math.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_output/formatters/pytorch_formatters.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import dataclasses
import html
import re
import typing
from marimo._output.formatters.formatter_factory import FormatterFactory
from marimo._output.hypertext import Html
if typing.TYPE_CHECKING:
import torch # type: ignore[import-not-found]
# Display category of a module; selects the colour of its type badge.
ModuleCategory = typing.Literal["weight", "act", "norm", "reg"]

# Human-readable legend labels, one per category.
_LABELS: dict[ModuleCategory, str] = {
    "weight": "Weight",
    "act": "Activation",
    "norm": "Normalization",
    "reg": "Regularization",
}

# Map torch.nn.modules.<subpackage> to our display category.
#
# PyTorch organises its layers into subpackages by purpose
# (e.g. torch.nn.modules.conv, torch.nn.modules.activation), so we
# can derive the category from `type(module).__module__` without
# enumerating every class.
#
# Caveat: MultiheadAttention lives in activation.py for historical
# reasons, so it gets "act" here rather than "weight". Correcting
# that would require a per-class override; for now the colour is
# acceptable since attention is arguably its own category anyway.
_MODULE_CATEGORY: dict[str, ModuleCategory] = {
    # Learnable / weighted layers
    "linear": "weight",
    "conv": "weight",
    "sparse": "weight",  # Embedding, EmbeddingBag
    "rnn": "weight",  # LSTM, GRU, RNN
    "transformer": "weight",
    # Activation functions (+ MultiheadAttention, see caveat above)
    "activation": "act",
    # Normalization
    "batchnorm": "norm",
    "normalization": "norm",  # LayerNorm, GroupNorm, RMSNorm
    "instancenorm": "norm",
    # Regularization
    "dropout": "reg",
}

# Matches "key=" at the start of a key=value token inside extra_repr().
# The lookbehind prevents matching "=" inside values like "a=b=c".
_KEY_RE = re.compile(r"(?<![=\w])(\w+)=")
# Matches a comma followed by a space that is NOT inside parentheses.
_TOP_COMMA_RE = re.compile(r",\s+(?![^()]*\))")
@dataclasses.dataclass
class ExtraRepr:
    """Parsed HTML fragments from a module's extra_repr() output."""

    # HTML for the leading positional arguments ("" when none).
    positional: str
    # HTML for the key=value pairs, keys wrapped in nn-t-key spans.
    kwargs: str
@dataclasses.dataclass
class TrainableInfo:
    """Summary of parameter trainability for display."""

    # Suffix appended to the parameter count, e.g. " (frozen)".
    note: str
    # True only when the module has parameters and none are trainable.
    is_frozen: bool
def _comma_to_br(html_str: str) -> str:
    """Replace top-level comma separators with <br> for multi-line display.

    Also replaces the ``=`` between key/value pairs with a space for the
    expanded view, without touching ``=`` inside HTML attributes.
    """
    # Only ", " outside parentheses is a separator between arguments.
    multiline = _TOP_COMMA_RE.sub("<br>", html_str)
    # "</span>=" only ever follows a highlighted key span, so this cannot
    # touch "=" inside HTML attribute values.
    return multiline.replace("</span>=", "</span> ")
def _frozen_attr(is_frozen: bool) -> str:
"""Build the HTML data-frozen attribute string when needed."""
if is_frozen:
return ' data-frozen="true"'
return ""
def _trainable_info(total: int, trainable: int) -> TrainableInfo:
    """Compute trainability note and frozen flag from parameter counts."""
    # No parameters at all, or everything trainable: nothing to annotate.
    if total == 0 or trainable == total:
        return TrainableInfo(note="", is_frozen=False)
    # Parameters exist but none require grad: the module is frozen.
    if trainable == 0:
        return TrainableInfo(note=" (frozen)", is_frozen=True)
    # Mixed: report how many of the parameters are trainable.
    return TrainableInfo(
        note=f" ({_fmt_integer(trainable)} trainable)",
        is_frozen=False,
    )
def _collect_dtype_device(
params: typing.Iterable[torch.nn.Parameter],
) -> tuple[str, str]:
"""Summarise dtype and device across parameters.
Returns ``(dtype_str, device_str)``. When all parameters agree the
value is a single token (e.g. ``"float32"``); when mixed the unique
values are joined with ``"/"`` (e.g. ``"float32/float16"``).
If *params* is empty both strings are ``"–"``.
"""
dtypes: set[str] = set()
devices: set[str] = set()
for p in params:
dtypes.add(str(p.dtype).removeprefix("torch."))
devices.add(str(p.device))
if not dtypes:
return ("\u2013", "\u2013")
return (
"/".join(sorted(dtypes)),
"/".join(sorted(devices)),
)
def _extra_repr_html(module: torch.nn.Module) -> ExtraRepr:
    """Build HTML from a module's extra_repr().

    Uses PyTorch's own extra_repr() hook -- every built-in layer already
    implements this, and custom modules can override it too. We highlight
    the ``key=`` portions of ``key=value`` pairs; positional arguments and
    values are preserved as-is.

    Returns an ExtraRepr with positional and keyword HTML fragments.
    """
    raw = module.extra_repr()
    if not raw:
        # Containers and parameter-less layers often have no extra_repr.
        return ExtraRepr("", "")
    # Escape first; \w in _KEY_RE cannot match escaped entities, so the
    # key-highlighting substitution below is safe on escaped text.
    escaped = html.escape(raw)
    key_repl = r'<span class="nn-t-key">\1</span>='
    # Find where keyword args start in the raw string
    m = _KEY_RE.search(escaped)
    if m is None:
        # No key=value tokens at all: everything is positional.
        return ExtraRepr(positional=escaped, kwargs="")
    pos = m.start()
    if pos == 0:
        # String starts with a key=: there is no positional prefix.
        return ExtraRepr(positional="", kwargs=_KEY_RE.sub(key_repl, escaped))
    # Split: positional part is before the first key=, strip trailing ", "
    positional = escaped[:pos].rstrip(", ")
    kwargs = _KEY_RE.sub(key_repl, escaped[pos:])
    return ExtraRepr(positional=positional, kwargs=kwargs)
def _layer_category(module: torch.nn.Module) -> ModuleCategory | None:
    """Classify a module for color-coding using its source subpackage."""
    module_path = type(module).__module__ or ""
    # Only torch's own layers are classified; custom or third-party
    # modules (and containers like Sequential) get no badge colour.
    if not module_path.startswith("torch.nn.modules."):
        return None
    subpackage = module_path.rsplit(".", 1)[-1]
    return _MODULE_CATEGORY.get(subpackage)
def _fmt_integer(n: int) -> str:
"""Format int into a human readable string."""
if n >= 1_000_000:
return f"{n / 1_000_000:.1f}M"
if n >= 1_000:
return f"{n / 1_000:.1f}K"
return str(n)
# Scoped stylesheet for the module-tree renderer; inlined into the HTML
# output by format(). All class names are prefixed "nn-t" to avoid
# collisions, and colours use marimo's CSS variables (--slate-*, etc.).
_CSS = """\
.nn-t {
font-size: 0.8125rem;
line-height: 1.5;
background-color: var(--slate-1);
color: var(--slate-12);
border-radius: 6px;
}
/* Header */
.nn-t-header {
display: flex;
align-items: center;
gap: 0.5rem;
padding: 0.625rem 0.75rem 0.5rem 0.75rem;
}
.nn-t-root {
font-family: monospace;
font-size: 0.875rem;
font-weight: 600;
color: var(--slate-12);
}
.nn-t-summary {
font-family: monospace;
font-size: 0.75rem;
color: var(--slate-11);
margin-left: auto;
}
.nn-t-divider {
height: 1px;
background-color: var(--slate-3);
margin: 0 0.75rem;
}
/* Body */
.nn-t-body {
padding: 0.5rem 0 0.5rem 0.75rem;
}
/* Shared row layout */
.nn-t-leaf,
.nn-t-node > summary,
.nn-t-expand > summary {
display: flex;
align-items: center;
gap: 0.5rem;
padding: 0.1875rem 0.75rem 0.1875rem 0;
white-space: nowrap;
}
.nn-t-leaf:hover,
.nn-t-node > summary:hover,
.nn-t-expand > summary:hover {
background: var(--slate-2);
}
/* Expandable nodes */
.nn-t-node {
margin: 0;
padding: 0;
}
.nn-t-node > summary {
cursor: pointer;
list-style: none;
}
.nn-t-node > summary::-webkit-details-marker {
display: none;
}
/* Disclosure arrow */
.nn-t-arrow {
display: inline-flex;
align-items: center;
justify-content: center;
width: 1rem;
flex-shrink: 0;
color: var(--slate-9);
transition: transform 0.12s;
font-size: 0.5rem;
}
.nn-t-node[open] > summary .nn-t-arrow {
transform: rotate(90deg);
}
/* Leaf spacer matches arrow width */
.nn-t-spacer {
display: inline-block;
width: 1rem;
flex-shrink: 0;
}
/* Children with indent guide */
.nn-t-children {
margin-left: calc(0.5rem - 1px);
padding-left: 0.75rem;
border-left: 1px solid var(--slate-3);
}
/* Text elements */
.nn-t-name {
font-family: monospace;
font-size: 0.8125rem;
font-weight: 500;
color: var(--slate-12);
}
.nn-t-type {
font-family: monospace;
font-size: 0.8125rem;
font-weight: 600;
color: var(--slate-12);
padding: 0.0625rem 0.375rem;
border-radius: 0.1875rem;
background: var(--slate-3);
}
.nn-t-type[data-cat="weight"] { --pill-bg: var(--blue-3); --pill-fg: var(--blue-11); }
.nn-t-type[data-cat="norm"] { --pill-bg: var(--green-3); --pill-fg: var(--green-11); }
.nn-t-type[data-cat="act"] { --pill-bg: var(--orange-3); --pill-fg: var(--orange-11); }
.nn-t-type[data-cat="reg"] { --pill-bg: var(--crimson-3); --pill-fg: var(--crimson-11); }
.nn-t-type[data-cat] {
background: var(--pill-bg);
color: var(--pill-fg);
}
/* Positional args (always visible, never truncated) */
.nn-t-pos {
font-family: monospace;
font-size: 0.8125rem;
color: var(--slate-11);
flex-shrink: 0;
}
/* Keyword args (truncated with ellipsis) */
.nn-t-args {
font-family: monospace;
font-size: 0.8125rem;
color: var(--slate-11);
overflow: hidden;
text-overflow: ellipsis;
min-width: 0;
}
/* Expandable args */
.nn-t-expand {
margin: 0;
padding: 0;
}
.nn-t-expand > summary {
cursor: pointer;
list-style: none;
}
.nn-t-expand > summary::-webkit-details-marker {
display: none;
}
.nn-t-expand[open] > summary .nn-t-args {
display: none;
}
.nn-t-expand-body {
font-family: monospace;
font-size: 0.8125rem;
color: var(--slate-11);
padding: 0 0.75rem 0.25rem 2.75rem;
line-height: 1.6;
}
.nn-t-key {
color: var(--slate-9);
}
.nn-t-expand-sep {
display: flex;
align-items: center;
gap: 0.25rem;
margin: 0.125rem 0 0 0;
}
.nn-t-expand-sep::after {
content: "";
flex: 1;
height: 1px;
background: var(--slate-3);
}
.nn-t-expand-sep-label {
font-size: 0.5625rem;
text-transform: uppercase;
letter-spacing: 0.04em;
color: var(--slate-8);
flex-shrink: 0;
}
/* Param count */
.nn-t-params {
color: var(--slate-10);
font-family: monospace;
font-size: 0.75rem;
margin-left: auto;
padding-left: 1rem;
flex-shrink: 0;
}
[data-frozen] > .nn-t-type,
[data-frozen] > .nn-t-pos,
[data-frozen] > .nn-t-args,
[data-frozen] > .nn-t-params,
[data-frozen] > .nn-t-spacer,
[data-frozen] > summary > .nn-t-type,
[data-frozen] > summary > .nn-t-pos,
[data-frozen] > summary > .nn-t-args,
[data-frozen] > summary > .nn-t-params,
[data-frozen] > summary > .nn-t-arrow {
opacity: 0.55;
}
/* Footer with info-hover legend */
.nn-t-footer {
display: flex;
justify-content: flex-end;
padding: 0.25rem 0.75rem 0.375rem 0.75rem;
}
.nn-t-info {
position: relative;
display: inline-flex;
align-items: center;
justify-content: center;
color: var(--slate-8);
cursor: default;
}
.nn-t-info:hover { color: var(--slate-10); }
.nn-t-info:hover .nn-t-legend {
visibility: visible;
opacity: 1;
}
.nn-t-info svg {
width: 0.875rem;
height: 0.875rem;
}
.nn-t-legend {
visibility: hidden;
opacity: 0;
position: absolute;
bottom: calc(100% + 6px);
right: 0;
z-index: 10;
max-height: 12rem;
overflow-y: auto;
display: flex;
flex-direction: column;
gap: 0.25rem;
padding: 0.375rem 0.625rem;
background: var(--slate-1);
border: 1px solid var(--slate-3);
border-radius: 6px;
white-space: nowrap;
transition: opacity 0.12s, visibility 0.12s;
font-family: monospace;
font-size: 0.75rem;
color: var(--slate-11);
}
.nn-t-legend-title {
font-size: 0.6875rem;
text-transform: uppercase;
letter-spacing: 0.04em;
color: var(--slate-9);
margin-bottom: 0.0625rem;
}
.nn-t-legend-item {
display: flex;
align-items: center;
gap: 0.375rem;
}
.nn-t-swatch {
display: inline-flex;
align-items: center;
justify-content: center;
width: 0.875rem;
height: 0.8125rem;
border-radius: 0.1875rem;
flex-shrink: 0;
background: var(--slate-3);
}
.nn-t-swatch[data-cat="weight"] { background: var(--blue-3); }
.nn-t-swatch[data-cat="norm"] { background: var(--green-3); }
.nn-t-swatch[data-cat="act"] { background: var(--orange-3); }
.nn-t-swatch[data-cat="reg"] { background: var(--crimson-3); }
.nn-t-swatch-dot {
width: 0.25rem;
height: 0.25rem;
border-radius: 50%;
background: var(--slate-8);
}
.nn-t-swatch[data-cat="weight"] .nn-t-swatch-dot { background: var(--blue-11); }
.nn-t-swatch[data-cat="norm"] .nn-t-swatch-dot { background: var(--green-11); }
.nn-t-swatch[data-cat="act"] .nn-t-swatch-dot { background: var(--orange-11); }
.nn-t-swatch[data-cat="reg"] .nn-t-swatch-dot { background: var(--crimson-11); }
.nn-t-swatch[data-dim] { opacity: 0.55; }
.nn-t-legend-sep {
height: 1px;
background: var(--slate-3);
margin: 0.125rem 0;
}"""
def _walk(mod: torch.nn.Module, name: str = "") -> str:
    """Recursively build HTML tree for an nn.Module (non-root nodes).

    Args:
        mod: The module to render.
        name: Attribute name of *mod* inside its parent ("" when unnamed).

    Returns:
        An HTML fragment: a plain row or expandable row for leaf modules,
        or a <details> subtree for container modules.
    """
    children = list(mod.named_children())
    type_name = mod.__class__.__name__
    extra = _extra_repr_html(mod)
    cat = _layer_category(mod)
    name_html = f'<span class="nn-t-name">{name}</span> ' if name else ""
    cat_attr = f' data-cat="{cat}"' if cat is not None else ""
    type_span = f'<span class="nn-t-type"{cat_attr}>{type_name}</span>'
    pos_args = (
        f' <span class="nn-t-pos">{extra.positional}</span>'
        if extra.positional
        else ""
    )
    if not children:
        # Leaf module: count only its own (non-recursive) parameters.
        own_params = list(mod.parameters(recurse=False))
        num_params = sum(p.numel() for p in own_params)
        num_trainable = sum(p.numel() for p in own_params if p.requires_grad)
        info = _trainable_info(num_params, num_trainable)
        # Parameter-less leaves (e.g. ReLU) are dimmed like frozen ones.
        frozen = _frozen_attr(info.is_frozen or num_params == 0)
        params = (
            f'<span class="nn-t-params"{frozen}>'
            f"{_fmt_integer(num_params)}{info.note}</span>"
            if num_params > 0
            else ""
        )
        # Build expand body: kwargs first, then dtype/device
        body_parts: list[str] = []
        if extra.kwargs:
            body_parts.append(_comma_to_br(extra.kwargs))
        if own_params:
            dtype_s, device_s = _collect_dtype_device(own_params)
            if body_parts:
                # Labeled divider between kwargs and tensor metadata.
                body_parts.append(
                    '<div class="nn-t-expand-sep">'
                    '<span class="nn-t-expand-sep-label">tensor</span>'
                    "</div>"
                )
            body_parts.append(
                f'<span class="nn-t-key">dtype</span> {dtype_s}'
                f"<br>"
                f'<span class="nn-t-key">device</span> {device_s}'
            )
        if body_parts:
            # There is something to expand: render a <details> row. The
            # inline kwargs are hidden (via CSS) when the row is open.
            kw_inline = (
                f' <span class="nn-t-args">{extra.kwargs}</span>'
                if extra.kwargs
                else ""
            )
            return (
                f'<details class="nn-t-expand"{frozen}>'
                f"<summary>"
                f'<span class="nn-t-spacer"></span>'
                f"{name_html}{type_span}{pos_args}{kw_inline}"
                f"{params}"
                f"</summary>"
                f'<div class="nn-t-expand-body">{"".join(body_parts)}</div>'
                f"</details>"
            )
        # Nothing to expand: plain non-interactive row.
        return (
            f'<div class="nn-t-leaf"{frozen}>'
            f'<span class="nn-t-spacer"></span>'
            f"{name_html}{type_span}{pos_args}"
            f"{params}"
            f"</div>"
        )
    # Container node: aggregate all descendant parameters
    all_sub = list(mod.parameters())
    total_sub = sum(p.numel() for p in all_sub)
    total_trainable = sum(p.numel() for p in all_sub if p.requires_grad)
    info = _trainable_info(total_sub, total_trainable)
    total_params = (
        f'<span class="nn-t-params">'
        f"{_fmt_integer(total_sub)}{info.note}</span>"
    )
    children_html = "\n".join(
        _walk(child_mod, child_name) for child_name, child_mod in children
    )
    return (
        f'<details class="nn-t-node"{_frozen_attr(info.is_frozen)}>'
        f"<summary>"
        f'<span class="nn-t-arrow">▶</span>'
        f"{name_html}{type_span}"
        f"{total_params}"
        f"</summary>"
        f'<div class="nn-t-children">{children_html}</div>'
        f"</details>"
    )
def format(module: torch.nn.Module) -> Html:  # noqa: A001
    """Render a PyTorch nn.Module as a collapsible tree.

    The output shows the model name and summary in a fixed header,
    with child modules rendered as an expandable tree below.

    Args:
        module: A ``torch.nn.Module`` instance.

    Returns:
        A ``marimo.Html`` object with the rendered tree.
    """
    all_params = list(module.parameters())
    children = list(module.named_children())
    total_params = sum(p.numel() for p in all_params)
    trainable_params = sum(p.numel() for p in all_params if p.requires_grad)
    # Parameter memory only; buffers (e.g. BatchNorm running stats) are
    # not counted in the size estimate.
    size_bytes = sum(p.numel() * p.element_size() for p in all_params)
    size_mb = size_bytes / (1024 * 1024)
    trainable_note = (
        f" ({_fmt_integer(trainable_params)} trainable)"
        if trainable_params != total_params
        else ""
    )
    header = (
        f'<div class="nn-t-header">'
        f'<span class="nn-t-root">{module.__class__.__name__}</span>'
        f'<span class="nn-t-summary">'
        f"{_fmt_integer(total_params)} params{trainable_note}"
        f" \u00b7 {size_mb:.1f} MB"
        f"</span>"
        f"</div>"
    )
    if children:
        body_html = "\n".join(
            _walk(child_mod, child_name) for child_name, child_mod in children
        )
        body = f'<div class="nn-t-body">{body_html}</div>'
    else:
        # Childless root (e.g. a bare nn.Linear): show its extra_repr
        # inline instead of an empty tree.
        extra = _extra_repr_html(module)
        combined = ", ".join(
            part for part in (extra.positional, extra.kwargs) if part
        )
        extra_html = (
            f'<span class="nn-t-args">{combined}</span>' if combined else ""
        )
        body = (
            f'<div class="nn-t-body">'
            f'<div class="nn-t-leaf">{extra_html}</div>'
            f"</div>"
        )
    divider = '<div class="nn-t-divider"></div>'
    legend_title = '<span class="nn-t-legend-title">Module types</span>'
    legend_items = "".join(
        f'<span class="nn-t-legend-item">'
        f'<span class="nn-t-swatch" data-cat="{cat}">'
        f'<span class="nn-t-swatch-dot"></span></span>{label}'
        f"</span>"
        for cat, label in _LABELS.items()
    )
    # Lucide "info" icon (ISC license)
    info_svg = (
        '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"'
        ' fill="none" stroke="currentColor" stroke-width="2"'
        ' stroke-linecap="round" stroke-linejoin="round">'
        '<circle cx="12" cy="12" r="10"/>'
        '<path d="M12 16v-4"/>'
        '<path d="M12 8h.01"/>'
        "</svg>"
    )
    footer = (
        f'<div class="nn-t-footer">'
        f'<span class="nn-t-info">{info_svg}'
        f'<span class="nn-t-legend">{legend_title}{legend_items}'
        f'<span class="nn-t-legend-sep"></span>'
        f'<span class="nn-t-legend-item">'
        f'<span class="nn-t-swatch"><span class="nn-t-swatch-dot"></span></span>'
        f"Trainable</span>"
        f'<span class="nn-t-legend-item">'
        f'<span class="nn-t-swatch" data-dim><span class="nn-t-swatch-dot"></span></span>'
        f"Frozen / no params</span>"
        f"</span>"
        f"</span>"
        f"</div>"
    )
    # Named `rendered` (not `html`) so the local doesn't shadow the
    # stdlib `html` module imported at the top of this file.
    rendered = (
        f'<div class="nn-t"><style>{_CSS}</style>'
        f"{header}{divider}{body}{footer}"
        f"</div>"
    )
    return Html(rendered)
class PyTorchFormatter(FormatterFactory):
    """Registers the HTML tree formatter for ``torch.nn.Module``."""

    @staticmethod
    def package_name() -> str:
        # The formatter is only activated once `torch` is importable.
        return "torch"

    def register(self) -> None:
        # Imported lazily: torch is optional and expensive to import.
        import torch.nn  # type: ignore[import-not-found,import-untyped,unused-ignore]  # noqa: E501

        from marimo._messaging.mimetypes import KnownMimeType
        from marimo._output import formatting
        from marimo._output.formatters.pytorch_formatters import format as fmt

        @formatting.formatter(torch.nn.Module)
        def _format_module(
            module: torch.nn.Module,
        ) -> tuple[KnownMimeType, str]:
            return ("text/html", fmt(module).text)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_output/formatters/pytorch_formatters.py",
"license": "Apache License 2.0",
"lines": 608,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_smoke_tests/formatters/pytorch_formatters.py | # /// script
# dependencies = [
# "marimo",
# "torch==2.10.0",
# ]
# requires-python = ">=3.13"
# ///
import marimo
__generated_with = "0.19.11"
app = marimo.App()


with app.setup:
    import torch.nn as nn
    import numpy
    import torch

    # NOTE(review): constructs an MPS device object and discards it —
    # presumably to exercise device handling in the formatter; confirm
    # intent (it has no visible effect on the cells below).
    torch.device("mps")
@app.cell
def _():
    # Simple MLP
    mlp = nn.Sequential(
        nn.Linear(784, 256),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(256, 128),
        nn.ReLU(),
        nn.Dropout(0.1),
        nn.Linear(128, 10),
    )
    # Last expression: marimo renders it via the PyTorch formatter.
    mlp
    return


@app.cell
def _():
    # CNN for image classification
    class SimpleCNN(nn.Module):
        def __init__(self):
            super().__init__()
            self.features = nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, padding=1),
                nn.BatchNorm2d(32),
                nn.ReLU(),
                nn.MaxPool2d(2),
                nn.Conv2d(32, 64, kernel_size=3, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU(),
                nn.MaxPool2d(2),
            )
            self.classifier = nn.Sequential(
                nn.Linear(64 * 8 * 8, 256),
                nn.ReLU(),
                nn.Dropout(0.5),
                nn.Linear(256, 10),
            )

        def forward(self, x):
            x = self.features(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return x

    cnn = SimpleCNN()
    cnn
    return


@app.cell
def _():
    # Mini ResNet with skip connections
    class ResBlock(nn.Module):
        def __init__(self, channels):
            super().__init__()
            self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
            self.bn1 = nn.BatchNorm2d(channels)
            self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
            self.bn2 = nn.BatchNorm2d(channels)
            self.relu = nn.ReLU()

        def forward(self, x):
            residual = x
            out = self.relu(self.bn1(self.conv1(x)))
            out = self.bn2(self.conv2(out))
            # Residual (skip) connection.
            out = out + residual
            return self.relu(out)

    class MiniResNet(nn.Module):
        def __init__(self, num_classes=10):
            super().__init__()
            self.stem = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
                nn.BatchNorm2d(64),
                nn.ReLU(),
                nn.MaxPool2d(3, stride=2, padding=1),
            )
            self.layer1 = nn.Sequential(ResBlock(64), ResBlock(64))
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(64, num_classes)

        def forward(self, x):
            x = self.stem(x)
            x = self.layer1(x)
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)
            return x

    resnet = MiniResNet()
    resnet
    return
@app.cell
def _():
    # Mini Transformer
    class MiniTransformer(nn.Module):
        def __init__(
            self,
            vocab_size=10000,
            d_model=256,
            nhead=4,
            num_layers=2,
            num_classes=5,
        ):
            super().__init__()
            self.embedding = nn.Embedding(vocab_size, d_model)
            encoder_layer = nn.TransformerEncoderLayer(
                d_model=d_model,
                nhead=nhead,
                dim_feedforward=512,
                batch_first=True,
            )
            self.transformer = nn.TransformerEncoder(
                encoder_layer, num_layers=num_layers
            )
            self.classifier = nn.Linear(d_model, num_classes)

        def forward(self, x):
            x = self.embedding(x)
            x = self.transformer(x)
            # Mean-pool over the sequence dimension before classifying.
            x = x.mean(dim=1)
            x = self.classifier(x)
            return x

    transformer = MiniTransformer()
    transformer
    return


@app.cell
def _():
    # Layers with many kwargs to test long extra_repr
    verbose = nn.Sequential(
        nn.Conv2d(
            3,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            dilation=1,
            groups=1,
            bias=False,
            padding_mode="zeros",
        ),
        nn.BatchNorm2d(
            64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
        ),
        nn.LSTM(
            input_size=256,
            hidden_size=512,
            num_layers=3,
            batch_first=True,
            dropout=0.3,
            bidirectional=True,
        ),
        nn.TransformerEncoderLayer(
            d_model=512,
            nhead=8,
            dim_feedforward=2048,
            dropout=0.1,
            activation="gelu",
            batch_first=True,
            norm_first=True,
        ),
    )
    verbose
    return
@app.cell
def _():
    # Fully frozen model (e.g. pretrained feature extractor)
    frozen_mlp = nn.Sequential(
        nn.Linear(784, 256),
        nn.ReLU(),
        nn.Linear(256, 10),
    )
    for p in frozen_mlp.parameters():
        p.requires_grad = False
    frozen_mlp
    return


@app.cell
def _():
    # Partially frozen: freeze backbone, train classifier head
    class FineTuned(nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU(),
                nn.Conv2d(64, 128, kernel_size=3, padding=1),
                nn.BatchNorm2d(128),
                nn.ReLU(),
            )
            self.head = nn.Sequential(
                nn.Linear(128, 64),
                nn.ReLU(),
                nn.Linear(64, 10),
            )
            # Freeze backbone
            for p in self.backbone.parameters():
                p.requires_grad = False

        def forward(self, x):
            x = self.backbone(x)
            # Global average pool over spatial dims.
            x = x.mean(dim=[2, 3])
            x = self.head(x)
            return x

    FineTuned()
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/formatters/pytorch_formatters.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_output/formatters/test_pytorch_formatters.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._output.formatters.formatters import register_formatters
# Skip the whole class when torch is not installed in the test env.
HAS_DEPS = DependencyManager.torch.has()


@pytest.mark.skipif(not HAS_DEPS, reason="torch not installed")
class TestPyTorchFormatter:
    """Tests for the nn.Module HTML tree formatter.

    torch is imported inside each test so collection works without it.
    """

    def test_format_simple_module(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        model = nn.Linear(10, 5)
        result = format(model)
        html = result.text
        assert "nn-t" in html
        assert "Linear" in html
        # Should contain param count
        assert "nn-t-summary" in html

    def test_format_sequential(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        model = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 10),
        )
        result = format(model)
        html = result.text
        assert "Sequential" in html
        assert "Linear" in html
        assert "ReLU" in html
        # Tree structure elements
        assert "nn-t-node" in html
        assert "nn-t-arrow" in html
        # dtype/device in expand bodies of Linear layers
        assert "float32" in html
        assert "cpu" in html

    def test_format_nested_module(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        class SimpleNet(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.features = nn.Sequential(
                    nn.Conv2d(3, 16, 3),
                    nn.ReLU(),
                )
                self.classifier = nn.Linear(16, 10)

        model = SimpleNet()
        result = format(model)
        html = result.text
        assert "SimpleNet" in html
        assert "features" in html
        assert "classifier" in html
        assert "Conv2d" in html

    def test_format_frozen_model(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        model = nn.Sequential(
            nn.Linear(10, 5),
            nn.ReLU(),
            nn.Linear(5, 2),
        )
        for p in model.parameters():
            p.requires_grad = False
        result = format(model)
        html = result.text
        assert "frozen" in html.lower()
        assert "data-frozen" in html

    def test_format_partially_frozen(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        class PartiallyFrozen(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.backbone = nn.Linear(10, 5)
                self.head = nn.Linear(5, 2)
                for p in self.backbone.parameters():
                    p.requires_grad = False

        model = PartiallyFrozen()
        result = format(model)
        html = result.text
        assert "trainable" in html.lower()

    def test_category_badges(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        # One layer per display category.
        model = nn.Sequential(
            nn.Linear(10, 5),  # weight
            nn.ReLU(),  # activation
            nn.BatchNorm1d(5),  # normalization
            nn.Dropout(0.5),  # regularization
        )
        result = format(model)
        html = result.text
        assert 'data-cat="weight"' in html
        assert 'data-cat="act"' in html
        assert 'data-cat="norm"' in html
        assert 'data-cat="reg"' in html

    def test_legend_present(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        model = nn.Linear(10, 5)
        result = format(model)
        html = result.text
        assert "nn-t-legend" in html
        assert "Module types" in html
        assert "Trainable" in html
        assert "Frozen" in html

    def test_param_count_formatting(self) -> None:
        from marimo._output.formatters.pytorch_formatters import _fmt_integer

        assert _fmt_integer(500) == "500"
        assert _fmt_integer(1_500) == "1.5K"
        assert _fmt_integer(1_500_000) == "1.5M"

    def test_extra_repr_html(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import (
            _extra_repr_html,
        )

        # Linear uses all kwargs: in_features=10, out_features=5, bias=True
        linear = nn.Linear(10, 5)
        extra = _extra_repr_html(linear)
        assert extra.positional == ""
        assert "in_features" in extra.kwargs
        # Conv2d has positional args: 3, 16, then kwargs
        conv = nn.Conv2d(3, 16, kernel_size=3)
        extra = _extra_repr_html(conv)
        assert extra.positional == "3, 16"
        assert "kernel_size" in extra.kwargs

    def test_layer_category(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import (
            _layer_category,
        )

        assert _layer_category(nn.Linear(1, 1)) == "weight"
        assert _layer_category(nn.ReLU()) == "act"
        assert _layer_category(nn.BatchNorm1d(1)) == "norm"
        assert _layer_category(nn.Dropout()) == "reg"
        # Container has no category
        assert _layer_category(nn.Sequential()) is None

    def test_trainable_info(self) -> None:
        from marimo._output.formatters.pytorch_formatters import (
            _trainable_info,
        )

        # All trainable
        info = _trainable_info(100, 100)
        assert info.note == ""
        assert info.is_frozen is False
        # All frozen
        info = _trainable_info(100, 0)
        assert "frozen" in info.note
        assert info.is_frozen is True
        # Partially frozen
        info = _trainable_info(100, 50)
        assert "trainable" in info.note
        assert info.is_frozen is False

    def test_collect_dtype_device_uniform(self) -> None:
        import torch

        from marimo._output.formatters.pytorch_formatters import (
            _collect_dtype_device,
        )

        params = [torch.zeros(2), torch.ones(3)]
        dtype_str, device_str = _collect_dtype_device(params)
        assert dtype_str == "float32"
        assert device_str == "cpu"

    def test_collect_dtype_device_mixed(self) -> None:
        import torch

        from marimo._output.formatters.pytorch_formatters import (
            _collect_dtype_device,
        )

        params = [
            torch.zeros(2, dtype=torch.float32),
            torch.ones(3, dtype=torch.float16),
        ]
        dtype_str, device_str = _collect_dtype_device(params)
        # Mixed dtypes are sorted and joined with "/".
        assert dtype_str == "float16/float32"
        assert device_str == "cpu"

    def test_collect_dtype_device_empty(self) -> None:
        from marimo._output.formatters.pytorch_formatters import (
            _collect_dtype_device,
        )

        dtype_str, device_str = _collect_dtype_device([])
        assert dtype_str == "\u2013"
        assert device_str == "\u2013"

    def test_expand_body_dtype_device(self) -> None:
        """Expanding a layer shows kwargs, then a 'tensor' divider,
        then dtype/device."""
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format

        model = nn.Sequential(nn.Linear(10, 5))
        html = format(model).text
        # dtype/device present in expand body
        assert "float32" in html
        assert "cpu" in html
        # Labeled divider between kwargs and dtype/device
        assert "nn-t-expand-sep" in html
        assert "tensor" in html.lower()

    def test_comma_to_br_strips_equals(self) -> None:
        """Expanded view replaces = between key/value with space."""
        from marimo._output.formatters.pytorch_formatters import _comma_to_br

        html_in = (
            '<span class="nn-t-key">in_features</span>=10, '
            '<span class="nn-t-key">bias</span>=True'
        )
        result = _comma_to_br(html_in)
        # = after </span> replaced with space
        assert "</span> 10" in result
        assert "</span> True" in result
        # Commas replaced with <br>
        assert "<br>" in result
        # HTML attribute = signs preserved
        assert 'class="nn-t-key"' in result

    def test_returns_html_type(self) -> None:
        import torch.nn as nn

        from marimo._output.formatters.pytorch_formatters import format
        from marimo._output.hypertext import Html

        model = nn.Linear(10, 5)
        result = format(model)
        assert isinstance(result, Html)

    def test_formatter_registration(self) -> None:
        """Smoke test: the formatter registers and produces output."""
        register_formatters()
        import torch.nn as nn

        from marimo._output.formatting import get_formatter

        model = nn.Linear(10, 5)
        formatter = get_formatter(model)
        assert formatter is not None
        mimetype, data = formatter(model)
        assert mimetype == "text/html"
        assert "nn-t" in data
        assert "Linear" in data
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_output/formatters/test_pytorch_formatters.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_cli/files/cloudflare.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
import shutil
import subprocess
from pathlib import Path
from typing import TYPE_CHECKING, Optional
import click
from marimo._cli.files.file_path import FileHandler
if TYPE_CHECKING:
from tempfile import TemporaryDirectory
def is_r2_path(name: str) -> bool:
    """Return True when *name* uses the Cloudflare R2 URL scheme (r2://)."""
    return name[:5] == "r2://"
def parse_r2_path(url: str) -> tuple[str, str]:
    """Parse an r2://bucket/key URL into (bucket, key).

    Raises ValueError if the URL is not a valid r2:// path.
    """
    if not is_r2_path(url):
        raise ValueError(f"Not an r2:// URL: {url}")
    remainder = url[len("r2://") :]
    # partition gives ("bucket", "", "") when no "/" is present.
    bucket, sep, key = remainder.partition("/")
    if not sep:
        raise ValueError(
            f"Invalid r2:// URL: {url}. Expected format: r2://bucket/key"
        )
    if not key:
        raise ValueError(
            f"Invalid r2:// URL: {url}. Missing object key after bucket name"
        )
    return bucket, key
def _check_npx_available() -> None:
    """Abort with a user-facing error when npx is not on PATH.

    wrangler is invoked through npx, so Node.js is a hard requirement
    for r2:// support.
    """
    if shutil.which("npx") is None:
        raise click.ClickException(
            "npx is not available on PATH. "
            "Install Node.js (https://nodejs.org) to use r2:// paths."
        )
def _download_r2_object(bucket: str, key: str, local_path: str) -> None:
    """Download one R2 object to *local_path* via `npx wrangler`.

    Raises click.ClickException (a user-facing CLI error) when npx is
    missing or wrangler exits non-zero.
    """
    _check_npx_available()
    try:
        # List form (shell=False) avoids shell quoting issues with keys.
        subprocess.run(
            [
                "npx",
                "wrangler",
                "r2",
                "object",
                "get",
                f"{bucket}/{key}",
                "--file",
                local_path,
                "--remote",
            ],
            check=True,
            capture_output=True,
            text=True,
        )
    except subprocess.CalledProcessError as e:
        # Surface wrangler's stderr so auth/typo problems are debuggable.
        msg = (
            f"Failed to download r2://{bucket}/{key}.\n\n"
            f"  wrangler stderr: {e.stderr.strip()}\n\n"
            "  Tip: run `npx wrangler login` to authenticate, "
            "or check that the bucket and key are correct."
        )
        raise click.ClickException(msg) from e
class R2FileHandler(FileHandler):
    """FileHandler that resolves r2:// URLs by downloading to a temp dir."""

    def can_handle(self, name: str) -> bool:
        return is_r2_path(name)

    def handle(
        self, name: str, temp_dir: TemporaryDirectory[str]
    ) -> tuple[str, Optional[TemporaryDirectory[str]]]:
        bucket, key = parse_r2_path(name)
        # Keep the original filename so marimo shows a sensible title.
        filename = os.path.basename(key)
        local_path = str(Path(temp_dir.name) / filename)
        _download_r2_object(bucket, key, local_path)
        # Return temp_dir so the caller keeps it alive until done.
        return local_path, temp_dir
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/files/cloudflare.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_cli/test_cloudflare.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import subprocess
import tempfile
from unittest.mock import patch
import click
import pytest
from marimo._cli.files.cloudflare import (
R2FileHandler,
_download_r2_object,
parse_r2_path,
)
from marimo._cli.files.file_path import validate_name
@pytest.mark.parametrize(
    ("url", "expected"),
    [
        ("r2://bucket/file.py", ("bucket", "file.py")),
        ("r2://bucket/path/to/file.py", ("bucket", "path/to/file.py")),
    ],
)
def test_parse_r2_path(url: str, expected: tuple[str, str]) -> None:
    """Well-formed r2:// URLs split into (bucket, key)."""
    bucket, key = parse_r2_path(url)
    assert (bucket, key) == expected
@pytest.mark.parametrize(
    ("url", "match"),
    [
        ("r2://bucket/", "Missing object key"),
        ("r2://bucket", "Expected format"),
        ("https://example.com", "Not an r2:// URL"),
    ],
)
def test_parse_r2_path_errors(url: str, match: str) -> None:
    """Malformed URLs raise ValueError with a message matching `match`."""
    with pytest.raises(ValueError, match=match):
        parse_r2_path(url)
class TestR2FileHandler:
    """Unit tests for the r2:// file handler."""

    def test_can_handle(self) -> None:
        handler = R2FileHandler()
        for name, expected in [
            ("r2://bucket/file.py", True),
            ("https://example.com", False),
            ("file.py", False),
        ]:
            assert handler.can_handle(name) is expected

    @patch("marimo._cli.files.cloudflare._download_r2_object")
    def test_handle(self, mock_download, tmp_path) -> None:
        handler = R2FileHandler()
        temp_dir = tempfile.TemporaryDirectory(dir=tmp_path)

        # Pretend the download succeeded by creating an empty local file.
        def fake_download(_bucket, _key, local_path):
            with open(local_path, "w"):
                pass

        mock_download.side_effect = fake_download

        path, returned_temp_dir = handler.handle(
            "r2://my-bucket/notebooks/test.py", temp_dir
        )

        assert path.endswith("test.py")
        assert returned_temp_dir is temp_dir
        mock_download.assert_called_once_with(
            "my-bucket", "notebooks/test.py", path
        )
@patch(
    "marimo._cli.files.cloudflare.shutil.which",
    return_value="/usr/bin/npx",
)
@patch("marimo._cli.files.cloudflare.subprocess.run")
def test_download_calls_wrangler(mock_run, mock_which) -> None:
    """_download_r2_object shells out to `npx wrangler r2 object get --remote`.

    Note: @patch decorators apply bottom-up, so the subprocess.run mock is
    the first positional argument and shutil.which the second.
    """
    del mock_which  # patched only so the npx availability check passes
    mock_run.return_value = subprocess.CompletedProcess(args=[], returncode=0)
    _download_r2_object("my-bucket", "path/to/file.py", "/tmp/file.py")
    mock_run.assert_called_once_with(
        [
            "npx",
            "wrangler",
            "r2",
            "object",
            "get",
            "my-bucket/path/to/file.py",
            "--file",
            "/tmp/file.py",
            "--remote",
        ],
        check=True,
        capture_output=True,
        text=True,
    )
@patch(
    "marimo._cli.files.cloudflare.shutil.which",
    return_value="/usr/bin/npx",
)
@patch("marimo._cli.files.cloudflare.subprocess.run")
def test_download_wrangler_failure(mock_run, mock_which) -> None:
    """A wrangler CalledProcessError is surfaced as a ClickException."""
    del mock_which  # patched only so the npx availability check passes
    mock_run.side_effect = subprocess.CalledProcessError(
        returncode=1,
        cmd=["npx", "wrangler"],
        stderr="authentication required",
    )
    with pytest.raises(click.ClickException, match="Failed to download r2://"):
        _download_r2_object("bucket", "key.py", "/tmp/key.py")
@patch("marimo._cli.files.cloudflare.shutil.which", return_value=None)
def test_download_npx_not_found(mock_which) -> None:
    """Without npx on PATH, the download fails fast with an install hint."""
    del mock_which
    with pytest.raises(click.ClickException, match="npx is not available"):
        _download_r2_object("bucket", "key.py", "/tmp/key.py")
@patch("marimo._cli.files.cloudflare._download_r2_object")
def test_validate_name_routes_to_r2(mock_download) -> None:
    """validate_name dispatches r2:// paths to the R2 handler end-to-end."""
    # Simulate a successful download by creating an empty local file.
    mock_download.side_effect = lambda _bucket, _key, local_path: open(
        local_path, "w"
    ).close()
    path, temp_dir = validate_name(
        "r2://my-bucket/notebook.py",
        allow_new_file=False,
        allow_directory=False,
    )
    assert path.endswith("notebook.py")
    assert temp_dir is not None
    mock_download.assert_called_once_with("my-bucket", "notebook.py", path)
    # The handler hands ownership of the temp dir back to the caller.
    temp_dir.cleanup()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_cli/test_cloudflare.py",
"license": "Apache License 2.0",
"lines": 112,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/issues/5129.py | # Regression test for https://github.com/marimo-team/marimo/issues/5129
# mo.lazy inside mo.ui.tabs should only execute the function once.
# Previously, the shadow DOM created a duplicate marimo-lazy element
# which fired a second load() request.
import marimo
__generated_with = "0.13.14"
app = marimo.App(width="medium")


@app.cell
def _():
    import marimo as mo

    # The LAZYTAB prints should appear exactly once per activation; the
    # issue-5129 regression fired the load twice via a duplicated element.
    def lazy_tab():
        import time

        print("LAZYTAB 1")
        time.sleep(1)
        print("LAZYTAB 2")
        return mo.md("Finish loading lazy tab !")

    mo.ui.tabs(
        {
            "normal-tab": mo.md("This is a normal tab"),
            "lazy-tab": mo.lazy(lazy_tab),
        }
    )
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/5129.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/nested.py | # Smoke tests for nested marimo custom elements.
# Verifies that interactive widgets, lazy loading, and layout components
# work correctly when nested inside each other (especially inside shadow DOM).
# Related: https://github.com/marimo-team/marimo/issues/5129
import marimo
__generated_with = "0.19.11"
app = marimo.App(width="medium")


@app.cell
def _():
    import marimo as mo
    return (mo,)


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    # Nested components smoke tests
    """)
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Table inside tabs
    """)
    return


@app.cell
def _(mo):
    import pandas as pd

    # Table rendered inside a tab (i.e. inside the tabs' shadow DOM).
    df = pd.DataFrame({"name": ["Alice", "Bob", "Charlie"], "age": [25, 30, 35]})
    table_in_tabs = mo.ui.tabs(
        {
            "Table": mo.ui.table(df),
            "Plain": mo.md("No table here"),
        }
    )
    table_in_tabs
    return (pd,)
@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Table inside accordion
    """)
    return


@app.cell
def _(mo, pd):
    df2 = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
    mo.accordion(
        {
            "Show table": mo.ui.table(df2),
            "Show text": mo.md("Just text"),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Lazy inside tabs (issue #5129)
    """)
    return


@app.cell
def _(mo):
    # The counter records how many times the lazy function ran; issue
    # #5129 was a duplicate load() request firing it twice.
    call_count_1 = []

    def lazy_fn_1():
        call_count_1.append(1)
        print(f"lazy_fn_1 called (total: {len(call_count_1)}x)")
        return mo.md(f"Loaded! Call count: {len(call_count_1)}")

    lazy_tabs = mo.ui.tabs(
        {
            "Normal": mo.md("This tab is normal"),
            "Lazy": mo.lazy(lazy_fn_1, show_loading_indicator=True),
        }
    )
    lazy_tabs
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Lazy inside accordion
    """)
    return


@app.cell
def _(mo):
    call_count_2 = []

    def lazy_fn_2():
        call_count_2.append(1)
        print(f"lazy_fn_2 called (total: {len(call_count_2)}x)")
        return mo.md(f"Loaded! Call count: {len(call_count_2)}")

    mo.accordion(
        {
            "Click to lazy load": mo.lazy(lazy_fn_2, show_loading_indicator=True),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Tabs with `lazy=True`
    """)
    return


@app.cell
def _(mo, pd):
    df3 = pd.DataFrame({"a": range(10), "b": range(10, 20)})
    auto_lazy_tabs = mo.ui.tabs(
        {
            "Tab A": mo.md("First tab content"),
            "Tab B": mo.ui.table(df3),
            "Tab C": mo.md("Third tab content"),
        },
        lazy=True,
    )
    auto_lazy_tabs
    return
@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Accordion with `lazy=True`
    """)
    return


@app.cell
def _(mo):
    mo.accordion(
        {
            "Section 1": mo.md("Content 1"),
            "Section 2": mo.md("Content 2"),
            "Section 3": mo.md("Content 3"),
        },
        lazy=True,
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Interactive widgets inside tabs
    """)
    return


@app.cell
def _(mo):
    # Widgets live in their own cell so the next cell can read their
    # values reactively.
    slider = mo.ui.slider(0, 100, value=50, label="Slider in tab")
    checkbox = mo.ui.checkbox(label="Checkbox in tab")
    dropdown = mo.ui.dropdown(
        ["Option A", "Option B", "Option C"], label="Dropdown in tab"
    )
    text_input = mo.ui.text(placeholder="Type here...", label="Text in tab")
    return checkbox, dropdown, slider, text_input


@app.cell
def _(checkbox, dropdown, mo, slider, text_input):
    widget_tabs = mo.ui.tabs(
        {
            "Slider": mo.vstack([slider, mo.md(f"Value: {slider.value}")]),
            "Checkbox": mo.vstack([checkbox, mo.md(f"Checked: {checkbox.value}")]),
            "Dropdown": mo.vstack(
                [dropdown, mo.md(f"Selected: {dropdown.value}")]
            ),
            "Text": mo.vstack([text_input, mo.md(f"Typed: {text_input.value}")]),
        }
    )
    widget_tabs
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Tabs inside tabs
    """)
    return


@app.cell
def _(mo):
    inner_tabs = mo.ui.tabs(
        {
            "Inner A": mo.md("Inner tab A"),
            "Inner B": mo.md("Inner tab B"),
        }
    )
    outer_tabs = mo.ui.tabs(
        {
            "Outer 1": inner_tabs,
            "Outer 2": mo.md("Outer tab 2"),
        }
    )
    outer_tabs
    return
@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Accordion inside tabs
    """)
    return


@app.cell
def _(mo):
    mo.ui.tabs(
        {
            "With accordion": mo.accordion(
                {
                    "Section A": mo.md("Accordion content A"),
                    "Section B": mo.md("Accordion content B"),
                }
            ),
            "Plain tab": mo.md("Just a plain tab"),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Tabs inside accordion
    """)
    return


@app.cell
def _(mo):
    mo.accordion(
        {
            "Open for tabs": mo.ui.tabs(
                {
                    "Tab X": mo.md("Tab X content"),
                    "Tab Y": mo.md("Tab Y content"),
                }
            ),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Lazy table inside tabs
    """)
    return


@app.cell
def _(mo, pd):
    def lazy_table():
        print("lazy_table called")
        df = pd.DataFrame({"col1": range(5), "col2": range(5, 10)})
        return mo.ui.table(df)

    mo.ui.tabs(
        {
            "Normal": mo.md("Normal content"),
            "Lazy table": mo.lazy(lazy_table, show_loading_indicator=True),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Deeply nested: lazy inside accordion inside tabs
    """)
    return


@app.cell
def _(mo):
    # Should print once per activation even through two levels of
    # nesting (tabs -> accordion -> lazy); see issue #5129.
    def deep_lazy_fn():
        print("deep_lazy_fn called")
        return mo.md("Deeply nested lazy content loaded!")

    mo.ui.tabs(
        {
            "Top tab": mo.accordion(
                {
                    "Open for lazy": mo.lazy(
                        deep_lazy_fn, show_loading_indicator=True
                    ),
                }
            ),
            "Other tab": mo.md("Other"),
        }
    )
    return
@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## hstack/vstack inside tabs
    """)
    return


@app.cell
def _(mo):
    mo.ui.tabs(
        {
            "Horizontal": mo.hstack(
                [mo.md("**Left**"), mo.md("**Center**"), mo.md("**Right**")],
                justify="space-between",
            ),
            "Vertical": mo.vstack(
                [mo.md("**Top**"), mo.md("**Middle**"), mo.md("**Bottom**")],
                gap=1,
            ),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Callout inside tabs
    """)
    return


@app.cell
def _(mo):
    mo.ui.tabs(
        {
            "Info": mo.callout(mo.md("This is an info callout"), kind="info"),
            "Warning": mo.callout(mo.md("This is a warning callout"), kind="warn"),
            "Danger": mo.callout(mo.md("This is a danger callout"), kind="danger"),
        }
    )
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md("""
    ## Form inside tabs
    """)
    return


@app.cell
def _(mo):
    form = mo.ui.text(placeholder="Enter name").form()
    return (form,)


@app.cell
def _(form, mo):
    # The form's submitted value is rendered in the sibling tab.
    mo.ui.tabs(
        {
            "Form tab": form,
            "Result tab": mo.md(f"Submitted: {form.value}"),
        }
    )
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/nested.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_data/_external_storage/get_storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
from marimo._data._external_storage.models import (
StorageBackend,
StorageNamespace,
)
from marimo._data._external_storage.storage import FsspecFilesystem, Obstore
from marimo._types.ids import VariableName
# Detection order matters: the first compatible backend class claims a variable.
STORAGE_BACKENDS: list[type[StorageBackend[Any]]] = [Obstore, FsspecFilesystem]
def get_storage_backends_from_variables(
    variables: list[tuple[VariableName, object]],
) -> list[tuple[VariableName, StorageBackend[Any]]]:
    """Wrap every variable that is a recognized storage client in a backend.

    The first compatible class in STORAGE_BACKENDS claims a variable;
    variables with no compatible backend are skipped.
    """
    matched: list[tuple[VariableName, StorageBackend[Any]]] = []
    for name, value in variables:
        backend_cls = next(
            (cls for cls in STORAGE_BACKENDS if cls.is_compatible(value)),
            None,
        )
        if backend_cls is not None:
            matched.append((name, backend_cls(value, name)))
    return matched
def storage_backend_to_storage_namespace(
    storage_backend: StorageBackend[Any],
) -> StorageNamespace:
    """Build a StorageNamespace summary for `storage_backend`.

    Entries are deliberately left empty: this runs as a synchronous
    post-execution hook, and listing entries here could block the kernel.
    """
    root = storage_backend.root_path
    return StorageNamespace(
        name=storage_backend.variable_name,
        display_name=storage_backend.display_name,
        protocol=storage_backend.protocol,
        root_path="" if root is None else root,
        storage_entries=[],
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_data/_external_storage/get_storage.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_data/_external_storage/models.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import abc
from dataclasses import dataclass
from typing import Any, Generic, Literal, TypeVar, cast, get_args
import msgspec
from marimo._types.ids import VariableName
from marimo._utils.assert_never import log_never
# Cloud object stores we can recognize from protocols/endpoint URLs.
CLOUD_STORAGE_TYPES = Literal["s3", "gcs", "azure", "cloudflare", "coreweave"]
# Every protocol the UI knows how to label (cloud + http/file/in-memory).
KNOWN_STORAGE_TYPES = Literal[CLOUD_STORAGE_TYPES, "http", "file", "in-memory"]
# Default lifetime, in seconds, for signed download URLs.
SIGNED_URL_EXPIRATION = 60
# Note: We may want to consolidate with FileInfo from _server/models/files.py
class StorageEntry(msgspec.Struct, rename="camel"):
    """A storage entry is a file, directory, or object for external storage systems

    Attributes:
        path: The path of the storage entry.
        kind: The kind of the storage entry.
        size: The size of the storage entry.
        last_modified: The last modified time of the storage entry.
        metadata: The metadata of the storage entry.
        mime_type: The MIME type of the storage entry, or None for directories.
    """

    path: str
    kind: Literal["file", "directory", "object"]
    # Size in bytes; backends use 0 for virtual directories of unknown size.
    size: int
    # POSIX timestamp (seconds), or None when the backend does not report it.
    last_modified: float | None
    # Backend-specific extras (e.g. e_tag/version); keys serialize to camelCase.
    metadata: dict[str, Any] = msgspec.field(default_factory=dict)
    mime_type: str | None = None
class StorageNamespace(msgspec.Struct, rename="camel"):
    """Represents external storage systems (filesystems and object storage)

    Attributes:
        name: The variable name of the storage namespace.
        display_name: The display name of the storage namespace.
        protocol: The protocol of the storage namespace. E.g. s3, gcs, azure, http, file, in-memory.
        root_path: The root path of the storage namespace.
        storage_entries: The storage entries in the storage namespace.
    """

    name: VariableName
    display_name: str
    protocol: str
    root_path: str
    # May be left empty when created by a synchronous hook that avoids listing.
    storage_entries: list[StorageEntry]
# Cap on how many entries a single list_entries call returns by default.
DEFAULT_FETCH_LIMIT = 100
@dataclass
class DownloadResult:
    """Result of downloading a file from external storage.

    Attributes:
        file_bytes: The raw bytes of the downloaded file.
        filename: The suggested filename extracted from the path.
        ext: The file extension (without dot), or "bin" if none.
    """

    # Fields are derived by StorageBackend.download_file from the object path.
    file_bytes: bytes
    filename: str
    ext: str
# Concrete client type wrapped by a StorageBackend (e.g. an obstore ObjectStore).
Backend = TypeVar("Backend")
class StorageBackend(abc.ABC, Generic[Backend]):
    """Uniform async API over a concrete storage client.

    ``Backend`` is the wrapped client object (e.g. an obstore ObjectStore
    or an fsspec AbstractFileSystem).
    """

    def __init__(self, store: Backend, variable_name: VariableName) -> None:
        self.store = store
        self.variable_name = variable_name

    # TODO: We can make this async, but currently post_execution_hooks are synchronous.
    @abc.abstractmethod
    def list_entries(
        self,
        prefix: str | None,
        *,
        limit: int = DEFAULT_FETCH_LIMIT,
    ) -> list[StorageEntry]:
        """
        List the entries at the given prefix. If no prefix is provided, list the root entries.
        """

    @abc.abstractmethod
    async def get_entry(self, path: str) -> StorageEntry:
        """Get the entry at the given path."""

    @abc.abstractmethod
    async def download(self, path: str) -> bytes:
        """Download the file at the given path."""

    @abc.abstractmethod
    async def read_range(
        self, path: str, *, offset: int = 0, length: int | None = None
    ) -> bytes:
        """Read a byte range from the file. If length is None, read the entire file."""

    @abc.abstractmethod
    async def sign_download_url(
        self, path: str, expiration: int = SIGNED_URL_EXPIRATION
    ) -> str | None:
        """Return a signed URL for direct browser download, or None if unsupported."""

    async def download_file(self, path: str) -> DownloadResult:
        """Download the file at ``path`` with a derived filename and extension.

        Filename and extension come from the last path segment; subclasses
        may override to use richer metadata (e.g. content-disposition).
        """
        file_bytes = await self.download(path)
        # Last path segment; fall back to a generic name for paths ending in "/".
        filename = path.rsplit("/", 1)[-1]
        if not filename:
            filename = "download"
        ext = "bin"
        if "." in filename:
            candidate = filename.rsplit(".", 1)[-1]
            if candidate:
                ext = candidate
        return DownloadResult(file_bytes=file_bytes, filename=filename, ext=ext)

    @property
    @abc.abstractmethod
    def protocol(self) -> KNOWN_STORAGE_TYPES | str:
        """Return the protocol of the storage backend."""

    @property
    def display_name(self) -> str:
        """Human-friendly label for this backend's protocol."""
        protocol = self.protocol
        if protocol not in get_args(KNOWN_STORAGE_TYPES):
            # Unknown protocols get a best-effort capitalized label.
            return protocol.capitalize()
        known = cast(KNOWN_STORAGE_TYPES, protocol)
        labels: dict[str, str] = {
            "s3": "Amazon S3",
            "gcs": "Google Cloud Storage",
            "azure": "Azure Blob Storage",
            "cloudflare": "Cloudflare R2",
            "http": "HTTP",
            "file": "File",
            "in-memory": "In-memory",
            "coreweave": "Coreweave",
        }
        label = labels.get(known)
        if label is not None:
            return label
        # Unreachable while the table above covers KNOWN_STORAGE_TYPES.
        log_never(known)
        return known

    @property
    @abc.abstractmethod
    def root_path(self) -> str | None:
        """Return the root path of the storage backend. None if in-memory or cannot be found"""

    @staticmethod
    @abc.abstractmethod
    def is_compatible(var: Any) -> bool:
        """Check if the backend is compatible with the given variable."""
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_data/_external_storage/models.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_data/_external_storage/storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import mimetypes
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Literal, cast
from marimo import _loggers
from marimo._data._external_storage.models import (
CLOUD_STORAGE_TYPES,
DEFAULT_FETCH_LIMIT,
KNOWN_STORAGE_TYPES,
SIGNED_URL_EXPIRATION,
StorageBackend,
StorageEntry,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._utils.assert_never import log_never
from marimo._utils.dicts import remove_none_values
if TYPE_CHECKING:
from fsspec import ( # type: ignore[import-untyped]
AbstractFileSystem, # noqa: F401
)
from obstore import ObjectMeta
from obstore.store import ObjectStore # noqa: F401
# Module-level logger shared by the storage backends below.
LOGGER = _loggers.marimo_logger()
class Obstore(StorageBackend["ObjectStore"]):
    """Storage backend over an obstore ``ObjectStore`` (S3/GCS/Azure/etc.)."""

    def list_entries(
        self,
        prefix: str | None,
        *,
        limit: int = DEFAULT_FETCH_LIMIT,
    ) -> list[StorageEntry]:
        # Delimiter-based listing: one "directory" level, not recursive.
        result = self.store.list_with_delimiter(prefix=prefix)
        storage_entries: list[StorageEntry] = []
        # Common prefixes are virtual directories (e.g., "folder/")
        # We can't identify the size / last modified time unless we recursively list the entries
        for common_prefix in result["common_prefixes"]:
            storage_entries.append(
                StorageEntry(
                    path=common_prefix,
                    kind="directory",
                    size=0,
                    last_modified=None,
                )
            )
        # Objects are actual files/objects at this level
        for entry in result["objects"]:
            # Skip zero-byte folder marker objects that some S3 clients
            # create as directory placeholders (e.g., "folder" with size 0)
            path = entry.get("path", "")
            size = entry.get("size", 0)
            if size == 0 and prefix and path == prefix.rstrip("/"):
                continue
            storage_entries.append(self._create_storage_entry(entry))
        # Truncate after the fact; the listing itself is not paginated here.
        if len(storage_entries) > limit:
            LOGGER.debug(
                "Fetched %s entries, but limiting to %s",
                len(storage_entries),
                limit,
            )
            storage_entries = storage_entries[:limit]
        return storage_entries

    async def get_entry(self, path: str) -> StorageEntry:
        """Stat a single object via a HEAD request."""
        entry = await self.store.head_async(path)
        return self._create_storage_entry(entry)

    def _create_storage_entry(self, entry: ObjectMeta) -> StorageEntry:
        """Convert obstore's ObjectMeta mapping into a StorageEntry."""
        path, size = entry.get("path"), entry.get("size")
        if path is None or size is None:
            LOGGER.debug(
                "Entry is missing required fields: path=%s, size=%s",
                path,
                size,
            )
        entry_meta = remove_none_values(
            {"e_tag": entry.get("e_tag"), "version": entry.get("version")}
        )
        last_modified = entry.get("last_modified")
        return StorageEntry(
            path=path or "",
            size=size or 0,
            last_modified=last_modified.timestamp() if last_modified else None,
            kind="object",
            metadata=entry_meta,
            mime_type=mimetypes.guess_type(path or "")[0],
        )

    async def download(self, path: str) -> bytes:
        """Fetch the whole object into memory."""
        result = await self.store.get_async(path)
        bytes_data = await result.bytes_async()
        return bytes(bytes_data)

    async def read_range(
        self, path: str, *, offset: int = 0, length: int | None = None
    ) -> bytes:
        # Open-ended reads fall back to a full download plus slicing,
        # since get_range requires an explicit length.
        if length is None:
            data = await self.download(path)
            return data[offset:]
        from obstore import get_range_async

        return bytes(
            await get_range_async(
                self.store, path, start=offset, length=length
            )
        )

    async def sign_download_url(
        self, path: str, expiration: int = SIGNED_URL_EXPIRATION
    ) -> str | None:
        from obstore import sign_async
        from obstore.store import AzureStore, GCSStore, S3Store

        # Only the cloud stores support presigned URLs.
        if not isinstance(self.store, (S3Store, GCSStore, AzureStore)):
            return None
        try:
            return await sign_async(
                self.store,
                "GET",
                path,
                expires_in=timedelta(seconds=expiration),
            )
        except Exception:
            # Best-effort: callers treat None as "no direct download link".
            LOGGER.info("Failed to sign URL for %s", path)
            return None

    @property
    def protocol(self) -> KNOWN_STORAGE_TYPES | str:
        from obstore.store import (
            AzureStore,
            GCSStore,
            HTTPStore,
            LocalStore,
            MemoryStore,
            S3Store,
        )

        # Try the endpoint URL which can give a more accurate protocol
        # (e.g. an S3-compatible store pointed at Cloudflare R2).
        if not isinstance(self.store, (MemoryStore, HTTPStore, LocalStore)):
            endpoint = self.store.config.get("endpoint")
            if isinstance(endpoint, str) and (
                protocol := detect_protocol_from_url(endpoint)
            ):
                return protocol
        if isinstance(self.store, MemoryStore):
            return "in-memory"
        elif isinstance(self.store, HTTPStore):
            return "http"
        elif isinstance(self.store, LocalStore):
            return "file"
        elif isinstance(self.store, S3Store):
            return "s3"
        elif isinstance(self.store, AzureStore):
            return "azure"
        elif isinstance(self.store, GCSStore):
            return "gcs"
        else:
            log_never(self.store)
            return "unknown"

    @property
    def root_path(self) -> str | None:
        from obstore.store import HTTPStore, LocalStore, MemoryStore

        if isinstance(self.store, MemoryStore):
            return None
        elif isinstance(self.store, HTTPStore):
            return self.store.url
        prefix = self.store.prefix
        if prefix is None:
            if isinstance(self.store, LocalStore):
                return None  # root
            # Cloud stores without a prefix: fall back to the bucket name
            # (which may itself be None, yielding no root path).
            config = self.store.config
            bucket = config.get("bucket")
            if bucket is None:
                LOGGER.debug(
                    "No bucket found for storage backend. Config %s", config
                )
            elif not isinstance(bucket, str):
                LOGGER.debug("Bucket is not a string: %s", bucket)
                return str(bucket)
            return bucket
        return str(prefix)

    @staticmethod
    def is_compatible(var: Any) -> bool:
        # Bail out unless obstore has already been imported in this process.
        if not DependencyManager.obstore.imported():
            return False
        from obstore.store import ObjectStore

        return isinstance(var, ObjectStore)  # type: ignore[misc,arg-type]
# The async implementation has a few unimplemented methods (like ls), so it's better to use the
# synchronous versions and wrap them in asyncio.to_thread
class FsspecFilesystem(StorageBackend["AbstractFileSystem"]):
    """Storage backend over an fsspec ``AbstractFileSystem``.

    fsspec's async variants leave some methods unimplemented (like ls), so
    this class uses the synchronous API and offloads blocking calls with
    ``asyncio.to_thread``.
    """

    def list_entries(
        self,
        prefix: str | None,
        *,
        limit: int = DEFAULT_FETCH_LIMIT,
    ) -> list[StorageEntry]:
        """List entries under ``prefix`` (root when None), capped at ``limit``."""
        # If no prefix provided, we use empty string to list root entries
        # Else, an error is raised
        if prefix is None:
            prefix = ""
        files = self.store.ls(path=prefix, detail=True)
        if not isinstance(files, list):
            raise ValueError(f"Files is not a list: {files}")
        total_files = len(files)
        if total_files > limit:
            LOGGER.debug(
                "Fetched %s files, but limiting to %s",
                total_files,
                limit,
            )
            files = files[:limit]
        storage_entries = []
        for file in files:
            # With detail=True each entry should be an info dict; skip
            # anything else defensively.
            if isinstance(file, dict):
                storage_entry = self._create_storage_entry(file)
                storage_entries.append(storage_entry)
        return storage_entries

    def _identify_kind(self, entry_type: str) -> Literal["file", "directory"]:
        """Map fsspec's ``type`` field to our entry kind, defaulting to file."""
        entry_type = entry_type.strip().lower()
        if entry_type == "file":
            return "file"
        elif entry_type == "directory":
            return "directory"
        else:
            LOGGER.debug("Unknown entry type: %s", entry_type)
            return "file"

    async def get_entry(self, path: str) -> StorageEntry:
        """Stat a single path via ``info`` in a worker thread."""
        entry = await asyncio.to_thread(self.store.info, path)
        if not isinstance(entry, dict):
            raise ValueError(f"Entry at {path} is not a dictionary")
        return self._create_storage_entry(entry)

    def _create_storage_entry(self, file: dict[str, Any]) -> StorageEntry:
        """Convert an fsspec info dict into a StorageEntry."""
        name, size = file.get("name"), file.get("size")
        if name is None or size is None:
            LOGGER.debug(
                "File is missing required fields: name=%s, size=%s",
                name,
                size,
            )
        entry_meta = remove_none_values(
            {
                "e_tag": file.get("ETag"),
                "is_link": file.get("islink"),
                "mode": file.get("mode"),
                "n_link": file.get("nlink"),
                "created": file.get("created"),
            }
        )
        resolved_kind: Literal["file", "directory"] = "file"
        entry_type = file.get("type")
        if entry_type is None:
            LOGGER.debug(
                "File is missing required fields: type=%s", entry_type
            )
        else:
            resolved_kind = self._identify_kind(entry_type)
        resolved_path = name or ""
        return StorageEntry(
            path=resolved_path,
            size=size or 0,
            # NOTE(review): assumes "mtime" is a POSIX timestamp; some
            # filesystems report datetimes — confirm per-backend.
            last_modified=file.get("mtime"),
            kind=resolved_kind,
            metadata=entry_meta,
            mime_type=mimetypes.guess_type(resolved_path)[0]
            if resolved_kind != "directory"
            else None,
        )

    async def download(self, path: str) -> bytes:
        """Read the whole file, offloading the blocking read to a thread."""

        # There is no async version of open, so we wrap the synchronous open
        # method. Use a context manager so the handle is closed instead of
        # leaked (fix: the previous code never closed it).
        def _read() -> str | bytes:
            with self.store.open(path) as f:
                return f.read()  # type: ignore[no-any-return]

        file = await asyncio.to_thread(_read)
        if isinstance(file, str):
            return file.encode("utf-8")
        return file

    async def read_range(
        self, path: str, *, offset: int = 0, length: int | None = None
    ) -> bytes:
        """Read ``length`` bytes from ``offset`` (to EOF when length is None)."""
        end = offset + length if length is not None else None
        data = await asyncio.to_thread(
            self.store.cat_file, path, start=offset, end=end
        )
        if isinstance(data, str):
            return data.encode("utf-8")
        return bytes(data)

    async def sign_download_url(
        self, path: str, expiration: int = SIGNED_URL_EXPIRATION
    ) -> str | None:
        """Return a presigned URL, or None when the filesystem cannot sign."""
        try:
            url = await asyncio.to_thread(
                self.store.sign, path, expiration=expiration
            )
            return str(url)
        except NotImplementedError:
            # Many local/in-memory filesystems simply don't support signing.
            return None
        except Exception:
            LOGGER.info("Failed to sign URL for %s", path)
            return None

    @property
    def protocol(self) -> KNOWN_STORAGE_TYPES | str:
        store_protocol = self.store.protocol
        storage_options = self.store.storage_options
        # Try the endpoint URL which can give a more accurate protocol
        # (e.g. an S3-compatible filesystem pointed at Cloudflare R2).
        endpoint_url = storage_options.get("endpoint_url")
        if isinstance(endpoint_url, str) and (
            protocol := detect_protocol_from_url(endpoint_url)
        ):
            return protocol
        # fsspec may expose several protocol aliases (e.g. ("gs", "gcs"));
        # use the first one we can normalize.
        if isinstance(store_protocol, tuple):
            for store_protocol_item in store_protocol:
                if normalized := normalize_protocol(store_protocol_item):
                    return normalized
            return "-".join(store_protocol)
        return normalize_protocol(store_protocol) or store_protocol

    @property
    def root_path(self) -> str | None:
        return cast(str, self.store.root_marker)

    @staticmethod
    def is_compatible(var: Any) -> bool:
        # Bail out unless fsspec has already been imported in this process.
        if not DependencyManager.fsspec.imported():
            return False
        from fsspec import AbstractFileSystem

        return isinstance(var, AbstractFileSystem)
# Maps raw filesystem scheme names (including aliases) to the storage
# types the UI understands.
_PROTOCOL_MAP: dict[str, KNOWN_STORAGE_TYPES] = {
    "s3": "s3",
    "s3a": "s3",
    "gs": "gcs",
    "gcs": "gcs",
    "abfs": "azure",
    "abfss": "azure",
    "az": "azure",
    "adl": "azure",
    "http": "http",
    "https": "http",
    "file": "file",
    "local": "file",
    "memory": "in-memory",
    "r2": "cloudflare",
}
# Specific provider patterns checked before generic ones (e.g. S3),
# since S3-compatible services may also have "s3" in their URL.
# The order of the patterns is important, the first pattern that matches will be used.
_URL_PATTERNS: list[tuple[str, CLOUD_STORAGE_TYPES]] = [
    ("cloudflare", "cloudflare"),
    ("r2.", "cloudflare"),
    ("cwobject", "coreweave"),
    ("cwlota", "coreweave"),
    ("coreweave", "coreweave"),
    ("blob.core.windows", "azure"),
    ("azure", "azure"),
    ("googleapis", "gcs"),
    ("storage.google", "gcs"),
    # Generic S3 patterns come last so S3-compatible providers win above.
    ("s3", "s3"),
    ("amazonaws", "s3"),
]
def detect_protocol_from_url(url: str) -> CLOUD_STORAGE_TYPES | None:
    """Detect the storage provider from an endpoint URL."""
    haystack = url.strip().lower()
    # First matching substring wins; _URL_PATTERNS orders specific
    # providers before the generic S3 patterns.
    return next(
        (proto for pattern, proto in _URL_PATTERNS if pattern in haystack),
        None,
    )
def normalize_protocol(protocol: str) -> KNOWN_STORAGE_TYPES | None:
    """Normalize a protocol string (e.g. 's3a', 'gs', 'abfs') to a known storage type."""
    key = protocol.lower().strip()
    return _PROTOCOL_MAP.get(key)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_data/_external_storage/storage.py",
"license": "Apache License 2.0",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_data/_external_storage/test_get_storage.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
from unittest.mock import MagicMock
import pytest
from inline_snapshot import snapshot
from marimo._data._external_storage.get_storage import (
get_storage_backends_from_variables,
storage_backend_to_storage_namespace,
)
from marimo._data._external_storage.models import StorageNamespace
from marimo._data._external_storage.storage import FsspecFilesystem, Obstore
from marimo._dependencies.dependencies import DependencyManager
from marimo._types.ids import VariableName
# Feature flags: the backend-specific suites only run when the optional
# packages are installed.
HAS_OBSTORE = DependencyManager.obstore.has()
HAS_FSSPEC = DependencyManager.fsspec.has()
class TestGetStorageBackendsFromVariables:
    """Detection of storage clients among kernel variables."""

    def test_empty_variables(self) -> None:
        result = get_storage_backends_from_variables([])
        assert result == []

    def test_no_compatible_variables(self) -> None:
        # Ordinary values must never be wrapped in a backend.
        variables: list[tuple[VariableName, object]] = [
            (VariableName("x"), "just a string"),
            (VariableName("y"), 42),
            (VariableName("z"), [1, 2, 3]),
        ]
        result = get_storage_backends_from_variables(variables)
        assert result == []

    @pytest.mark.skipif(not HAS_OBSTORE, reason="obstore not installed")
    def test_detects_obstore(self) -> None:
        from obstore.store import MemoryStore

        store = MemoryStore()
        variables: list[tuple[VariableName, object]] = [
            (VariableName("my_store"), store),
        ]
        result = get_storage_backends_from_variables(variables)
        assert len(result) == 1
        var_name, backend = result[0]
        assert var_name == "my_store"
        assert isinstance(backend, Obstore)

    @pytest.mark.skipif(not HAS_FSSPEC, reason="fsspec not installed")
    def test_detects_fsspec(self) -> None:
        from fsspec.implementations.memory import MemoryFileSystem

        fs = MemoryFileSystem()
        variables: list[tuple[VariableName, object]] = [
            (VariableName("mem_fs"), fs),
        ]
        result = get_storage_backends_from_variables(variables)
        assert len(result) == 1
        var_name, backend = result[0]
        assert var_name == "mem_fs"
        assert isinstance(backend, FsspecFilesystem)

    @pytest.mark.skipif(
        not (HAS_OBSTORE and HAS_FSSPEC),
        reason="obstore and fsspec both required",
    )
    def test_detects_multiple_backends(self) -> None:
        # Mixed bag: two real clients plus an incompatible value.
        from fsspec.implementations.memory import MemoryFileSystem
        from obstore.store import MemoryStore

        ob_store = MemoryStore()
        fs = MemoryFileSystem()
        variables: list[tuple[VariableName, object]] = [
            (VariableName("ob"), ob_store),
            (VariableName("not_storage"), "hello"),
            (VariableName("fs"), fs),
        ]
        result = get_storage_backends_from_variables(variables)
        assert len(result) == 2
        names = [name for name, _ in result]
        assert VariableName("ob") in names
        assert VariableName("fs") in names
        types = [type(backend) for _, backend in result]
        assert Obstore in types
        assert FsspecFilesystem in types

    def test_mixed_with_none_values(self) -> None:
        variables: list[tuple[VariableName, object]] = [
            (VariableName("n"), None),
            (VariableName("d"), {"key": "value"}),
        ]
        result = get_storage_backends_from_variables(variables)
        assert result == []
class TestStorageBackendToStorageNamespace:
    """Conversion of a backend into its (entry-less) namespace summary."""

    def test_converts_backend_to_namespace(self) -> None:
        mock_backend: Any = MagicMock()
        mock_backend.variable_name = VariableName("test_store")
        mock_backend.display_name = "Amazon S3"
        mock_backend.protocol = "s3"
        mock_backend.root_path = "my-bucket"
        result = storage_backend_to_storage_namespace(mock_backend)
        assert result == snapshot(
            StorageNamespace(
                name=VariableName("test_store"),
                display_name="Amazon S3",
                protocol="s3",
                root_path="my-bucket",
                storage_entries=[],
            )
        )
        # The conversion must not list entries (it runs in a sync hook).
        mock_backend.list_entries.assert_not_called()

    def test_handles_none_root_path(self) -> None:
        # A None root_path is normalized to the empty string.
        mock_backend: Any = MagicMock()
        mock_backend.variable_name = VariableName("mem")
        mock_backend.protocol = "in-memory"
        mock_backend.root_path = None
        mock_backend.display_name = "In-memory"
        mock_backend.list_entries.return_value = []
        result = storage_backend_to_storage_namespace(mock_backend)
        assert result == snapshot(
            StorageNamespace(
                name=VariableName("mem"),
                display_name="In-memory",
                protocol="in-memory",
                root_path="",
                storage_entries=[],
            )
        )

    @pytest.mark.skipif(not HAS_FSSPEC, reason="fsspec not installed")
    def test_with_real_fsspec_backend(self) -> None:
        from fsspec.implementations.memory import MemoryFileSystem

        fs = MemoryFileSystem()
        fs.pipe("/hello.txt", b"hi")
        backend = FsspecFilesystem(fs, VariableName("mem_fs"))
        result = storage_backend_to_storage_namespace(backend)
        assert result == snapshot(
            StorageNamespace(
                name=VariableName("mem_fs"),
                display_name="In-memory",
                protocol="in-memory",
                root_path="/",
                storage_entries=[],
            )
        )

    @pytest.mark.skipif(not HAS_OBSTORE, reason="obstore not installed")
    def test_with_real_obstore_backend(self) -> None:
        from obstore.store import MemoryStore

        store = MemoryStore()
        backend = Obstore(store, VariableName("mem_store"))
        result = storage_backend_to_storage_namespace(backend)
        assert result == snapshot(
            StorageNamespace(
                name=VariableName("mem_store"),
                display_name="In-memory",
                protocol="in-memory",
                root_path="",
                storage_entries=[],
            )
        )
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_data/_external_storage/test_get_storage.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_data/_external_storage/test_storage_models.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import tempfile
from datetime import datetime, timedelta, timezone
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from dirty_equals import IsDatetime, IsPositiveFloat
from inline_snapshot import snapshot
from marimo._data._external_storage.models import DownloadResult, StorageEntry
from marimo._data._external_storage.storage import (
FsspecFilesystem,
Obstore,
detect_protocol_from_url,
normalize_protocol,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._types.ids import VariableName
HAS_OBSTORE = DependencyManager.obstore.has()
HAS_FSSPEC = DependencyManager.fsspec.has()
@pytest.mark.skipif(not HAS_OBSTORE, reason="obstore not installed")
class TestObstore:
def _make_backend(self, store: Any, name: str = "my_store") -> Obstore:
return Obstore(store, VariableName(name))
def test_list_entries(self) -> None:
now = datetime.now(tz=timezone.utc)
mock_store = MagicMock()
mock_store.list_with_delimiter.return_value = {
"common_prefixes": ["subdir/"],
"objects": [
{
"path": "file1.txt",
"size": 100,
"last_modified": now,
"e_tag": "abc",
"version": None,
},
{
"path": "dir/file2.txt",
"size": 200,
"last_modified": now,
"e_tag": None,
"version": "v1",
},
],
}
backend = self._make_backend(mock_store)
result = backend.list_entries(prefix="some/prefix", limit=10)
mock_store.list_with_delimiter.assert_called_once_with(
prefix="some/prefix",
)
assert result == snapshot(
[
StorageEntry(
path="subdir/",
kind="directory",
size=0,
last_modified=None,
metadata={},
mime_type=None,
),
StorageEntry(
path="file1.txt",
kind="object",
size=100,
last_modified=now.timestamp(),
metadata={"e_tag": "abc"},
mime_type="text/plain",
),
StorageEntry(
path="dir/file2.txt",
kind="object",
size=200,
last_modified=now.timestamp(),
metadata={"version": "v1"},
mime_type="text/plain",
),
]
)
def test_list_entries_skips_zero_byte_folder_marker(self) -> None:
now = datetime.now(tz=timezone.utc)
mock_store = MagicMock()
mock_store.list_with_delimiter.return_value = {
"common_prefixes": [],
"objects": [
{
"path": "folder",
"size": 0,
"last_modified": now,
"e_tag": "abcde",
"version": None,
},
{
"path": "folder/order_details.csv",
"size": 5426089,
"last_modified": now,
"e_tag": "fghij",
"version": None,
},
],
}
backend = self._make_backend(mock_store)
result = backend.list_entries(prefix="folder")
assert result == [
StorageEntry(
path="folder/order_details.csv",
kind="object",
size=5426089,
last_modified=now.timestamp(),
metadata={"e_tag": "fghij"},
mime_type="text/csv",
),
]
def test_list_entries_empty(self) -> None:
mock_store = MagicMock()
mock_store.list_with_delimiter.return_value = {
"common_prefixes": [],
"objects": [],
}
backend = self._make_backend(mock_store)
result = backend.list_entries(prefix=None)
assert result == []
def test_create_storage_entry_missing_fields(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
entry = backend._create_storage_entry(
{ # pyright: ignore[reportArgumentType]
"path": None,
"size": None,
"last_modified": None,
"e_tag": None,
"version": None,
}
)
assert entry == snapshot(
StorageEntry(
path="",
kind="object",
size=0,
last_modified=None,
metadata={},
mime_type=None,
)
)
def test_create_storage_entry_with_all_metadata(self) -> None:
now = datetime(2025, 6, 15, 12, 0, 0, tzinfo=timezone.utc)
mock_store = MagicMock()
backend = self._make_backend(mock_store)
entry = backend._create_storage_entry(
{
"path": "test.csv",
"size": 500,
"last_modified": now,
"e_tag": "etag123",
"version": "v2",
}
)
assert entry == snapshot(
StorageEntry(
path="test.csv",
kind="object",
size=500,
last_modified=now.timestamp(),
metadata={"e_tag": "etag123", "version": "v2"},
mime_type="text/csv",
)
)
async def test_get_entry(self) -> None:
now = datetime(2025, 6, 15, 12, 0, 0, tzinfo=timezone.utc)
mock_store = MagicMock()
head_result = {
"path": "test.txt",
"size": 42,
"last_modified": now,
"e_tag": "e1",
"version": None,
}
mock_store.head_async = MagicMock(
return_value=_async_return(head_result)
)
backend = self._make_backend(mock_store)
result = await backend.get_entry("test.txt")
assert result == snapshot(
StorageEntry(
path="test.txt",
kind="object",
size=42,
last_modified=now.timestamp(),
metadata={"e_tag": "e1"},
mime_type="text/plain",
)
)
mock_store.head_async.assert_called_once_with("test.txt")
async def test_download(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"hello world")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.download("some/path.txt")
assert result == b"hello world"
mock_store.get_async.assert_called_once_with("some/path.txt")
async def test_download_file(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"file content")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.download_file("bucket/data/report.csv")
assert result == DownloadResult(
file_bytes=b"file content",
filename="report.csv",
ext="csv",
)
async def test_download_file_no_extension(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"data")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.download_file("bucket/noext")
assert result == DownloadResult(
file_bytes=b"data",
filename="noext",
ext="bin",
)
async def test_download_file_nested_path(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"nested")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.download_file("a/b/c/deep.tar.gz")
assert result == DownloadResult(
file_bytes=b"nested",
filename="deep.tar.gz",
ext="gz",
)
async def test_download_file_trailing_dot(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"data")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.download_file("bucket/file.")
assert result == DownloadResult(
file_bytes=b"data",
filename="file.",
ext="bin",
)
async def test_download_file_empty_path(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"data")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.download_file("")
assert result == DownloadResult(
file_bytes=b"data",
filename="download",
ext="bin",
)
async def test_read_range_full_file_delegates_to_download(self) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"full content")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.read_range("file.txt")
assert result == b"full content"
mock_store.get_async.assert_called_once_with("file.txt")
async def test_read_range_offset_without_length_slices_download(
self,
) -> None:
mock_store = MagicMock()
mock_bytes_result = MagicMock()
mock_bytes_result.bytes_async = MagicMock(
return_value=_async_return(b"hello world")
)
mock_store.get_async = MagicMock(
return_value=_async_return(mock_bytes_result)
)
backend = self._make_backend(mock_store)
result = await backend.read_range("file.txt", offset=6)
assert result == b"world"
mock_store.get_async.assert_called_once_with("file.txt")
async def test_read_range_with_offset_and_length(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
with patch(
"obstore.get_range_async",
new_callable=AsyncMock,
return_value=b"partial",
) as mock_get_range:
result = await backend.read_range(
"file.txt", offset=10, length=100
)
assert result == b"partial"
mock_get_range.assert_called_once_with(
mock_store, "file.txt", start=10, length=100
)
async def test_read_range_with_length_only(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
with patch(
"obstore.get_range_async",
new_callable=AsyncMock,
return_value=b"first bytes",
) as mock_get_range:
result = await backend.read_range("file.txt", length=50)
assert result == b"first bytes"
mock_get_range.assert_called_once_with(
mock_store, "file.txt", start=0, length=50
)
def test_protocol_memory(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
backend = self._make_backend(store)
assert backend.protocol == "in-memory"
def test_protocol_local(self) -> None:
import tempfile
from obstore.store import LocalStore
with tempfile.TemporaryDirectory() as tmpdir:
store = LocalStore(tmpdir)
backend = self._make_backend(store)
assert backend.protocol == "file"
def test_root_path_memory(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
backend = self._make_backend(store)
assert backend.root_path is None
def test_root_path_local(self) -> None:
import tempfile
from obstore.store import LocalStore
with tempfile.TemporaryDirectory() as tmpdir:
store = LocalStore(tmpdir)
backend = self._make_backend(store)
# LocalStore without a prefix should return None
root = backend.root_path
# This depends on whether there's a prefix; for a bare LocalStore it's None
assert root is None or isinstance(root, str)
def test_root_path_s3_with_prefix(self) -> None:
from obstore.store import S3Store
store = S3Store("test-bucket", prefix="my/prefix", skip_signature=True)
backend = self._make_backend(store)
root = backend.root_path
assert root is not None
assert "my/prefix" in str(root)
def test_root_path_s3_without_prefix(self) -> None:
from obstore.store import S3Store
store = S3Store("test-bucket", skip_signature=True)
backend = self._make_backend(store)
root = backend.root_path
assert root == "test-bucket"
def test_is_compatible_with_obstore(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
assert Obstore.is_compatible(store) is True
def test_is_compatible_with_non_obstore(self) -> None:
assert Obstore.is_compatible("not a store") is False
assert Obstore.is_compatible(42) is False
assert Obstore.is_compatible(None) is False
async def test_sign_download_url_returns_none_for_non_cloud_store(
self,
) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
backend = self._make_backend(store)
result = await backend.sign_download_url("some/path.txt")
assert result is None
async def test_sign_download_url_returns_none_for_local_store(
self,
) -> None:
from obstore.store import LocalStore
with tempfile.TemporaryDirectory() as tmpdir:
store = LocalStore(tmpdir)
backend = self._make_backend(store)
result = await backend.sign_download_url("some/path.txt")
assert result is None
async def test_sign_download_url_calls_sign_async_for_s3(self) -> None:
from obstore.store import S3Store
store = S3Store("test-bucket", skip_signature=True)
backend = self._make_backend(store)
with patch(
"obstore.sign_async",
new_callable=AsyncMock,
return_value="https://signed.example.com/file",
) as mock_sign:
result = await backend.sign_download_url(
"data/file.csv", expiration=600
)
assert result == "https://signed.example.com/file"
mock_sign.assert_called_once()
args, kwargs = mock_sign.call_args
assert args == (store, "GET", "data/file.csv")
assert kwargs["expires_in"] == timedelta(seconds=600)
async def test_sign_download_url_returns_none_on_exception(self) -> None:
from obstore.store import S3Store
store = S3Store("test-bucket", skip_signature=True)
backend = self._make_backend(store)
with patch(
"obstore.sign_async",
new_callable=AsyncMock,
side_effect=RuntimeError("signing failed"),
):
result = await backend.sign_download_url("data/file.csv")
assert result is None
def test_display_name_known_protocol(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
backend = self._make_backend(store)
assert backend.display_name == "In-memory"
@pytest.mark.skipif(not HAS_FSSPEC, reason="fsspec not installed")
class TestFsspecFilesystem:
def _make_backend(
self, store: Any, name: str = "my_fs"
) -> FsspecFilesystem:
return FsspecFilesystem(store, VariableName(name))
def test_list_entries(self) -> None:
mock_store = MagicMock()
files = [
{
"name": "file1.txt",
"size": 100,
"type": "file",
"mtime": 1234567890.0,
},
{
"name": "subdir",
"size": 0,
"type": "directory",
"mtime": 1234567891.0,
},
]
mock_store.ls.return_value = files
backend = self._make_backend(mock_store)
result = backend.list_entries(prefix="some/path")
mock_store.ls.assert_called_once_with(path="some/path", detail=True)
assert result == snapshot(
[
StorageEntry(
path="file1.txt",
kind="file",
size=100,
last_modified=1234567890.0,
metadata={},
mime_type="text/plain",
),
StorageEntry(
path="subdir",
kind="directory",
size=0,
last_modified=1234567891.0,
metadata={},
mime_type=None,
),
]
)
def test_list_entries_none_prefix_uses_empty_string(self) -> None:
mock_store = MagicMock()
mock_store.ls.return_value = []
backend = self._make_backend(mock_store)
backend.list_entries(prefix=None)
mock_store.ls.assert_called_once_with(path="", detail=True)
def test_list_entries_respects_limit(self) -> None:
mock_store = MagicMock()
files = [
{
"name": f"file{i}.txt",
"size": i * 10,
"type": "file",
"mtime": None,
}
for i in range(10)
]
mock_store.ls.return_value = files
backend = self._make_backend(mock_store)
result = backend.list_entries(prefix="", limit=3)
assert result == snapshot(
[
StorageEntry(
path="file0.txt",
kind="file",
size=0,
last_modified=None,
metadata={},
mime_type="text/plain",
),
StorageEntry(
path="file1.txt",
kind="file",
size=10,
last_modified=None,
metadata={},
mime_type="text/plain",
),
StorageEntry(
path="file2.txt",
kind="file",
size=20,
last_modified=None,
metadata={},
mime_type="text/plain",
),
]
)
def test_list_entries_raises_on_non_list(self) -> None:
mock_store = MagicMock()
mock_store.ls.return_value = "not_a_list"
backend = self._make_backend(mock_store)
with pytest.raises(ValueError, match="Files is not a list"):
backend.list_entries(prefix="")
def test_list_entries_skips_non_dict_entries(self) -> None:
mock_store = MagicMock()
mock_store.ls.return_value = [
{"name": "good.txt", "size": 10, "type": "file"},
"bad_entry",
{"name": "also_good.txt", "size": 20, "type": "file"},
]
backend = self._make_backend(mock_store)
result = backend.list_entries(prefix="")
assert result == snapshot(
[
StorageEntry(
path="good.txt",
kind="file",
size=10,
last_modified=None,
metadata={},
mime_type="text/plain",
),
StorageEntry(
path="also_good.txt",
kind="file",
size=20,
last_modified=None,
metadata={},
mime_type="text/plain",
),
]
)
def test_identify_kind(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
assert backend._identify_kind("file") == "file"
assert backend._identify_kind("FILE") == "file"
assert backend._identify_kind(" file ") == "file"
assert backend._identify_kind("directory") == "directory"
assert backend._identify_kind("DIRECTORY") == "directory"
assert backend._identify_kind(" directory ") == "directory"
# Unknown types default to "file"
assert backend._identify_kind("unknown") == "file"
assert backend._identify_kind("symlink") == "file"
def test_create_storage_entry_full(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
entry = backend._create_storage_entry(
{
"name": "data.csv",
"size": 1024,
"type": "file",
"mtime": 1700000000.0,
"ETag": "abc123",
"islink": False,
"mode": 0o644,
"nlink": 1,
"created": 1699000000.0,
}
)
assert entry == snapshot(
StorageEntry(
path="data.csv",
kind="file",
size=1024,
last_modified=1700000000.0,
metadata={
"e_tag": "abc123",
"is_link": False,
"mode": 420,
"n_link": 1,
"created": 1699000000.0,
},
mime_type="text/csv",
)
)
def test_create_storage_entry_missing_fields(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
entry = backend._create_storage_entry(
{"name": None, "size": None, "type": None}
)
assert entry == snapshot(
StorageEntry(
path="",
kind="file",
size=0,
last_modified=None,
metadata={},
mime_type=None,
)
)
def test_create_storage_entry_directory(self) -> None:
mock_store = MagicMock()
backend = self._make_backend(mock_store)
entry = backend._create_storage_entry(
{"name": "my_dir/", "size": 0, "type": "directory", "mtime": None}
)
assert entry == snapshot(
StorageEntry(
path="my_dir/",
kind="directory",
size=0,
last_modified=None,
metadata={},
mime_type=None,
)
)
async def test_get_entry(self) -> None:
mock_store = MagicMock()
mock_store.info.return_value = {
"name": "test.txt",
"size": 42,
"type": "file",
"mtime": 1700000000.0,
}
backend = self._make_backend(mock_store)
result = await backend.get_entry("test.txt")
assert result == snapshot(
StorageEntry(
path="test.txt",
kind="file",
size=42,
last_modified=1700000000.0,
metadata={},
mime_type="text/plain",
)
)
async def test_get_entry_raises_on_non_dict(self) -> None:
mock_store = MagicMock()
mock_store.info.return_value = "not_a_dict"
backend = self._make_backend(mock_store)
with pytest.raises(ValueError, match="is not a dictionary"):
await backend.get_entry("test.txt")
async def test_download_bytes(self) -> None:
mock_store = MagicMock()
mock_file = MagicMock()
mock_file.read.return_value = b"binary content"
mock_store.open.return_value = mock_file
backend = self._make_backend(mock_store)
result = await backend.download("path/to/file.bin")
assert result == b"binary content"
mock_store.open.assert_called_once_with("path/to/file.bin")
async def test_download_string_encoded_to_bytes(self) -> None:
mock_store = MagicMock()
mock_file = MagicMock()
mock_file.read.return_value = "text content"
mock_store.open.return_value = mock_file
backend = self._make_backend(mock_store)
result = await backend.download("path/to/file.txt")
assert result == b"text content"
async def test_download_file(self) -> None:
mock_store = MagicMock()
mock_file = MagicMock()
mock_file.read.return_value = b"csv data"
mock_store.open.return_value = mock_file
backend = self._make_backend(mock_store)
result = await backend.download_file("bucket/export.csv")
assert result == DownloadResult(
file_bytes=b"csv data",
filename="export.csv",
ext="csv",
)
async def test_read_range_returns_bytes(self) -> None:
mock_store = MagicMock()
mock_store.cat_file.return_value = b"partial content"
backend = self._make_backend(mock_store)
result = await backend.read_range("path/file.txt", offset=0, length=15)
assert result == b"partial content"
mock_store.cat_file.assert_called_once_with(
"path/file.txt", start=0, end=15
)
async def test_read_range_encodes_string_to_bytes(self) -> None:
mock_store = MagicMock()
mock_store.cat_file.return_value = "text content"
backend = self._make_backend(mock_store)
result = await backend.read_range("path/file.txt", offset=0, length=50)
assert result == b"text content"
async def test_read_range_with_offset(self) -> None:
mock_store = MagicMock()
mock_store.cat_file.return_value = b"middle"
backend = self._make_backend(mock_store)
result = await backend.read_range("path/file.txt", offset=10, length=6)
assert result == b"middle"
mock_store.cat_file.assert_called_once_with(
"path/file.txt", start=10, end=16
)
async def test_read_range_full_file(self) -> None:
mock_store = MagicMock()
mock_store.cat_file.return_value = b"entire file"
backend = self._make_backend(mock_store)
result = await backend.read_range("path/file.txt")
assert result == b"entire file"
mock_store.cat_file.assert_called_once_with(
"path/file.txt", start=0, end=None
)
def test_protocol_tuple(self) -> None:
mock_store = MagicMock()
mock_store.protocol = ("gcs", "gs")
backend = self._make_backend(mock_store)
assert backend.protocol == "gcs"
def test_root_path(self) -> None:
mock_store = MagicMock()
mock_store.root_marker = "/some/root"
backend = self._make_backend(mock_store)
assert backend.root_path == "/some/root"
def test_root_path_empty(self) -> None:
mock_store = MagicMock()
mock_store.root_marker = ""
backend = self._make_backend(mock_store)
assert backend.root_path == ""
def test_is_compatible_with_fsspec(self) -> None:
from fsspec import AbstractFileSystem
mock_fs = MagicMock(spec=AbstractFileSystem)
assert FsspecFilesystem.is_compatible(mock_fs) is True
def test_is_compatible_with_non_fsspec(self) -> None:
assert FsspecFilesystem.is_compatible("not a fs") is False
assert FsspecFilesystem.is_compatible(42) is False
assert FsspecFilesystem.is_compatible(None) is False
def test_is_compatible_with_concrete_filesystem(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
assert FsspecFilesystem.is_compatible(fs) is True
def test_display_name_known_protocol(self) -> None:
mock_store = MagicMock()
mock_store.protocol = "s3"
backend = self._make_backend(mock_store)
assert backend.display_name == "Amazon S3"
async def test_sign_download_url_returns_signed_url(self) -> None:
mock_store = MagicMock()
mock_store.sign.return_value = "https://signed.example.com/path"
backend = self._make_backend(mock_store)
result = await backend.sign_download_url(
"bucket/file.csv", expiration=900
)
assert result == "https://signed.example.com/path"
mock_store.sign.assert_called_once_with(
"bucket/file.csv", expiration=900
)
async def test_sign_download_url_returns_none_on_not_implemented(
self,
) -> None:
mock_store = MagicMock()
mock_store.sign.side_effect = NotImplementedError
backend = self._make_backend(mock_store)
result = await backend.sign_download_url("bucket/file.csv")
assert result is None
async def test_sign_download_url_returns_none_on_exception(self) -> None:
mock_store = MagicMock()
mock_store.sign.side_effect = RuntimeError("unexpected error")
backend = self._make_backend(mock_store)
result = await backend.sign_download_url("bucket/file.csv")
assert result is None
async def test_sign_download_url_converts_result_to_str(self) -> None:
mock_store = MagicMock()
mock_store.sign.return_value = 12345
backend = self._make_backend(mock_store)
result = await backend.sign_download_url("path")
assert result == "12345"
def test_display_name_unknown_protocol(self) -> None:
mock_store = MagicMock()
mock_store.protocol = "custom-proto"
backend = self._make_backend(mock_store)
assert backend.display_name == "Custom-proto"
@pytest.mark.skipif(not HAS_FSSPEC, reason="fsspec not installed")
class TestFsspecFilesystemIntegration:
"""Integration tests using a real fsspec MemoryFileSystem."""
async def test_list_and_download_with_memory_fs(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.mkdir("/test")
fs.pipe("/test/hello.txt", b"hello world")
fs.pipe("/test/data.csv", b"a,b,c\n1,2,3")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
entries = backend.list_entries(prefix="/test")
assert entries == snapshot(
[
StorageEntry(
path="/test/hello.txt",
kind="file",
size=11,
last_modified=None,
metadata={"created": IsPositiveFloat()},
mime_type="text/plain",
),
StorageEntry(
path="/test/data.csv",
kind="file",
size=11,
last_modified=None,
metadata={"created": IsPositiveFloat()},
mime_type="text/csv",
),
]
)
result = await backend.download("/test/hello.txt")
assert result == b"hello world"
async def test_get_entry_with_memory_fs(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.pipe("/myfile.txt", b"content here")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
entry = await backend.get_entry("/myfile.txt")
assert entry == snapshot(
StorageEntry(
path="/myfile.txt",
kind="file",
size=12,
last_modified=None,
metadata={"created": IsDatetime()},
mime_type="text/plain",
)
)
async def test_sign_download_url_not_implemented_by_memory_fs(
self,
) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.pipe("/test/file.txt", b"hello")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
result = await backend.sign_download_url("/test/file.txt")
assert result is None
async def test_read_range_full_file(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.pipe("/test/data.txt", b"hello world")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
result = await backend.read_range("/test/data.txt")
assert result == b"hello world"
async def test_read_range_partial(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.pipe("/test/data.txt", b"hello world")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
result = await backend.read_range("/test/data.txt", offset=0, length=5)
assert result == b"hello"
async def test_read_range_with_offset(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.pipe("/test/data.txt", b"hello world")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
result = await backend.read_range("/test/data.txt", offset=6, length=5)
assert result == b"world"
async def test_read_range_offset_without_length(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
fs.pipe("/test/data.txt", b"hello world")
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
result = await backend.read_range("/test/data.txt", offset=6)
assert result == b"world"
def test_protocol_memory_filesystem(self) -> None:
from fsspec.implementations.memory import MemoryFileSystem
fs = MemoryFileSystem()
backend = FsspecFilesystem(fs, VariableName("mem_fs"))
assert backend.protocol == "in-memory"
@pytest.mark.skipif(not HAS_OBSTORE, reason="obstore not installed")
class TestObstoreIntegration:
"""Integration tests using a real obstore MemoryStore."""
async def test_list_entries_with_memory_store(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
# Put some data
await store.put_async("test/file1.txt", b"hello")
await store.put_async("test/file2.txt", b"world!")
backend = Obstore(store, VariableName("mem_store"))
entries = backend.list_entries(prefix="test/")
assert entries == snapshot(
[
StorageEntry(
path="test/file1.txt",
kind="object",
size=5,
last_modified=IsPositiveFloat(), # pyright: ignore[reportArgumentType]
metadata={"e_tag": "0"},
mime_type="text/plain",
),
StorageEntry(
path="test/file2.txt",
kind="object",
size=6,
last_modified=IsPositiveFloat(), # pyright: ignore[reportArgumentType]
metadata={"e_tag": "1"},
mime_type="text/plain",
),
]
)
async def test_download_with_memory_store(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("data.bin", b"binary data")
backend = Obstore(store, VariableName("mem_store"))
result = await backend.download("data.bin")
assert result == b"binary data"
async def test_get_entry_with_memory_store(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("info.txt", b"some content")
backend = Obstore(store, VariableName("mem_store"))
entry = await backend.get_entry("info.txt")
assert entry == snapshot(
StorageEntry(
path="info.txt",
kind="object",
size=12,
last_modified=IsPositiveFloat(), # pyright: ignore[reportArgumentType]
metadata={"e_tag": "0"},
mime_type="text/plain",
)
)
async def test_sign_download_url_returns_none_for_memory_store(
self,
) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("data.txt", b"test")
backend = Obstore(store, VariableName("mem_store"))
result = await backend.sign_download_url("data.txt")
assert result is None
async def test_read_range_full_file(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("file.txt", b"hello world")
backend = Obstore(store, VariableName("mem_store"))
result = await backend.read_range("file.txt")
assert result == b"hello world"
async def test_read_range_partial(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("file.txt", b"hello world")
backend = Obstore(store, VariableName("mem_store"))
result = await backend.read_range("file.txt", offset=0, length=5)
assert result == b"hello"
async def test_read_range_with_offset(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("file.txt", b"hello world")
backend = Obstore(store, VariableName("mem_store"))
result = await backend.read_range("file.txt", offset=6, length=5)
assert result == b"world"
async def test_read_range_offset_without_length(self) -> None:
from obstore.store import MemoryStore
store = MemoryStore()
await store.put_async("file.txt", b"hello world")
backend = Obstore(store, VariableName("mem_store"))
result = await backend.read_range("file.txt", offset=6)
assert result == b"world"
class TestNormalizeProtocol:
@pytest.mark.parametrize(
("protocol", "expected"),
[
("s3", "s3"),
("s3a", "s3"),
("S3", "s3"),
("gs", "gcs"),
("gcs", "gcs"),
("abfs", "azure"),
("abfss", "azure"),
("az", "azure"),
("http", "http"),
("https", "http"),
("file", "file"),
("local", "file"),
("memory", "in-memory"),
("r2", "cloudflare"),
(" s3 ", "s3"),
("unknown", None),
("ftp", None),
],
)
def test_normalize_protocol(
self, protocol: str, expected: str | None
) -> None:
assert normalize_protocol(protocol) == expected
class TestDetectProtocolFromUrl:
    """Table-driven checks for inferring a storage protocol from an endpoint URL."""
    @pytest.mark.parametrize(
        ("url", "expected"),
        [
            ("https://account.r2.cloudflarestorage.com", "cloudflare"),
            # Both global and regional S3 endpoints are recognized.
            ("https://s3.amazonaws.com", "s3"),
            ("https://s3.us-east-1.amazonaws.com", "s3"),
            ("https://storage.googleapis.com", "gcs"),
            ("https://account.blob.core.windows.net", "azure"),
            # Custom/self-hosted endpoints cannot be classified.
            ("https://minio.example.com", None),
            ("https://my-custom-endpoint.com", None),
            (
                "https://s3.cloudflare.com",
                "cloudflare",
            ),  # Although there is S3, it will match the cloudflare pattern first
        ],
    )
    def test_detect_protocol_from_url(
        self, url: str, expected: str | None
    ) -> None:
        assert detect_protocol_from_url(url) == expected
# --- Helpers ---
async def _async_return(value: Any) -> Any:
    """Trivial coroutine that immediately returns *value* (awaitable test stub)."""
    return value
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_data/_external_storage/test_storage_models.py",
"license": "Apache License 2.0",
"lines": 1034,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/issues/8184_mpl_interactive_large_plot.py | import marimo
__generated_with = "0.19.11"
app = marimo.App()
@app.cell
def _():
    # Shared imports, re-exported to the cells below via marimo's return tuple.
    import marimo as mo
    import matplotlib.pyplot as plt
    import numpy as np
    return mo, np, plt
@app.cell
def _(mo, np, plt):
    # n=200 should work
    plt.figure()
    plt.imshow(np.random.random([200, 200]))
    mo.mpl.interactive(plt.gca())
    return
@app.cell
def _(mo, np, plt):
    # n=300 was blank before fix (GH-8184)
    plt.figure()
    plt.imshow(np.random.random([300, 300]))
    mo.mpl.interactive(plt.gca())
    return
@app.cell
def _(mo, np, plt):
    # n=500 stress test
    plt.figure()
    plt.imshow(np.random.random([500, 500]))
    mo.mpl.interactive(plt.gca())
    return
# Standard marimo entry point: run the app when executed as a script.
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/8184_mpl_interactive_large_plot.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_utils/test_format_signature.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._utils.format_signature import format_signature
class TestFormatSignature:
    """Checks that format_signature preserves signature content while wrapping."""
    @pytest.mark.parametrize(
        ("sig", "expected_return"),
        [
            # Short (fits on 1 line)
            ("f() -> int", "int"),
            ("f() -> str", "str"),
            ("f() -> list[int]", "list[int]"),
            # Medium (multiline)
            ("f(x: int) -> Optional[str]", "Optional[str]"),
            ("f(x: int, y: str) -> dict[str, Any]", "dict[str, Any]"),
            ("medium_func(x: int, y: str) -> float", "float"),
            ("func(x: int, y: str = None) -> bool", "bool"),
            # Long (like hstack)
            (
                "hstack(items: Sequence[object], *, "
                'justify: str = "space-between", '
                "align: str = None, "
                "wrap: bool = False, "
                "gap: float = 0.5, "
                "widths: str = None) -> Html",
                "Html",
            ),
        ],
    )
    def test_return_type_preserved(
        self, sig: str, expected_return: str
    ) -> None:
        """The trailing return annotation survives formatting at any length."""
        result = format_signature("def ", sig)
        assert "->" in result
        assert result.strip().endswith(expected_return)
    def test_no_return_type(self) -> None:
        """Signatures without an annotation gain no spurious arrow."""
        result = format_signature("def ", "func(x: int, y: str)")
        assert "->" not in result
        assert "func" in result
    def test_class_prefix(self) -> None:
        """The supplied prefix (here "class ") is kept verbatim at the front."""
        result = format_signature("class ", "MyClass(x: int, y: str)")
        assert result.startswith("class ")
        assert "MyClass" in result
    def test_default_values_not_truncated(self) -> None:
        """Parameter default values survive formatting."""
        result = format_signature(
            "def ", "func(x: int, y: str = None) -> bool"
        )
        assert "None" in result
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_format_signature.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_runtime/runner/test_cancelled_cells.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._runtime.runner.hook_context import CancelledCells
from marimo._types.ids import CellId_t
class TestCancelledCells:
    """Unit tests for the CancelledCells bookkeeping container."""
    def test_empty(self) -> None:
        """A fresh container is falsy, contains nothing, and iterates empty."""
        cc = CancelledCells()
        assert not cc
        assert CellId_t("a") not in cc
        assert list(cc) == []
    def test_add_and_contains(self) -> None:
        """Descendants recorded via add() become members of the container."""
        cc = CancelledCells()
        cc.add(
            CellId_t("raiser"),
            {CellId_t("child1"), CellId_t("child2")},
        )
        assert cc
        assert CellId_t("child1") in cc
        assert CellId_t("child2") in cc
        assert CellId_t("other") not in cc
    def test_contains_checks_descendants_not_raisers(self) -> None:
        """The `in` operator checks the flat descendant set, not just raisers."""
        cc = CancelledCells()
        cc.add(CellId_t("raiser"), {CellId_t("raiser"), CellId_t("child")})
        # Both raiser and descendant should be found
        assert CellId_t("raiser") in cc
        assert CellId_t("child") in cc
    def test_iter_yields_raising_cells(self) -> None:
        """Iteration walks the raising cells, not their descendants."""
        cc = CancelledCells()
        cc.add(CellId_t("r1"), {CellId_t("a")})
        cc.add(CellId_t("r2"), {CellId_t("b")})
        assert set(cc) == {CellId_t("r1"), CellId_t("r2")}
    def test_getitem(self) -> None:
        """Indexing by raiser returns that raiser's recorded descendants."""
        cc = CancelledCells()
        descendants = {CellId_t("x"), CellId_t("y")}
        cc.add(CellId_t("r"), descendants)
        assert cc[CellId_t("r")] == descendants
    def test_getitem_missing_raises(self) -> None:
        """Indexing an unknown raiser raises KeyError."""
        cc = CancelledCells()
        with pytest.raises(KeyError):
            cc[CellId_t("missing")]
    def test_multiple_raisers_flat_set_merges(self) -> None:
        """Flat set is the union of all descendants across raisers."""
        cc = CancelledCells()
        cc.add(CellId_t("r1"), {CellId_t("a"), CellId_t("b")})
        cc.add(CellId_t("r2"), {CellId_t("b"), CellId_t("c")})
        assert CellId_t("a") in cc
        assert CellId_t("b") in cc
        assert CellId_t("c") in cc
        assert CellId_t("d") not in cc
    def test_same_raiser_accumulates_descendants(self) -> None:
        """add() for the same raising cell unions, not overwrites, descendants."""
        cc = CancelledCells()
        cc.add(CellId_t("r"), {CellId_t("a")})
        cc.add(CellId_t("r"), {CellId_t("b")})
        assert CellId_t("a") in cc
        assert CellId_t("b") in cc
        assert cc[CellId_t("r")] == {CellId_t("a"), CellId_t("b")}
    def test_shared_reference_semantics(self) -> None:
        """Mutations after passing to a frozen dataclass are visible."""
        cc = CancelledCells()
        # Simulate: PostExecutionHookContext holds a reference,
        # then Runner.cancel() adds entries after construction.
        assert CellId_t("x") not in cc
        cc.add(CellId_t("r"), {CellId_t("x")})
        assert CellId_t("x") in cc
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/runner/test_cancelled_cells.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_utils/variable_name.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import inspect
from typing import Any
from marimo import _loggers
LOGGER = _loggers.marimo_logger()
def infer_variable_name(value: Any, fallback: str) -> str:
    """Best-effort lookup of the name bound to ``value`` two frames up.

    Skips this helper's own frame and its direct caller, then scans the
    grandparent frame's locals for an object that ``is`` *value*. If the
    lookup fails for any reason, *fallback* is returned instead.

    All frame references are dropped in ``finally`` to avoid the
    reference cycles that holding a frame object creates.
    """
    current = None
    caller = None
    grandparent = None
    try:
        current = inspect.currentframe()
        caller = current.f_back if current is not None else None
        grandparent = caller.f_back if caller is not None else None
        if grandparent is not None:
            for name, candidate in grandparent.f_locals.items():
                if candidate is value:
                    return name
    except Exception:
        LOGGER.debug(
            "Failed to infer variable name from caller frame.",
            exc_info=True,
        )
    finally:
        # Break potential frame reference cycles.
        del current
        del caller
        del grandparent
    return fallback
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/variable_name.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_runtime/runner/hook_context.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, TypeAlias, Union
from marimo._config.config import OnCellChangeType
from marimo._messaging.errors import Error
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from collections.abc import Iterator, Mapping, Sequence
from contextlib import AbstractContextManager
from marimo._runtime.context.types import ExecutionContext
from marimo._runtime.dataflow.graph import DirectedGraph
ExceptionOrError = Union[BaseException, Error]
ExecutionContextManager: TypeAlias = Callable[
[CellId_t], "AbstractContextManager[ExecutionContext]"
]
class CancelledCells:
    """Tracks cancelled cells with both structured and flat views.

    Maintains a mapping from raising cell -> cancelled descendants,
    and a flat set for O(1) membership checks.
    """

    def __init__(self) -> None:
        # raising cell -> set of descendants it cancelled
        self._by_raising_cell: dict[CellId_t, set[CellId_t]] = {}
        # Union of all cancelled descendants, for O(1) __contains__.
        self._all: set[CellId_t] = set()

    def add(self, raising_cell: CellId_t, descendants: set[CellId_t]) -> None:
        """Record that raising_cell caused descendants to be cancelled.

        Repeated calls for the same raising cell accumulate (union) the
        descendants. The input set is never stored by reference: aliasing
        the caller's set would let later external mutation desync the
        per-raiser mapping from the flat membership set (and make add()
        mutate the caller's own set).
        """
        self._by_raising_cell.setdefault(raising_cell, set()).update(
            descendants
        )
        self._all.update(descendants)

    def __contains__(self, cell_id: object) -> bool:
        """O(1) check if a cell has been cancelled."""
        return cell_id in self._all

    def __iter__(self) -> Iterator[CellId_t]:
        """Iterate over raising cells."""
        return iter(self._by_raising_cell)

    def __getitem__(self, raising_cell: CellId_t) -> set[CellId_t]:
        """Get descendants cancelled by a specific raising cell."""
        return self._by_raising_cell[raising_cell]

    def __bool__(self) -> bool:
        # Truthy iff at least one cancellation has been recorded.
        return bool(self._by_raising_cell)
@dataclass(frozen=True)
class PreparationHookContext:
    """Immutable context handed to run-preparation hooks."""
    # Notebook dataflow graph.
    graph: DirectedGraph
    # User-configured on-cell-change behavior.
    execution_mode: OnCellChangeType
    # Cells scheduled for this run.
    cells_to_run: Sequence[CellId_t]
@dataclass(frozen=True)
class PreExecutionHookContext:
    """Immutable context handed to pre-execution hooks."""
    # Notebook dataflow graph.
    graph: DirectedGraph
    # User-configured on-cell-change behavior.
    execution_mode: OnCellChangeType
@dataclass(frozen=True)
class PostExecutionHookContext:
    """Immutable context handed to post-execution hooks."""
    # Notebook dataflow graph.
    graph: DirectedGraph
    # Global namespace the notebook's cells execute in.
    glbls: dict[str, Any]
    # Factory that opens an ExecutionContext for a given cell; may be None.
    execution_context: ExecutionContextManager | None
    # Dict, because errors get mutated (formatted) by hooks.
    exceptions: dict[CellId_t, ExceptionOrError]
    # Cancellation bookkeeping; CancelledCells is mutable, so entries may be
    # added after this (frozen) context is constructed.
    cancelled_cells: CancelledCells
    # Pre-computed union of all cell temporaries
    all_temporaries: frozenset[str]
    # Whether data (variables, datasets, etc.) should be broadcast
    # to the frontend. Computed once per run to avoid repeated checks.
    should_broadcast_data: bool = False
@dataclass(frozen=True)
class OnFinishHookContext:
    """Immutable context handed to on-finish hooks when a run completes."""
    # Notebook dataflow graph.
    graph: DirectedGraph
    # Cells that were scheduled for this run.
    cells_to_run: Sequence[CellId_t]
    # Whether the run was interrupted.
    interrupted: bool
    # Cancellation bookkeeping for the run.
    cancelled_cells: CancelledCells
    # Read-only view of cell failures from this run.
    exceptions: Mapping[CellId_t, ExceptionOrError]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/runner/hook_context.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_runtime/runner/test_hooks.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._runtime.runner.hooks import (
NotebookCellHooks,
Priority,
create_default_hooks,
)
@pytest.fixture
def hooks() -> NotebookCellHooks:
    """Provide a fresh, empty hook registry for each test."""
    return NotebookCellHooks()
class TestNotebookCellHooks:
    """Behavioral tests for the NotebookCellHooks registry."""
    def test_add_and_retrieve(self, hooks: NotebookCellHooks) -> None:
        """A registered preparation hook appears in preparation_hooks."""
        hook = lambda _: None  # noqa: E731
        hooks.add_preparation(hook)
        assert hook in hooks.preparation_hooks
    def test_priority_ordering(self, hooks: NotebookCellHooks) -> None:
        """Hooks run in EARLY < (default) < LATE < FINAL order, not insertion order."""
        order: list[str] = []
        hooks.add_post_execution(
            lambda _c, _r, _res: order.append("late"), Priority.LATE
        )
        hooks.add_post_execution(
            lambda _c, _r, _res: order.append("early"), Priority.EARLY
        )
        hooks.add_post_execution(
            lambda _c, _r, _res: order.append("final"), Priority.FINAL
        )
        hooks.add_post_execution(lambda _c, _r, _res: order.append("normal"))
        for hook in hooks.post_execution_hooks:
            hook(None, None, None)  # type: ignore
        assert order == ["early", "normal", "late", "final"]
    def test_copy_is_independent(self, hooks: NotebookCellHooks) -> None:
        """Mutating a copy must not affect the original registry."""
        hook1 = lambda _: None  # noqa: E731
        hooks.add_preparation(hook1)
        hooks_copy = hooks.copy()
        hook2 = lambda _: None  # noqa: E731
        hooks_copy.add_preparation(hook2)
        assert len(hooks.preparation_hooks) == 1
        assert len(hooks_copy.preparation_hooks) == 2
class TestCreateDefaultHooks:
    """Sanity checks on the default hook set."""
    def test_creates_all_hook_types(self) -> None:
        """Every hook category is populated by default."""
        hooks = create_default_hooks()
        assert len(hooks.preparation_hooks) > 0
        assert len(hooks.pre_execution_hooks) > 0
        assert len(hooks.post_execution_hooks) > 0
        assert len(hooks.on_finish_hooks) > 0
    def test_set_status_idle_is_last_post_execution_hook(self) -> None:
        """Verify _set_status_idle is the last hook in POST_EXECUTION_HOOKS.
        This is important because status should only be set to idle after all
        other post-execution work (like broadcasting outputs) is complete.
        """
        # Imported locally to avoid pulling runtime hooks in at collection time.
        from marimo._runtime.runner.hooks_post_execution import (
            POST_EXECUTION_HOOKS,
            _set_status_idle,
        )
        assert POST_EXECUTION_HOOKS[-1] is _set_status_idle
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/runner/test_hooks.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_utils/subprocess.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import subprocess
from collections.abc import Callable, Mapping, Sequence
from typing import (
IO,
Any,
Literal,
overload,
)
from marimo import _loggers
LOGGER = _loggers.marimo_logger()
# Type aliases matching typeshed's subprocess stubs
_CMD = str | bytes | Sequence[str | bytes]
_ENV = Mapping[str, str] | Mapping[bytes, bytes]
_FILE = int | IO[Any] | None
@overload
def safe_popen(
args: _CMD,
bufsize: int = ...,
executable: str | bytes | None = ...,
stdin: _FILE = ...,
stdout: _FILE = ...,
stderr: _FILE = ...,
preexec_fn: Callable[[], Any] | None = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: str | bytes | None = ...,
env: _ENV | None = ...,
universal_newlines: bool = ...,
startupinfo: Any = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Sequence[int] = ...,
*,
encoding: str,
errors: str | None = ...,
text: bool | None = ...,
user: str | int | None = ...,
group: str | int | None = ...,
extra_groups: Sequence[str | int] | None = ...,
umask: int = ...,
pipesize: int = ...,
) -> subprocess.Popen[str] | None: ...
@overload
def safe_popen(
args: _CMD,
bufsize: int = ...,
executable: str | bytes | None = ...,
stdin: _FILE = ...,
stdout: _FILE = ...,
stderr: _FILE = ...,
preexec_fn: Callable[[], Any] | None = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: str | bytes | None = ...,
env: _ENV | None = ...,
universal_newlines: bool = ...,
startupinfo: Any = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Sequence[int] = ...,
*,
encoding: str | None = ...,
errors: str,
text: bool | None = ...,
user: str | int | None = ...,
group: str | int | None = ...,
extra_groups: Sequence[str | int] | None = ...,
umask: int = ...,
pipesize: int = ...,
) -> subprocess.Popen[str] | None: ...
@overload
def safe_popen(
args: _CMD,
bufsize: int = ...,
executable: str | bytes | None = ...,
stdin: _FILE = ...,
stdout: _FILE = ...,
stderr: _FILE = ...,
preexec_fn: Callable[[], Any] | None = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: str | bytes | None = ...,
env: _ENV | None = ...,
*,
universal_newlines: Literal[True],
startupinfo: Any = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Sequence[int] = ...,
encoding: str | None = ...,
errors: str | None = ...,
text: bool | None = ...,
user: str | int | None = ...,
group: str | int | None = ...,
extra_groups: Sequence[str | int] | None = ...,
umask: int = ...,
pipesize: int = ...,
) -> subprocess.Popen[str] | None: ...
@overload
def safe_popen(
args: _CMD,
bufsize: int = ...,
executable: str | bytes | None = ...,
stdin: _FILE = ...,
stdout: _FILE = ...,
stderr: _FILE = ...,
preexec_fn: Callable[[], Any] | None = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: str | bytes | None = ...,
env: _ENV | None = ...,
universal_newlines: bool = ...,
startupinfo: Any = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Sequence[int] = ...,
*,
text: Literal[True],
encoding: str | None = ...,
errors: str | None = ...,
user: str | int | None = ...,
group: str | int | None = ...,
extra_groups: Sequence[str | int] | None = ...,
umask: int = ...,
pipesize: int = ...,
) -> subprocess.Popen[str] | None: ...
@overload
def safe_popen(
args: _CMD,
bufsize: int = ...,
executable: str | bytes | None = ...,
stdin: _FILE = ...,
stdout: _FILE = ...,
stderr: _FILE = ...,
preexec_fn: Callable[[], Any] | None = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: str | bytes | None = ...,
env: _ENV | None = ...,
universal_newlines: Literal[False] = ...,
startupinfo: Any = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Sequence[int] = ...,
*,
text: Literal[False, None] = ...,
encoding: None = ...,
errors: None = ...,
user: str | int | None = ...,
group: str | int | None = ...,
extra_groups: Sequence[str | int] | None = ...,
umask: int = ...,
pipesize: int = ...,
) -> subprocess.Popen[bytes] | None: ...
@overload
def safe_popen(
args: _CMD,
bufsize: int = ...,
executable: str | bytes | None = ...,
stdin: _FILE = ...,
stdout: _FILE = ...,
stderr: _FILE = ...,
preexec_fn: Callable[[], Any] | None = ...,
close_fds: bool = ...,
shell: bool = ...,
cwd: str | bytes | None = ...,
env: _ENV | None = ...,
universal_newlines: bool = ...,
startupinfo: Any = ...,
creationflags: int = ...,
restore_signals: bool = ...,
start_new_session: bool = ...,
pass_fds: Sequence[int] = ...,
*,
text: bool | None = ...,
encoding: str | None = ...,
errors: str | None = ...,
user: str | int | None = ...,
group: str | int | None = ...,
extra_groups: Sequence[str | int] | None = ...,
umask: int = ...,
pipesize: int = ...,
) -> subprocess.Popen[Any] | None: ...
def safe_popen(
    args: _CMD,
    bufsize: int = -1,
    executable: str | bytes | None = None,
    stdin: _FILE = None,
    stdout: _FILE = None,
    stderr: _FILE = None,
    preexec_fn: Callable[[], Any] | None = None,
    close_fds: bool = True,
    shell: bool = False,
    cwd: str | bytes | None = None,
    env: _ENV | None = None,
    universal_newlines: bool = False,
    startupinfo: Any = None,
    creationflags: int = 0,
    restore_signals: bool = True,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    *,
    text: bool | None = None,
    encoding: str | None = None,
    errors: str | None = None,
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Sequence[str | int] | None = None,
    umask: int = -1,
    pipesize: int = -1,
    # Note: process_group is omitted because it was added in Python 3.11
) -> subprocess.Popen[Any] | None:
    """Wrapper around subprocess.Popen that never raises.

    Accepts the same parameters and defaults as ``subprocess.Popen``
    (except ``process_group``; see note above).
    Returns None and logs the error if the subprocess cannot be created
    (e.g. FileNotFoundError, PermissionError, OSError).
    """
    try:
        return subprocess.Popen(
            args,
            bufsize=bufsize,
            executable=executable,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            preexec_fn=preexec_fn,
            close_fds=close_fds,
            shell=shell,
            cwd=cwd,
            env=env,
            universal_newlines=universal_newlines,
            startupinfo=startupinfo,
            creationflags=creationflags,
            restore_signals=restore_signals,
            start_new_session=start_new_session,
            pass_fds=pass_fds,
            text=text,
            encoding=encoding,
            errors=errors,
            user=user,
            group=group,
            extra_groups=extra_groups,
            umask=umask,
            pipesize=pipesize,
        )
    # Deliberately broad: the contract above is "never raises", so any
    # constructor failure (missing binary, bad cwd, permissions, bad args)
    # degrades to a logged None.
    except Exception as e:
        LOGGER.error("Failed to create subprocess for command %s: %s", args, e)
        return None
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/subprocess.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_utils/test_subprocess.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import subprocess
from unittest.mock import patch
from marimo._utils.subprocess import safe_popen
class TestSafePopen:
    """Tests for the never-raising safe_popen wrapper.

    NOTE(review): the success-path tests invoke a real `echo` binary from
    PATH, so they assume a POSIX-like environment.
    """
    def test_successful_popen(self):
        """A valid command returns a live Popen whose output is readable."""
        proc = safe_popen(
            ["echo", "hello"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        assert proc is not None
        stdout, _ = proc.communicate()
        assert b"hello" in stdout
        proc.wait()
    def test_returns_none_on_file_not_found(self):
        """A missing executable yields None instead of FileNotFoundError."""
        result = safe_popen(["nonexistent_binary_abc123"])
        assert result is None
    def test_returns_none_on_permission_error(self):
        result = safe_popen(["echo", "hello"])
        with patch(
            "subprocess.Popen", side_effect=PermissionError("not allowed")
        ):
            result = safe_popen(["echo", "hello"])
            assert result is None
    def test_returns_none_on_os_error(self):
        with patch(
            "subprocess.Popen",
            side_effect=OSError("some os error"),
        ):
            result = safe_popen(["echo", "hello"])
            assert result is None
    def test_returns_none_on_generic_exception(self):
        """Even non-OSError failures are swallowed (contract: never raises)."""
        with patch(
            "subprocess.Popen",
            side_effect=RuntimeError("unexpected"),
        ):
            result = safe_popen(["echo", "hello"])
            assert result is None
    def test_passes_kwargs_through(self):
        """Keyword-only args (here `encoding`) reach Popen: output is text."""
        proc = safe_popen(
            ["echo", "test"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        assert proc is not None
        stdout, _ = proc.communicate()
        assert "test" in stdout
        proc.wait()
    def test_returns_none_on_bad_cwd(self):
        """An invalid working directory also degrades to None."""
        result = safe_popen(
            ["echo", "hello"],
            cwd="/nonexistent/directory/abc123",
        )
        assert result is None
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_subprocess.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_cli/help_formatter.py | # Copyright 2026 Marimo. All rights reserved.
#
# Colored Click classes for CLI help formatting.
#
# These follow cargo's color conventions:
# - Bold bright green for section headers (Usage:, Options:, Commands:)
# - Bold bright cyan for command/option names
from __future__ import annotations
import click
from click.utils import make_str
from marimo._cli.print import bright_green, light_blue
from marimo._cli.suggestions import suggest_commands, suggest_short_options
def _split_option_token(token: str) -> tuple[str, str]:
"""Split option-like tokens without depending on click's private parser."""
first = token[:1]
if first.isalnum():
return "", token
if token[1:2] == first:
return token[:2], token[2:]
return first, token[1:]
def _collect_short_options(
    command: click.Command, ctx: click.Context
) -> list[str]:
    """Collect all short flag names (e.g. -p) declared on a command."""
    found: set[str] = set()
    for param in command.get_params(ctx):
        if isinstance(param, click.Option):
            for name in (*param.opts, *param.secondary_opts):
                # A short option starts with exactly one dash.
                if name.startswith("-") and not name.startswith("--"):
                    found.add(name)
    # Sorted for deterministic suggestion output.
    return sorted(found)
def _augment_short_option_error(
    command: click.Command,
    ctx: click.Context,
    error: click.NoSuchOption,
) -> None:
    """Populate click's possibilities for misspelled short flags."""
    if error.possibilities:
        # Click already attached candidates; keep them.
        return
    name = error.option_name
    is_short_flag = name.startswith("-") and not name.startswith("--")
    if not is_short_flag:
        return
    candidates = suggest_short_options(
        name, _collect_short_options(command, ctx)
    )
    if candidates:
        error.possibilities = candidates
class ColoredCommand(click.Command):
    """Click Command with colored help output (cargo-style)."""
    def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:
        # Intercept unknown-short-flag errors to attach "did you mean"
        # suggestions, then re-raise for click's normal error rendering.
        try:
            return super().parse_args(ctx, args)
        except click.NoSuchOption as error:
            _augment_short_option_error(self, ctx, error)
            raise
    def format_usage(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        # Same layout as click's default usage line, but with a colored prefix.
        pieces = self.collect_usage_pieces(ctx)
        formatter.write_usage(
            ctx.command_path,
            " ".join(pieces),
            bright_green("Usage: ", bold=True),
        )
    def format_options(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        # Collect visible option help records, then render names in color.
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            rows = [(light_blue(opt, bold=True), desc) for opt, desc in opts]
            with formatter.section(bright_green("Options", bold=True)):
                formatter.write_dl(rows)
class ColoredGroup(click.Group):
    """Click Group with colored help output (cargo-style)."""
    # Subcommands created via this group inherit the colored help.
    command_class = ColoredCommand
    def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:
        # Mirror ColoredCommand: enrich unknown-short-flag errors, re-raise.
        try:
            return super().parse_args(ctx, args)
        except click.NoSuchOption as error:
            _augment_short_option_error(self, ctx, error)
            raise
    def resolve_command(
        self, ctx: click.Context, args: list[str]
    ) -> tuple[str | None, click.Command | None, list[str]]:
        """Resolve subcommands and emit close-match suggestions on typos."""
        cmd_name = make_str(args[0])
        original_cmd_name = cmd_name
        cmd = self.get_command(ctx, cmd_name)
        # Retry the lookup with the normalized token, if a normalizer is set.
        if cmd is None and ctx.token_normalize_func is not None:
            cmd_name = ctx.token_normalize_func(cmd_name)
            cmd = self.get_command(ctx, cmd_name)
        if cmd is None and not ctx.resilient_parsing:
            # Option-like tokens (e.g. "--bogus") are routed back through
            # parse_args so they fail as unknown options, not commands.
            if _split_option_token(cmd_name)[0]:
                self.parse_args(ctx, args)
            # Candidate pool: visible (non-hidden) commands only.
            command_names: list[str] = []
            for name in self.list_commands(ctx):
                command = self.get_command(ctx, name)
                if command is None or command.hidden:
                    continue
                command_names.append(name)
            suggestions = suggest_commands(original_cmd_name, command_names)
            # ctx.fail raises, so at most one branch below executes.
            if len(suggestions) == 1:
                ctx.fail(
                    f"unrecognized command {original_cmd_name!r}\n\n"
                    f"  tip: a similar command exists: {suggestions[0]!r}"
                )
            if len(suggestions) > 1:
                joined = ", ".join(repr(item) for item in suggestions)
                ctx.fail(
                    f"unrecognized command {original_cmd_name!r}\n\n"
                    f"  tip: some similar commands exist: {joined}"
                )
            ctx.fail(f"unrecognized command {original_cmd_name!r}")
        return cmd_name if cmd else None, cmd, args[1:]
    def format_usage(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        # Same layout as click's default usage line, but with a colored prefix.
        pieces = self.collect_usage_pieces(ctx)
        formatter.write_usage(
            ctx.command_path,
            " ".join(pieces),
            bright_green("Usage: ", bold=True),
        )
    def format_options(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        # Collect visible option help records, then render names in color.
        opts = []
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                opts.append(rv)
        if opts:
            rows = [(light_blue(opt, bold=True), desc) for opt, desc in opts]
            with formatter.section(bright_green("Options", bold=True)):
                formatter.write_dl(rows)
        # Click's MultiCommand.format_options calls format_commands
        # internally, so we must do the same since we're overriding
        # the method completely
        self.format_commands(ctx, formatter)
    def format_commands(
        self, ctx: click.Context, formatter: click.HelpFormatter
    ) -> None:
        """Write all commands with colored names."""
        commands = []
        for subcommand in self.list_commands(ctx):
            cmd = self.get_command(ctx, subcommand)
            if cmd is None or cmd.hidden:
                continue
            commands.append((subcommand, cmd))
        if commands:
            rows = [
                (
                    light_blue(subcommand, bold=True),
                    cmd.get_short_help_str(),
                )
                for subcommand, cmd in commands
            ]
            with formatter.section(bright_green("Commands", bold=True)):
                formatter.write_dl(rows)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_cli/help_formatter.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_smoke_tests/latex/704.py | import marimo
__generated_with = "0.19.6"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    return (mo,)
@app.cell(hide_code=True)
def _(mo):
    # Title cell linking the original bug report.
    mo.md(r"""
    # Incrementing functions
    Bug from [#704](https://github.com/marimo-team/marimo/discussions/704)
    """)
    return
@app.cell(hide_code=True)
def _(mo):
    # LaTeX align-environment rendering check from discussion #704.
    mo.md(r"""
    \begin{align}
    B' &=-\nabla \times E,\\
    E' &=\nabla \times B - 4\pi j\\
    e^{\pi i} + 1 = 0
    \end{align}
    """)
    return
# Standard marimo entry point: run the app when executed as a script.
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/latex/704.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_metadata/opengraph.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import ast
import asyncio
import importlib
import inspect
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Literal, cast
from urllib.parse import urlparse
import msgspec
from marimo import _loggers
from marimo._utils.paths import normalize_path
from marimo._utils.scripts import read_pyproject_from_script
LOGGER = _loggers.marimo_logger()
DEFAULT_OPENGRAPH_IMAGE_FILENAME = "opengraph.png"
_WORD_SPLIT_RE = re.compile(r"[_-]+")
_MARIMO_GREEN = "#59b39a"
class OpenGraphMetadata(msgspec.Struct, rename="camel"):
    """OpenGraph-style metadata for a notebook.
    The `image` field may be either:
    - a relative path (typically under `__marimo__/`), or
    - an absolute HTTPS URL.
    """
    # Fields serialize with camelCase names (msgspec rename="camel").
    title: str | None = None
    description: str | None = None
    image: str | None = None
@dataclass(frozen=True)
class OpenGraphConfig:
    """Declarative configuration for resolving notebook OpenGraph metadata."""
    title: str | None = None
    description: str | None = None
    # Relative path or HTTPS URL; other schemes and absolute paths are
    # rejected by _normalize_opengraph_image.
    image: str | None = None
    # Reference to a user-supplied generator function; exact format is
    # validated elsewhere (see _parse_generator_signature).
    generator: str | None = None
def _maybe_str(value: Any) -> str | None:
return value if isinstance(value, str) else None
def is_https_url(value: str) -> bool:
    """Return True if value is an absolute HTTPS URL."""
    try:
        parts = urlparse(value)
    except Exception:
        # Pathological inputs can make urlparse raise; treat as non-URL.
        return False
    if parts.scheme != "https":
        return False
    # Require a network location so bare "https://" is rejected.
    return bool(parts.netloc)
def _normalize_opengraph_image(value: str | None) -> str | None:
"""Normalize an opengraph image value to a safe path (typically under `__marimo__/`) or HTTPS URL."""
if value is None:
return None
if is_https_url(value):
return value
# Disallow URLs with other schemes (e.g. http, data, file).
parsed = urlparse(value)
if parsed.scheme:
return None
path = Path(value)
if path.is_absolute():
return None
return value
OpenGraphMode = Literal["run", "edit"]
@dataclass(frozen=True)
class OpenGraphContext:
    """Context passed to OpenGraph generator functions."""
    # Path to the notebook file on disk.
    filepath: str
    # File router key (often a workspace-relative path); may be None.
    file_key: str | None = None
    # Server base URL (e.g. http://localhost:2718); may be None in CLI contexts.
    base_url: str | None = None
    # "run" or "edit"; None when the serving mode is unknown.
    mode: OpenGraphMode | None = None
OpenGraphGeneratorReturn = OpenGraphMetadata | dict[str, Any] | None
OpenGraphGenerator = Callable[..., OpenGraphGeneratorReturn]
OpenGraphGeneratorArity = Literal[0, 1, 2]
@dataclass(frozen=True)
class OpenGraphGeneratorSpec:
    """A validated generator callable plus its accepted positional arity."""
    # The user-supplied generator function.
    fn: OpenGraphGenerator
    # Number of positional parameters the generator takes (0, 1, or 2).
    arity: OpenGraphGeneratorArity
def read_opengraph_from_pyproject(
    pyproject: dict[str, Any],
) -> OpenGraphConfig | None:
    """Extract OpenGraph metadata from a parsed PEP 723 pyproject dict."""
    # Walk [tool][marimo][opengraph]; bail out if any level is missing
    # or is not a table.
    node: Any = pyproject.get("tool")
    for key in ("marimo", "opengraph"):
        if not isinstance(node, dict):
            return None
        node = node.get(key)
    if not isinstance(node, dict):
        return None
    config = OpenGraphConfig(
        title=_maybe_str(node.get("title")),
        description=_maybe_str(node.get("description")),
        image=_normalize_opengraph_image(_maybe_str(node.get("image"))),
        generator=_maybe_str(node.get("generator")),
    )
    # An opengraph table with no recognized values counts as "no metadata".
    if all(
        field is None
        for field in (
            config.title,
            config.description,
            config.image,
            config.generator,
        )
    ):
        return None
    return config
def read_opengraph_from_file(filepath: str) -> OpenGraphConfig | None:
    """Read OpenGraph metadata from a notebook's PEP 723 header."""
    try:
        source = Path(filepath).read_text(encoding="utf-8")
        pyproject = read_pyproject_from_script(source) or {}
    except Exception:
        # Malformed or unreadable headers are treated as "no metadata" so
        # listing and thumbnail generation don't spam warnings.
        return None
    return read_opengraph_from_pyproject(pyproject)
def _title_case(text: str) -> str:
return text[:1].upper() + text[1:].lower()
def derive_title_from_path(filepath: str) -> str:
    """Build a human-readable title from a notebook filename stem."""
    # Split the stem on runs of "_" / "-", drop empties, title-case each word.
    words = _WORD_SPLIT_RE.split(Path(filepath).stem)
    return " ".join(_title_case(word) for word in words if word)
def default_opengraph_image(filepath: str) -> str:
    """Return the default relative image path for a given notebook."""
    # Assets live under __marimo__/assets/<notebook-stem>/.
    segments = (
        "__marimo__",
        "assets",
        Path(filepath).stem,
        DEFAULT_OPENGRAPH_IMAGE_FILENAME,
    )
    return "/".join(segments)
def _default_image_exists(filepath: str) -> bool:
    """Whether the conventional thumbnail file exists next to the notebook."""
    parent = normalize_path(Path(filepath)).parent
    candidate = parent / default_opengraph_image(filepath)
    return candidate.is_file()
def _merge_opengraph_metadata(
    parent: OpenGraphMetadata,
    override: OpenGraphMetadata | None,
) -> OpenGraphMetadata:
    """Merge two metadata objects with override values taking precedence."""
    if override is None:
        return parent
    # Field-by-field: a non-None override value wins; otherwise keep parent.
    return OpenGraphMetadata(
        title=parent.title if override.title is None else override.title,
        description=(
            parent.description
            if override.description is None
            else override.description
        ),
        image=parent.image if override.image is None else override.image,
    )
def _coerce_opengraph_metadata(value: Any) -> OpenGraphMetadata | None:
    """Coerce a generator return value into OpenGraphMetadata."""
    if isinstance(value, OpenGraphMetadata):
        return value
    if not isinstance(value, dict):
        # Covers None and any unsupported return type.
        return None
    # "imageUrl" is accepted as an alternative key to "image".
    raw_image = _maybe_str(value.get("image"))
    if raw_image is None:
        raw_image = _maybe_str(value.get("imageUrl"))
    return OpenGraphMetadata(
        title=_maybe_str(value.get("title")),
        description=_maybe_str(value.get("description")),
        image=_normalize_opengraph_image(raw_image),
    )
def _parse_generator_signature(
    fn: OpenGraphGenerator, *, generator: str
) -> OpenGraphGeneratorSpec | None:
    """Validate a generator signature and return a normalized call spec.

    Returns None (with a warning) if the function is async, cannot be
    inspected, or does not take 0-2 plain positional parameters.
    """
    if asyncio.iscoroutinefunction(fn):
        LOGGER.warning(
            "OpenGraph generator must be synchronous: %s", generator
        )
        return None
    try:
        sig = inspect.signature(fn)
    except Exception as e:
        LOGGER.warning(
            "Failed to inspect OpenGraph generator signature (%s): %s",
            generator,
            e,
        )
        return None
    all_params = list(sig.parameters.values())
    # *args/**kwargs and keyword-only params make the calling convention
    # ambiguous, so reject them outright.
    disallowed_kinds = {
        inspect.Parameter.VAR_POSITIONAL,
        inspect.Parameter.VAR_KEYWORD,
        inspect.Parameter.KEYWORD_ONLY,
    }
    if any(param.kind in disallowed_kinds for param in all_params):
        LOGGER.warning(
            "OpenGraph generator signature must use 0-2 positional args: %s",
            generator,
        )
        return None
    positional_kinds = {
        inspect.Parameter.POSITIONAL_ONLY,
        inspect.Parameter.POSITIONAL_OR_KEYWORD,
    }
    positional_count = sum(
        1 for param in all_params if param.kind in positional_kinds
    )
    if positional_count > 2:
        LOGGER.warning(
            "OpenGraph generator signature must accept at most 2 args: %s",
            generator,
        )
        return None
    return OpenGraphGeneratorSpec(
        fn=fn, arity=cast(OpenGraphGeneratorArity, positional_count)
    )
def _load_generator_from_module(
    module_spec: str, name: str, *, generator: str
) -> OpenGraphGeneratorSpec | None:
    """Import ``module_spec`` and validate its attribute ``name``."""
    try:
        module = importlib.import_module(module_spec)
    except Exception as e:
        LOGGER.warning("Failed to import OpenGraph generator module: %s", e)
        return None
    candidate = getattr(module, name, None)
    if candidate is None:
        LOGGER.warning(
            "OpenGraph generator %s not found in module %s", name, module_spec
        )
        return None
    if not callable(candidate):
        LOGGER.warning("OpenGraph generator is not callable: %s", generator)
        return None
    # Signature validation happens last so the warnings above stay specific.
    return _parse_generator_signature(
        cast(OpenGraphGenerator, candidate), generator=generator
    )
def _load_generator_from_notebook_source(
    notebook_path: str, name: str
) -> OpenGraphGeneratorSpec | None:
    """Load a generator function from the notebook source without executing it.

    We compile and exec a small synthetic module containing only:
    - import statements (so the generator can import deps)
    - statements inlined from a `with app.setup:` block, if present
    - the named function definition

    Returns None (with a warning) on read/parse/exec failure or when the
    named function is missing or not callable.
    """
    try:
        source = Path(notebook_path).read_text(encoding="utf-8")
    except Exception as e:
        LOGGER.warning(
            "Failed to read notebook when loading OpenGraph generator: %s", e
        )
        return None
    try:
        module_ast = ast.parse(source, filename=notebook_path)
    except Exception as e:
        LOGGER.warning(
            "Failed to parse notebook when loading OpenGraph generator: %s", e
        )
        return None
    # Matches `app.setup` or `app.setup(...)` as a with-statement context.
    def is_setup_expr(expr: ast.expr) -> bool:
        if isinstance(expr, ast.Attribute):
            return (
                isinstance(expr.value, ast.Name)
                and expr.value.id == "app"
                and expr.attr == "setup"
            )
        if isinstance(expr, ast.Call):
            return is_setup_expr(expr.func)
        return False
    imports: list[ast.stmt] = []
    setup: list[ast.stmt] = []
    target: ast.FunctionDef | ast.AsyncFunctionDef | None = None
    # Single pass over top-level statements: collect imports and setup-block
    # bodies, and find the named function definition (last match wins).
    for node in module_ast.body:
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            imports.append(node)
        elif isinstance(node, ast.With) and len(node.items) == 1:
            context_expr = node.items[0].context_expr
            if is_setup_expr(context_expr):
                # Inline the setup block body so that dependencies imported under `with app.setup:`
                # are available to the generator, without invoking marimo's setup cell registration.
                setup.extend(node.body)
        elif isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            if node.name == name:
                target = node
    if target is None:
        LOGGER.warning(
            "OpenGraph generator %s not found in notebook %s",
            name,
            notebook_path,
        )
        return None
    # Ignore decorators so we don't execute notebook/app registration logic.
    # Metadata generators are treated as plain Python functions.
    if getattr(target, "decorator_list", None):
        target.decorator_list = []
    # Build and exec a synthetic module holding only the pieces we kept.
    extracted = ast.Module(body=[*imports, *setup, target], type_ignores=[])
    ast.fix_missing_locations(extracted)
    namespace: dict[str, Any] = {}
    try:
        exec(compile(extracted, notebook_path, "exec"), namespace)
    except Exception as e:
        LOGGER.warning("Failed to exec OpenGraph generator stub: %s", e)
        return None
    fn = namespace.get(name)
    if not callable(fn):
        LOGGER.warning(
            "OpenGraph generator %s is not callable (in %s)",
            name,
            notebook_path,
        )
        return None
    return _parse_generator_signature(
        cast(OpenGraphGenerator, fn), generator=name
    )
def _load_opengraph_generator(
    generator: str, *, notebook_path: str
) -> OpenGraphGeneratorSpec | None:
    """Resolve a generator string into a callable.

    Supported forms:
    - "module.submodule:function"
    - "module.submodule.function"
    - "function_name" (loaded from notebook source via AST extraction)
    """
    spec = generator.strip()
    if not spec:
        return None
    if ":" in spec:
        module_part, _, func_part = spec.partition(":")
    elif "." in spec:
        module_part, _, func_part = spec.rpartition(".")
    else:
        # Bare name: extract the function from the notebook itself.
        return _load_generator_from_notebook_source(notebook_path, spec)
    module_part = module_part.strip()
    func_part = func_part.strip()
    if not module_part or not func_part:
        return None
    # Disallow filesystem-based generator specs to keep behavior predictable
    looks_like_path = (
        module_part.endswith(".py")
        or "/" in module_part
        or "\\" in module_part
    )
    if looks_like_path:
        LOGGER.warning(
            "OpenGraph generator must be importable as a Python module: %s",
            generator,
        )
        return None
    return _load_generator_from_module(
        module_part, func_part, generator=generator
    )
def _call_opengraph_generator(
    spec: OpenGraphGeneratorSpec,
    *,
    context: OpenGraphContext,
    parent: OpenGraphMetadata,
) -> OpenGraphGeneratorReturn:
    """Invoke a generator with a stable calling convention."""
    # Pass exactly as many positional args as the generator declared:
    # (), (context,), or (context, parent).
    args = (context, parent)[: spec.arity]
    return spec.fn(*args)
def _run_opengraph_generator(
    generator: str,
    *,
    context: OpenGraphContext,
    parent: OpenGraphMetadata,
) -> OpenGraphMetadata | None:
    """Load, invoke, and coerce a generator; None on any failure."""
    spec = _load_opengraph_generator(generator, notebook_path=context.filepath)
    if spec is None:
        return None
    try:
        raw = _call_opengraph_generator(spec, context=context, parent=parent)
    except Exception as e:
        LOGGER.warning("OpenGraph generator raised: %s", e)
        return None
    if inspect.isawaitable(raw):
        # Avoid "coroutine was never awaited" warnings.
        closer = getattr(raw, "close", None)
        if callable(closer):
            try:
                closer()
            except Exception:
                pass
        LOGGER.warning(
            "OpenGraph generator returned an awaitable (must be sync): %s",
            generator,
        )
        return None
    coerced = _coerce_opengraph_metadata(raw)
    if coerced is not None:
        return coerced
    LOGGER.warning(
        "OpenGraph generator returned unsupported value: %s", type(raw)
    )
    return None
def resolve_opengraph_metadata(
    filepath: str,
    *,
    app_title: str | None = None,
    context: OpenGraphContext | None = None,
) -> OpenGraphMetadata:
    """Resolve OpenGraph metadata from config, defaults, and a generator hook."""
    declared = read_opengraph_from_file(filepath) or OpenGraphConfig()
    # Title precedence: explicit config > app title > filename-derived.
    title = declared.title or app_title or derive_title_from_path(filepath)
    image = _normalize_opengraph_image(declared.image)
    if image is None and _default_image_exists(filepath):
        # Fall back to the conventional thumbnail path when that file exists.
        image = default_opengraph_image(filepath)
    resolved = OpenGraphMetadata(
        title=title,
        description=declared.description,
        image=image,
    )
    if not declared.generator:
        return resolved
    # Let the optional generator hook override individual fields.
    ctx = (
        context
        if context is not None
        else OpenGraphContext(filepath=filepath)
    )
    dynamic = _run_opengraph_generator(
        declared.generator, context=ctx, parent=resolved
    )
    return _merge_opengraph_metadata(resolved, dynamic)
def _xml_escape(text: str) -> str:
return (
text.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace('"', """)
.replace("'", "'")
)
def _wrap_title_lines(title: str, *, max_chars: int = 32) -> list[str]:
words = title.split()
if not words:
return ["marimo"]
lines: list[str] = []
current: list[str] = []
current_len = 0
for word in words:
extra = (1 if current else 0) + len(word)
if current and current_len + extra > max_chars:
lines.append(" ".join(current))
current = [word]
current_len = len(word)
else:
current.append(word)
current_len += extra
if current:
lines.append(" ".join(current))
# Keep the placeholder compact.
if len(lines) > 3:
lines = lines[:3]
lines[-1] = lines[-1].rstrip(".") + "..."
return lines
@dataclass(frozen=True)
class OpenGraphImage:
    """An in-memory OpenGraph image payload."""

    # Raw image bytes (e.g. UTF-8 encoded SVG markup).
    content: bytes
    # MIME type for the payload, e.g. "image/svg+xml".
    media_type: str
@dataclass(frozen=True)
class DefaultOpenGraphPlaceholderImageGenerator:
    """Generate a deterministic placeholder thumbnail image.

    Renders the title as text on a white card with an accent stripe,
    as an SVG. The output depends only on the title and dimensions.
    """

    # Canvas dimensions. 1200x630 is the conventional OpenGraph image size.
    width: int = 1200
    height: int = 630
    def __call__(self, title: str) -> OpenGraphImage:
        """Render *title* into an SVG placeholder image."""
        svg = self._render_svg(title)
        return OpenGraphImage(
            content=svg.encode("utf-8"),
            media_type="image/svg+xml",
        )
    def _render_svg(self, title: str) -> str:
        """Build the SVG markup: an inset card, accent stripe, and title."""
        accent = _MARIMO_GREEN
        lines = _wrap_title_lines(title)
        escaped = [_xml_escape(line) for line in lines]
        # Center the title inside an inset card.
        card_x = 48
        card_y = 48
        card_w = self.width - 2 * card_x
        card_h = self.height - 2 * card_y
        stripe_w = 16
        line_height = 72
        block_height = line_height * len(escaped)
        # +56 shifts the text baseline down into the vertically-centered block.
        start_y = card_y + int((card_h - block_height) / 2) + 56
        text_x = card_x + stripe_w + 36
        text_nodes: list[str] = []
        for i, line in enumerate(escaped):
            y = start_y + i * line_height
            text_nodes.append(
                f'<text x="{text_x}" y="{y}" font-size="60">{line}</text>'
            )
        text_svg = "\n    ".join(text_nodes)
        return f"""<svg xmlns="http://www.w3.org/2000/svg" width="{self.width}" height="{self.height}" viewBox="0 0 {self.width} {self.height}">
  <defs>
    <clipPath id="card">
      <rect x="{card_x}" y="{card_y}" width="{card_w}" height="{card_h}" rx="24" />
    </clipPath>
  </defs>
  <rect width="{self.width}" height="{self.height}" fill="#f8fafc"/>
  <rect x="{card_x}" y="{card_y}" width="{card_w}" height="{card_h}" rx="24" fill="#ffffff" stroke="#e2e8f0" stroke-width="2"/>
  <rect x="{card_x}" y="{card_y}" width="{stripe_w}" height="{card_h}" fill="{accent}" clip-path="url(#card)"/>
  <g fill="#0f172a" font-family="ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial" font-weight="700">
    {text_svg}
  </g>
  <text x="{text_x}" y="{card_y + card_h - 32}" fill="#64748b" font-family="ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Arial" font-size="22" font-weight="600">
    marimo
  </text>
</svg>
"""
# Shared default instance: the generator is a frozen dataclass holding only
# its dimensions, so one module-level instance can be reused safely.
DEFAULT_OPENGRAPH_PLACEHOLDER_IMAGE_GENERATOR = (
    DefaultOpenGraphPlaceholderImageGenerator()
)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_metadata/opengraph.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_metadata/test_opengraph.py | from __future__ import annotations
import textwrap
from marimo._metadata.opengraph import (
DEFAULT_OPENGRAPH_PLACEHOLDER_IMAGE_GENERATOR,
OpenGraphConfig,
default_opengraph_image,
read_opengraph_from_file,
read_opengraph_from_pyproject,
resolve_opengraph_metadata,
)
def test_read_opengraph_from_pyproject() -> None:
    """A well-formed [tool.marimo.opengraph] table round-trips into config."""
    opengraph_table = {
        "title": "My Title",
        "description": "My Description",
        "image": "__marimo__/opengraph.png",
    }
    pyproject = {"tool": {"marimo": {"opengraph": opengraph_table}}}
    expected = OpenGraphConfig(
        title="My Title",
        description="My Description",
        image="__marimo__/opengraph.png",
        generator=None,
    )
    assert read_opengraph_from_pyproject(pyproject) == expected
    # Invalid shapes should be ignored.
    malformed = {"tool": {"marimo": {"opengraph": {"title": 123}}}}
    assert read_opengraph_from_pyproject(malformed) is None
def test_read_opengraph_from_file(tmp_path) -> None:
    """A PEP 723 header's [tool.marimo.opengraph] table is read from disk."""
    script = textwrap.dedent(
        """
        # /// script
        # [tool.marimo.opengraph]
        # title = "Hello World"
        # description = "A notebook"
        # image = "__marimo__/opengraph.png"
        # ///
        """
    ).lstrip()
    path = tmp_path / "notebook.py"
    path.write_text(script, encoding="utf-8")
    assert read_opengraph_from_file(str(path)) == OpenGraphConfig(
        title="Hello World",
        description="A notebook",
        image="__marimo__/opengraph.png",
        generator=None,
    )
def test_resolve_opengraph_metadata_defaults(tmp_path) -> None:
    """Without declared metadata, the title derives from the filename."""
    notebook = tmp_path / "my_notebook.py"
    notebook.write_text(
        "import marimo as mo\napp = mo.App()\n", encoding="utf-8"
    )
    metadata = resolve_opengraph_metadata(str(notebook))
    assert metadata.title == "My Notebook"
    assert metadata.description is None
    assert metadata.image is None
def test_resolve_opengraph_metadata_uses_app_title(tmp_path) -> None:
    """An explicit app_title wins when the header declares no title."""
    script = textwrap.dedent(
        """
        # /// script
        # [tool.marimo.opengraph]
        # description = "desc"
        # ///
        import marimo as mo
        app = mo.App(app_title="App Title")
        """
    ).lstrip()
    path = tmp_path / "notebook.py"
    path.write_text(script, encoding="utf-8")
    resolved = resolve_opengraph_metadata(str(path), app_title="App Title")
    assert resolved.title == "App Title"
    assert resolved.description == "desc"
def test_resolve_opengraph_metadata_defaults_image_if_present(
    tmp_path,
) -> None:
    """When the conventional thumbnail exists on disk, it becomes the image."""
    notebook = tmp_path / "notebook.py"
    notebook.write_text(
        "import marimo as mo\napp = mo.App()\n", encoding="utf-8"
    )
    expected_image = default_opengraph_image(str(notebook))
    # Materialize the conventional asset next to the notebook.
    asset = tmp_path / expected_image
    asset.parent.mkdir(parents=True, exist_ok=True)
    asset.write_bytes(b"fake-png")
    resolved = resolve_opengraph_metadata(str(notebook))
    assert resolved.image == expected_image
def test_resolve_opengraph_metadata_supports_https_url_image(tmp_path) -> None:
    """An absolute https:// image URL is passed through unchanged."""
    script = textwrap.dedent(
        """
        # /// script
        # [tool.marimo.opengraph]
        # image = "https://example.com/opengraph.png"
        # ///
        import marimo as mo
        app = mo.App()
        """
    ).lstrip()
    path = tmp_path / "notebook.py"
    path.write_text(script, encoding="utf-8")
    resolved = resolve_opengraph_metadata(str(path))
    assert resolved.image == "https://example.com/opengraph.png"
def test_default_opengraph_placeholder_generator() -> None:
    """The placeholder generator emits an SVG containing the title."""
    result = DEFAULT_OPENGRAPH_PLACEHOLDER_IMAGE_GENERATOR("Hello World")
    assert result.media_type == "image/svg+xml"
    assert result.content.startswith(b"<svg")
    assert b"Hello World" in result.content
def test_resolve_opengraph_metadata_merges_generator_overrides(
    tmp_path,
) -> None:
    """A generator's partial dict overrides only the fields it returns."""
    script = textwrap.dedent(
        """
        # /// script
        # [tool.marimo.opengraph]
        # title = "Static Title"
        # generator = "generate_opengraph"
        # ///
        def generate_opengraph(ctx, parent):
            # Merge semantics: return only what you want to override.
            return {
                "image": "https://example.com/generated.png",
            }
        import marimo as mo
        app = mo.App()
        """
    ).lstrip()
    path = tmp_path / "notebook.py"
    path.write_text(script, encoding="utf-8")
    resolved = resolve_opengraph_metadata(str(path))
    # Static title survives; the generated image is merged in.
    assert resolved.title == "Static Title"
    assert resolved.image == "https://example.com/generated.png"
def test_resolve_opengraph_metadata_ignores_invalid_generator_signature(
    tmp_path,
) -> None:
    """A 3-arg generator is rejected; static metadata is left untouched."""
    script = textwrap.dedent(
        """
        # /// script
        # [tool.marimo.opengraph]
        # title = "Static Title"
        # generator = "generate_opengraph"
        # ///
        def generate_opengraph(ctx, parent, extra):
            return {"image": "https://example.com/generated.png"}
        import marimo as mo
        app = mo.App()
        """
    ).lstrip()
    path = tmp_path / "notebook.py"
    path.write_text(script, encoding="utf-8")
    resolved = resolve_opengraph_metadata(str(path))
    assert resolved.title == "Static Title"
    # The generator's image was never applied.
    assert resolved.image is None
def test_resolve_opengraph_metadata_supports_app_setup_imports(
    tmp_path,
) -> None:
    """Imports made inside `with app.setup:` are visible to the generator."""
    script = textwrap.dedent(
        """
        # /// script
        # [tool.marimo.opengraph]
        # title = "Static Title"
        # generator = "generate_opengraph"
        # ///
        import marimo
        app = marimo.App()
        with app.setup:
            import math
        @app.function
        def generate_opengraph(ctx, parent):
            return {"image": f"https://example.com/{math.floor(math.pi)}.png"}
        """
    ).lstrip()
    path = tmp_path / "notebook.py"
    path.write_text(script, encoding="utf-8")
    resolved = resolve_opengraph_metadata(str(path))
    assert resolved.title == "Static Title"
    # math.floor(math.pi) == 3, proving the setup import was inlined.
    assert resolved.image == "https://example.com/3.png"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_metadata/test_opengraph.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_utils/test_methods.py | # Copyright 2026 Marimo. All rights reserved.
from marimo._utils.methods import getcallable, is_callable_method
def test_getcallable() -> None:
    """Test the getcallable utility function."""

    class HasMethod:
        def my_method(self) -> str:
            return "called"

    class HasPlainAttr:
        my_method = "not callable"

    class DynamicAttrs:
        def __getattr__(self, name: str) -> str:
            return f"attr_{name}"

    # A real method is returned and is invocable.
    method = getcallable(HasMethod(), "my_method")
    assert method is not None
    assert callable(method)
    assert method() == "called"
    # An existing but non-callable attribute yields None.
    assert getcallable(HasPlainAttr(), "my_method") is None
    # A missing attribute yields None.
    assert getcallable(HasMethod(), "nonexistent") is None
    # __getattr__ fabricating a non-callable value also yields None, even
    # though hasattr reports True.
    dynamic = DynamicAttrs()
    assert hasattr(dynamic, "any_attr")
    assert getcallable(dynamic, "any_attr") is None
def test_is_callable_method() -> None:
    """Test the is_callable_method utility function."""

    class HasMethod:
        def my_method(self) -> str:
            return "called"

    class HasPlainAttr:
        my_attr = "not callable"

    class DynamicAttrs:
        def __getattr__(self, name: str) -> str:
            return f"attr_{name}"

    # True for a real bound method.
    assert is_callable_method(HasMethod(), "my_method") is True
    # False for an existing but non-callable attribute.
    assert is_callable_method(HasPlainAttr(), "my_attr") is False
    # False for a missing attribute.
    assert is_callable_method(HasMethod(), "nonexistent") is False
    # False when __getattr__ fabricates a non-callable value, even though
    # hasattr reports True.
    dynamic = DynamicAttrs()
    assert hasattr(dynamic, "any_attr")
    assert is_callable_method(dynamic, "any_attr") is False
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_methods.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/issues/8023_stale_table.py | import marimo
app = marimo.App()
@app.cell
def _():
import marimo as mo
import pandas as pd
return mo, pd
@app.cell
def _(mo):
data_version, set_data_version = mo.state(0)
return data_version, set_data_version
@app.cell
def _():
data = [
{"id": 1, "status": "pending", "value": 10},
{"id": 2, "status": "pending", "value": 20},
{"id": 3, "status": "pending", "value": 30},
{"id": 4, "status": "pending", "value": 40},
]
return (data,)
@app.cell
def _(data, data_version, mo, pd):
_ = data_version()
df = pd.DataFrame(data)
table = mo.ui.table(df, selection="single", page_size=3)
mo.output.replace(table)
return (table,)
@app.cell
def _(mo):
approve_btn = mo.ui.run_button(label="Approve")
mo.output.replace(approve_btn)
return (approve_btn,)
@app.cell
def _(approve_btn, data, set_data_version, table):
import time
if approve_btn.value:
_sel = table.value
if _sel is not None and len(_sel) > 0:
_id = _sel.iloc[0]["id"]
for item in data:
if item["id"] == _id:
item["status"] = "approved"
break
set_data_version(time.time())
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/8023_stale_table.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/api/endpoints/lsp.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from starlette.authentication import requires
from marimo._server.api.deps import AppState
from marimo._server.api.utils import parse_request
from marimo._server.models.lsp import (
LspHealthResponse,
LspRestartRequest,
LspRestartResponse,
)
from marimo._server.router import APIRouter
if TYPE_CHECKING:
from starlette.requests import Request
# Router for LSP endpoints
router = APIRouter()
@router.get("/health")
@requires("edit")
async def lsp_health(request: Request) -> LspHealthResponse:
    """
    responses:
        200:
            description: Get health status of all LSP servers
            content:
                application/json:
                    schema:
                        $ref: "#/components/schemas/LspHealthResponse"
    """
    # The docstring above is OpenAPI YAML consumed by schema generation.
    # Health aggregation is delegated to the session manager's LSP server.
    app_state = AppState(request)
    return await app_state.session_manager.lsp_server.get_health()
@router.post("/restart")
@requires("edit")
async def lsp_restart(request: Request) -> LspRestartResponse:
    """
    requestBody:
        content:
            application/json:
                schema:
                    $ref: "#/components/schemas/LspRestartRequest"
    responses:
        200:
            description: Restart LSP servers
            content:
                application/json:
                    schema:
                        $ref: "#/components/schemas/LspRestartResponse"
    """
    # The docstring above is OpenAPI YAML consumed by schema generation.
    app_state = AppState(request)
    # server_ids may be None, which the server treats as its default set
    # (per LspRestartRequest's field comment).
    body = await parse_request(request, cls=LspRestartRequest)
    return await app_state.session_manager.lsp_server.restart(
        server_ids=body.server_ids
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/endpoints/lsp.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_server/models/lsp.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Literal, NewType, Optional
import msgspec
# Type-safe server identifier (used below as list items and dict keys in
# health/restart responses).
LspServerId = NewType("LspServerId", str)
# Status enum for LSP server health
LspServerStatus = Literal[
    "starting",  # process launched, initializing
    "running",  # healthy and responsive to pings
    "stopped",  # not running (never started or cleanly stopped)
    "crashed",  # exited with non-zero code
    "unresponsive",  # process alive but not responding to pings
]
class LspServerHealth(msgspec.Struct, rename="camel"):
    """Health status for a single LSP server.
    Status meanings:
    - starting: process launched, initializing
    - running: healthy and responsive to pings
    - stopped: not running (never started or cleanly stopped)
    - crashed: exited with non-zero code
    - unresponsive: process alive but not responding to pings
    """
    # Identifier of the server this entry describes (e.g. "pylsp").
    server_id: LspServerId
    status: LspServerStatus
    # Port assigned to the server.
    port: int
    # Last ping round-trip time in milliseconds, if measured.
    last_ping_ms: Optional[float] = None
    # Human-readable error detail, if any.
    error: Optional[str] = None
    started_at: Optional[float] = None  # Unix timestamp
class LspHealthResponse(msgspec.Struct, rename="camel"):
    """Aggregated health response for all LSP servers."""
    # Aggregate status across all servers.
    status: Literal["healthy", "degraded", "unhealthy"]
    # Per-server health entries.
    servers: list[LspServerHealth]
class LspRestartRequest(msgspec.Struct, rename="camel"):
    """Request to restart LSP servers."""
    # Specific servers to restart; None = restart failed servers.
    server_ids: Optional[list[LspServerId]] = (
        None  # None = restart failed servers
    )
class LspRestartResponse(msgspec.Struct, rename="camel"):
    """Response from restart operation."""
    success: bool
    restarted: list[LspServerId]  # Server IDs that were restarted
    # NOTE: msgspec copies mutable struct defaults per instance, so a dict
    # default here is safe (unlike a plain class attribute).
    errors: dict[LspServerId, str] = {}  # Server ID -> error message
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/models/lsp.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_server/api/endpoints/test_lsp_endpoints.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import uvicorn
from starlette.testclient import TestClient
from marimo._server.lsp import (
BaseLspServer,
CompositeLspServer,
NoopLspServer,
)
from marimo._server.main import create_starlette_app
from marimo._server.models.lsp import (
LspHealthResponse,
LspRestartResponse,
LspServerHealth,
)
from tests._server.mocks import (
get_mock_session_manager,
get_starlette_server_state_init,
token_header,
)
if TYPE_CHECKING:
from starlette.applications import Starlette
@pytest.fixture
def edit_app() -> Starlette:
    """A marimo edit-mode Starlette app wired to a mocked session manager."""
    app = create_starlette_app(base_url="")
    session_manager = get_mock_session_manager()
    # Attach a uvicorn server object to app state; serve() is never called,
    # so no server actually starts.
    uvicorn_server = uvicorn.Server(uvicorn.Config(app))
    uvicorn_server.servers = []
    app.state.server = uvicorn_server
    get_starlette_server_state_init(session_manager=session_manager).apply(
        app.state
    )
    return app
class TestLspEndpoints:
    """Test LSP API endpoints."""
    @pytest.mark.parametrize(
        ("method", "path"),
        [
            ("get", "/api/lsp/health"),
            ("post", "/api/lsp/restart"),
        ],
    )
    def test_unauthorized(
        self, edit_app: Starlette, method: str, path: str
    ) -> None:
        """Requests without the auth token header are rejected with 401."""
        client = TestClient(edit_app)
        # POST requests send an empty JSON body.
        kwargs = {"json": {}} if method == "post" else {}
        response = getattr(client, method)(path, **kwargs)
        assert response.status_code == 401
    def test_health(self, edit_app: Starlette) -> None:
        """An authorized health request succeeds."""
        client = TestClient(edit_app)
        response = client.get("/api/lsp/health", headers=token_header())
        assert response.status_code == 200
        content = response.json()
        # NoopLspServer returns healthy with empty servers
        assert content == {"status": "healthy", "servers": []}
    @pytest.mark.parametrize(
        "json_body",
        [
            {},
            {"serverIds": ["pylsp", "copilot"]},
        ],
    )
    def test_restart(
        self, edit_app: Starlette, json_body: dict[str, Any]
    ) -> None:
        """Restart succeeds both with and without explicit server ids."""
        client = TestClient(edit_app)
        response = client.post(
            "/api/lsp/restart", json=json_body, headers=token_header()
        )
        assert response.status_code == 200
        content = response.json()
        # NoopLspServer returns success with empty restarted
        assert content == {"success": True, "restarted": [], "errors": {}}
class TestLspModels:
    """Test LSP model serialization."""
    def test_models(self) -> None:
        """Models construct correctly and expose their fields."""
        # LspServerHealth: unspecified optional fields default to None.
        health = LspServerHealth(
            server_id="pylsp",
            status="running",
            port=8080,
            last_ping_ms=5.0,
        )
        assert health.server_id == "pylsp"
        assert health.status == "running"
        assert health.error is None
        # LspHealthResponse wraps per-server entries.
        response = LspHealthResponse(status="healthy", servers=[health])
        assert response.status == "healthy"
        assert len(response.servers) == 1
        # LspRestartResponse carries per-server error messages.
        restart = LspRestartResponse(
            success=True, restarted=["pylsp"], errors={"copilot": "Failed"}
        )
        assert restart.success is True
        assert restart.errors == {"copilot": "Failed"}
class TestNoopLspServer:
    """Test NoopLspServer returns safe defaults."""
    @pytest.mark.asyncio
    async def test_health_and_restart(self) -> None:
        """The no-op server reports healthy and restarts as a no-op."""
        server = NoopLspServer()
        # Health returns healthy with no servers
        health = await server.get_health()
        assert health.status == "healthy"
        assert health.servers == []
        # Restart always succeeds with no action
        for server_ids in [None, ["pylsp"]]:
            result = await server.restart(server_ids=server_ids)
            assert result.success is True
            assert result.restarted == []
            assert result.errors == {}
class TestBaseLspServer:
    """Test BaseLspServer ping, health, and restart logic."""
    @pytest.fixture
    def mock_server(self) -> BaseLspServer:
        """A concrete BaseLspServer subclass with stubbed-out hooks."""

        class MockLspServer(BaseLspServer):
            id = "mock-lsp"
            def validate_requirements(self):
                return True
            def get_command(self):
                return ["echo", "mock"]
            def missing_binary_alert(self):
                return None
        return MockLspServer(port=8080)
    @pytest.mark.asyncio
    async def test_ping_not_running(self, mock_server: BaseLspServer) -> None:
        """Pinging a server that was never started is unresponsive."""
        is_responsive, ping_ms = await mock_server.ping()
        assert is_responsive is False
        assert ping_ms is None
    @pytest.mark.asyncio
    async def test_get_health_not_running(
        self, mock_server: BaseLspServer
    ) -> None:
        """A never-started server reports unhealthy/stopped."""
        health = await mock_server.get_health()
        assert health.status == "unhealthy"
        assert health.servers[0].status == "stopped"
    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        ("server_ids", "should_restart"),
        [
            (["other-server"], False),
            (["mock-lsp"], True),
            (None, True),
        ],
    )
    async def test_restart(
        self,
        mock_server: BaseLspServer,
        server_ids: list[str] | None,
        should_restart: bool,
    ) -> None:
        """Restart acts only when the id matches (or no ids are given)."""
        mock_server.restart_server = AsyncMock(return_value=None)
        result = await mock_server.restart(server_ids=server_ids)
        assert result.success is True
        if should_restart:
            assert result.restarted == ["mock-lsp"]
            mock_server.restart_server.assert_called_once()
        else:
            assert result.restarted == []
    @pytest.mark.asyncio
    async def test_restart_with_error(
        self, mock_server: BaseLspServer
    ) -> None:
        """A restart failure surfaces as success=False with the message."""
        mock_server.restart_server = AsyncMock(
            side_effect=Exception("Test error")
        )
        result = await mock_server.restart(server_ids=["mock-lsp"])
        assert result.success is False
        assert "Test error" in result.errors["mock-lsp"]
    @pytest.mark.parametrize(
        ("startup_failed", "returncode", "expected"),
        [
            (True, None, True),  # startup failed
            (False, 1, True),  # process crashed
            (False, None, False),  # no failure
        ],
    )
    def test_has_failed(
        self,
        mock_server: BaseLspServer,
        startup_failed: bool,
        returncode: int | None,
        expected: bool,
    ) -> None:
        """has_failed covers both startup failure and process crash."""
        mock_server._startup_failed = startup_failed
        if returncode is not None:
            mock_server.process = MagicMock()
            mock_server.process.returncode = returncode
        assert mock_server.has_failed() is expected
class TestCompositeLspServer:
    """Test CompositeLspServer aggregation logic."""
    @pytest.fixture
    def mock_ports(self):
        """Mock find_free_port to return incrementing ports."""
        port = [8000]
        def side_effect(_: int) -> int:
            port[0] += 1
            return port[0]
        with patch(
            "marimo._server.lsp.find_free_port", side_effect=side_effect
        ):
            yield
    @pytest.fixture
    def config_reader(self) -> MagicMock:
        """A config reader with copilot disabled and pylsp enabled."""
        reader = MagicMock()
        reader.get_config.return_value = {
            "completion": {"copilot": False},
            "language_servers": {"pylsp": {"enabled": True}},
        }
        return reader
    @pytest.mark.asyncio
    async def test_health_no_servers_enabled(
        self, mock_ports: None, config_reader: MagicMock
    ) -> None:
        """With nothing enabled, aggregate health is trivially healthy."""
        del mock_ports
        config_reader.get_config.return_value = {
            "completion": {"copilot": False},
            "language_servers": {},
        }
        server = CompositeLspServer(config_reader, min_port=8000)
        health = await server.get_health()
        assert health.status == "healthy"
        assert health.servers == []
    @pytest.mark.asyncio
    async def test_restart_unknown_server(
        self, mock_ports: None, config_reader: MagicMock
    ) -> None:
        """Restarting an unknown id fails with an explanatory error."""
        del mock_ports
        server = CompositeLspServer(config_reader, min_port=8000)
        result = await server.restart(server_ids=["unknown-server"])
        assert result.success is False
        assert "Unknown server" in result.errors["unknown-server"]
    @pytest.mark.parametrize(
        ("copilot_value", "server_name", "expected"),
        [
            (True, "copilot", True),
            ("github", "copilot", True),
            (False, "copilot", False),
            (False, "pylsp", True),  # enabled in fixture
        ],
    )
    def test_is_enabled(
        self,
        mock_ports: None,
        config_reader: MagicMock,
        copilot_value: Any,
        server_name: str,
        expected: bool,
    ) -> None:
        """_is_enabled honors both copilot config and language_servers."""
        del mock_ports
        config_reader.get_config.return_value = {
            "completion": {"copilot": copilot_value},
            "language_servers": {"pylsp": {"enabled": True}},
        }
        server = CompositeLspServer(config_reader, min_port=8000)
        config = config_reader.get_config()
        assert server._is_enabled(config, server_name) is expected
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/api/endpoints/test_lsp_endpoints.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/plotly/scattermap_selection.py | import marimo
__generated_with = "0.19.5"
app = marimo.App(width="medium", auto_download=["html"])
with app.setup:
import marimo as mo
import pandas as pd
import plotly.graph_objects as go
@app.cell
def _():
df_mock = pd.DataFrame(
{
"id": ["WC_0001", "WC_0002", "WC_0003", "WC_0004", "WC_0005"],
"Region": ["WC", "WC", "WC", "WC", "WC"],
"lon": [18.42, 18.46, 18.50, 18.54, 18.58],
"lat": [-33.93, -33.95, -33.91, -33.97, -33.89],
"cluster": [0, 1, 0, 2, 1],
}
)
figg = go.Figure()
figg.add_trace(
go.Scattermap(
lon=df_mock["lon"],
lat=df_mock["lat"],
mode="markers",
marker=dict(size=12, color="red", opacity=0.9),
customdata=df_mock[["id", "cluster"]].values,
hovertemplate=(
"<b>%{customdata[0]}</b><br>"
"cluster: %{customdata[1]}<br>"
"<extra></extra>"
),
name="Mock sites",
)
)
figg.update_layout(
dragmode="lasso",
map=dict(
style="open-street-map",
zoom=10,
center=dict(lat=df_mock["lat"].mean(), lon=df_mock["lon"].mean()),
),
margin=dict(l=0, r=0, t=0, b=0),
)
plot = mo.ui.plotly(figg)
plot
return (plot,)
@app.cell
def _(plot):
plot.value
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/plotly/scattermap_selection.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_runtime/runner/test_kernel_runner.py | from typing import cast
import pytest
from marimo._runtime.app.kernel_runner import AppKernelRunner
from marimo._types.ids import CellId_t
def _make_runner():
# Bypass __init__ since we only test caching logic
runner = AppKernelRunner.__new__(AppKernelRunner)
runner._previously_seen_defs = None
runner._outputs = {cast(CellId_t, "cell-1"): "dummy"}
return runner
@pytest.mark.requires("numpy")
def test_numpy_defs_do_not_crash_and_invalidate_cache():
runner = _make_runner()
import numpy as np
defs1 = {"arr": np.ones(1)}
defs2 = {"arr": np.zeros(2)}
runner._previously_seen_defs = defs1
# Must not raise ValueError
cached = runner.are_outputs_cached(defs2)
assert cached is False
@pytest.mark.requires("numpy")
def test_numpy_defs_equal_use_cache():
runner = _make_runner()
import numpy as np
arr = np.array([1, 2, 3])
defs1 = {"arr": arr}
defs2 = {"arr": arr.copy()} # different object, same values
runner._previously_seen_defs = defs1
cached = runner.are_outputs_cached(defs2)
assert cached is True
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/runner/test_kernel_runner.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/plotly/shapes.py | import marimo
__generated_with = "0.19.5"
app = marimo.App(width="medium", auto_download=["html"])
@app.cell
def _():
import marimo as mo
return (mo,)
@app.cell
def _():
import plotly.express as px
import numpy as np
return np, px
@app.cell
def _(mo):
plot_boxes = mo.ui.checkbox(label="Plot boxes", value=False)
return (plot_boxes,)
@app.cell
def _(plot_boxes):
plot_boxes
return
@app.cell
def _(np, plot_boxes, px):
img_rgb = np.array([[[255, 0, 0], [0, 255, 0], [0, 0, 255]],
[[0, 255, 0], [0, 0, 255], [255, 0, 0]]
], dtype=np.uint8)
fig = px.imshow(img_rgb)
if plot_boxes.value:
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.4, y0=0.4,
x1=0.6, y1=0.6,
line=dict(
color="Red",
width=3,
)
)
fig.update_layout(
dragmode='drawrect',
newshape=dict(line_color='cyan'))
fig
return (fig,)
@app.cell
def _(fig):
fig.layout.shapes
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/plotly/shapes.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_utils/test_platform.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from unittest.mock import patch
from click.testing import CliRunner
from marimo._cli.cli import main
from marimo._utils.platform import (
check_shared_memory_available,
is_pyodide,
is_windows,
)
class TestIsWindows:
def test_is_windows_on_win32(self) -> None:
with patch("marimo._utils.platform.sys.platform", "win32"):
assert is_windows()
def test_is_windows_on_cygwin(self) -> None:
with patch("marimo._utils.platform.sys.platform", "cygwin"):
assert is_windows()
def test_is_windows_on_linux(self) -> None:
with patch("marimo._utils.platform.sys.platform", "linux"):
assert not is_windows()
def test_is_windows_on_darwin(self) -> None:
with patch("marimo._utils.platform.sys.platform", "darwin"):
assert not is_windows()
class TestIsPyodide:
def test_is_pyodide_when_not_loaded(self) -> None:
# By default, pyodide should not be in sys.modules
assert not is_pyodide()
def test_is_pyodide_when_loaded(self) -> None:
with patch(
"marimo._utils.platform.sys.modules", {"pyodide": object()}
):
assert is_pyodide()
class TestCheckSharedMemoryAvailable:
def test_shared_memory_available(self) -> None:
# On a normal system, shared memory should be available
is_available, error = check_shared_memory_available()
assert is_available
assert error == ""
def test_shared_memory_unavailable_on_pyodide(self) -> None:
with patch("marimo._utils.platform.is_pyodide", return_value=True):
is_available, error = check_shared_memory_available()
assert not is_available
assert "Pyodide" in error
def test_shared_memory_oserror(self) -> None:
with patch("marimo._utils.platform.is_pyodide", return_value=False):
# Mock the SharedMemory class to raise OSError
mock_shm_class = type(
"MockSharedMemory",
(),
{
"__init__": lambda _self, **_kwargs: (_ for _ in ()).throw(
OSError("Cannot allocate memory")
)
},
)
with patch(
"multiprocessing.shared_memory.SharedMemory", mock_shm_class
):
is_available, error = check_shared_memory_available()
assert not is_available
assert "Unable to create shared memory" in error
assert "Cannot allocate memory" in error
assert (
"Docker" in error
) # Should mention Docker as a possible cause
def test_edit_exits_with_error_when_shared_memory_unavailable(
self,
) -> None:
runner = CliRunner()
with patch(
"marimo._utils.platform.check_shared_memory_available",
return_value=(False, "Test shared memory error"),
):
result = runner.invoke(
main,
["edit", "--headless", "--no-token", "--skip-update-check"],
)
# Should exit with error code 1
assert result.exit_code == 1
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_platform.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_internal/commands.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for commands."""
from marimo._runtime.commands import (
AppMetadata,
ClearCacheCommand,
CodeCompletionCommand,
Command,
CommandMessage,
CreateNotebookCommand,
DebugCellCommand,
DeleteCellCommand,
ExecuteCellCommand,
ExecuteCellsCommand,
ExecuteScratchpadCommand,
ExecuteStaleCellsCommand,
GetCacheInfoCommand,
HTTPRequest,
InstallPackagesCommand,
InvokeFunctionCommand,
ListDataSourceConnectionCommand,
ListOrValue,
ListSecretKeysCommand,
ListSQLTablesCommand,
ModelCommand,
ModelMessage,
PreviewDatasetColumnCommand,
PreviewSQLTableCommand,
Primitive,
RefreshSecretsCommand,
RenameNotebookCommand,
SerializedCLIArgs,
SerializedQueryParams,
StopKernelCommand,
SyncGraphCommand,
UpdateCellConfigCommand,
UpdateUIElementCommand,
UpdateUserConfigCommand,
ValidateSQLCommand,
kebab_case,
)
__all__ = [
"AppMetadata",
"ClearCacheCommand",
"CodeCompletionCommand",
"Command",
"CommandMessage",
"CreateNotebookCommand",
"DebugCellCommand",
"DeleteCellCommand",
"ExecuteCellCommand",
"ExecuteCellsCommand",
"ExecuteScratchpadCommand",
"ExecuteStaleCellsCommand",
"GetCacheInfoCommand",
"HTTPRequest",
"InstallPackagesCommand",
"InvokeFunctionCommand",
"ListDataSourceConnectionCommand",
"ListOrValue",
"ListSQLTablesCommand",
"ListSecretKeysCommand",
"ModelMessage",
"PreviewDatasetColumnCommand",
"PreviewSQLTableCommand",
"Primitive",
"RefreshSecretsCommand",
"RenameNotebookCommand",
"SerializedCLIArgs",
"SerializedQueryParams",
"StopKernelCommand",
"SyncGraphCommand",
"UpdateCellConfigCommand",
"UpdateUIElementCommand",
"UpdateUserConfigCommand",
"ModelCommand",
"ValidateSQLCommand",
"kebab_case",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/commands.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/config.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for configuration management."""
from marimo._config.config import (
DisplayConfig,
MarimoConfig,
PartialMarimoConfig,
)
from marimo._config.manager import (
MarimoConfigManager,
get_default_config_manager,
)
__all__ = [
"DisplayConfig",
"MarimoConfig",
"MarimoConfigManager",
"PartialMarimoConfig",
"get_default_config_manager",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/config.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/converters.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for notebook converters."""
from marimo._convert.converters import MarimoConvert
__all__ = [
"MarimoConvert",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/converters.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/ids.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for ID types."""
from marimo._types.ids import (
CellId_t,
ConsumerId,
SessionId,
UIElementId,
VariableName,
)
__all__ = [
"CellId_t",
"ConsumerId",
"SessionId",
"UIElementId",
"VariableName",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/ids.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_internal/ipc.py | # Copyright 2026 Marimo. All rights reserved.
"""Internal API for inter-process communication (IPC)."""
from marimo._ipc.connection import Channel, Connection
from marimo._ipc.queue_manager import QueueManager
from marimo._ipc.types import ConnectionInfo, KernelArgs
__all__ = [
"Channel",
"Connection",
"ConnectionInfo",
"KernelArgs",
"QueueManager",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_internal/ipc.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.