sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
marimo-team/marimo:marimo/_server/ai/ids.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from typing import NewType
from marimo import _loggers
LOGGER = _loggers.marimo_logger()
AiProviderId = NewType("AiProviderId", str)
QualifiedModelId = NewType("QualifiedModelId", str)
ShortModelId = NewType("ShortModelId", str)
@dataclass
class AiModelId:
    """A model identifier qualified by its provider (``<provider>/<model>``)."""

    provider: AiProviderId
    model: ShortModelId

    def __str__(self) -> QualifiedModelId:
        qualified = f"{self.provider}/{self.model}"
        return QualifiedModelId(qualified)

    def __repr__(self) -> str:
        return f"AiModelId(provider={self.provider}, model={self.model})"

    @staticmethod
    def from_model(model_id: str) -> AiModelId:
        """Parse ``model_id``; when no ``/`` is present, guess the provider."""
        if "/" in model_id:
            # Split on the first "/" only; model names may contain slashes.
            provider, short_id = model_id.split("/", 1)
            return AiModelId(
                provider=AiProviderId(provider), model=ShortModelId(short_id)
            )
        LOGGER.warning(
            f"Invalid model ID: {model_id}. Model ID must be in the format <provider>/<model>"
        )
        guess = _guess_provider(model_id)
        LOGGER.warning(f"Guessing provider for {model_id} as {guess}")
        return AiModelId(provider=guess, model=ShortModelId(model_id))
def _guess_provider(model: str) -> AiProviderId:
    """Best-effort provider inference from a bare model name.

    Matching is prefix-based and case-sensitive; any unrecognized name
    falls back to the "ollama" provider.
    """
    if model.startswith(("google", "gemini")):
        return AiProviderId("google")
    if model.startswith("claude"):
        return AiProviderId("anthropic")
    if model.startswith(("gpt", "o4", "o3", "o1")):
        return AiProviderId("openai")
    return AiProviderId("ollama")
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/ai/ids.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_server/ai/test_ai_ids.py | from unittest.mock import Mock, patch
from marimo._server.ai.ids import (
AiModelId,
AiProviderId,
QualifiedModelId,
ShortModelId,
_guess_provider,
)
class TestAiModelId:
    """Tests for AiModelId construction, formatting, and parsing."""

    def test_init(self):
        """Test AiModelId initialization."""
        model_id = AiModelId(
            provider=AiProviderId("openai"), model=ShortModelId("gpt-4")
        )
        assert model_id.provider == "openai"
        assert model_id.model == "gpt-4"

    def test_str_conversion(self):
        """Test string conversion returns qualified model ID."""
        model_id = AiModelId(
            provider=AiProviderId("anthropic"), model=ShortModelId("claude-3")
        )
        result = str(model_id)
        assert result == "anthropic/claude-3"
        assert isinstance(result, str)

    def test_repr(self):
        """Test repr returns proper representation."""
        model_id = AiModelId(
            provider=AiProviderId("google"), model=ShortModelId("gemini-pro")
        )
        result = repr(model_id)
        assert result == "AiModelId(provider=google, model=gemini-pro)"

    def test_from_model_valid_format(self):
        """Test from_model with valid provider/model format."""
        model_id = AiModelId.from_model("openai/gpt-4")
        assert model_id.provider == "openai"
        assert model_id.model == "gpt-4"

    def test_from_model_complex_model_name(self):
        """Test from_model with complex model names containing slashes."""
        # Only the first "/" separates provider from model.
        model_id = AiModelId.from_model(
            "huggingface/microsoft/DialoGPT-medium"
        )
        assert model_id.provider == "huggingface"
        assert model_id.model == "microsoft/DialoGPT-medium"

    @patch("marimo._server.ai.ids.LOGGER")
    def test_from_model_invalid_format_openai(self, mock_logger: Mock):
        """Test from_model with invalid format - should guess OpenAI."""
        model_id = AiModelId.from_model("gpt-4")
        assert model_id.provider == "openai"
        assert model_id.model == "gpt-4"
        # Verify warning was logged
        mock_logger.warning.assert_any_call(
            "Invalid model ID: gpt-4. Model ID must be in the format <provider>/<model>"
        )
        mock_logger.warning.assert_any_call(
            "Guessing provider for gpt-4 as openai"
        )

    @patch("marimo._server.ai.ids.LOGGER")
    def test_from_model_invalid_format_anthropic(self, mock_logger: Mock):
        """Test from_model with invalid format - should guess Anthropic."""
        model_id = AiModelId.from_model("claude-3-opus")
        assert model_id.provider == "anthropic"
        assert model_id.model == "claude-3-opus"
        mock_logger.warning.assert_any_call(
            "Invalid model ID: claude-3-opus. Model ID must be in the format <provider>/<model>"
        )

    @patch("marimo._server.ai.ids.LOGGER")
    def test_from_model_invalid_format_google(self, mock_logger: Mock):
        del mock_logger
        # NOTE(review): the string below is a no-op expression, not a
        # docstring, because it follows a statement (`del`).
        """Test from_model with invalid format - should guess Google."""
        model_id = AiModelId.from_model("gemini-pro")
        assert model_id.provider == "google"
        assert model_id.model == "gemini-pro"

    @patch("marimo._server.ai.ids.LOGGER")
    def test_from_model_invalid_format_ollama_fallback(
        self, mock_logger: Mock
    ):
        del mock_logger
        """Test from_model with invalid format - should fallback to Ollama."""
        model_id = AiModelId.from_model("llama2")
        assert model_id.provider == "ollama"
        assert model_id.model == "llama2"


class TestGuessProvider:
    """Tests for the prefix-based provider guessing heuristic."""

    def test_guess_openai_gpt(self):
        """Test guessing OpenAI provider for GPT models."""
        assert _guess_provider("gpt-4") == "openai"
        assert _guess_provider("gpt-3.5-turbo") == "openai"
        assert _guess_provider("gpt-4o") == "openai"

    def test_guess_openai_o3(self):
        """Test guessing OpenAI provider for O3 models."""
        assert _guess_provider("o3-mini") == "openai"
        assert _guess_provider("o3-max") == "openai"

    def test_guess_openai_o1(self):
        """Test guessing OpenAI provider for O1 models."""
        assert _guess_provider("o1-preview") == "openai"
        assert _guess_provider("o1-mini") == "openai"

    def test_guess_anthropic_claude(self):
        """Test guessing Anthropic provider for Claude models."""
        assert _guess_provider("claude-3-opus") == "anthropic"
        assert _guess_provider("claude-3-sonnet") == "anthropic"
        assert _guess_provider("claude-3-haiku") == "anthropic"
        assert _guess_provider("claude-2") == "anthropic"

    def test_guess_google_gemini(self):
        """Test guessing Google provider for Gemini models."""
        assert _guess_provider("gemini-pro") == "google"
        assert _guess_provider("gemini-1.5-pro") == "google"
        assert _guess_provider("gemini-flash") == "google"

    def test_guess_google_google_prefix(self):
        """Test guessing Google provider for models with google prefix."""
        assert _guess_provider("google-palm") == "google"
        assert _guess_provider("google-bard") == "google"

    def test_guess_ollama_fallback(self):
        """Test fallback to Ollama for unknown models."""
        assert _guess_provider("llama2") == "ollama"
        assert _guess_provider("mistral") == "ollama"
        assert _guess_provider("codellama") == "ollama"
        assert _guess_provider("unknown-model") == "ollama"

    def test_guess_provider_edge_cases(self):
        """Test edge cases for provider guessing."""
        # Empty string
        assert _guess_provider("") == "ollama"
        # Models that might be ambiguous
        assert _guess_provider("gpt") == "openai"  # Starts with gpt
        assert _guess_provider("claude") == "anthropic"  # Starts with claude
        assert _guess_provider("gemini") == "google"  # Starts with gemini
        # Case sensitivity (should work as expected)
        assert (
            _guess_provider("GPT-4") == "ollama"
        )  # Doesn't start with lowercase "gpt"
        assert (
            _guess_provider("Claude-3") == "ollama"
        )  # Doesn't start with lowercase "claude"


class TestTypeAliases:
    """Tests that the NewType aliases are plain strings at runtime."""

    def test_type_aliases_are_strings(self):
        """Test that type aliases behave as strings."""
        provider_id = AiProviderId("test-provider")
        qualified_id = QualifiedModelId("test/model")
        short_id = ShortModelId("model")
        assert isinstance(provider_id, str)
        assert isinstance(qualified_id, str)
        assert isinstance(short_id, str)
        assert provider_id == "test-provider"
        assert qualified_id == "test/model"
        assert short_id == "model"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/ai/test_ai_ids.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/sql/ibis_backend_catalog.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "altair==5.5.0",
# "datafusion==48.0.0",
# "ibis-framework[datafusion,duckdb]==10.8.0",
# "marimo",
# ]
# ///
import marimo

__generated_with = "0.15.5"
app = marimo.App(width="medium")


@app.cell
def _():
    import ibis
    import marimo as mo
    from ibis import _

    ibis.options.interactive = True
    # NOTE(review): `_` (the ibis deferred) is imported here but not
    # returned; later cells reference `_` without receiving it as a
    # parameter — confirm this resolves under marimo's scoping rules.
    return ibis, mo


@app.cell(hide_code=True)
def _(mo):
    mo.md(r"""# Multi-Catalog Ibis Backend Tests""")
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(r"""## DuckDB Multi-Catalog with Temp Tables""")
    return


@app.cell
def _(ibis):
    # Create DuckDB connection
    duckdb_con = ibis.duckdb.connect()
    # Create test data using ibis.memtable
    sales_data = ibis.memtable(
        {"product": ["laptop", "mouse", "keyboard"], "price": [1200, 25, 75], "quantity": [10, 100, 50]}
    )
    customer_data = ibis.memtable(
        {"customer_id": [1, 2, 3], "name": ["Alice", "Bob", "Charlie"], "region": ["US", "EU", "APAC"]}
    )
    # Create regular table
    duckdb_con.create_table("sales", obj=sales_data, overwrite=True)
    # Create temp table
    duckdb_con.create_table("temp_customers", obj=customer_data, temp=True, overwrite=True)
    return (duckdb_con,)


@app.cell
def _(duckdb_con):
    # Query the regular table using Ibis API with deferred API
    sales_table = duckdb_con.table("sales")
    sales_query = (
        sales_table.mutate(total_value=_.price * _.quantity)
        .select(_.product, _.price, _.quantity, _.total_value)
        .order_by(_.total_value.desc())
    )
    return


@app.cell
def _(duckdb_con):
    # Query the temp table using Ibis API with deferred API
    temp_customers_table = duckdb_con.table("temp_customers")
    temp_query = temp_customers_table.order_by(_.customer_id)
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(r"""## DuckDB Attached Catalogs""")
    return


@app.cell
def _(duckdb_con, ibis):
    # Attach additional in-memory catalogs using DuckDB's ATTACH
    duckdb_con.raw_sql("ATTACH ':memory:' AS catalog_a")
    duckdb_con.raw_sql("ATTACH ':memory:' AS catalog_b")
    # Create tables in different catalogs using ibis API
    inventory_data = ibis.memtable({"product": ["laptop", "mouse", "keyboard"], "stock": [50, 200, 100]})
    orders_data = ibis.memtable({"order_id": [1, 2, 3], "product": ["laptop", "mouse", "laptop"], "quantity": [2, 1, 1]})
    # Create tables in specific catalogs using dot notation
    duckdb_con.create_table("inventory", obj=inventory_data, database="catalog_a.main", overwrite=True)
    duckdb_con.create_table("orders", obj=orders_data, database="catalog_b.main", overwrite=True)
    return


@app.cell
def _(duckdb_con):
    # Cross-catalog query using Ibis API with deferred API
    orders_table = duckdb_con.table("orders", database="catalog_b.main")
    inventory_table = duckdb_con.table("inventory", database="catalog_a.main")
    cross_catalog_query = (
        orders_table.join(inventory_table, _.product == inventory_table.product)
        .select(_.order_id, _.product, _.quantity, inventory_table.stock)
        .order_by(_.order_id)
    )
    cross_catalog_query
    return


@app.cell(hide_code=True)
def _(mo):
    mo.md(r"""## DataFusion Backend""")
    return


@app.cell
def _(ibis):
    # Create DataFusion connection
    datafusion_con = ibis.datafusion.connect()
    # Create test data using ibis.memtable
    products_data = ibis.memtable(
        {
            "product_id": [1, 2, 3],
            "name": ["Widget A", "Widget B", "Widget C"],
            "category": ["Electronics", "Tools", "Electronics"],
        }
    )
    # Create table
    datafusion_con.create_table("products", obj=products_data, overwrite=True)
    return (datafusion_con,)


@app.cell
def _(datafusion_con):
    # Query using Ibis API with deferred API
    products_table = datafusion_con.table("products")
    electronics_query = (
        products_table.filter(_.category == "Electronics").select(_.product_id, _.name, _.category).order_by(_.product_id)
    )
    datafusion_con.create_table("electronics", electronics_query, overwrite=True)
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/sql/ibis_backend_catalog.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_ast/cell_id.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import random
import string
from uuid import UUID, uuid4
from marimo._types.ids import CellId_t
class CellIdGenerator:
    """Generates unique 4-letter cell IDs, optionally namespaced by a prefix.

    Uses a fixed random seed so that ID sequences are reproducible.
    """

    def __init__(self, prefix: str = "") -> None:
        self.prefix = prefix
        # Seeded RNG: same sequence of IDs on every run.
        self.random_seed = random.Random(42)
        # IDs handed out so far, used to guarantee uniqueness.
        self.seen_ids: set[CellId_t] = set()

    def create_cell_id(self) -> CellId_t:
        """Create a new unique cell ID.

        Returns:
            CellId_t: A new cell ID consisting of the manager's prefix
            followed by 4 random letters.
        """
        max_attempts = 100
        for _ in range(max_attempts):
            candidate = self.prefix + "".join(
                self.random_seed.choices(string.ascii_letters, k=4)
            )
            if candidate in self.seen_ids:
                continue
            new_id = CellId_t(candidate)
            self.seen_ids.add(new_id)
            return new_id
        raise ValueError(
            f"Failed to create a unique cell ID after {max_attempts} attempts"
        )
def external_prefix() -> str:
    """Get the prefix (a fresh UUID4 string) for external cell IDs."""
    return f"{uuid4()}"
def is_external_cell_id(cell_id: CellId_t) -> bool:
    """
    Check if cell_id is from an embedded/nested app.

    Detects only the embedded case: a UUID4 prefix (36 chars) followed by
    a non-empty cell ID suffix. Returns False for all other formats,
    including normal cell IDs and bare UUIDs (e.g. from VSCode).

    Cell ID formats:
    - "Hbol" -> normal cell (4 chars)
    - "<uuid>" -> VSCode cell (36 chars)
    - "<uuid>Hbol" -> embedded cell (37+ chars) ← detected here

    >>> is_external_cell_id("c9bf9e57-1685-4c89-bafb-ff5af830be8aHbol")
    True
    >>> is_external_cell_id("Hbol")
    False
    >>> is_external_cell_id("c9bf9e57-1685-4c89-bafb-ff5af830be8a")
    False
    """
    raw = str(cell_id)
    # A bare UUID is exactly 36 chars; external IDs must carry a suffix.
    if len(raw) <= 36:
        return False
    candidate = raw[:36]
    try:
        parsed = UUID(candidate, version=4)
    except ValueError:
        return False
    # Round-trip check rejects strings that parse but are not canonical v4.
    return str(parsed) == candidate
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ast/cell_id.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_save/external_decorators/app.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "marimo",
# ]
# ///
import marimo

__generated_with = "0.14.16"
app = marimo.App(width="medium")

with app.setup:
    import marimo as mo


@app.cell
def decorator_wrap():
    # Cache decorator applied inside a cell; both the wrapped function
    # and its computed value are exported from the cell.
    @mo.cache
    def cache(x):
        return x + 1

    bar = cache(1)
    return (bar, cache)


@app.cell
def block_wrap(mo):
    # NOTE(review): `mo` arrives as a cell parameter even though it is
    # imported in the setup block — confirm this shadowing is intended.
    with mo.cache("random") as cache_block:
        x = []
    a = "need a final line to trigger invalid block capture"
    return (x, cache_block)


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/external_decorators/app.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/utils.py | from __future__ import annotations
import inspect
import time
from typing import TYPE_CHECKING, Any, Optional
from marimo._messaging.msgspec_encoder import (
asdict,
encode_json_bytes,
)
from marimo._utils.parse_dataclass import parse_raw
if TYPE_CHECKING:
from typing import Callable
import msgspec
def try_assert_n_times(n: int, assert_fn: Callable[[], None]) -> None:
    """Attempt an assert multiple times.

    Sleeps between each attempt.
    """
    for _ in range(n):
        try:
            assert_fn()
        except Exception:
            time.sleep(0.1)
        else:
            return
    # One last attempt whose failure propagates to the caller.
    assert_fn()
def assert_serialize_roundtrip(obj: msgspec.Struct) -> None:
    """Assert that ``obj`` survives a JSON encode/parse round trip."""
    cls = type(obj)
    restored = parse_raw(encode_json_bytes(obj), cls)
    expected, actual = asdict(obj), asdict(restored)
    assert expected == actual, f"{expected} != {actual}"
def explore_module(
    module: Any, indent: int = 0, visited: Optional[set[int]] = None
) -> list[str]:
    """
    Recursively explore a module and collect all public exported names.

    Args:
        module: The module or object to explore
        indent: Current indentation level (for pretty printing)
        visited: Set of object ids already explored, used to break cycles;
            callers normally leave this as None

    Returns:
        One entry per public attribute, indented by the nesting level.
    """
    if visited is None:
        visited = set()
    # Skip if we've already visited this object (guards against cycles)
    if id(module) in visited:
        return []
    visited.add(id(module))
    results: list[str] = []
    # Non-modules (e.g. instances) may lack __name__; fall back to "" so
    # the submodule check below simply never matches instead of raising.
    module_name = getattr(module, "__name__", "")
    # Get all attributes of the module
    for name, obj in inspect.getmembers(module):
        # Skip private/special attributes (starting with _)
        if name.startswith("_"):
            continue
        # Create indentation string
        indent_str = " " * indent
        # Print the current item
        results.append(f"{indent_str}{name}")
        # Recursively explore if it's a module, class, or other container type
        if (
            inspect.ismodule(obj)
            and module_name
            and obj.__name__.startswith(module_name)
        ):
            # Only recurse into submodules of the current module
            results.extend(explore_module(obj, indent + 1, visited))
    return results
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/utils.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_plugins/stateless/inspect.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import html
import inspect as inspect_
from marimo._output.builder import h
from marimo._output.formatting import as_html
from marimo._output.hypertext import Html
class inspect(Html):
    """Inspect a Python object.

    Displays objects with their attributes, methods, and documentation
    in a rich HTML format. Useful for exploring objects that lack a rich repr.

    Args:
        obj: The object to inspect.
        help: Show full help text (otherwise just first paragraph).
        methods: Show methods.
        docs: Show documentation for attributes/methods.
        private: Show private attributes (starting with '_').
        dunder: Show dunder attributes (starting with '__').
        sort: Sort attributes alphabetically.
        all: Show all attributes (methods, private, and dunder).
        value: Show the object's value/repr.

    Returns:
        (Html): An `Html` object.

    Example:
        ```python
        mo.inspect(obj, methods=True)
        ```
    """

    def __init__(
        self,
        obj: object,
        *,
        help: bool = False,  # noqa: A002
        methods: bool = False,
        docs: bool = True,
        private: bool = False,
        dunder: bool = False,
        sort: bool = True,
        all: bool = False,  # noqa: A002
        value: bool = True,
    ) -> None:
        self._obj = obj
        # `all` is shorthand for enabling every attribute filter.
        if all:
            methods = True
            private = True
            dunder = True
        type_label, name = _get_object_title(obj)
        # Pill background/foreground colors keyed by object kind.
        type_colors = {
            "class": (
                "background-color: var(--blue-3); color: var(--blue-11);"
            ),
            "function": (
                "background-color: var(--green-3); color: var(--green-11);"
            ),
            "method": (
                "background-color: var(--purple-3); color: var(--purple-11);"
            ),
            "module": (
                "background-color: var(--orange-3); color: var(--orange-11);"
            ),
            "instance": (
                "background-color: var(--crimson-3); color: var(--crimson-11);"
            ),
            "object": (
                "background-color: var(--slate-3); color: var(--slate-11);"
            ),
        }
        pill_style = type_colors.get(type_label, type_colors["object"])
        docstring = inspect_.getdoc(obj) if docs else None
        # Without `help`, keep only the docstring's first paragraph.
        if docstring and not help:
            docstring = docstring.split("\n\n")[0]
        attributes = _get_filtered_attributes(obj, methods, private, dunder)
        if sort:
            attributes.sort(key=lambda x: x[0])
        # Header row: type pill followed by the object's (qualified) name.
        header = h.div(
            [
                h.span(
                    html.escape(type_label),
                    style=(
                        pill_style + "padding: 2px 8px; "
                        "border-radius: 4px; "
                        "font-family: monospace; "
                        "font-size: 0.75rem; "
                        "font-weight: 600; "
                        "margin-right: 8px; "
                        "display: inline-block;"
                    ),
                ),
                h.span(
                    html.escape(name),
                    style=(
                        "font-family: monospace; "
                        "font-size: 0.875rem; "
                        "color: var(--slate-12);"
                    ),
                ),
            ],
            style=(
                "padding: 10px 12px 8px 12px; display: flex; align-items: center;"
            ),
        )
        main_content: list[str] = []
        # Add divider after header
        main_content.append(
            h.div(
                "",  # Empty string for divider line
                style=(
                    "height: 1px; "
                    "background-color: var(--slate-3); "
                    "margin: 0 12px 8px 12px;"
                ),
            )
        )
        if docstring:
            doc_style = (
                "color: var(--slate-11); "
                "margin: 0 12px 8px 12px; "
                "font-size: 0.75rem; "
                "font-family: monospace; "
                "padding: 0; "
                "white-space: pre-wrap;"
            )
            main_content.append(h.div(html.escape(docstring), style=doc_style))
        # Only non-callable, non-class objects render an inline value block.
        if value and not inspect_.isclass(obj) and not callable(obj):
            main_content.append(_render_value(obj))
        if callable(obj):
            sig = _get_signature(obj)
            if sig:
                if inspect_.isfunction(obj) or inspect_.ismethod(obj):
                    # For functions/methods, show the full definition
                    func_name = (
                        obj.__name__ if hasattr(obj, "__name__") else ""
                    )
                    prefix = (
                        "async def"
                        if inspect_.iscoroutinefunction(obj)
                        else "def"
                    )
                    main_content.append(
                        h.div(
                            h.span(
                                f"{prefix} {html.escape(func_name)}{html.escape(sig)}:"
                            ),
                            style=(
                                "font-family: monospace; "
                                "font-size: 0.875rem; "
                                "color: var(--slate-12); "
                                "margin: 0 12px 8px 12px;"
                            ),
                        )
                    )
                else:
                    # For other callables (classes, etc), just show the signature
                    main_content.append(
                        h.div(
                            h.pre(
                                html.escape(sig),
                                style="color: var(--slate-12); overflow-x: auto; margin: 0;",
                            ),
                            style=(
                                "background-color: var(--background); "
                                "border: 1px solid var(--slate-3); "
                                "border-radius: 4px; "
                                "padding: 8px 10px; "
                                "margin: 0 12px 8px 12px; "
                                "font-family: monospace; "
                                "font-size: 0.875rem;"
                            ),
                        )
                    )
        if attributes:
            # One table row per attribute/method/property.
            table_rows = []
            for name, value, attr_type, error in attributes:
                table_rows.append(
                    _render_attribute_row(name, value, attr_type, error, docs)
                )
            main_content.append(
                h.div(
                    h.table(h.tbody(table_rows)),
                    style=(
                        "overflow-x: auto; font-size: 0.875rem; padding: 0 0 8px 0;"
                    ),
                )
            )
        super().__init__(
            h.div(
                [header] + main_content if main_content else [header],
                style=(
                    "border-radius: 6px; "
                    "overflow: hidden; "
                    "background-color: var(--slate-1); "
                    "display: inline-block; "
                    "min-width: 0; "
                    "max-width: 100%;"
                ),
            )
        )

    def _repr_md_(self) -> str:
        # Markdown contexts get the plain repr; fall back to the rendered
        # HTML text if the object's __repr__ itself raises.
        try:
            return repr(self._obj)
        except Exception:
            return self.text
def _get_object_title(obj: object) -> tuple[str, str]:
"""Returns (type_label, name) for the object."""
if inspect_.isclass(obj):
module = obj.__module__
if module and module != "__builtin__":
return ("class", f"{module}.{obj.__name__}")
return ("class", obj.__name__)
elif inspect_.isfunction(obj):
name = obj.__name__ if hasattr(obj, "__name__") else "function"
return ("function", name)
elif inspect_.ismethod(obj):
name = obj.__name__ if hasattr(obj, "__name__") else "method"
return ("method", name)
elif inspect_.ismodule(obj):
return (
"module",
obj.__name__ if hasattr(obj, "__name__") else "module",
)
elif hasattr(obj, "__class__"):
cls = obj.__class__
module = cls.__module__
if module and module != "__builtin__":
return ("instance", f"{module}.{cls.__name__}")
return ("instance", cls.__name__)
else:
return ("object", type(obj).__name__)
def _get_signature(obj: object) -> str | None:
try:
return str(inspect_.signature(obj)) # type: ignore
except (ValueError, TypeError):
return None
def _get_filtered_attributes(
    obj: object, methods: bool, private: bool, dunder: bool
) -> list[tuple[str, bool, str, Exception | None]]:
    """Collect (name, value, kind, error) rows for the attribute table.

    Filtering follows the ``methods``/``private``/``dunder`` flags; a row's
    kind is one of "error", "property", "method", or "attribute".
    """
    collected: list[tuple[str, bool, str, Exception | None]] = []
    try:
        names = dir(obj)
    except Exception:
        # dir() itself can raise for exotic objects; show nothing.
        return collected
    for name in names:
        is_dunder = name.startswith("__")
        if is_dunder and not dunder:
            continue
        if name.startswith("_") and not is_dunder and not private:
            continue
        error: Exception | None = None
        try:
            value = getattr(obj, name)
        except Exception as e:
            # getattr may raise (e.g. a broken property); keep the error.
            value, error = None, e
        if error is not None:
            kind = "error"
        elif _is_property(obj, name):
            kind = "property"
        elif callable(value):
            if not methods:
                continue
            kind = "method"
        else:
            kind = "attribute"
        collected.append((name, value, kind, error))
    return collected
def _is_property(obj: object, name: str) -> bool:
for cls in inspect_.getmro(type(obj)):
if name in cls.__dict__ and isinstance(cls.__dict__[name], property):
return True
return False
def _render_value(obj: object) -> str:
    """Render the object's value as a boxed HTML block.

    Prefers the object's rich HTML representation; falls back to an
    escaped ``repr`` when none exists.
    """
    container_style = (
        "background-color: var(--background); "
        "border: 1px solid var(--slate-3); "
        "border-radius: 4px; "
        "padding: 8px 10px; "
        "margin: 0 12px 8px 12px; "
        "overflow-x: auto; "
        "overflow-y: hidden;"
    )
    try:
        _, data = as_html(obj)._mime_()
        return h.div(data, style=container_style)
    except Exception:
        pass  # no rich representation; fall back to repr below
    try:
        value_repr = html.escape(repr(obj))
    except Exception as e:
        # Even __repr__ can raise; surface the failure instead of crashing.
        value_repr = f"<repr-error {html.escape(str(e))}>"
    return h.div(
        h.pre(
            value_repr,
            style="color: var(--slate-12); white-space: pre; margin: 0; font-family: monospace; font-size: 0.875rem;",
        ),
        style=container_style,
    )
def _render_attribute_row(
    name: str,
    value: object,
    attr_type: str,
    error: Exception | None,
    docs: bool,
) -> str:
    """Render one table row for the attribute table.

    Rows come in three flavors: errored attributes (red), methods
    (one-line signature plus optional doc), and plain attributes or
    properties (properties shown in italics).
    """
    name_style_base = (
        "padding: 2px 8px 2px 12px; "
        "vertical-align: top; "
        "text-align: right; "
        "font-family: monospace; "
        "font-size: 0.75rem; "
        "white-space: nowrap; "
        "line-height: 1.5; "
        "color: var(--slate-10);"
    )
    equals_style = (
        "padding: 2px 4px; "
        "color: var(--slate-9); "
        "vertical-align: top; "
        "font-family: monospace; "
        "font-size: 0.75rem; "
        "line-height: 1.5;"
    )
    if error is not None:
        # getattr raised for this attribute: render the exception in red.
        name_style = name_style_base + " color: var(--red-11);"
        # NOTE(review): the raw angle brackets around the exception text were
        # likely "&lt;"/"&gt;" entities before the dataset export decoded
        # them — confirm against the repository source.
        error_repr = f'<span style="color: var(--red-11); font-family: monospace; font-size: 0.75rem;"><{type(error).__name__}: {html.escape(str(error))}></span>'
        return h.tr(
            [
                h.td(html.escape(name), style=name_style),
                h.td("=", style=equals_style),
                h.td(
                    error_repr,
                    style="color: var(--red-11); vertical-align: top; line-height: 1.5; padding: 2px 12px 2px 4px;",
                ),
            ]
        )
    elif attr_type == "method":
        name_style = name_style_base
        display = _format_method(name, value, docs)
        return h.tr(
            [
                h.td(html.escape(name), style=name_style),
                h.td("=", style=equals_style),
                h.td(
                    h.span(html.escape(display), style="white-space: pre;"),
                    style=(
                        "color: var(--slate-11); "
                        "font-family: monospace; "
                        "font-size: 0.75rem; "
                        "vertical-align: top; "
                        "line-height: 1.5; "
                        "padding: 2px 12px 2px 4px;"
                    ),
                ),
            ]
        )
    else:
        name_style = name_style_base
        # Properties are visually distinguished with italics.
        if attr_type == "property":
            name_style += " font-style: italic;"
        value_html = _render_value_inline(value)
        return h.tr(
            [
                h.td(html.escape(name), style=name_style),
                h.td("=", style=equals_style),
                h.td(
                    value_html,
                    style="color: var(--foreground); vertical-align: top; line-height: 1.5; padding: 2px 12px 2px 4px;",
                ),
            ]
        )
def _format_method(name: str, method: object, docs: bool) -> str:
try:
sig = inspect_.signature(method) # type: ignore
if inspect_.iscoroutinefunction(method):
display = f"async def {name}{sig}"
else:
display = f"def {name}{sig}"
except Exception:
display = f"def {name}(...)"
if docs:
doc = inspect_.getdoc(method)
if doc:
first_line = doc.split("\n")[0]
if len(first_line) > 80:
first_line = first_line[:77] + "..."
display += f": {first_line}"
return display
def _render_value_inline(value: object) -> str:
    """Render an attribute value for a table cell, with type-aware styling."""
    mono_small = "font-family: monospace; font-size: 0.75rem;"
    if isinstance(value, str):
        # Colors from @textea/json-viewer string rendering
        # Light mode: #cb4b16, Dark mode: #dc9656
        return h.span(
            f'"{html.escape(value)}"',
            style="color: light-dark(#cb4b16, #dc9656); " + mono_small,
        )
    if isinstance(value, (int, float, bool, type(None))):
        return h.span(html.escape(str(value)), style=mono_small)
    # Prefer a rich HTML representation when one exists.
    try:
        _, data = as_html(value)._mime_()
        if isinstance(value, (dict, list, tuple)):
            return h.div(
                data, style="font-size: 0.75rem; display: inline-block;"
            )
        return h.span(data, style="display: inline-block;")
    except Exception:
        pass  # fall back to repr below
    try:
        text = repr(value)
    except Exception as e:
        text = f"<repr-error {str(e)}>"
    # Truncate very long reprs to keep the table compact.
    if len(text) > 200:
        text = text[:197] + "..."
    return h.span(html.escape(text), style=mono_small)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_plugins/stateless/inspect.py",
"license": "Apache License 2.0",
"lines": 423,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_plugins/stateless/test_inspect.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from dataclasses import dataclass
from marimo._plugins.stateless.inspect import inspect
class SimpleClass:
"""A simple test class."""
def __init__(self):
self.value = 42
self._private = "hidden"
self.__dunder = "very hidden"
def method(self):
"""A public method."""
pass
def _private_method(self):
"""A private method."""
pass
def simple_function(x: int, y: str = "default") -> str:
"""A simple test function."""
return f"{x} - {y}"
async def async_function(a: int) -> int:
"""Double the value."""
return a * 2
def test_inspect_basic_object() -> None:
obj = SimpleClass()
result = inspect(obj)
html = result.text
assert "instance" in html
assert "SimpleClass" in html
assert "42" in html
assert "<div" in html
assert "style=" in html
def test_inspect_with_docstring() -> None:
result = inspect(SimpleClass)
html = result.text
assert "A simple test class" in html
def test_inspect_function() -> None:
result = inspect(simple_function)
html = result.text
assert "function" in html
assert "simple_function" in html
# The signature may have HTML-escaped quotes or formatted differently
assert "x:" in html
assert "int" in html
assert "y:" in html
assert "str" in html
def test_inspect_async_function() -> None:
result = inspect(async_function)
html = result.text
assert "async def async_function(" in html
def test_inspect_with_methods() -> None:
obj = SimpleClass()
result = inspect(obj, methods=True)
html = result.text
assert "method" in html
def test_inspect_with_private() -> None:
obj = SimpleClass()
result = inspect(obj, private=True)
html = result.text
assert "_private" in html
def test_inspect_with_dunder() -> None:
obj = SimpleClass()
result = inspect(obj, dunder=True)
html_str = result.text
assert "_SimpleClass__dunder" in html_str or "__" in html_str
result_with_all = inspect(obj, all=True)
html_with_all = result_with_all.text
assert "_SimpleClass__dunder" in html_with_all or "__" in html_with_all
def test_inspect_string_value() -> None:
# When inspecting a string directly, it shows as an instance of str
result = inspect("test string", value=True)
html = result.text
assert "test string" in html
assert "instance" in html
assert "str" in html
# String coloring is used when strings appear in attributes
class WithString:
def __init__(self):
self.text = "colored string"
result2 = inspect(WithString())
html2 = result2.text
assert "light-dark(#cb4b16, #dc9656)" in html2
def test_inspect_dict() -> None:
test_dict = {"key": "value", "number": 42}
result = inspect(test_dict)
html = result.text
assert "dict" in html.lower() or "instance" in html
def test_inspect_list() -> None:
test_list = [1, 2, 3, "test"]
result = inspect(test_list)
html = result.text
assert "list" in html.lower() or "instance" in html
def test_inspect_module() -> None:
import os
result = inspect(os, methods=False)
html = result.text
assert "module" in html
assert "os" in html
def test_inspect_html_escaping() -> None:
class HTMLTest:
def __init__(self):
self.value = "<script>alert('xss')</script>"
obj = HTMLTest()
result = inspect(obj)
html = result.text
assert "<script>" in html or "<" in html
assert "<script>alert" not in html
def test_inspect_no_value() -> None:
obj = SimpleClass()
result = inspect(obj, value=False)
html = result.text
assert "<div" in html
def test_inspect_css_variables() -> None:
result = inspect(SimpleClass())
html = result.text
assert "var(--" in html
assert any(
var in html
for var in [
"var(--slate-",
"var(--background)",
"var(--foreground)",
"var(--blue-",
"var(--green-",
"var(--purple-",
]
)
def test_inspect_type_pills() -> None:
    """Each kind of inspected object gets its own type-pill color."""
    import os

    expectations = [
        (SimpleClass, "var(--blue-"),
        (simple_function, "var(--green-"),
        (SimpleClass(), "var(--crimson-"),
        (os, "var(--orange-"),
    ]
    for target, color in expectations:
        assert color in inspect(target).text
def test_inspect_divider() -> None:
    """A thin, slate-colored divider appears in the value rendering."""
    rendered = inspect(SimpleClass(), value=True).text
    # Divider styling may use single or double quotes; match the stable parts.
    assert "height: 1px;" in rendered
    assert "var(--slate-3)" in rendered
def test_inspect_sort_attributes() -> None:
    """All attributes are rendered whether or not sorting is requested."""

    class UnsortedClass:
        def __init__(self):
            self.zebra = 1
            self.apple = 2
            self.middle = 3

    obj = UnsortedClass()
    sorted_html = inspect(obj, sort=True).text
    unsorted_html = inspect(obj, sort=False).text
    for rendered in (sorted_html, unsorted_html):
        for attr in ("zebra", "apple", "middle"):
            assert attr in rendered
        assert "<table" in rendered
def test_inspect_repr_md() -> None:
    """_repr_md_ surfaces the inspected object's repr."""

    @dataclass
    class Value:
        value: str

    markdown = inspect(Value(value="one"))._repr_md_()
    assert markdown == repr(Value(value="one"))
def test_inspect_repr_md_error() -> None:
    """When __repr__ raises, _repr_md_ falls back to the HTML rendering."""

    class ReprError:
        def __repr__(self):
            raise Exception("repr error")  # noqa: TRY002

    assert "<div" in inspect(ReprError())._repr_md_()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/stateless/test_inspect.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_save/stores/test_file.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._save.stores.file import FileStore
class TestFileStore:
    """Behavioral tests for the on-disk FileStore cache."""

    def test_init_doesnt_make_file(self, tmp_path) -> None:
        """Constructing a FileStore must not touch the filesystem."""
        FileStore(tmp_path / "test_store")
        assert not (tmp_path / "test_store").exists()

    def test_get_put(self, tmp_path) -> None:
        """put() lazily creates the store directory; get() reads back."""
        root = tmp_path / "test_store"
        store = FileStore(root)
        assert not root.exists()

        payload = b"hello world"
        store.put("key", payload)
        assert store.get("key") == payload
        # The directory and the entry only appear after the first write.
        assert root.exists()
        assert (root / "key").exists()

    def test_clear(self, tmp_path) -> None:
        """clear() removes existing keys and reports missing ones."""
        store = FileStore(tmp_path / "test_store")
        payload = b"test data"
        store.put("key1", payload)
        assert store.hit("key1")
        assert store.get("key1") == payload

        # Clearing an existing key succeeds and empties it out.
        assert store.clear("key1") is True
        assert not store.hit("key1")
        assert store.get("key1") is None
        # Clearing an absent key reports failure.
        assert store.clear("nonexistent") is False
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/stores/test_file.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_save/toplevel.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any, Callable
from marimo import _loggers
from marimo._ast.app import App, InternalApp
from marimo._ast.names import (
SETUP_CELL_NAME,
TOPLEVEL_CELL_PREFIX,
)
from marimo._ast.toplevel import TopLevelExtraction
from marimo._runtime.dataflow import DirectedGraph
from marimo._types.ids import CellId_t
LOGGER = _loggers.marimo_logger()
def get_app_from_scope(scope: dict[str, Any]) -> App | None:
    """Return the ``App`` stored under ``scope["app"]``, or None.

    Logs a warning when the key is missing or bound to something that is
    not an ``App`` instance.
    """
    candidate = scope.get("app")
    if isinstance(candidate, App):
        return candidate
    LOGGER.warning(
        "The scope does not contain a valid 'app' instance. "
        "marimo behavior may be undefined."
    )
    return None
def graph_from_app(app: App) -> DirectedGraph:
    """Build a dataflow graph from an app's top-level and setup cells.

    Only cells whose unprefixed name appears in the top-level extraction,
    plus the setup cell, are registered in the returned graph.
    """
    cell_manager = app._cell_manager
    extraction = TopLevelExtraction.from_app(InternalApp(app))
    graph = DirectedGraph()
    for cell_id, cell in cell_manager.valid_cells():
        # BUG FIX: str.strip(chars) treats its argument as a *character
        # set* and removes those characters from both ends of the name;
        # removeprefix() drops only the literal leading marker, leaving
        # cell names that merely contain/end with those characters intact.
        name = cell.name.removeprefix(TOPLEVEL_CELL_PREFIX)
        if name in extraction.toplevel or name == SETUP_CELL_NAME:
            graph.register_cell(cell_id, cell._cell)
    return graph
def graph_from_scope(scope: dict[str, Any]) -> DirectedGraph:
    """Build the dataflow graph for the app in ``scope`` (empty if none)."""
    app = get_app_from_scope(scope)
    return DirectedGraph() if app is None else graph_from_app(app)
def get_cell_id_from_scope(
    fn: Callable[..., Any], scope: dict[str, Any]
) -> CellId_t:
    """Resolve the id of the cell named after ``fn``; empty id if unknown.

    Falls back to ``CellId_t("")`` when the scope holds no valid app or
    when no cell shares the function's name.
    """
    app = get_app_from_scope(scope)
    if app is not None:
        cell_data = app._cell_manager.get_cell_data_by_name(fn.__name__)
        if cell_data is not None:
            return cell_data.cell_id
    return CellId_t("")
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_save/toplevel.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_save/external_decorators/transitive_wrappers_1.py | import marimo
# NOTE(review): generated marimo notebook used as a cache-hash fixture.
# tests/_save/test_external_decorators.py relies on this file and
# transitive_wrappers_2.py being identical except for `impure_state`;
# edit both files in lockstep.
__generated_with = "0.14.12"
app = marimo.App()
with app.setup:
    import functools
    from typing import Any
    import marimo as mo
    # This will be an impure decorator (contains non-primitive objects)
    impure_state = [object()]
@app.function
def my_impure_decorator(func):
    """An impure decorator that depends on impure_state"""
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any):
        # Decorator depends on impure_state
        wrapper._call_count = len(impure_state)
        return func(*args, **kwargs)
    return wrapper
@app.function
def my_pure_decorator(func):
    """Same pure decorator"""
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any):
        return func(*args, **kwargs)
    return wrapper
@app.function
@my_impure_decorator
def pure_function():
    # This function itself is pure (no external dependencies)
    return 42
@app.function
@my_pure_decorator
def impure_function():
    # Same function, but now depends on different impure_dependency
    return len(impure_state)
@app.function
@mo.cache
def impure_wrapped_pure():
    return pure_function()
@app.function
@mo.cache
def pure_wrapped_impure():
    return impure_function()
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/external_decorators/transitive_wrappers_1.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_save/external_decorators/transitive_wrappers_2.py | # NB! The only difference between this file and transitive_wrappers_1.py is that
# impure_state is tweaked.
import marimo

# NOTE(review): generated marimo notebook used as a cache-hash fixture.
# Must mirror transitive_wrappers_1.py except for the extra element in
# `impure_state`; edit both files in lockstep.
__generated_with = "0.14.12"
app = marimo.App()
with app.setup:
    import functools
    from typing import Any
    import marimo as mo
    # This will be an impure decorator (contains non-primitive objects)
    # impure_state = [object()] in transitive_wrappers_1.py
    impure_state = [object(), object()]
@app.function
def my_impure_decorator(func):
    """An impure decorator that depends on impure_state"""
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any):
        # Decorator depends on impure_state
        wrapper._call_count = len(impure_state)
        return func(*args, **kwargs)
    return wrapper
@app.function
def my_pure_decorator(func):
    """Same pure decorator"""
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any):
        return func(*args, **kwargs)
    return wrapper
@app.function
@my_impure_decorator
def pure_function():
    # This function itself is pure (no external dependencies)
    return 42
@app.function
@my_pure_decorator
def impure_function():
    # Same function, but now depends on different impure_dependency
    return len(impure_state)
@app.function
@mo.cache
def impure_wrapped_pure():
    return pure_function()
@app.function
@mo.cache
def pure_wrapped_impure():
    return impure_function()
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/external_decorators/transitive_wrappers_2.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_save/test_external_decorators.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import sys
import textwrap
from marimo._runtime.commands import ExecuteCellCommand
from marimo._runtime.runtime import Kernel
from tests.conftest import ExecReqProvider
class TestDecoratorImports:
    """Cache-hash behavior of `mo.cache` functions imported across modules.

    NOTE(review): these tests compare loader names / `_last_hash` values
    between locally defined and imported functions; cell bodies and
    variable names feed marimo's dataflow analysis, so the exact code
    text is load-bearing — do not restyle.
    """

    @staticmethod
    def test_import_alias_hash_path(app) -> None:
        """Test that imported cached functions with module aliases have correct hash paths."""
        # Purge previously imported fixture modules so module-level state
        # (e.g. module __version__) is re-evaluated for this test.
        for module in list(sys.modules.keys()):
            if module.startswith("tests._save.external_decorators"):
                del sys.modules[module]
        with app.setup:
            import marimo as mo
            import tests._save.external_decorators.module_1 as my_module
            from tests._save.external_decorators.transitive_imports import (
                doesnt_have_namespace as external_func,
            )
        @app.function
        @mo.cache
        def doesnt_have_namespace() -> None:
            # Just replicating the function in external_func
            return my_module.__version__
        @app.cell
        def check_hash_paths() -> None:
            local_func = doesnt_have_namespace
            # Both functions access the same module with the same alias
            external_result = external_func()
            local_result = local_func()
            # Results should be the same (both return "1.0.0")
            assert external_result == local_result == "1.0.0"
            # Hashes should be equal (same code, same module accessed)
            external_name = external_func._loader().name
            local_name = local_func._loader().name
            assert external_name == local_name, (
                f"Hashes should be equal for same code and module, "
                f"got {external_name} != {local_name}"
            )

    @staticmethod
    def test_has_shared_import(app) -> None:
        # The imported function shares marimo with this app's setup cell.
        with app.setup:
            import marimo as mo
            from tests._save.external_decorators.transitive_imports import (
                has_import,
            )
        @app.cell
        def has_dep_works() -> tuple[int]:
            # matches test + use mo for lint
            assert has_import() == len([mo])

    @staticmethod
    def test_doesnt_have_shared_import(app) -> None:
        with app.setup:
            from tests._save.external_decorators.transitive_imports import (
                doesnt_have_import,
            )
        @app.cell
        def doesnt_have_dep_works() -> tuple[int]:
            # Counts modules on call.
            assert doesnt_have_import() == 2

    @staticmethod
    def test_has_dep_with_differing_name_works(app) -> None:
        # Purge fixture modules so module_0 is imported fresh under the
        # local alias `my_module`.
        for module in list(sys.modules.keys()):
            if module.startswith("tests._save.external_decorators"):
                del sys.modules[module]
        with app.setup:
            import marimo as mo
            import tests._save.external_decorators.module_0 as my_module
            from tests._save.external_decorators.transitive_imports import (
                doesnt_have_namespace as other,
                doesnt_have_namespace_pinned as other_pinned,
            )
        @app.function
        @mo.cache(pin_modules=True)
        def doesnt_have_namespace_pinned() -> None:
            return my_module.__version__
        @app.function
        @mo.cache
        def doesnt_have_namespace() -> None:
            return my_module.__version__
        @app.cell
        def has_dep_with_differing_name_works() -> tuple[int]:
            assert other() != my_module.__version__
            other_hash = other._last_hash
            assert doesnt_have_namespace() == my_module.__version__
            # By virtue of backwards compatibility, this is true.
            # TODO: Negate and fix.
            assert other_hash == doesnt_have_namespace._last_hash
        @app.cell
        def has_dep_with_differing_name_works_pinned() -> tuple[int]:
            # With pin_modules=True, the module version participates in the
            # hash, so the two functions hash differently.
            assert other_pinned() != my_module.__version__
            other_hash_pinned = other_pinned._last_hash
            assert doesnt_have_namespace_pinned() == my_module.__version__
            assert other_hash_pinned != doesnt_have_namespace_pinned._last_hash

    @staticmethod
    async def test_decorator_in_kernel(
        lazy_kernel: Kernel, exec_req: ExecReqProvider
    ) -> None:
        # Same scenario as above, but executed through a real kernel.
        k = lazy_kernel
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id="setup",
                    code=textwrap.dedent(
                        """
                        import marimo as mo
                        import tests._save.external_decorators.module_0 as my_module
                        from tests._save.external_decorators.transitive_imports import (
                            doesnt_have_namespace as other,
                            doesnt_have_namespace_pinned as other_pinned,
                        )
                        from tests._save.external_decorators.transitive_imports import (
                            doesnt_have_import,
                        )
                        from tests._save.external_decorators.transitive_imports import has_import
                        """
                    ),
                ),
                exec_req.get(
                    """
                    @mo.cache(pin_modules=True)
                    def doesnt_have_namespace_pinned() -> None:
                        return my_module.__version__
                    """
                ),
                exec_req.get(
                    """
                    @mo.cache
                    def doesnt_have_namespace() -> None:
                        return my_module.__version__
                    """
                ),
                exec_req.get(
                    """
                    assert has_import() == 1
                    assert doesnt_have_import() == 2
                    assert other() != my_module.__version__
                    other_hash = other._last_hash
                    assert doesnt_have_namespace() == my_module.__version__
                    # By virtue of backwards compatibility, this is true.
                    # TODO: Negate and fix.
                    assert other_hash == doesnt_have_namespace._last_hash
                    assert other_pinned() != my_module.__version__
                    other_hash_pinned = other_pinned._last_hash
                    assert doesnt_have_namespace_pinned() == my_module.__version__
                    assert other_hash_pinned != doesnt_have_namespace_pinned._last_hash
                    resolved = True
                    """
                ),
            ]
        )
        # `resolved` is only set if every in-kernel assertion passed.
        assert k.globals.get("resolved", False), k.stderr
class TestDecoratorTransitiveFns:
    """Hash behavior when cached functions transitively cross decorators.

    Uses the twin fixtures transitive_wrappers_1/2, which differ only in
    `impure_state`, to check that impure transitive dependencies force
    ExecutionPath hashing and distinct hashes.
    """

    @staticmethod
    async def test_impure_decorator_with_pure_dependencies(app) -> None:
        with app.setup:
            from tests._save.external_decorators.transitive_wrappers_1 import (
                pure_wrapped_impure,
            )
            from tests._save.external_decorators.transitive_wrappers_2 import (
                pure_wrapped_impure as pure_wrapped_impure_2,
            )
        @app.cell
        def _():
            result1 = pure_wrapped_impure()
            hash1 = pure_wrapped_impure._last_hash
            cache_type1 = pure_wrapped_impure.base_block.cache_type
            return result1, hash1, cache_type1
        @app.cell
        def _():
            result2 = pure_wrapped_impure_2()
            hash2 = pure_wrapped_impure_2._last_hash
            cache_type2 = pure_wrapped_impure_2.base_block.cache_type
            return result2, hash2, cache_type2
        @app.cell
        def check_results(
            result1, result2, hash1, hash2, cache_type1, cache_type2
        ):
            # impure_state has 1 element in fixture 1 and 2 in fixture 2.
            assert result1 == 1
            assert result2 == 2
            # The decorator itself is pure, but the function has impure dependencies
            # This should use ExecutionPath hashing, not ContentAddressed
            assert cache_type1 == "ExecutionPath", (
                f"Expected ExecutionPath, got {cache_type1}"
            )
            assert cache_type2 == "ExecutionPath", (
                f"Expected ExecutionPath, got {cache_type2}"
            )
            # Hashes should be different because the execution path changed
            # (due to different impure_dependency)
            assert hash1 != hash2, (
                f"Expected different hashes for different impure dependencies, "
                f"got {hash1} == {hash2}"
            )

    @staticmethod
    async def test_pure_decorator_with_impure_dependencies(app) -> None:
        with app.setup:
            from tests._save.external_decorators.transitive_wrappers_1 import (
                impure_wrapped_pure,
            )
            from tests._save.external_decorators.transitive_wrappers_2 import (
                impure_wrapped_pure as impure_wrapped_pure_2,
            )
        @app.cell
        def _():
            result1 = impure_wrapped_pure()
            hash1 = impure_wrapped_pure._last_hash
            cache_type1 = impure_wrapped_pure.base_block.cache_type
        @app.cell
        def _():
            result2 = impure_wrapped_pure_2()
            hash2 = impure_wrapped_pure_2._last_hash
            cache_type2 = impure_wrapped_pure_2.base_block.cache_type
        @app.cell
        def check_results(
            result1, result2, hash1, hash2, cache_type1, cache_type2
        ) -> None:
            # pure_function returns 42 in both fixtures.
            assert result1 == 42
            assert result2 == 42
            # The decorator itself is pure, but the function has impure dependencies
            # This should use ExecutionPath hashing, not ContentAddressed
            assert cache_type1 == "ExecutionPath", (
                f"Expected ExecutionPath, got {cache_type1}"
            )
            assert cache_type2 == "ExecutionPath", (
                f"Expected ExecutionPath, got {cache_type2}"
            )
            # Hashes should be different because the execution path changed
            # (due to different impure_dependency)
            assert hash1 != hash2, (
                f"Expected different hashes for different impure dependencies, "
                f"got {hash1} == {hash2}"
            )
class TestAsExternalApp:
    """Running/embedding an external marimo app from inside another app.

    The fixture app at tests/_save/external_decorators/app.py is expected
    to expose defs `bar == 2`, `cache(1) == 2`, and a mutable list `x`.
    """

    @staticmethod
    async def test_as_external_app(app) -> None:
        with app.setup:
            from tests._save.external_decorators.app import (
                app as ex_app,
            )
        @app.cell
        def _():
            _, defs = ex_app.run()
            assert defs["bar"] == 2
            assert defs["cache"](1) == 2
            # Re-running yields fresh defs, but mutations to returned
            # objects persist across runs (same underlying list).
            assert len(defs["x"]) == 0
            defs["x"].append(1)
            _, defs = ex_app.run()
            assert len(defs["x"]) == 1
            return

    @staticmethod
    async def test_as_external_app_in_kernel(
        lazy_kernel: Kernel, exec_req: ExecReqProvider
    ) -> None:
        # Same run() behavior, but executed through a real kernel.
        k = lazy_kernel
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id="setup",
                    code=textwrap.dedent(
                        """
                        from tests._save.external_decorators.app import (
                            app as ex_app,
                        )
                        """
                    ),
                ),
                exec_req.get(
                    """
                    _, defs = ex_app.run()
                    assert defs["bar"] == 2
                    assert defs["cache"](1) == 2
                    resolved = True
                    """
                ),
            ]
        )
        assert k.globals.get("resolved", False), k.stderr

    @staticmethod
    async def test_as_external_app_embedded(app) -> None:
        with app.setup:
            from tests._save.external_decorators.app import (
                app as ex_app,
            )
        @app.cell
        async def _():
            r1 = await ex_app.embed()
            assert r1.defs["bar"] == 2
            assert r1.defs["cache"](1) == 2
            return

    @staticmethod
    async def test_as_external_app_embedded_cloned(app) -> None:
        with app.setup:
            from tests._save.external_decorators.app import (
                app as ex_app,
            )
        @app.cell
        async def _():
            # Cloning yields an independent app that embeds identically.
            r2 = await ex_app.clone().embed()
            assert r2.defs["bar"] == 2
            assert r2.defs["cache"](1) == 2
            return

    @staticmethod
    async def test_as_external_app_embedded_in_kernel(
        lazy_kernel: Kernel, exec_req: ExecReqProvider
    ) -> None:
        # embed() both directly and on a clone, through a real kernel.
        k = lazy_kernel
        await k.run(
            [
                ExecuteCellCommand(
                    cell_id="setup",
                    code=textwrap.dedent(
                        """
                        from tests._save.external_decorators.app import (
                            app as ex_app,
                        )
                        """
                    ),
                ),
                exec_req.get(
                    """
                    r1 = await ex_app.embed()
                    assert r1.defs["bar"] == 2
                    assert r1.defs["cache"](1) == 2
                    """
                ),
                exec_req.get(
                    """
                    r2 = await ex_app.clone().embed()
                    assert r2.defs["bar"] == 2
                    assert r2.defs["cache"](1) == 2
                    """
                ),
                exec_req.get(
                    """
                    r1, r2
                    resolved = True
                    """
                ),
            ]
        )
        assert k.globals.get("resolved", False), k.stderr
        assert k.globals["r1"].defs["bar"] == 2
        assert k.globals["r1"].defs["cache"](1) == 2
        assert k.globals["r2"].defs["bar"] == 2
        assert k.globals["r2"].defs["cache"](1) == 2
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/test_external_decorators.py",
"license": "Apache License 2.0",
"lines": 347,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/ibis_formatting.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "ibis-framework[duckdb,polars]==10.8.0",
# "marimo",
# "polars==1.32.0",
# ]
# ///
import marimo

# NOTE(review): generated marimo smoke-test notebook (see
# __generated_with); cell variable names drive marimo's dataflow, so
# edit via the marimo editor rather than by hand.
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
    import ibis
    import polars as pl
    import marimo as mo
    return ibis, pl
@app.cell
def _():
    from ibis import _
    from datetime import datetime
    return
@app.cell
def _():
    # Columns cover strings, ints, lists, structs, and nullable floats so
    # every formatter path below is exercised.
    test_data = {
        "str": ["a", "c", "hello"],
        "num": [1, 2, 3],
        "list": [["a", "b"], ["c"], []],
        "struct": [{"a": 0}, {"a": 1}, {"a": 2}],
        "floats": [1.1, 2.2, None],
    }
    return (test_data,)
@app.cell
def _(ibis, test_data):
    t = ibis.memtable(test_data)
    return (t,)
@app.cell
def _(ibis, t):
    # Table - lazy mode: Expression + SQL tabs
    ibis.options.interactive = False
    t
    return
@app.cell
def _(ibis, t):
    # Column - lazy mode: Expression + SQL tabs
    ibis.options.interactive = False
    t.struct
    return
@app.cell
def _(ibis, t):
    # Scalar - lazy mode: Expression + SQL tabs
    ibis.options.interactive = False
    t.floats.min()
    return
@app.cell
def _(ibis, t):
    # Table - interactive mode: table widget
    ibis.options.interactive = True
    t
    return
@app.cell
def _(ibis, t):
    # Column - interactive mode: table widget
    ibis.options.interactive = True
    t.struct
    return
@app.cell
def _(ibis, t):
    # Array scalar - interactive mode: JSON output
    ibis.options.interactive = True
    t.list.first()
    return
@app.cell
def _(ibis, t):
    # Scalar - interactive mode: plain text
    ibis.options.interactive = True
    t.floats.min()
    return
@app.cell
def _(ibis):
    # Unbound tables
    t1 = ibis.table(
        dict(value1="float", key1="string", key2="string"), name="table1"
    )
    t2 = ibis.table(
        dict(value2="float", key3="string", key4="string"), name="table2"
    )
    joined = t1.left_join(t2, t1.key1 == t2.key3)
    return (joined,)
@app.cell
def _(ibis, joined):
    # Unbound table: Expression + SQL tabs
    ibis.options.interactive = False
    joined
    return
@app.cell
def _(ibis, joined):
    # Unbound table - interactive mode: Expression + SQL tabs
    ibis.options.interactive = True
    joined
    return
@app.cell
def _(pl):
    lazy_frame = pl.LazyFrame(
        {"name": ["Jimmy", "Keith"], "band": ["Led Zeppelin", "Stones"]}
    )
    return (lazy_frame,)
@app.cell
def _(ibis, lazy_frame):
    pl_connection = ibis.polars.connect(tables={"band_members": lazy_frame})
    return (pl_connection,)
@app.cell
def _(ibis, pl_connection):
    # Polars table - lazy mode: Expression + SQL tabs (SQL shows "Backend doesn't support SQL")
    ibis.options.interactive = False
    pl_connection.table("band_members")
    return
@app.cell
def _(ibis, pl_connection):
    # Polars scalar - lazy mode: Expression + SQL tabs (SQL shows "Backend doesn't support SQL")
    ibis.options.interactive = False
    pl_connection.table("band_members").name.first()
    return
@app.cell
def _(ibis, pl_connection):
    # Polars table - interactive mode: table widget
    ibis.options.interactive = True
    pl_connection.table("band_members")
    return
@app.cell
def _(ibis, pl_connection):
    # Polars scalar - interactive mode: plain text
    ibis.options.interactive = True
    pl_connection.table("band_members").name.first()
    return
@app.cell
def _(ibis, t):
    duckb_con = ibis.duckdb.connect()
    duckdb_table = duckb_con.create_table("test", t, overwrite=True)
    return (duckdb_table,)
@app.cell
def _(duckdb_table, ibis):
    # DuckDB table - lazy mode: Expression + SQL tabs
    ibis.options.interactive = False
    duckdb_table
    return
@app.cell
def _(duckdb_table, ibis):
    # DuckDB table - interactive mode: table widget
    ibis.options.interactive = True
    duckdb_table
    return
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ibis_formatting.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_output/formatters/test_ibis_formatters.py | from __future__ import annotations
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._output.formatters.formatters import register_formatters
from marimo._output.formatting import (
get_formatter,
)
HAS_IBIS = DependencyManager.ibis.has()
HAS_POLARS = DependencyManager.polars.has()
@pytest.fixture
def test_table():
    """Build an ibis memtable covering strings, ints, lists, structs, floats."""
    if not HAS_IBIS:
        pytest.skip("ibis not available")
    import ibis

    columns = {
        "str": ["a", "c", "hello"],
        "num": [1, 2, 3],
        "list": [["a", "b"], ["c"], []],
        "struct": [{"a": 0}, {"a": 1}, {"a": 2}],
        "floats": [1.1, 2.2, None],
    }
    return ibis.memtable(columns)
@pytest.mark.skipif(not HAS_IBIS, reason="ibis not installed")
def test_ibis_formatters_interactive_mode(test_table) -> None:
    """Interactive mode: tables/columns get a lazy widget, scalars get text."""
    register_formatters()
    import ibis

    saved = ibis.options.interactive
    ibis.options.interactive = True
    try:
        # Tables and columns both render as a lazy <marimo-table> widget.
        for expr in (test_table, test_table.struct):
            formatter = get_formatter(expr, include_opinionated=True)
            assert formatter is not None
            mime, content = formatter(expr)
            assert mime == "text/html"
            assert "<marimo-table" in content
            assert "data-lazy='true'" in content

        # A scalar renders as preformatted text instead of a widget.
        scalar = test_table.floats.min()
        formatter = get_formatter(scalar, include_opinionated=True)
        assert formatter is not None
        mime, content = formatter(scalar)
        assert mime == "text/html"
        assert content.startswith("<pre")
    finally:
        # Always restore the global ibis option for other tests.
        ibis.options.interactive = saved
@pytest.mark.skipif(not HAS_IBIS, reason="ibis not installed")
def test_ibis_formatters_lazy_mode(test_table) -> None:
    """Lazy mode: every expression renders as Expression+SQL tabs."""
    register_formatters()
    import ibis

    saved = ibis.options.interactive
    ibis.options.interactive = False
    try:
        # Table, column, and scalar all take the tabs path and never the
        # interactive table widget.
        for expr in (test_table, test_table.struct, test_table.floats.min()):
            formatter = get_formatter(expr, include_opinionated=True)
            assert formatter is not None
            mime, content = formatter(expr)
            assert mime == "text/html"
            assert "<marimo-tabs" in content
            assert "<marimo-table" not in content
    finally:
        # Always restore the global ibis option for other tests.
        ibis.options.interactive = saved
@pytest.mark.skipif(not HAS_IBIS, reason="ibis not installed")
def test_ibis_unbound_expressions() -> None:
    """Unbound expressions always render as Expression+SQL tabs."""
    register_formatters()
    import ibis

    # Join two unbound tables: there is no backend to execute against.
    t1 = ibis.table(
        dict(value1="float", key1="string", key2="string"), name="table1"
    )
    t2 = ibis.table(
        dict(value2="float", key3="string", key4="string"), name="table2"
    )
    joined = t1.left_join(t2, t1.key1 == t2.key3)

    # Regardless of interactivity, tabs are shown — never the table widget.
    for interactive_mode in (True, False):
        saved = ibis.options.interactive
        ibis.options.interactive = interactive_mode
        try:
            formatter = get_formatter(joined, include_opinionated=True)
            assert formatter is not None
            mime, content = formatter(joined)
            assert mime == "text/html"
            assert "<marimo-tabs" in content
            assert "<marimo-table" not in content
        finally:
            ibis.options.interactive = saved
@pytest.mark.skipif(not HAS_IBIS, reason="ibis not installed")
def test_ibis_complex_scalar_interactive(test_table) -> None:
    """An array scalar in interactive mode renders as JSON output."""
    register_formatters()
    import ibis

    saved = ibis.options.interactive
    ibis.options.interactive = True
    try:
        array_scalar = test_table.list.first()
        formatter = get_formatter(array_scalar, include_opinionated=True)
        assert formatter is not None
        mime, content = formatter(array_scalar)
        # Non-primitive scalars get the JSON tree view, not plain text.
        assert mime == "text/html"
        assert "<marimo-json-output" in content
    finally:
        ibis.options.interactive = saved
@pytest.mark.skipif(
    not HAS_IBIS or not HAS_POLARS, reason="ibis and polars not installed"
)
def test_ibis_polars_backend() -> None:
    """Test ibis with polars backend - SQL tab should show 'Backend doesn't support SQL'."""
    register_formatters()
    import ibis
    import polars as pl

    members = pl.LazyFrame(
        {"name": ["Jimmy", "Keith"], "band": ["Led Zeppelin", "Stones"]}
    )
    connection = ibis.polars.connect(tables={"band_members": members})

    saved = ibis.options.interactive
    ibis.options.interactive = False
    try:
        bound = connection.table("band_members")
        formatter = get_formatter(bound, include_opinionated=True)
        assert formatter is not None
        mime, content = formatter(bound)
        assert mime == "text/html"
        assert "<marimo-tabs" in content
        assert "<marimo-table" not in content
    finally:
        ibis.options.interactive = saved
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_output/formatters/test_ibis_formatters.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_output/test_formatter_registry.py | from __future__ import annotations
from dataclasses import dataclass
from marimo._messaging.mimetypes import KnownMimeType
from marimo._output.formatting import Formatter, FormatterRegistry
# Test classes for hierarchy testing
@dataclass
class BaseClass:
    """Root of the inheritance chain exercised by the registry tests."""

    value: str = "base"
class ChildClass(BaseClass):
    """Direct subclass of BaseClass — one MRO hop from the base."""

    def __init__(self, value: str = "child"):
        super().__init__(value)
class GrandChildClass(ChildClass):
    """Two MRO hops from BaseClass — exercises multi-level lookup."""

    def __init__(self, value: str = "grandchild"):
        super().__init__(value)
# Multiple inheritance test classes
class MixinA:
    """Empty mixin; listed before MixinB and BaseClass in the MRO tests."""

    pass
class MixinB:
    """Empty mixin; second base used in the MRO-order tests."""

    pass
class MultipleInheritanceClass(MixinA, MixinB, BaseClass):
    """MRO: MultipleInheritanceClass, MixinA, MixinB, BaseClass, object."""

    pass
# Test formatters
def base_formatter(obj: BaseClass) -> tuple[KnownMimeType, str]:
    """Render a BaseClass-derived object as a simple HTML span."""
    markup = f"<span>Base: {obj.value}</span>"
    return ("text/html", markup)
def child_formatter(obj: ChildClass) -> tuple[KnownMimeType, str]:
    """Render a ChildClass instance with its Child-specific label."""
    return ("text/html", "<span>Child: {}</span>".format(obj.value))
def mixin_a_formatter(obj: MixinA) -> tuple[KnownMimeType, str]:
    """Render any MixinA instance; output ignores the object's state."""
    # The isinstance assert verifies the registry dispatched correctly.
    assert isinstance(obj, MixinA)
    return ("text/html", "<span>MixinA</span>")
def int_formatter(obj: int) -> tuple[KnownMimeType, str]:
    """Render an integer as plain text."""
    return ("text/plain", "Integer: " + str(obj))
class TestFormatterRegistry:
def test_init(self):
"""Test that FormatterRegistry initializes with empty formatters dict."""
registry = FormatterRegistry()
assert registry.formatters == {}
assert registry.is_empty()
def test_is_empty_when_empty(self):
"""Test is_empty returns True for empty registry."""
registry = FormatterRegistry()
assert registry.is_empty()
def test_is_empty_when_not_empty(self):
"""Test is_empty returns False when registry has formatters."""
registry = FormatterRegistry()
registry.add_formatter(int, int_formatter)
assert not registry.is_empty()
def test_add_formatter(self):
"""Test adding a formatter to the registry."""
registry = FormatterRegistry()
registry.add_formatter(int, int_formatter)
assert int in registry.formatters
assert registry.formatters[int] == int_formatter
assert not registry.is_empty()
def test_add_multiple_formatters(self):
"""Test adding multiple formatters."""
registry = FormatterRegistry()
registry.add_formatter(int, int_formatter)
registry.add_formatter(BaseClass, base_formatter)
assert len(registry.formatters) == 2
assert registry.formatters[int] == int_formatter
assert registry.formatters[BaseClass] == base_formatter
def test_get_formatter_direct_match(self):
"""Test getting formatter for exact type match."""
registry = FormatterRegistry()
registry.add_formatter(int, int_formatter)
obj = 42
formatter = registry.get_formatter(obj)
assert formatter == int_formatter
def test_get_formatter_no_match(self):
"""Test getting formatter when no match exists."""
registry = FormatterRegistry()
obj = "string"
formatter = registry.get_formatter(obj)
assert formatter is None
def test_get_formatter_hierarchy_match(self):
"""Test getting formatter through type hierarchy (mro)."""
registry = FormatterRegistry()
registry.add_formatter(BaseClass, base_formatter)
# ChildClass should get BaseClass formatter
child_obj = ChildClass()
formatter = registry.get_formatter(child_obj)
assert formatter == base_formatter
def test_get_formatter_hierarchy_caching(self):
"""Test that hierarchy lookups are cached."""
registry = FormatterRegistry()
registry.add_formatter(BaseClass, base_formatter)
child_obj = ChildClass()
# First call should find via hierarchy and cache
assert ChildClass not in registry.formatters
formatter = registry.get_formatter(child_obj)
assert formatter == base_formatter
# After first call, ChildClass should be cached
assert ChildClass in registry.formatters
assert registry.formatters[ChildClass] == base_formatter
# Second call should use cached formatter
formatter2 = registry.get_formatter(child_obj)
assert formatter2 == base_formatter
def test_get_formatter_multiple_hierarchy_levels(self):
"""Test formatter lookup through multiple inheritance levels."""
registry = FormatterRegistry()
registry.add_formatter(BaseClass, base_formatter)
# GrandChildClass -> ChildClass -> BaseClass hierarchy
grandchild_obj = GrandChildClass()
formatter = registry.get_formatter(grandchild_obj)
assert formatter == base_formatter
def test_get_formatter_prefer_exact_match_over_hierarchy(self):
    """An exact-type registration wins over an ancestor's registration."""
    reg = FormatterRegistry()
    reg.add_formatter(BaseClass, base_formatter)
    reg.add_formatter(ChildClass, child_formatter)
    resolved = reg.get_formatter(ChildClass())
    # ChildClass's own formatter is chosen, not BaseClass's.
    assert resolved == child_formatter
def test_get_formatter_multiple_inheritance(self):
    """With multiple bases, the first registered type in the MRO wins."""
    reg = FormatterRegistry()
    reg.add_formatter(MixinA, mixin_a_formatter)
    reg.add_formatter(BaseClass, base_formatter)
    # MRO: MultipleInheritanceClass, MixinA, MixinB, BaseClass, object —
    # MixinA precedes BaseClass, so its formatter is found first.
    assert (
        reg.get_formatter(MultipleInheritanceClass()) == mixin_a_formatter
    )
def test_get_formatter_mro_order(self):
    """Lookup follows the MRO, so MixinB is found before BaseClass."""
    reg = FormatterRegistry()
    # Register formatters for both ancestor classes.
    reg.add_formatter(MixinB, lambda _: ("text/plain", "MixinB"))
    reg.add_formatter(BaseClass, base_formatter)
    instance = MultipleInheritanceClass()
    fmt = reg.get_formatter(instance)
    assert fmt is not None
    mime_type, payload = fmt(instance)
    # The MixinB formatter must have produced the output.
    assert mime_type == "text/plain"
    assert payload == "MixinB"
def test_formatter_override(self):
    """Re-registering a type replaces its previous formatter."""
    reg = FormatterRegistry()

    def original(obj: int) -> tuple[KnownMimeType, str]:
        assert isinstance(obj, int)
        return ("text/plain", "first")

    def replacement(obj: int) -> tuple[KnownMimeType, str]:
        assert isinstance(obj, int)
        return ("text/plain", "second")

    reg.add_formatter(int, original)
    reg.add_formatter(int, replacement)
    # The most recent registration wins.
    assert reg.get_formatter(42) == replacement
def test_get_formatter_with_various_builtin_types(self):
    """The registry dispatches correctly on built-in Python types."""
    reg = FormatterRegistry()

    def str_formatter(obj: str) -> tuple[KnownMimeType, str]:
        return ("text/plain", f"String: {obj}")

    def list_formatter(obj: list[int]) -> tuple[KnownMimeType, str]:
        return ("text/plain", f"List: {obj}")

    reg.add_formatter(str, str_formatter)
    reg.add_formatter(list, list_formatter)

    # Registered builtin types resolve to their formatters.
    assert reg.get_formatter("test") == str_formatter
    assert reg.get_formatter([1, 2, 3]) == list_formatter
    # dict was never registered, so it has no formatter.
    assert reg.get_formatter({"key": "value"}) is None
def test_registry_isolation(self):
    """Registrations in one registry do not leak into another."""
    populated = FormatterRegistry()
    untouched = FormatterRegistry()
    populated.add_formatter(int, int_formatter)
    assert populated.get_formatter(42) == int_formatter
    # The second registry never saw the registration.
    assert untouched.get_formatter(42) is None
    assert untouched.is_empty()
def test_formatter_can_handle_none_values(self):
    """Looking up None succeeds and simply finds no formatter."""
    reg = FormatterRegistry()
    reg.add_formatter(int, int_formatter)
    assert reg.get_formatter(None) is None
def test_formatter_type_annotations_preserved(self):
    """An annotated Formatter[...] value round-trips through the registry."""
    reg = FormatterRegistry()
    # Assigning through the annotation should not upset type checkers.
    annotated: Formatter[BaseClass] = base_formatter
    reg.add_formatter(BaseClass, annotated)
    assert reg.get_formatter(BaseClass()) == annotated
def test_formatter_for_type(self):
    """Instances match their formatter, but type objects themselves do not."""
    reg = FormatterRegistry()
    reg.add_formatter(int, int_formatter)
    value = 1
    assert reg.get_formatter(value) == int_formatter
    # The int type object (and its metaclass) are not int instances.
    assert reg.get_formatter(type(value)) is None
    assert reg.get_formatter(int) is None
    assert reg.get_formatter(type(int)) is None
def test_get_formatter_handles_broken_mro(self):
    """A type whose mro() raises is handled without propagating."""
    reg = FormatterRegistry()

    class Broken:
        pass

    # Shadow mro with a plain function so invoking it raises TypeError
    # (either from the missing implicit argument or from the body).
    def bad_mro(_self):
        del _self
        raise TypeError("broken mro")

    Broken.mro = bad_mro  # type: ignore[attr-defined]

    # The registry should swallow the failure and report no formatter.
    assert reg.get_formatter(Broken()) is None
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_output/test_formatter_registry.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_output/formatters/test_plotly_formatters.py | from __future__ import annotations
import json
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._output.formatters.formatters import register_formatters
from marimo._output.formatters.plotly_formatters import PlotlyFormatter
from marimo._output.formatting import get_formatter
HAS_DEPS = DependencyManager.plotly.has()
HAS_ANYWIDGET = DependencyManager.anywidget.has()
@pytest.mark.skipif(not HAS_DEPS, reason="plotly not installed")
def test_plotly_config_forwarding():
    """The `config` argument is serialized into the rendered element."""
    register_formatters()

    import plotly.graph_objects as go
    import plotly.io as pio

    figure = go.Figure(data=[go.Scatter(x=[1, 2, 3], y=[4, 5, 6])])
    figure_dict = json.loads(pio.to_json(figure))
    rendered = PlotlyFormatter.render_plotly_dict(
        figure_dict, config={"displayModeBar": False, "responsive": True}
    )

    # The config must appear in the custom element's data attributes.
    assert "marimo-plotly" in rendered.text
    assert "data-config=" in rendered.text
    assert "displayModeBar" in rendered.text
    assert "responsive" in rendered.text
@pytest.mark.skipif(
    not (HAS_DEPS and HAS_ANYWIDGET),
    reason="plotly and anywidget not installed",
)
def test_plotly_figure_widget_uses_anywidget_formatter():
    """FigureWidget renders via the anywidget path, not the static one.

    FigureWidget subclasses anywidget.AnyWidget, so it must be handled
    by the anywidget formatter for interactive widget features (such as
    plotly-resampler's dynamic resampling) to function.

    Regression test for https://github.com/marimo-team/marimo/issues/4091
    """
    register_formatters()

    import plotly.graph_objects as go

    widget = go.FigureWidget(data=[go.Scatter(x=[1, 2, 3], y=[4, 5, 6])])

    # get_formatter walks the MRO: the anywidget formatter must be the
    # first match for FigureWidget, ahead of the plotly Figure formatter.
    widget_formatter = get_formatter(widget)
    assert widget_formatter is not None

    mimetype, payload = widget_formatter(widget)
    assert mimetype == "text/html"
    assert "marimo-anywidget" in payload
    assert "marimo-plotly" not in payload
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_output/formatters/test_plotly_formatters.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_runtime/test_primitives.py | """Tests for marimo._runtime.primitives module."""
import functools
from typing import Any
from marimo._runtime.primitives import is_pure_function
class TestWrappedFunctionHandling:
    """Test handling of wrapped functions (decorators) in is_pure_function."""

    def test_wrapped_function_follows_wrapped_object(self):
        """Test that is_pure_function follows __wrapped__ attribute to check the underlying function."""

        def external_function():
            """A function from external module."""
            return 42

        # Simulate a function defined outside the notebook's module.
        external_function.__module__ = "external_module"

        # Create a decorator that wraps the function
        def decorator(func):
            # functools.wraps sets wrapper.__wrapped__ = func, which
            # is_pure_function is expected to follow.
            @functools.wraps(func)
            def wrapper():
                return func()

            return wrapper

        decorated_function = decorator(external_function)

        # Mock globals dict
        defs = {"decorated_function": decorated_function}
        cache = {}

        # Should follow the wrapped function and determine purity based on that
        result = is_pure_function(
            "decorated_function", decorated_function, defs, cache
        )
        # Should be True since the wrapped function is external
        assert result is True

    def test_nested_wrapped_functions(self):
        """Test handling of functions with multiple layers of wrapping."""

        def original_function():
            return "original"

        original_function.__module__ = "external_module"

        def decorator1(func: Any) -> Any:
            @functools.wraps(func)
            def wrapper1(*args: Any, **kwargs: Any):
                return func(*args, **kwargs)

            return wrapper1

        def decorator2(func: Any) -> Any:
            @functools.wraps(func)
            def wrapper2(*args: Any, **kwargs: Any):
                return func(*args, **kwargs)

            return wrapper2

        # Apply multiple decorators
        @decorator2
        @decorator1
        def nested_decorated():
            return original_function()

        defs = {"nested_decorated": nested_decorated}
        cache = {}

        # Should handle nested wrapping correctly.  Only the return type is
        # pinned here; the exact verdict is not asserted for nested chains.
        result = is_pure_function(
            "nested_decorated", nested_decorated, defs, cache
        )
        assert isinstance(result, bool)

    def test_wrapped_attribute_is_none(self):
        """Test handling when __wrapped__ exists but is None."""

        def function_with_none_wrapped():
            return 42

        function_with_none_wrapped.__module__ = "external_module"
        # Set __wrapped__ to None
        function_with_none_wrapped.__wrapped__ = None

        defs = {"function_with_none_wrapped": function_with_none_wrapped}
        cache = {}

        # Should handle None __wrapped__ gracefully
        result = is_pure_function(
            "function_with_none_wrapped",
            function_with_none_wrapped,
            defs,
            cache,
        )
        assert result is True  # External function should be pure

    def test_main_module_wrapped_function(self):
        """Test wrapped function from __main__ module."""

        def internal_function():
            return 42

        # A "__main__" module marks the function as notebook-internal.
        internal_function.__module__ = "__main__"

        def decorator(func):
            @functools.wraps(func)
            def wrapper():
                return func()

            return wrapper

        decorated_function = decorator(internal_function)

        defs = {
            "decorated_function": decorated_function,
            "internal_function": internal_function,
        }
        cache = {}

        # Should follow wrapped function and check if it's pure
        result = is_pure_function(
            "decorated_function", decorated_function, defs, cache
        )
        # Should be True since the wrapped function is also pure (no external refs)
        assert result is True
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/test_primitives.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_ast/app_data/notebook_filename.py | import marimo
__generated_with = "0.14.15"
app = marimo.App(width="medium")
with app.setup:
import marimo as mo
@app.cell
def _():
this_is_foo_path = mo.notebook_dir()
this_is_foo_path
return
@app.cell
def _():
this_is_foo_file = __file__
this_is_foo_file
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/app_data/notebook_filename.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_ast/sql_utils.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Literal, Optional, Union
from marimo import _loggers
from marimo._dependencies.dependencies import DependencyManager
from marimo._sql.error_utils import log_sql_error
LOGGER = _loggers.marimo_logger()
# DCL: Data Control Language, usually associated with auth (GRANT and REVOKE)
# DML: Data Manipulation Language, usually associated with changing data (INSERT, UPDATE, and DELETE)
# DQL: Data Query Language, usually associated with reading data (SELECT)
# DDL: Data Definition Language, usually associated with creating/altering/dropping tables (CREATE, ALTER, and DROP)
SQL_TYPE = Literal["DDL", "DML", "DQL", "DCL"]
SQLGLOT_DIALECTS = Literal[
"duckdb", "clickhouse", "mysql", "postgres", "sqlite"
]
def classify_sql_statement(
    sql_statement: str, dialect: Optional[SQLGLOT_DIALECTS] = None
) -> Union[SQL_TYPE, Literal["unknown"]]:
    """Classify a SQL statement as DDL, DML, or DQL.

    The first successfully parsed statement determines the result;
    unparsable input yields ``"unknown"``.
    """
    DependencyManager.sqlglot.require(why="SQL parsing")
    from sqlglot import exp, parse
    from sqlglot.errors import ParseError

    normalized = sql_statement.strip().lower()
    try:
        with _loggers.suppress_warnings_logs("sqlglot"):
            statements = parse(normalized, dialect=dialect)
    except ParseError as e:
        log_sql_error(
            LOGGER.debug,
            message="Failed to parse SQL statement for classification.",
            exception=e,
            rule_code="MF005",
            node=None,
            sql_content=normalized,
        )
        return "unknown"

    # Node kinds that mark a statement as definition vs. manipulation.
    ddl_nodes = (exp.Create, exp.Drop, exp.Alter, exp.Attach, exp.Detach)
    dml_nodes = (exp.Insert, exp.Update, exp.Delete)
    for statement in statements:
        if statement is None:
            # Empty statement (e.g. a stray ";"); try the next one.
            continue
        if statement.find(*ddl_nodes) is not None:
            return "DDL"
        if statement.find(*dml_nodes) is not None:
            return "DML"
        # Anything else that parsed cleanly is treated as a query.
        return "DQL"
    return "unknown"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_ast/sql_utils.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ast/test_sql_utils.py | # Copyright 2026 Marimo. All rights reserved.
import pytest
from marimo._ast.sql_utils import classify_sql_statement
from marimo._dependencies.dependencies import DependencyManager
@pytest.mark.skipif(
    not DependencyManager.sqlglot.has(), reason="Missing sqlglot"
)
class TestClassifySQLStatement:
    """Test cases for classify_sql_statement function."""

    def test_ddl_statements(self):
        """Test DDL (Data Definition Language) statements."""
        # CREATE statements
        assert classify_sql_statement("CREATE TABLE users (id INT)") == "DDL"
        assert (
            classify_sql_statement("CREATE INDEX idx_name ON table_name")
            == "DDL"
        )
        assert (
            classify_sql_statement(
                "CREATE VIEW my_view AS SELECT * FROM table"
            )
            == "DDL"
        )
        # DROP statements
        assert classify_sql_statement("DROP TABLE users") == "DDL"
        assert classify_sql_statement("DROP INDEX idx_name") == "DDL"
        assert classify_sql_statement("DROP VIEW my_view") == "DDL"
        # ALTER statements
        assert (
            classify_sql_statement(
                "ALTER TABLE users ADD COLUMN name VARCHAR(255)"
            )
            == "DDL"
        )
        assert (
            classify_sql_statement("ALTER TABLE users DROP COLUMN name")
            == "DDL"
        )
        # Modify, specific to MySQL (second positional arg is the dialect)
        assert (
            classify_sql_statement(
                "ALTER TABLE users MODIFY COLUMN name VARCHAR(100)", "mysql"
            )
            == "DDL"
        )
        # ATTACH statements (duckdb)
        assert (
            classify_sql_statement(
                "ATTACH DATABASE 'test.db' AS test", "duckdb"
            )
            == "DDL"
        )
        # DETACH statements (duckdb)
        assert (
            classify_sql_statement("DETACH DATABASE test", "duckdb") == "DDL"
        )

    def test_dml_statements(self):
        """Test DML (Data Manipulation Language) statements."""
        # INSERT statements
        assert (
            classify_sql_statement(
                "INSERT INTO users (name, age) VALUES ('John', 30)"
            )
            == "DML"
        )
        assert (
            classify_sql_statement(
                "INSERT INTO users SELECT * FROM temp_users"
            )
            == "DML"
        )
        # UPDATE statements
        assert (
            classify_sql_statement(
                "UPDATE users SET name = 'Jane' WHERE id = 1"
            )
            == "DML"
        )
        assert (
            classify_sql_statement("UPDATE users SET age = age + 1") == "DML"
        )
        # DELETE statements
        assert (
            classify_sql_statement("DELETE FROM users WHERE id = 1") == "DML"
        )
        assert classify_sql_statement("DELETE FROM users") == "DML"

    def test_dql_statements(self):
        """Test DQL (Data Query Language) statements."""
        # SELECT statements
        assert classify_sql_statement("SELECT * FROM users") == "DQL"
        assert (
            classify_sql_statement(
                "SELECT name, age FROM users WHERE age > 18"
            )
            == "DQL"
        )
        assert classify_sql_statement("SELECT COUNT(*) FROM users") == "DQL"
        assert (
            classify_sql_statement(
                "SELECT u.name, p.title FROM users u JOIN posts p ON u.id = p.user_id"
            )
            == "DQL"
        )

    @pytest.mark.skip(reason="DCL statements are not supported yet")
    def test_dcl_statements(self):
        """Test DCL (Data Control Language) statements."""
        assert classify_sql_statement("GRANT SELECT ON users TO john") == "DCL"
        assert (
            classify_sql_statement("REVOKE SELECT ON users FROM john") == "DCL"
        )
        assert classify_sql_statement("CREATE USER john") == "DCL"
        assert classify_sql_statement("DROP USER john") == "DCL"
        assert (
            classify_sql_statement(
                "ALTER USER john SET PASSWORD = 'new_password'"
            )
            == "DCL"
        )

    def test_case_insensitive(self):
        """Test that the function is case insensitive."""
        assert classify_sql_statement("create table users (id int)") == "DDL"
        assert classify_sql_statement("CREATE TABLE USERS (ID INT)") == "DDL"
        assert (
            classify_sql_statement("insert into users values (1, 'john')")
            == "DML"
        )
        assert (
            classify_sql_statement("INSERT INTO USERS VALUES (1, 'JOHN')")
            == "DML"
        )
        assert classify_sql_statement("select * from users") == "DQL"
        assert classify_sql_statement("SELECT * FROM USERS") == "DQL"

    def test_whitespace_handling(self):
        """Test that whitespace is properly handled."""
        assert (
            classify_sql_statement("  CREATE TABLE users (id INT)  ") == "DDL"
        )
        assert (
            classify_sql_statement("\nINSERT INTO users VALUES (1)\n") == "DML"
        )
        assert classify_sql_statement("\tSELECT * FROM users\t") == "DQL"

    def test_dialect_specific_statements(self):
        """Test statements with specific dialects."""
        # PostgreSQL specific
        assert (
            classify_sql_statement(
                "CREATE TABLE users (id SERIAL)", "postgres"
            )
            == "DDL"
        )
        # MySQL specific
        assert (
            classify_sql_statement(
                "CREATE TABLE users (id INT AUTO_INCREMENT)", "mysql"
            )
            == "DDL"
        )
        # SQLite specific
        assert (
            classify_sql_statement(
                "CREATE TABLE users (id INTEGER PRIMARY KEY)", "sqlite"
            )
            == "DDL"
        )
        # DuckDB specific
        assert (
            classify_sql_statement("CREATE TABLE users (id INTEGER)", "duckdb")
            == "DDL"
        )

    def test_complex_statements(self):
        """Test complex SQL statements."""
        # Complex DML with subqueries
        complex_dml = """
        UPDATE users
        SET last_login = CURRENT_TIMESTAMP
        WHERE id IN (SELECT user_id FROM sessions WHERE active = true)
        """
        assert classify_sql_statement(complex_dml) == "DML"
        # Test CTEs
        complex_dql = """
        WITH active_users AS (
            SELECT * FROM users WHERE active = true
        )
        SELECT * FROM active_users
        """
        assert classify_sql_statement(complex_dql) == "DQL"

    def test_edge_cases(self):
        """Test edge cases and unusual inputs."""
        # Empty string
        assert classify_sql_statement("") == "unknown"
        # Whitespace only
        assert classify_sql_statement("   ") == "unknown"
        # Invalid SQL
        assert classify_sql_statement("INVALID SQL STATEMENT") == "unknown"
        assert (
            classify_sql_statement("SELECT * FROM") == "unknown"
        )  # Incomplete statement
        # SQL with comments
        assert (
            classify_sql_statement("-- This is a comment\nSELECT * FROM users")
            == "DQL"
        )
        assert (
            classify_sql_statement(
                "/* Multi-line comment */\nCREATE TABLE users (id INT)"
            )
            == "DDL"
        )

    def test_multiple_statements(self):
        """Test that the function handles the first statement in a batch."""
        # The function processes the first statement in the list
        assert (
            classify_sql_statement("SELECT * FROM users; SELECT * FROM posts")
            == "DQL"
        )
        assert (
            classify_sql_statement(
                "CREATE TABLE users; INSERT INTO users VALUES (1)"
            )
            == "DDL"
        )
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/test_sql_utils.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_convert/non_marimo_python_script.py | # Copyright 2026 Marimo. All rights reserved.
"""Convert non-marimo Python scripts to marimo notebooks."""
from __future__ import annotations
import json
from marimo._convert.ipynb.to_ir import convert_from_ipynb_to_notebook_ir
from marimo._schemas.serialization import (
AppInstantiation,
Header,
NotebookSerialization,
UnparsableCell,
)
def convert_pypercent_script_to_notebook_ir(
    source: str,
) -> NotebookSerialization:
    """Turn a py:percent script into marimo notebook IR.

    The source is parsed into a Jupyter notebook via jupytext, then routed
    through the ipynb -> IR conversion pipeline.

    Raises:
        ImportError: if jupytext is not installed.
    """
    try:
        import jupytext  # type: ignore[import-untyped]
    except ImportError as e:
        raise ImportError(
            "Converting py:percent format requires jupytext"
        ) from e

    as_notebook = jupytext.reads(source, fmt="py:percent")
    ir = convert_from_ipynb_to_notebook_ir(json.dumps(as_notebook))
    _transform_main_blocks(ir)
    return ir
def convert_python_block_to_notebook_ir(
    source: str,
) -> NotebookSerialization:
    """Wrap a plain Python script in a single-cell notebook IR.

    Used when the script is valid Python but not in marimo's format.
    """
    single_cell_nb = {"cells": [{"source": source, "cell_type": "code"}]}
    ir = convert_from_ipynb_to_notebook_ir(json.dumps(single_cell_nb))
    _transform_main_blocks(ir)
    return ir
def convert_script_block_to_notebook_ir(
    source: str,
) -> NotebookSerialization:
    """Place arbitrary text into a one-cell notebook IR.

    The content goes into an UnparsableCell, making this suitable for
    scripts that are not valid Python.
    """
    unparsable = UnparsableCell(code=source)
    return NotebookSerialization(
        app=AppInstantiation(),
        header=Header(value=""),
        cells=[unparsable],
    )
def convert_non_marimo_python_script_to_notebook_ir(
    source: str,
) -> NotebookSerialization:
    """Convert a Python script that is not a marimo notebook into IR.

    Callers must have already ruled out a valid marimo notebook. Scripts
    containing a py:percent cell marker ("# %%") are converted through
    jupytext; everything else becomes a single code cell.
    """
    uses_pypercent = "# %%" in source
    if uses_pypercent:
        return convert_pypercent_script_to_notebook_ir(source)
    return convert_python_block_to_notebook_ir(source)
def convert_non_marimo_script_to_notebook_ir(
    source: str,
) -> NotebookSerialization:
    """Best-effort conversion of any text into marimo notebook IR.

    Strategies are tried in order: the Python-script converter (which may
    need jupytext), then a plain single-cell Python conversion if the
    source parses, and finally an unparsable-cell fallback.
    """
    try:
        return convert_non_marimo_python_script_to_notebook_ir(source)
    except ImportError:
        # jupytext unavailable; fall through to simpler strategies.
        pass

    try:
        import ast

        # Only valid Python may go into a code cell.
        ast.parse(source)
        return convert_python_block_to_notebook_ir(source)
    except SyntaxError:
        return convert_script_block_to_notebook_ir(source)
def _transform_main_blocks(ir: NotebookSerialization) -> None:
"""Transform if __name__ == "__main__": blocks in cells to marimo-compatible functions."""
main_pattern = 'if __name__ == "__main__":'
for cell in ir.cells:
if main_pattern in cell.code:
parts = cell.code.split(main_pattern, 1)
before_main = parts[0].strip()
# replace the if __name__ == "__main__": with def _main_():
main_block = "def _main_():" + parts[1] + "\n\n_main_()"
if before_main:
cell.code = before_main + "\n\n" + main_block
else:
cell.code = main_block
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_convert/non_marimo_python_script.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_ast/codegen_data/test_get_codes_non_marimo_python_script.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "altair==5.4.1",
# "duckdb==1.1.3",
# "marimo",
# ]
# ///
# NOTE(review): codegen test fixture — tests appear to read this file's
# contents; confirm fixture expectations before altering its text.
def hello():
    print("hello, world")
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/codegen_data/test_get_codes_non_marimo_python_script.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_convert/test_convert_non_marimo_python_script.py | import pytest
from marimo._convert.converters import MarimoConvert
from marimo._convert.non_marimo_python_script import (
convert_non_marimo_python_script_to_notebook_ir,
convert_non_marimo_script_to_notebook_ir,
convert_python_block_to_notebook_ir,
)
from marimo._dependencies.dependencies import DependencyManager
from tests.mocks import snapshotter
HAS_JUPYTEXT = DependencyManager.has("jupytext")
snapshot_test = snapshotter(__file__)
class TestConvertNonMarimoScriptToNotebookIr:
    """Test the convert_non_marimo_script_to_notebook_ir function."""

    def test_python_script_success(self) -> None:
        """Test successful Python script conversion."""
        source = '''"""A simple Python script."""
x = 5
y = 10
print(x + y)
'''
        ir = convert_non_marimo_script_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) == 1
        # CellDef objects don't have cell_type attribute
        assert "print(x + y)" in ir.cells[0].code

    @pytest.mark.skipif(not HAS_JUPYTEXT, reason="jupytext not installed")
    def test_pypercent_script_success(self) -> None:
        """Test successful pypercent script conversion."""
        source = '''"""Pypercent format notebook."""
# %%
x = 1
print(x)
'''
        ir = convert_non_marimo_script_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) > 0

    @pytest.mark.skipif(
        HAS_JUPYTEXT,
        reason="Check failure occurs when jupytext is not available",
    )
    def test_pypercent_script_without_jupytext(self) -> None:
        """Test pypercent script when jupytext is not available."""
        source = '''"""Pypercent format notebook."""
# %%
x = 1
print(x)
'''
        # Should fall back to Python block conversion
        ir = convert_non_marimo_script_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) == 1
        # CellDef objects don't have cell_type attribute
        assert "print(x)" in ir.cells[0].code

    def test_invalid_python_syntax(self) -> None:
        """Test script with invalid Python syntax."""
        source = """This is not valid Python code
def incomplete_function(
# Missing closing parenthesis
"""
        ir = convert_non_marimo_script_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) == 1
        # Should be an UnparsableCell
        assert hasattr(ir.cells[0], "code")
        # Strip trailing newlines for comparison
        assert ir.cells[0].code.rstrip() == source.rstrip()

    def test_bash_script(self) -> None:
        """Test non-Python script (bash)."""
        source = """#!/bin/bash
echo "Hello World"
for i in {1..5}; do
echo "Count: $i"
done
"""
        ir = convert_non_marimo_script_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) == 1
        # Should be an UnparsableCell
        assert hasattr(ir.cells[0], "code")
        # Strip trailing newlines for comparison
        assert ir.cells[0].code.rstrip() == source.rstrip()

    def test_markdown_text(self) -> None:
        """Test markdown text as script."""
        source = """# My Document
This is a markdown document with some content.
## Section 1
- Item 1
- Item 2
- Item 3
## Section 2
Some more content here.
"""
        ir = convert_non_marimo_script_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) == 1
        # Should be an UnparsableCell
        assert hasattr(ir.cells[0], "code")
        # Strip trailing newlines for comparison
        assert ir.cells[0].code.rstrip() == source.rstrip()

    def test_empty_python_block(self) -> None:
        """Test empty Python block conversion."""
        source = ""
        ir = convert_python_block_to_notebook_ir(source)
        assert ir.app is not None
        assert len(ir.cells) == 1
        # CellDef objects don't have cell_type attribute
        assert ir.cells[0].code.strip() == ""

    def test_basic_script_block(self) -> None:
        """Test basic script block conversion."""
        source = "This is some arbitrary text that is not Python code"
        ir = MarimoConvert.from_plain_text(source).to_ir()
        assert ir.app is not None
        assert ir.header.value == ""
        assert len(ir.cells) == 1
        assert ir.cells[0].code == source

    def test_script_block_with_special_characters(self) -> None:
        """Test script block with special characters."""
        source = """#!/bin/bash
echo "Hello World"
# This is a bash script, not Python
"""
        ir = MarimoConvert.from_non_marimo_python_script(
            source, aggressive=True
        ).to_ir()
        assert len(ir.cells) == 1
        assert ir.cells[0].code.strip() == source.strip()
class TestConvertNonMarimoPython:
    """Test conversion of non-marimo Python files to marimo notebooks.

    These tests compare the converted output against stored snapshots.
    """

    def test_simple_script(self) -> None:
        """Test conversion of a simple script."""
        source = '''"""A simple script."""
import math
def calculate_area(radius):
    return math.pi * radius ** 2
print(calculate_area(5))
'''
        ir = convert_non_marimo_python_script_to_notebook_ir(source)
        converted = MarimoConvert.from_ir(ir).to_py()
        snapshot_test("simple_script.py.txt", converted)

    def test_script_no_header(self) -> None:
        """Test conversion of a minimal script without header."""
        source = """x = 5
y = 10
print(x + y)
"""
        ir = convert_non_marimo_python_script_to_notebook_ir(source)
        converted = MarimoConvert.from_ir(ir).to_py()
        snapshot_test("minimal_script.py.txt", converted)

    @pytest.mark.skipif(not HAS_JUPYTEXT, reason="jupytext not installed")
    def test_pypercent_format(self) -> None:
        """Test conversion of pypercent format file."""
        source = '''"""Pypercent format notebook."""
import numpy as np
# %% [markdown]
# This is a markdown cell
# with multiple lines
# %% [markdown]
"""This is a doc string, but also markdown"""
# %%
# First code cell
x = np.array([1, 2, 3])
print(x)
# %% Data processing
# Cell with title
y = x * 2
print(y)
'''
        ir = convert_non_marimo_python_script_to_notebook_ir(source)
        converted = MarimoConvert.from_ir(ir).to_py()
        snapshot_test("pypercent_format.py.txt", converted)

    @pytest.mark.skipif(not HAS_JUPYTEXT, reason="jupytext not installed")
    def test_pypercent_markdown_only(self) -> None:
        """Test pypercent file with only markdown cells."""
        source = '''"""Documentation in pypercent format."""
# %% [markdown]
# Introduction
This is a documentation file.
# %% [markdown]
# Usage
Here's how to use this module.
'''
        ir = convert_non_marimo_python_script_to_notebook_ir(source)
        converted = MarimoConvert.from_ir(ir).to_py()
        snapshot_test("pypercent_markdown_only.py.txt", converted)

    @pytest.mark.skipif(not HAS_JUPYTEXT, reason="jupytext not installed")
    def test_pypercent_with_main_block(self) -> None:
        """Test pypercent file with main block."""
        source = '''"""Script with main block in pypercent."""
# %%
import sys
# %%
if __name__ == "__main__":
    print("Running as script")
    sys.exit(0)
'''
        ir = convert_non_marimo_python_script_to_notebook_ir(source)
        converted = MarimoConvert.from_ir(ir).to_py()
        snapshot_test("pypercent_with_main.py.txt", converted)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_convert/test_convert_non_marimo_python_script.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_version.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from importlib.metadata import PackageNotFoundError, version
try:
__version__ = version("marimo")
except PackageNotFoundError:
# package is not installed
__version__ = "unknown"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_version.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:scripts/modify_pyproject_for_marimo_base.py | #!/usr/bin/env uv run
"""
Python build tools generally only allow building
one project per Python project.
This modifies the pyproject.toml to build marimo-base,
a slimmed down marimo distribution with no static
artifacts.
Adapted from https://github.com/cvxpy/cvxpy/blob/297278e2a88db3c0084750052a16e60672074da3/.github/workflows/build.yml#L169C1-L180C1
"""
# /// script
# requires-python = ">=3.13"
# dependencies = ["tomlkit"]
#
# [tool.uv]
# exclude-newer = "2025-07-28T10:17:41.85442-04:00"
# ///
import pathlib

import tomlkit

# Repository root (this script lives in <root>/scripts/).
root = pathlib.Path(__file__).parent.parent

# Paths to strip from the marimo-base distribution: the LSP binaries plus
# every static asset except index.html.
force_exclude = ["marimo/_lsp"]
static_dir = root / "marimo/_static"
if static_dir.exists():
    force_exclude.extend([
        str(item.relative_to(root))
        for item in static_dir.iterdir()
        if item.name != "index.html"
    ])
else:
    print("No _static directory found, skipping")

with (root / "pyproject.toml").open(encoding="utf-8", mode="r") as f:
    data = tomlkit.load(f)

# Publish under the slimmed-down distribution name.
data["project"]["name"] = "marimo-base"

build_backend = data["tool"]["uv"]["build-backend"]
# Exclude the assets from both the sdist and the wheel, creating the
# exclude lists if the config does not have them yet.
if "source-exclude" not in build_backend:
    build_backend["source-exclude"] = []
build_backend["source-exclude"].extend(force_exclude)

if "wheel-exclude" not in build_backend:
    build_backend["wheel-exclude"] = []
build_backend["wheel-exclude"].extend(force_exclude)

# Rewrite pyproject.toml in place (tomlkit preserves formatting).
with (root / "pyproject.toml").open(encoding="utf-8", mode="w") as f:
    tomlkit.dump(data, f)

print("Successfully modified pyproject.toml for marimo-base build")
| {
"repo_id": "marimo-team/marimo",
"file_path": "scripts/modify_pyproject_for_marimo_base.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_plugins/stateless/test_plain_text.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._plugins.stateless.plain_text import plain_text
def test_plain_text_basic() -> None:
"""Test basic plain_text functionality."""
result = plain_text("Hello, World!")
assert "Hello, World!" in result.text
assert "<pre" in result.text
assert "</pre>" in result.text
def test_plain_text_html_escaping() -> None:
"""Test that plain_text properly escapes HTML characters."""
# Test the reported bug case
result = plain_text("<x")
assert "<x" in result.text
assert "<x" not in result.text or result.text.count("<x") == 0
# Test other HTML characters
result = plain_text("</script>")
assert "</script>" in result.text
result = plain_text("<div>content</div>")
assert "<div>content</div>" in result.text
result = plain_text("&")
assert "&amp;" in result.text
def test_plain_text_preserves_whitespace() -> None:
"""Test that plain_text preserves spaces and newlines."""
text_with_spaces = "line 1\n line 2 with spaces\nline 3"
result = plain_text(text_with_spaces)
# The text should contain the original spacing
assert "line 1" in result.text
assert " line 2 with spaces" in result.text
assert "line 3" in result.text
def test_plain_text_empty_string() -> None:
"""Test plain_text with empty string."""
result = plain_text("")
assert result.text == "<pre class='text-xs'></pre>"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/stateless/test_plain_text.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/ai/test_mcp.py | # Copyright 2026 Marimo. All rights reserved.
import asyncio
from unittest.mock import AsyncMock, patch
import pytest
from marimo._config.config import (
MCPConfig,
MCPServerStdioConfig,
MCPServerStreamableHttpConfig,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._server.ai.mcp import (
MCP_PRESETS,
MCPClient,
MCPConfigComparator,
MCPServerConnection,
MCPServerDefinition,
MCPServerDefinitionFactory,
MCPServerStatus,
MCPTransportRegistry,
MCPTransportType,
StdioTransportConnector,
StreamableHTTPTransportConnector,
append_presets,
get_mcp_client,
)
# test fixtures and helpers
@pytest.fixture
def mock_session_setup():
"""Create a properly configured mock session with async context manager behavior."""
def _create_mock_session(additional_methods=None, side_effects=None):
mock_session = AsyncMock()
mock_session.initialize = AsyncMock()
mock_session.list_tools = AsyncMock()
mock_session.list_tools.return_value.tools = []
# Add any additional methods specified
if additional_methods:
for method_name, method_mock in additional_methods.items():
setattr(mock_session, method_name, method_mock)
# Apply any side effects
if side_effects:
for method_name, side_effect in side_effects.items():
getattr(mock_session, method_name).side_effect = side_effect
mock_session_context = AsyncMock()
mock_session_context.__aenter__ = AsyncMock(return_value=mock_session)
mock_session_context.__aexit__ = AsyncMock(return_value=None)
return mock_session, mock_session_context
return _create_mock_session
@pytest.fixture
def mock_stdio_setup():
"""Create a properly configured mock stdio client with async context manager behavior."""
def _create_mock_stdio():
mock_read = AsyncMock()
mock_write = AsyncMock()
mock_stdio_context = AsyncMock()
mock_stdio_context.__aenter__ = AsyncMock(
return_value=(mock_read, mock_write)
)
mock_stdio_context.__aexit__ = AsyncMock(return_value=None)
return mock_read, mock_write, mock_stdio_context
return _create_mock_stdio
def create_test_server_definition(
name: str = "test_server",
command: str = "test",
args: list = None,
env: dict = None,
timeout: float = None,
) -> MCPServerDefinition:
"""Create a test server definition with sensible defaults."""
if args is None:
args = []
if env is None:
env = {}
config = MCPServerStdioConfig(command=command, args=args, env=env)
server_def = MCPServerDefinitionFactory.from_config(name, config)
if timeout is not None:
server_def.timeout = timeout
return server_def
def create_test_server_connection(
name: str = "test_server",
command: str = "test",
args: list = None,
env: dict = None,
status: MCPServerStatus = MCPServerStatus.DISCONNECTED,
session=None,
timeout: float = None,
) -> MCPServerConnection:
"""Create a test server connection with sensible defaults."""
server_def = create_test_server_definition(
name, command, args, env, timeout
)
connection = MCPServerConnection(definition=server_def)
connection.status = status
connection.session = session
return connection
def create_test_tool(
name: str = "test_tool",
description: str = "Test tool",
server_name: str = "test_server",
namespaced_name: str = None,
input_schema: dict = None,
):
"""Create a test tool with sensible defaults."""
if DependencyManager.mcp.has():
from mcp.types import Tool
if input_schema is None:
input_schema = {"type": "object"}
if namespaced_name is None:
namespaced_name = f"mcp_{server_name}_{name}"
return Tool(
name=name,
description=description,
inputSchema=input_schema,
_meta={
"server_name": server_name,
"namespaced_name": namespaced_name,
},
)
return None
# tests
class TestMCPServerDefinition:
"""Test cases for MCPServerDefinition class."""
@pytest.mark.parametrize(
("config_type", "expected_transport", "config_kwargs"),
[
pytest.param(
MCPServerStdioConfig,
MCPTransportType.STDIO,
{
"command": "python",
"args": ["server.py"],
"env": {"API_KEY": "test"},
},
id="stdio_transport",
),
pytest.param(
MCPServerStreamableHttpConfig,
MCPTransportType.STREAMABLE_HTTP,
{
"url": "https://api.example.com/mcp",
"headers": {"Auth": "Bearer token"},
"timeout": 45.0,
},
id="http_transport",
),
],
)
def test_from_config_transport_detection(
self, config_type, expected_transport, config_kwargs
):
"""Test that transport types are correctly auto-detected from configuration."""
config = config_type(**config_kwargs)
server_def = MCPServerDefinitionFactory.from_config(
"test_server", config
)
assert server_def.name == "test_server"
assert server_def.transport == expected_transport
assert server_def.config == config
# Verify transport-specific attributes are available from config
if expected_transport == MCPTransportType.STDIO:
assert server_def.config["command"] == config_kwargs["command"]
assert server_def.config.get("args") == config_kwargs["args"]
assert server_def.config.get("env") == config_kwargs["env"]
elif expected_transport == MCPTransportType.STREAMABLE_HTTP:
assert server_def.config["url"] == config_kwargs["url"]
assert server_def.config.get("headers") == config_kwargs["headers"]
assert server_def.timeout == config_kwargs["timeout"]
class TestMCPConfigComparator:
"""Test cases for MCPConfigComparator utility class."""
def test_compute_diff_no_changes(self):
"""Test that compute_diff detects no changes when configs are identical."""
server1 = MCPServerDefinition(
name="server1",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test", args=[], env={}),
timeout=30.0,
)
current = {"server1": server1}
new = {"server1": server1}
diff = MCPConfigComparator.compute_diff(current, new)
assert not diff.has_changes()
assert len(diff.servers_to_add) == 0
assert len(diff.servers_to_remove) == 0
assert len(diff.servers_to_update) == 0
assert "server1" in diff.servers_unchanged
def test_compute_diff_add_servers(self):
"""Test that compute_diff detects new servers."""
server1 = MCPServerDefinition(
name="server1",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test1", args=[], env={}),
timeout=30.0,
)
server2 = MCPServerDefinition(
name="server2",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test2", args=[], env={}),
timeout=30.0,
)
current = {"server1": server1}
new = {"server1": server1, "server2": server2}
diff = MCPConfigComparator.compute_diff(current, new)
assert diff.has_changes()
assert "server2" in diff.servers_to_add
assert len(diff.servers_to_remove) == 0
assert len(diff.servers_to_update) == 0
assert "server1" in diff.servers_unchanged
def test_compute_diff_remove_servers(self):
"""Test that compute_diff detects removed servers."""
server1 = MCPServerDefinition(
name="server1",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test1", args=[], env={}),
timeout=30.0,
)
server2 = MCPServerDefinition(
name="server2",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test2", args=[], env={}),
timeout=30.0,
)
current = {"server1": server1, "server2": server2}
new = {"server1": server1}
diff = MCPConfigComparator.compute_diff(current, new)
assert diff.has_changes()
assert "server2" in diff.servers_to_remove
assert len(diff.servers_to_add) == 0
assert len(diff.servers_to_update) == 0
assert "server1" in diff.servers_unchanged
def test_compute_diff_update_servers(self):
"""Test that compute_diff detects modified servers."""
server1_old = MCPServerDefinition(
name="server1",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(
command="test", args=["--old"], env={}
),
timeout=30.0,
)
server1_new = MCPServerDefinition(
name="server1",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(
command="test", args=["--new"], env={}
),
timeout=30.0,
)
current = {"server1": server1_old}
new = {"server1": server1_new}
diff = MCPConfigComparator.compute_diff(current, new)
assert diff.has_changes()
assert "server1" in diff.servers_to_update
assert len(diff.servers_to_add) == 0
assert len(diff.servers_to_remove) == 0
assert len(diff.servers_unchanged) == 0
def test_compute_diff_mixed_changes(self):
"""Test compute_diff with multiple types of changes."""
server1 = MCPServerDefinition(
name="unchanged",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test1", args=[], env={}),
timeout=30.0,
)
server2_old = MCPServerDefinition(
name="updated",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(
command="test2", args=["--old"], env={}
),
timeout=30.0,
)
server2_new = MCPServerDefinition(
name="updated",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(
command="test2", args=["--new"], env={}
),
timeout=30.0,
)
server3 = MCPServerDefinition(
name="removed",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test3", args=[], env={}),
timeout=30.0,
)
server4 = MCPServerDefinition(
name="added",
transport=MCPTransportType.STDIO,
config=MCPServerStdioConfig(command="test4", args=[], env={}),
timeout=30.0,
)
current = {
"unchanged": server1,
"updated": server2_old,
"removed": server3,
}
new = {"unchanged": server1, "updated": server2_new, "added": server4}
diff = MCPConfigComparator.compute_diff(current, new)
assert diff.has_changes()
assert "unchanged" in diff.servers_unchanged
assert "updated" in diff.servers_to_update
assert "removed" in diff.servers_to_remove
assert "added" in diff.servers_to_add
class TestMCPPresets:
"""Test cases for MCP preset configuration system."""
def test_preset_definitions_exist(self):
"""Test that expected presets are defined."""
assert "marimo" in MCP_PRESETS
assert "context7" in MCP_PRESETS
# Verify preset structure
assert "url" in MCP_PRESETS["marimo"]
assert "url" in MCP_PRESETS["context7"]
def test_append_presets_no_presets_list(self):
"""Test append_presets with config that has no presets list."""
config = MCPConfig(
mcpServers={
"custom": MCPServerStdioConfig(command="test", args=[])
}
)
result = append_presets(config)
# Should return config unchanged
assert "custom" in result["mcpServers"]
assert len(result["mcpServers"]) == 1
def test_append_presets_empty_presets_list(self):
"""Test append_presets with empty presets list."""
config = MCPConfig(mcpServers={}, presets=[])
result = append_presets(config)
assert len(result["mcpServers"]) == 0
def test_append_presets_adds_marimo_preset(self):
"""Test that marimo preset is added when specified."""
config = MCPConfig(mcpServers={}, presets=["marimo"])
result = append_presets(config)
assert "marimo" in result["mcpServers"]
assert (
result["mcpServers"]["marimo"]["url"]
== MCP_PRESETS["marimo"]["url"]
)
def test_append_presets_adds_context7_preset(self):
"""Test that context7 preset is added when specified."""
config = MCPConfig(mcpServers={}, presets=["context7"])
result = append_presets(config)
assert "context7" in result["mcpServers"]
assert (
result["mcpServers"]["context7"]["url"]
== MCP_PRESETS["context7"]["url"]
)
def test_append_presets_adds_multiple_presets(self):
"""Test that multiple presets can be added."""
config = MCPConfig(mcpServers={}, presets=["marimo", "context7"])
result = append_presets(config)
assert "marimo" in result["mcpServers"]
assert "context7" in result["mcpServers"]
assert len(result["mcpServers"]) == 2
def test_append_presets_preserves_existing_servers(self):
"""Test that existing servers are preserved when adding presets."""
config = MCPConfig(
mcpServers={
"custom": MCPServerStdioConfig(command="test", args=[])
},
presets=["marimo"],
)
result = append_presets(config)
assert "custom" in result["mcpServers"]
assert "marimo" in result["mcpServers"]
assert len(result["mcpServers"]) == 2
def test_append_presets_does_not_override_existing(self):
"""Test that presets don't override existing servers with same name."""
custom_url = "https://custom.marimo.app/mcp"
config = MCPConfig(
mcpServers={
"marimo": MCPServerStreamableHttpConfig(url=custom_url)
},
presets=["marimo"],
)
result = append_presets(config)
# Original server should be preserved
assert result["mcpServers"]["marimo"]["url"] == custom_url
assert len(result["mcpServers"]) == 1
def test_append_presets_does_not_mutate_original(self):
"""Test that append_presets doesn't mutate the original config."""
config = MCPConfig(mcpServers={}, presets=["marimo"])
result = append_presets(config)
# Original config should be unchanged
assert "marimo" not in config["mcpServers"]
# Result should have the preset
assert "marimo" in result["mcpServers"]
class TestMCPTransportConnectors:
"""Test cases for transport connector classes."""
def test_transport_registry_functionality(self):
"""Test that the transport registry properly handles all transport types."""
registry = MCPTransportRegistry()
# Test that all transport types are supported
for transport_type in MCPTransportType:
connector = registry.get_connector(transport_type)
assert connector is not None
# Test unsupported transport type
with pytest.raises(ValueError, match="Unsupported transport type"):
registry.get_connector("unsupported_transport") # type: ignore
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
@patch("mcp.client.stdio.stdio_client")
async def test_stdio_connector_connect(self, mock_stdio_client):
"""Test STDIO transport connector connection."""
# Setup mocks
mock_read = AsyncMock()
mock_write = AsyncMock()
mock_context = AsyncMock()
mock_context.__aenter__ = AsyncMock(
return_value=(mock_read, mock_write)
)
mock_context.__aexit__ = AsyncMock(return_value=None)
mock_stdio_client.return_value = mock_context
# Create connector and test connection
connector = StdioTransportConnector()
config = MCPServerStdioConfig(
command="python", args=["server.py"], env={"TEST_VAR": "value"}
)
server_def = MCPServerDefinition(
name="test", transport=MCPTransportType.STDIO, config=config
)
from contextlib import AsyncExitStack
async with AsyncExitStack() as exit_stack:
read, write = await connector.connect(server_def, exit_stack)
assert read == mock_read
assert write == mock_write
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
@patch("mcp.client.streamable_http.streamablehttp_client")
async def test_http_connector_connect(self, mock_http_client):
"""Test HTTP transport connector connection."""
# Setup mocks
mock_read = AsyncMock()
mock_write = AsyncMock()
mock_context = AsyncMock()
mock_context.__aenter__ = AsyncMock(
return_value=(mock_read, mock_write)
)
mock_context.__aexit__ = AsyncMock(return_value=None)
mock_http_client.return_value = mock_context
# Create connector and test connection
connector = StreamableHTTPTransportConnector()
config = MCPServerStreamableHttpConfig(
url="https://api.example.com/mcp",
headers={"Authorization": "Bearer token"},
timeout=30.0,
)
server_def = MCPServerDefinition(
name="test",
transport=MCPTransportType.STREAMABLE_HTTP,
config=config,
timeout=30.0,
)
from contextlib import AsyncExitStack
async with AsyncExitStack() as exit_stack:
read, write = await connector.connect(server_def, exit_stack)
assert read == mock_read
assert write == mock_write
class TestMCPClientConfiguration:
"""Test cases for MCPClient configuration parsing and initialization."""
def test_init_with_empty_config(self):
"""Test MCPClient initialization with empty config."""
client = MCPClient()
assert client.servers == {}
assert client.connections == {}
assert client.tool_registry == {}
@pytest.mark.parametrize(
("server_configs", "expected_servers"),
[
pytest.param(
{
"stdio_server": MCPServerStdioConfig(
command="python",
args=["test.py"],
env={"TEST": "value"},
),
},
["stdio_server"],
id="single_stdio_server",
),
pytest.param(
{
"http_server": MCPServerStreamableHttpConfig(
url="https://api.example.com/mcp",
headers={"Auth": "Bearer token"},
),
},
["http_server"],
id="single_http_server",
),
pytest.param(
{
"stdio_server": MCPServerStdioConfig(
command="python", args=["test.py"]
),
"http_server": MCPServerStreamableHttpConfig(
url="https://api.example.com/mcp"
),
},
["stdio_server", "http_server"],
id="mixed_servers",
),
],
)
def test_parse_config_valid_servers(
self, server_configs, expected_servers
):
"""Test parsing valid server configurations."""
config = MCPConfig(mcpServers=server_configs)
client = MCPClient()
# Parse the config to populate servers
parsed_servers = client._parse_config(config)
client.servers = parsed_servers
assert len(client.servers) == len(expected_servers)
for server_name in expected_servers:
assert server_name in client.servers
server_def = client.servers[server_name]
assert server_def.name == server_name
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
class TestMCPClientReconfiguration:
"""Test cases for MCPClient dynamic reconfiguration functionality."""
async def test_configure_noop_when_no_changes(self, mock_session_setup):
"""Test that configure() does nothing when config hasn't changed."""
del mock_session_setup
config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test", args=[], env={}
)
}
)
client = MCPClient()
# Initial configure
await client.configure(config)
# Track calls to connect_to_server
original_connect = client.connect_to_server
connect_calls = []
async def track_connect(server_name: str):
connect_calls.append(server_name)
return await original_connect(server_name)
client.connect_to_server = track_connect
# Configure with same config
await client.configure(config)
# Should not have called connect_to_server
assert len(connect_calls) == 0
async def test_configure_adds_new_servers(self, mock_session_setup):
"""Test that configure() adds new servers."""
del mock_session_setup
initial_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=[], env={}
)
}
)
client = MCPClient()
await client.configure(initial_config)
# New config with additional server
new_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=[], env={}
),
"server2": MCPServerStdioConfig(
command="test2", args=[], env={}
),
}
)
# Mock the connection methods
mock_connect = AsyncMock(return_value=True)
with patch.object(client, "connect_to_server", mock_connect):
await client.configure(new_config)
# Verify server2 was added
assert "server1" in client.servers
assert "server2" in client.servers
assert mock_connect.called
# Should only connect to server2 (the new one)
assert mock_connect.call_count == 1
mock_connect.assert_called_with("server2")
async def test_configure_removes_old_servers(self, mock_session_setup):
"""Test that configure() removes servers not in new config."""
del mock_session_setup
initial_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=[], env={}
),
"server2": MCPServerStdioConfig(
command="test2", args=[], env={}
),
}
)
client = MCPClient()
await client.configure(initial_config)
# Create mock connections
client.connections["server1"] = create_test_server_connection(
"server1", MCPServerStatus.CONNECTED
)
client.connections["server2"] = create_test_server_connection(
"server2", MCPServerStatus.CONNECTED
)
# New config with only server1
new_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=[], env={}
)
}
)
# Mock disconnect_from_server
mock_disconnect = AsyncMock(return_value=True)
with patch.object(client, "disconnect_from_server", mock_disconnect):
await client.configure(new_config)
# Verify server2 was removed
assert "server1" in client.servers
assert "server2" not in client.servers
assert "server2" not in client.connections
# Should have called disconnect for server2
mock_disconnect.assert_called_once_with("server2")
async def test_configure_updates_modified_servers(
self, mock_session_setup
):
"""Test that configure() reconnects to servers with changed config."""
del mock_session_setup
initial_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=["--old"], env={}
)
}
)
client = MCPClient()
await client.configure(initial_config)
# Create mock connection
client.connections["server1"] = create_test_server_connection(
"server1", MCPServerStatus.CONNECTED
)
# New config with modified server1
new_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=["--new"], env={}
)
}
)
# Mock methods
mock_disconnect = AsyncMock(return_value=True)
mock_connect = AsyncMock(return_value=True)
with (
patch.object(client, "disconnect_from_server", mock_disconnect),
patch.object(client, "connect_to_server", mock_connect),
):
await client.configure(new_config)
# Should have disconnected and reconnected to server1
mock_disconnect.assert_called_once_with("server1")
mock_connect.assert_called_once_with("server1")
# Verify config was updated
assert client.servers["server1"].config["args"] == ["--new"]
async def test_configure_mixed_changes(self, mock_session_setup):
"""Test configure() with add, remove, and update operations."""
del mock_session_setup
initial_config = MCPConfig(
mcpServers={
"keep_unchanged": MCPServerStdioConfig(
command="test1", args=[], env={}
),
"to_update": MCPServerStdioConfig(
command="test2", args=["--old"], env={}
),
"to_remove": MCPServerStdioConfig(
command="test3", args=[], env={}
),
}
)
client = MCPClient()
await client.configure(initial_config)
# Create mock connections
for name in ["keep_unchanged", "to_update", "to_remove"]:
client.connections[name] = create_test_server_connection(
name, MCPServerStatus.CONNECTED
)
# New config
new_config = MCPConfig(
mcpServers={
"keep_unchanged": MCPServerStdioConfig(
command="test1", args=[], env={}
),
"to_update": MCPServerStdioConfig(
command="test2", args=["--new"], env={}
),
"to_add": MCPServerStdioConfig(
command="test4", args=[], env={}
),
}
)
# Mock methods
mock_disconnect = AsyncMock(return_value=True)
mock_connect = AsyncMock(return_value=True)
with (
patch.object(client, "disconnect_from_server", mock_disconnect),
patch.object(client, "connect_to_server", mock_connect),
):
await client.configure(new_config)
# Verify results
assert "keep_unchanged" in client.servers
assert "to_update" in client.servers
assert "to_add" in client.servers
assert "to_remove" not in client.servers
assert "to_remove" not in client.connections
# Verify disconnect was called for removed and updated
assert mock_disconnect.call_count == 2
disconnect_calls = [
call[0][0] for call in mock_disconnect.call_args_list
]
assert "to_remove" in disconnect_calls
assert "to_update" in disconnect_calls
# Verify connect was called for added and updated
assert mock_connect.call_count == 2
connect_calls = [call[0][0] for call in mock_connect.call_args_list]
assert "to_add" in connect_calls
assert "to_update" in connect_calls
async def test_configure_connection_failures_logged(
self, mock_session_setup
):
"""Test that configure() handles connection failures gracefully."""
del mock_session_setup
initial_config = MCPConfig(mcpServers={})
client = MCPClient()
await client.configure(initial_config)
new_config = MCPConfig(
mcpServers={
"server1": MCPServerStdioConfig(
command="test1", args=[], env={}
)
}
)
# Mock connect_to_server to fail
mock_connect = AsyncMock(side_effect=Exception("Connection failed"))
with patch.object(client, "connect_to_server", mock_connect):
# Should not raise, just log
await client.configure(new_config)
# Server should still be in registry even if connection failed
assert "server1" in client.servers
class TestMCPClientToolManagement:
"""Test cases for MCPClient tool management functionality."""
def test_create_namespaced_tool_name_no_conflict(self):
"""Test creating namespaced tool name without conflicts."""
client = MCPClient()
name = client._create_namespaced_tool_name("github", "create_issue")
assert name == "mcp_github_create_issue"
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
def test_create_namespaced_tool_name_with_conflicts(self):
"""Test creating namespaced tool name with conflicts and counter resolution."""
client = MCPClient()
from mcp.types import Tool
# Create first tool - should get base name
name1 = client._create_namespaced_tool_name("github", "create_issue")
assert name1 == "mcp_github_create_issue"
# Add it to registry
tool1 = Tool(
name="create_issue",
description="Test tool",
inputSchema={},
_meta={"server_name": "github", "namespaced_name": name1},
)
client.tool_registry[name1] = tool1
# Create second tool with same name - should get numbered suffix
name2 = client._create_namespaced_tool_name("github", "create_issue")
assert name2 == "mcp_github1_create_issue"
# Create third tool - should get next counter
name3 = client._create_namespaced_tool_name("github", "create_issue")
assert name3 == "mcp_github2_create_issue"
# All names should be unique
assert len({name1, name2, name3}) == 3
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
def test_add_server_tools(self):
"""Test adding tools from a server to registry and connection."""
client = MCPClient()
from mcp.types import Tool
# Create server connection
connection = create_test_server_connection()
# Create raw tools to add
raw_tools = [
Tool(
name="tool1",
description="Test tool 1",
inputSchema={"type": "object"},
),
Tool(
name="tool2",
description="Test tool 2",
inputSchema={"type": "object"},
),
]
# Add tools
client._add_server_tools(connection, raw_tools)
# Verify tools are added to connection
assert len(connection.tools) == 2
# Verify tools are added to registry with proper namespacing
assert "mcp_test_server_tool1" in client.tool_registry
assert "mcp_test_server_tool2" in client.tool_registry
# Verify tool metadata
tool1 = client.tool_registry["mcp_test_server_tool1"]
assert tool1.meta["server_name"] == "test_server"
assert tool1.meta["namespaced_name"] == "mcp_test_server_tool1"
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
def test_remove_server_tools(self):
"""Test removing tools from a server."""
client = MCPClient()
from mcp.types import Tool
# Create tools from different servers
server1_tools = [
(
"mcp_server1_tool1",
Tool(
name="tool1",
description="Test",
inputSchema={},
_meta={
"server_name": "server1",
"namespaced_name": "mcp_server1_tool1",
},
),
),
(
"mcp_server1_tool2",
Tool(
name="tool2",
description="Test",
inputSchema={},
_meta={
"server_name": "server1",
"namespaced_name": "mcp_server1_tool2",
},
),
),
]
server2_tools = [
(
"mcp_server2_tool3",
Tool(
name="tool3",
description="Test",
inputSchema={},
_meta={
"server_name": "server2",
"namespaced_name": "mcp_server2_tool3",
},
),
)
]
# Add tools to registry
for namespaced_name, tool in server1_tools + server2_tools:
client.tool_registry[namespaced_name] = tool
# Create connection and add tools
connection = create_test_server_connection(name="server1")
connection.tools = [tool for _, tool in server1_tools]
client.connections["server1"] = connection
# Set a counter for the server
client.server_counters["server1"] = 3
# Remove tools from server1
client._remove_server_tools("server1")
# Verify server1 tools are removed
for namespaced_name, _ in server1_tools:
assert namespaced_name not in client.tool_registry
# Verify server2 tools remain
for namespaced_name, _ in server2_tools:
assert namespaced_name in client.tool_registry
# Verify connection tools are cleared
assert len(connection.tools) == 0
# Verify counter is reset
assert "server1" not in client.server_counters
@pytest.mark.skipif(
not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
@pytest.mark.parametrize(
("server_name", "expected_tool_count"),
[
pytest.param("server1", 2, id="existing_server"),
pytest.param("nonexistent", 0, id="nonexistent_server"),
],
)
def test_get_tools_by_server(self, server_name, expected_tool_count):
"""Test getting tools by server name."""
client = MCPClient()
from mcp.types import Tool
# Add tools from different servers
tools_data = [
("mcp_server1_tool1", "server1"),
("mcp_server1_tool2", "server1"),
("mcp_server2_tool3", "server2"),
]
for namespaced_name, server in tools_data:
tool = Tool(
name=namespaced_name.split("_")[-1],
description="Test",
inputSchema={},
_meta={
"server_name": server,
"namespaced_name": namespaced_name,
},
)
client.tool_registry[namespaced_name] = tool
# Get tools by server
tools = client.get_tools_by_server(server_name)
assert len(tools) == expected_tool_count
@pytest.mark.skipif(
    not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
class TestMCPClientToolExecution:
    """Test cases for MCPClient tool execution functionality.

    Covers parameter construction, error paths (missing tool, disconnected
    server, missing session), successful invocation, timeout handling, and
    the CallToolResult helper methods.
    """

    def test_create_tool_params(self):
        """Test creating properly typed CallToolRequestParams."""
        client = MCPClient()
        # Add a mock tool to the registry
        mock_tool = create_test_tool()
        client.tool_registry["mcp_test_server_test_tool"] = mock_tool
        # Test creating tool params with arguments
        params = client.create_tool_params(
            "mcp_test_server_test_tool", {"arg1": "value1"}
        )
        # Params carry the short tool name, not the namespaced one.
        assert params.name == "test_tool"
        assert params.arguments == {"arg1": "value1"}
        # Test with no arguments
        params_no_args = client.create_tool_params("mcp_test_server_test_tool")
        assert params_no_args.name == "test_tool"
        assert params_no_args.arguments is None
        # Test with non-existent tool
        with pytest.raises(ValueError, match="Tool 'nonexistent' not found"):
            client.create_tool_params("nonexistent")

    @pytest.mark.parametrize(
        ("tool_setup", "connection_setup", "expected_error_pattern"),
        [
            pytest.param(
                None,  # No tool setup
                {"status": MCPServerStatus.CONNECTED, "session": AsyncMock()},
                "Tool 'nonexistent_tool' not found",
                id="tool_not_found",
            ),
            pytest.param(
                {"server_name": "test_server"},
                {"status": MCPServerStatus.DISCONNECTED, "session": None},
                "Server 'test_server' is not connected",
                id="server_not_connected",
            ),
            pytest.param(
                {"server_name": "test_server"},
                {"status": MCPServerStatus.CONNECTED, "session": None},
                "No active session for server 'test_server'",
                id="no_active_session",
            ),
        ],
    )
    async def test_invoke_tool_error_cases(
        self, tool_setup, connection_setup, expected_error_pattern
    ):
        """Test invoke_tool error handling scenarios.

        Each case expects invoke_tool to return an error CallToolResult
        (not raise) whose text contains expected_error_pattern.
        """
        client = MCPClient()

        from mcp.types import Tool

        # Setup tool if provided
        if tool_setup:
            mock_tool = Tool(
                name="test_tool",
                description="Test tool",
                inputSchema={},
                _meta={
                    "server_name": tool_setup["server_name"],
                    "namespaced_name": "mcp_test_server_test_tool",
                },
            )
            client.tool_registry["mcp_test_server_test_tool"] = mock_tool

            # Setup connection
            server_def = MCPServerDefinitionFactory.from_config(
                "test_server", MCPServerStdioConfig(command="test", args=[])
            )
            connection = MCPServerConnection(definition=server_def)
            connection.status = connection_setup["status"]
            connection.session = connection_setup["session"]
            client.connections["test_server"] = connection

            # Create params for the tool
            params = client.create_tool_params(
                "mcp_test_server_test_tool", {"arg1": "value1"}
            )
            tool_name = "mcp_test_server_test_tool"
        else:
            # Use non-existent tool
            from mcp.types import CallToolRequestParams

            params = CallToolRequestParams(
                name="nonexistent", arguments={"arg1": "value1"}
            )
            tool_name = "nonexistent_tool"

        # Test tool invocation
        result = await client.invoke_tool(tool_name, params)
        # Verify it's an error result
        assert client.is_error_result(result) is True
        # Verify error message
        error_messages = client.extract_text_content(result)
        assert len(error_messages) > 0
        assert expected_error_pattern in error_messages[0]

    async def test_invoke_tool_success(self):
        """Test successful tool invocation."""
        client = MCPClient()

        from mcp.types import CallToolResult, TextContent

        # Setup tool
        mock_tool = create_test_tool()
        client.tool_registry["mcp_test_server_test_tool"] = mock_tool
        # Setup connection with mock session
        connection = create_test_server_connection(
            status=MCPServerStatus.CONNECTED, session=AsyncMock()
        )
        # Mock successful tool result
        expected_result = CallToolResult(
            content=[
                TextContent(type="text", text="Tool executed successfully")
            ]
        )
        connection.session.call_tool = AsyncMock(return_value=expected_result)
        client.connections["test_server"] = connection
        # Create params and invoke tool
        params = client.create_tool_params(
            "mcp_test_server_test_tool", {"arg1": "value1"}
        )
        result = await client.invoke_tool("mcp_test_server_test_tool", params)
        # Verify result
        assert client.is_error_result(result) is False
        text_contents = client.extract_text_content(result)
        assert "Tool executed successfully" in text_contents[0]
        # Verify session was called correctly: short tool name + raw args.
        connection.session.call_tool.assert_called_once_with(
            "test_tool", {"arg1": "value1"}
        )

    async def test_invoke_tool_timeout(self):
        """Test tool invocation timeout handling."""
        client = MCPClient()
        # Setup tool
        mock_tool = create_test_tool()
        client.tool_registry["mcp_test_server_test_tool"] = mock_tool
        # Setup connection with timeout
        connection = create_test_server_connection(
            timeout=0.1,  # Very short timeout
            status=MCPServerStatus.CONNECTED,
            session=AsyncMock(),
        )

        # Mock session to hang longer than timeout
        async def slow_call_tool(_name, _args):
            await asyncio.sleep(1)  # Longer than timeout

        connection.session.call_tool = AsyncMock(side_effect=slow_call_tool)
        client.connections["test_server"] = connection
        # Create params and invoke tool
        params = client.create_tool_params(
            "mcp_test_server_test_tool", {"arg1": "value1"}
        )
        result = await client.invoke_tool("mcp_test_server_test_tool", params)
        # Verify timeout error is reported as an error result, not raised.
        assert client.is_error_result(result) is True
        error_messages = client.extract_text_content(result)
        assert "timed out" in error_messages[0]

    @pytest.mark.parametrize(
        ("result_content", "expected_is_error", "expected_text_count"),
        [
            pytest.param(
                [{"type": "text", "text": "Success message"}],
                False,
                1,
                id="success_result",
            ),
            pytest.param(
                [{"type": "text", "text": "Error occurred"}],
                True,
                1,
                id="error_result",
            ),
            pytest.param(
                [
                    {"type": "text", "text": "First message"},
                    {"type": "text", "text": "Second message"},
                ],
                False,
                2,
                id="multiple_text_content",
            ),
        ],
    )
    def test_result_handling_helpers(
        self, result_content, expected_is_error, expected_text_count
    ):
        """Test CallToolResult helper methods."""
        from mcp.types import CallToolResult, TextContent

        client = MCPClient()
        # Create result
        content = [TextContent(**item) for item in result_content]
        result = CallToolResult(isError=expected_is_error, content=content)
        # Test error detection
        assert client.is_error_result(result) == expected_is_error
        # Test text extraction preserves order and count.
        text_contents = client.extract_text_content(result)
        assert len(text_contents) == expected_text_count
        for i, expected_text in enumerate(
            [item["text"] for item in result_content]
        ):
            assert text_contents[i] == expected_text
@pytest.mark.skipif(
    not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
class TestMCPClientConnectionManagement:
    """Test cases for MCPClient connection management functionality.

    Covers tool discovery, connecting to a single server (success and edge
    cases), and fan-out connection to all configured servers.
    """

    async def test_discover_tools_success(self):
        """Test successful tool discovery from an MCP server."""
        client = MCPClient()

        from mcp.types import ListToolsResult, Tool

        # Create mock connection with session
        mock_session = AsyncMock()
        connection = create_test_server_connection(session=mock_session)
        # Mock tools response
        mock_tools = [
            Tool(
                name="tool1",
                description="First tool",
                inputSchema={"type": "object"},
            ),
            Tool(
                name="tool2",
                description="Second tool",
                inputSchema={"type": "object"},
            ),
        ]
        mock_response = ListToolsResult(tools=mock_tools)
        mock_session.list_tools = AsyncMock(return_value=mock_response)
        # Test tool discovery
        await client._discover_tools(connection)
        # Verify tools were added under namespaced "mcp_<server>_<tool>" keys.
        assert len(connection.tools) == 2
        assert "mcp_test_server_tool1" in client.tool_registry
        assert "mcp_test_server_tool2" in client.tool_registry
        # Verify session was called
        mock_session.list_tools.assert_called_once()

    async def test_discover_tools_no_session(self):
        """Test tool discovery with no active session."""
        client = MCPClient()
        # Create connection without session
        connection = create_test_server_connection(session=None)
        # Test tool discovery (should handle gracefully)
        await client._discover_tools(connection)
        # Verify no tools were added
        assert len(connection.tools) == 0
        assert len(client.tool_registry) == 0

    @patch("mcp.ClientSession")
    @patch("mcp.client.stdio.stdio_client")
    async def test_connect_to_server_success(
        self,
        mock_stdio_client,
        mock_session_class,
        mock_stdio_setup,
        mock_session_setup,
    ):
        """Test successful server connection with complete flow."""
        # Setup stdio and session mocks using fixtures
        mock_read, mock_write, mock_stdio_context = mock_stdio_setup()
        mock_stdio_client.return_value = mock_stdio_context
        mock_session, mock_session_context = mock_session_setup()
        mock_session_class.return_value = mock_session_context
        # Mock AsyncExitStack
        with patch(
            "marimo._server.ai.mcp.StdioTransportConnector.connect"
        ) as mock_connector_connect:
            # Mock connector.connect to return the expected streams
            mock_connector_connect.return_value = (mock_read, mock_write)
            # Create client with test config
            config = MCPConfig(
                mcpServers={
                    "test_server": MCPServerStdioConfig(
                        command="python", args=["test.py"], env={}
                    )
                }
            )
            client = MCPClient()
            await client.configure(config)
            # Test connection
            result = await client.connect_to_server("test_server")
            assert result is True
            assert "test_server" in client.connections
            assert (
                client.connections["test_server"].status
                == MCPServerStatus.CONNECTED
            )

    @pytest.mark.parametrize(
        ("server_exists", "already_connected", "expected_result"),
        [
            pytest.param(False, False, False, id="server_not_found"),
            pytest.param(True, True, True, id="already_connected"),
        ],
    )
    async def test_connect_to_server_edge_cases(
        self, server_exists, already_connected, expected_result
    ):
        """Test server connection edge cases."""
        config = MCPConfig(mcpServers={})
        if server_exists:
            config["mcpServers"]["test_server"] = MCPServerStdioConfig(
                command="python", args=["test.py"]
            )
        client = MCPClient()
        await client.configure(config)
        if already_connected:
            # Setup existing connection
            server_def = MCPServerDefinitionFactory.from_config(
                "test_server", MCPServerStdioConfig(command="test", args=[])
            )
            connection = MCPServerConnection(definition=server_def)
            connection.status = MCPServerStatus.CONNECTED
            client.connections["test_server"] = connection
        result = await client.connect_to_server("test_server")
        assert result == expected_result

    @pytest.mark.xfail(reason="Flaky test")
    @patch("mcp.ClientSession")
    async def test_connect_to_all_servers_mixed_results(
        self, mock_session_class
    ):
        """Test connecting to multiple servers with mixed success/failure."""
        # Setup session mock for successful connections
        mock_session = AsyncMock()
        mock_session.initialize = AsyncMock()
        mock_session.list_tools = AsyncMock()
        mock_session.list_tools.return_value.tools = []
        mock_session_context = AsyncMock()
        mock_session_context.__aenter__ = AsyncMock(return_value=mock_session)
        mock_session_context.__aexit__ = AsyncMock(return_value=None)
        mock_session_class.return_value = mock_session_context
        with patch(
            "marimo._server.ai.mcp.StdioTransportConnector.connect"
        ) as mock_connector_connect:
            # Simulate success for server1, failure for server2
            mock_connector_connect.side_effect = [
                (AsyncMock(), AsyncMock()),  # server1 success
                Exception("Connection failed"),  # server2 failure
            ]
            config = MCPConfig(
                mcpServers={
                    "server1": MCPServerStdioConfig(
                        command="python", args=["test1.py"]
                    ),
                    "server2": MCPServerStdioConfig(
                        command="python", args=["test2.py"]
                    ),
                }
            )
            client = MCPClient()
            await client.configure(config)
            results = await client.connect_to_all_servers()
            # Verify mixed results
            assert len(results) == 2
            assert results["server1"] is True
            assert results["server2"] is False
@pytest.mark.skipif(
    not DependencyManager.mcp.has(), reason="MCP SDK not available"
)
class TestMCPClientDisconnectionManagement:
    """Test cases for MCPClient disconnection functionality.

    Disconnection is signal-based: disconnect_from_server sets the
    connection's disconnect_event and awaits the connection task; cleanup
    itself happens in the connection lifecycle's finally block.
    """

    async def test_disconnect_from_server_success(self):
        """Test successful disconnection from a connected server."""
        client = MCPClient()
        # Setup a connected server using existing patterns
        connection = create_test_server_connection(
            name="test_server",
            status=MCPServerStatus.CONNECTED,
            session=AsyncMock(),
        )
        # Create mock task that simulates running connection task
        mock_task = AsyncMock()
        mock_task.done.return_value = False  # Task is still running
        disconnect_event = asyncio.Event()
        connection.connection_task = mock_task
        connection.disconnect_event = disconnect_event
        client.connections["test_server"] = connection
        # Call actual disconnect method
        result = await client.disconnect_from_server("test_server")
        # Verify successful disconnection
        assert result is True
        assert disconnect_event.is_set()  # Event was signaled
        # Note: mock_task should be awaited since done() returns False

    async def test_disconnect_from_server_already_disconnected(self):
        """Test disconnection from server that's already disconnected."""
        client = MCPClient()
        # Call disconnect on non-existent server
        result = await client.disconnect_from_server("nonexistent_server")
        # Should return True (idempotent operation)
        assert result is True

    async def test_disconnect_from_server_with_exception(self):
        """Test disconnection failure handling (validates our new comment)."""
        client = MCPClient()
        # Setup connection with task that will raise exception when awaited
        connection = create_test_server_connection(
            name="test_server", status=MCPServerStatus.CONNECTED
        )
        # Create event to signal when task has started
        task_started = asyncio.Event()

        # Create a long-running task that will fail when awaited
        async def blocking_failing_task():
            task_started.set()  # Signal task has started
            await asyncio.sleep(0.1)  # Simulate work
            raise RuntimeError("Simulated disconnection failure")

        # Start the task
        failing_task = asyncio.create_task(blocking_failing_task())
        # Wait for task to actually start (deterministic)
        await asyncio.wait_for(task_started.wait(), timeout=1.0)
        connection.connection_task = failing_task
        connection.disconnect_event = asyncio.Event()
        client.connections["test_server"] = connection
        # Call disconnect - should handle exception gracefully
        result = await client.disconnect_from_server("test_server")
        # Should return False but not raise exception (non-blocking behavior)
        assert result is False

    async def test_disconnect_from_server_cleanup_verification(self):
        """Test that disconnection properly cleans up server state."""
        client = MCPClient()
        # Setup connected server with tools and monitoring
        connection = create_test_server_connection(
            name="test_server",
            status=MCPServerStatus.CONNECTED,
            session=AsyncMock(),
        )
        # Add tools to verify they get cleaned up
        mock_tools = [
            create_test_tool(name="tool1", server_name="test_server"),
            create_test_tool(name="tool2", server_name="test_server"),
        ]
        for i, tool in enumerate(mock_tools):
            if tool:
                namespaced_name = f"mcp_test_server_tool{i + 1}"
                client.tool_registry[namespaced_name] = tool
                connection.tools.append(tool)
        # Add health monitoring task
        health_task = AsyncMock()
        client.health_check_tasks["test_server"] = health_task
        # Setup connection task
        connection.connection_task = AsyncMock()
        connection.connection_task.done.return_value = True  # Already done
        connection.disconnect_event = asyncio.Event()
        client.connections["test_server"] = connection
        # Disconnect
        result = await client.disconnect_from_server("test_server")
        # Verify cleanup happens in _connection_lifecycle finally block
        assert result is True
        # Note: Tool cleanup happens in _connection_lifecycle finally block,
        # not directly in disconnect_from_server

    @pytest.mark.parametrize(
        "server_setups",
        [
            pytest.param(
                [
                    {"name": "server1", "should_succeed": True},
                    {"name": "server2", "should_succeed": True},
                ],
                id="all_succeed",
            ),
            pytest.param(
                [
                    {"name": "server1", "should_succeed": True},
                    {"name": "server2", "should_succeed": False},
                ],
                id="mixed_results",
            ),
            pytest.param(
                [
                    {"name": "server1", "should_succeed": False},
                    {"name": "server2", "should_succeed": False},
                ],
                id="all_fail",
            ),
        ],
    )
    async def test_disconnect_from_all_servers_scenarios(self, server_setups):
        """Test disconnect_from_all_servers with various success/failure combinations."""
        client = MCPClient()
        # Setup connections based on test parameters
        for setup in server_setups:
            connection = create_test_server_connection(
                name=setup["name"], status=MCPServerStatus.CONNECTED
            )
            # Setup task behavior based on should_succeed
            if setup["should_succeed"]:
                mock_task = AsyncMock()
                mock_task.done.return_value = False
            else:
                mock_task = AsyncMock()
                mock_task.done.return_value = False
                mock_task.side_effect = Exception("Simulated failure")
            connection.connection_task = mock_task
            connection.disconnect_event = asyncio.Event()
            client.connections[setup["name"]] = connection
        # Call actual disconnect_from_all_servers method
        await client.disconnect_from_all_servers()
        # Verify disconnect events were set (disconnect_from_all_servers doesn't return results)
        for setup in server_setups:
            connection = client.connections[setup["name"]]
            # Event should be set regardless of success/failure (signal was sent)
            assert connection.disconnect_event.is_set()

    async def test_disconnect_from_all_servers_with_health_monitoring(self):
        """Test that disconnect_from_all_servers cancels health monitoring first."""
        client = MCPClient()
        # Setup connections with health monitoring tasks
        server_names = ["server1", "server2"]
        for name in server_names:
            # Create connection
            connection = create_test_server_connection(
                name=name, status=MCPServerStatus.CONNECTED
            )
            connection.connection_task = AsyncMock()
            connection.connection_task.done.return_value = True
            connection.disconnect_event = asyncio.Event()
            client.connections[name] = connection
            # Create health monitoring task
            health_task = AsyncMock()
            health_task.cancel = AsyncMock()
            client.health_check_tasks[name] = health_task
        # Mock _cancel_health_monitoring to verify it's called
        with patch.object(
            client, "_cancel_health_monitoring", new_callable=AsyncMock
        ) as mock_cancel:
            await client.disconnect_from_all_servers()
            # Verify health monitoring was cancelled first
            mock_cancel.assert_called_once_with()

    async def test_disconnect_cross_task_scenario(self):
        """Test disconnection in cross-task scenarios (like server shutdown)."""
        client = MCPClient()
        # Setup connection that simulates cross-task issues
        connection = create_test_server_connection(
            name="test_server", status=MCPServerStatus.CONNECTED
        )
        # Create event to signal when task has started
        task_started = asyncio.Event()

        # Create a task that simulates cross-task lifecycle issues
        async def cross_task_error():
            task_started.set()  # Signal task has started
            await asyncio.sleep(0.1)  # Simulate work
            raise RuntimeError("Task was destroyed but it is pending!")

        # Start the task
        cross_task = asyncio.create_task(cross_task_error())
        # Wait for task to actually start (deterministic)
        await asyncio.wait_for(task_started.wait(), timeout=1.0)
        connection.connection_task = cross_task
        connection.disconnect_event = asyncio.Event()
        client.connections["test_server"] = connection
        # This should handle the cross-task error gracefully (non-blocking)
        result = await client.disconnect_from_server("test_server")
        # Should return False (failure) but not raise exception
        assert result is False
        # Event should still be signaled to attempt cleanup
        assert connection.disconnect_event.is_set()
class TestMCPClientHealthMonitoring:
    """Test cases for MCPClient health monitoring functionality.

    NOTE(review): unlike the sibling test classes, only the first method
    carries the MCP-SDK skipif marker — presumably the class-level marker
    was intended; confirm whether the other two tests need it too.
    """

    @pytest.mark.skipif(
        not DependencyManager.mcp.has(), reason="MCP SDK not available"
    )
    async def test_perform_health_check_success(self):
        """Test successful health check."""
        client = MCPClient()
        # Create connection with mock session
        server_def = MCPServerDefinitionFactory.from_config(
            "test", MCPServerStdioConfig(command="test", args=[])
        )
        connection = MCPServerConnection(definition=server_def)
        connection.session = AsyncMock()
        connection.session.send_ping = AsyncMock()
        client.connections["test"] = connection
        result = await client._perform_health_check("test")
        assert result is True
        connection.session.send_ping.assert_called_once()
        # Note: last_health_check is updated by the caller (_monitor_server_health), not _perform_health_check
        assert connection.last_health_check == 0  # Should remain unchanged

    @pytest.mark.parametrize(
        ("session_setup", "ping_behavior", "expected_result"),
        [
            pytest.param(
                None,  # No session
                None,
                False,
                id="no_session",
            ),
            pytest.param(
                AsyncMock(),  # Valid session
                Exception("Ping failed"),  # Exception during ping
                False,
                id="ping_exception",
            ),
        ],
    )
    async def test_perform_health_check_failure_cases(
        self, session_setup, ping_behavior, expected_result
    ):
        """Test health check failure scenarios."""
        client = MCPClient()
        # Create connection
        server_def = MCPServerDefinitionFactory.from_config(
            "test", MCPServerStdioConfig(command="test", args=[])
        )
        connection = MCPServerConnection(definition=server_def)
        connection.session = session_setup
        if session_setup and ping_behavior:
            connection.session.send_ping = AsyncMock(side_effect=ping_behavior)
        client.connections["test"] = connection
        result = await client._perform_health_check("test")
        assert result == expected_result
        # Note: _perform_health_check doesn't update connection status directly
        # Status updates happen in the calling code (_monitor_server_health)

    async def test_perform_health_check_timeout(self):
        """Test health check timeout handling."""
        client = MCPClient()
        client.health_check_timeout = 0.1  # Very short timeout
        # Create connection with session that hangs
        server_def = MCPServerDefinitionFactory.from_config(
            "test", MCPServerStdioConfig(command="test", args=[])
        )
        connection = MCPServerConnection(definition=server_def)
        connection.session = AsyncMock()

        # Create a coroutine that sleeps longer than timeout
        async def slow_ping():
            await asyncio.sleep(1)

        connection.session.send_ping = AsyncMock(side_effect=slow_ping)
        client.connections["test"] = connection
        result = await client._perform_health_check("test")
        assert result is False
        # Note: _perform_health_check doesn't update connection status directly
        # Status updates happen in the calling code (_monitor_server_health)
class TestMCPServerConnection:
    """Test cases for MCPServerConnection class."""

    def test_server_connection_creation(self):
        """Test creating a server connection with proper defaults.

        A fresh connection starts DISCONNECTED with no session, no tools,
        a zero health-check timestamp, and no error message.
        """
        server_def = MCPServerDefinitionFactory.from_config(
            "test_server",
            MCPServerStdioConfig(
                command="python", args=["test.py"], env={"TEST": "value"}
            ),
        )
        connection = MCPServerConnection(definition=server_def)
        # The definition is carried through unchanged.
        assert connection.definition.name == "test_server"
        assert connection.definition.config["command"] == "python"
        assert connection.definition.config.get("args") == ["test.py"]
        assert connection.definition.config.get("env") == {"TEST": "value"}
        # Defaults for a never-connected server.
        assert connection.status == MCPServerStatus.DISCONNECTED
        assert connection.session is None
        assert len(connection.tools) == 0
        assert connection.last_health_check == 0
        assert connection.error_message is None
class TestMCPUtilities:
    """Test utility functions and configuration."""

    @pytest.mark.skipif(
        not DependencyManager.mcp.has(), reason="MCP SDK not available"
    )
    def test_get_mcp_client_singleton(self):
        """Test that get_mcp_client returns singleton instance."""
        client1 = get_mcp_client()
        client2 = get_mcp_client()
        assert client1 is client2

    @pytest.mark.skipif(
        not DependencyManager.mcp.has(), reason="MCP SDK not available"
    )
    async def test_get_mcp_client_with_custom_config(self):
        """Test get_mcp_client with custom configuration."""
        # Reset global client for this test so the singleton is rebuilt.
        import marimo._server.ai.mcp.client as client_module

        client_module._MCP_CLIENT = None
        custom_config = MCPConfig(
            mcpServers={
                "custom_server": MCPServerStdioConfig(
                    command="custom", args=["--test"], env={}
                )
            }
        )
        client = get_mcp_client()
        await client.configure(custom_config)
        assert "custom_server" in client.servers
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/ai/test_mcp.py",
"license": "Apache License 2.0",
"lines": 1580,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/third_party/pandas/transform_edgecases.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
#     "geodatasets==2024.8.0",
#     "geopandas==1.1.1",
#     "mapclassify==2.10.0",
#     "marimo",
#     "matplotlib==3.10.3",
#     "pandas==2.3.1",
#     "polars==1.31.0",
# ]
# ///

# Smoke test exercising marimo's dataframe UI against pandas extension
# dtypes (Float64/string) and GeoPandas GeoDataFrames — edge cases from
# the linked GitHub issues.

import marimo

__generated_with = "0.15.5"
app = marimo.App(width="medium")


@app.cell
def _():
    import marimo as mo
    import pandas as pd
    return mo, pd


@app.cell
def _(mo, pd):
    # https://github.com/marimo-team/marimo/issues/5445
    # Convert float64 -> nullable Float64 and object -> string extension
    # dtypes, then render through mo.ui.dataframe.
    df = pd.read_csv(
        "https://raw.githubusercontent.com/mwaskom/seaborn-data/master/iris.csv"
    )
    float64_cols = df.select_dtypes(include="float64").columns
    df[float64_cols] = df[float64_cols].astype("Float64")

    object_cols = df.select_dtypes(include=["object"]).columns
    df[object_cols] = df[object_cols].astype("string")
    df
    mo.ui.dataframe(df)
    return (df,)


@app.cell
def _(df):
    df.dtypes
    return


@app.cell
def _():
    import geopandas as gpd
    from geodatasets import get_path
    import polars as pl

    path_to_data = get_path("nybb")
    gdf = gpd.read_file(path_to_data)
    return gdf, pl


@app.cell
def _(gdf):
    # This is ugly
    gdf
    return


@app.cell
def _(gdf):
    # This interactive leaflet map does work like a charm
    gdf.explore()
    return


@app.cell
def _(gdf, mo):
    # https://github.com/marimo-team/marimo/issues/5447
    mo.ui.table(gdf)
    return


@app.cell
def _(gdf):
    type(gdf)
    return


@app.cell
def _(gdf, mo):
    # This should not fail
    # https://github.com/marimo-team/marimo/issues/5447
    mo.ui.dataframe(gdf)
    return


@app.cell
def _(gdf, mo, pl):
    # Stringify the geometry column so the frame round-trips into polars.
    pl_df = pl.DataFrame(gdf.assign(geometry=gdf.geometry.astype(str)))
    mo.ui.dataframe(pl_df)
    return


if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/third_party/pandas/transform_edgecases.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/requests.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Optional, Union
from marimo._version import __version__
# Utility functions for making HTTP requests,
# without using the requests library or any other external dependencies.
MARIMO_USER_AGENT = f"marimo/{__version__}"
class RequestError(Exception):
    """Raised when an HTTP request cannot be completed."""

    def __init__(self, message: str):
        # Keep the message on the instance for programmatic access;
        # Exception.__init__ makes it the str() of the error as well.
        self.message = message
        super().__init__(message)
class Response:
    """Minimal stand-in for requests.Response.

    Carries the status code, raw body bytes, and headers of a completed
    HTTP exchange, plus the original exception for HTTP-error responses.
    """

    def __init__(
        self,
        status_code: int,
        content: bytes,
        headers: dict[str, str],
        original_error: Optional[Exception] = None,
    ):
        self.status_code = status_code
        self.content = content
        self.headers = headers
        self.original_error = original_error

    def text(self) -> str:
        """Decode the body as UTF-8 text.

        CRLF and lone CR line endings are normalized to LF, matching
        Python's universal-newline behavior for text-mode file reads.
        In future, the encoding could be inferred from the headers.
        """
        decoded = self.content.decode("utf-8").replace("\r\n", "\n")
        return decoded.replace("\r", "\n")

    def json(self) -> Any:
        """Decode the body as JSON (assumes UTF-8 text)."""
        return json.loads(self.text())

    def raise_for_status(self) -> "Response":
        """Raise for any non-success (>= 300) status.

        Re-raises the captured original error when one exists, otherwise a
        RequestError. Returns self on success so calls can be chained.
        """
        if self.status_code < 300:
            return self
        if self.original_error is not None:
            raise self.original_error
        raise RequestError(
            f"Request failed: {self.status_code}. {self.text()}"
        )
def _make_request(
    method: str,
    url: str,
    *,
    params: Optional[dict[str, str]] = None,
    headers: Optional[dict[str, str]] = None,
    data: Optional[Union[dict[str, Any], str]] = None,
    json_data: Optional[dict[str, Any]] = None,
    timeout: Optional[float] = None,
) -> Response:
    """Make an HTTP request and return a Response object.

    If the URL already contains query parameters and new params are provided,
    they will be merged with new params taking precedence over existing ones.

    Args:
        method: HTTP verb, e.g. "GET" or "POST".
        url: Absolute URL to request.
        params: Query parameters to merge into the URL.
        headers: Request headers. The caller's dict is never mutated.
        data: Form dict (urlencoded) or raw string body; mutually exclusive
            with json_data.
        json_data: Dict serialized as a JSON body; mutually exclusive with
            data.
        timeout: Socket timeout in seconds.

    Returns:
        A Response. HTTP error statuses (4xx/5xx) are returned as Response
        objects (with original_error set), not raised.

    Raises:
        RequestError: For non-HTTP failures (DNS, connection refused, ...).
    """
    assert isinstance(url, str), "url must be a string"
    has_data = data is not None
    has_json_data = json_data is not None
    assert not has_data or not has_json_data, (
        "cannot pass both data and json_data"
    )

    # Handle URL parameters
    if params:
        parsed = urllib.parse.urlparse(url)
        # Parse existing query parameters
        existing_params = urllib.parse.parse_qs(parsed.query)
        # Flatten existing params (parse_qs returns lists)
        flattened_existing = {k: v[0] for k, v in existing_params.items()}
        # Merge with new params (new params take precedence)
        merged_params = {**flattened_existing, **params}
        query = urllib.parse.urlencode(merged_params)
        url = urllib.parse.urlunparse(
            (
                parsed.scheme,
                parsed.netloc,
                parsed.path,
                parsed.params,
                query,
                parsed.fragment,
            )
        )

    # Prepare headers. Copy the caller's dict: User-Agent and Content-Type
    # are injected below, and those additions must not leak back into the
    # dict the caller passed in.
    request_headers = dict(headers) if headers else {}
    if "User-Agent" not in request_headers:
        request_headers["User-Agent"] = MARIMO_USER_AGENT

    # Prepare body
    body = None
    if json_data is not None:
        request_headers["Content-Type"] = "application/json"
        body = json.dumps(json_data).encode("utf-8")
    elif data is not None:
        if isinstance(data, dict):
            body = urllib.parse.urlencode(data).encode("utf-8")
            request_headers["Content-Type"] = (
                "application/x-www-form-urlencoded"
            )
        else:
            body = str(data).encode("utf-8")

    # Create request
    req = urllib.request.Request(
        url, data=body, headers=request_headers, method=method
    )

    try:
        with urllib.request.urlopen(req, timeout=timeout) as response:
            return Response(
                status_code=response.getcode(),
                content=response.read(),
                headers=dict(response.headers),
            )
    except urllib.error.HTTPError as e:
        # For HTTP errors, we still want to return a Response object
        return Response(
            status_code=e.code,
            content=e.read(),
            headers=dict(e.headers),
            original_error=e,
        )
    except Exception as e:
        raise RequestError(f"Request failed: {str(e)}") from e
def get(
    url: str,
    *,
    params: Optional[dict[str, str]] = None,
    headers: Optional[dict[str, str]] = None,
    timeout: Optional[float] = None,
) -> Response:
    """Send an HTTP GET request and return the Response."""
    return _make_request(
        "GET",
        url,
        params=params,
        headers=headers,
        timeout=timeout,
    )
def post(
    url: str,
    *,
    data: Optional[Union[dict[str, Any], str]] = None,
    json_data: Optional[dict[str, Any]] = None,
    headers: Optional[dict[str, str]] = None,
    timeout: Optional[float] = None,
) -> Response:
    """Send an HTTP POST request (form, JSON, or raw body) and return the
    Response."""
    return _make_request(
        "POST", url, data=data, json_data=json_data, headers=headers, timeout=timeout
    )
def put(
    url: str,
    *,
    data: Optional[Union[dict[str, Any], str]] = None,
    json_data: Optional[dict[str, Any]] = None,
    headers: Optional[dict[str, str]] = None,
    timeout: Optional[float] = None,
) -> Response:
    """Send an HTTP PUT request (form, JSON, or raw body) and return the
    Response."""
    return _make_request(
        "PUT", url, data=data, json_data=json_data, headers=headers, timeout=timeout
    )
def delete(
    url: str,
    *,
    headers: Optional[dict[str, str]] = None,
    timeout: Optional[float] = None,
) -> Response:
    """Send an HTTP DELETE request and return the Response."""
    return _make_request(
        "DELETE",
        url,
        headers=headers,
        timeout=timeout,
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/requests.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_utils/test_utils_request.py | from typing import Any, Optional, Union
from unittest.mock import MagicMock, patch
import pytest
from marimo._utils.requests import (
RequestError,
Response,
_make_request,
delete,
get,
post,
put,
)
def test_response_object():
    """Response exposes status, raw content, headers, text and JSON."""
    body = b'{"key": "value"}'
    hdrs = {"Content-Type": "application/json"}
    resp = Response(200, body, hdrs)

    assert resp.status_code == 200
    assert resp.content == body
    assert resp.headers == hdrs
    assert resp.text() == '{"key": "value"}'
    assert resp.json() == {"key": "value"}
def test_response_raise_for_status():
    """raise_for_status is a no-op for 2xx and raises RequestError otherwise."""
    ok = Response(200, b"OK", {})
    ok.raise_for_status()  # must not raise
    # The method returns self so calls can be chained.
    assert ok.raise_for_status() is ok

    not_found = Response(404, b"Not Found", {})
    with pytest.raises(RequestError, match="Request failed: 404"):
        not_found.raise_for_status()

    # Every 3xx/4xx/5xx status raises with the code in the message.
    for code in (300, 400, 401, 403, 404, 500, 502, 503):
        failing = Response(code, b"Error", {})
        with pytest.raises(RequestError, match=f"Request failed: {code}"):
            failing.raise_for_status()

    # All success (2xx) codes pass silently.
    for code in (200, 201, 202, 204):
        Response(code, b"Success", {}).raise_for_status()
@pytest.mark.parametrize(
    (
        "method",
        "url",
        "params",
        "headers",
        "data",
        "json_data",
        "expected_body",
        "expected_headers",
    ),
    [
        # GET request
        (
            "GET",
            "https://api.example.com",
            {"param": "value"},
            {"Authorization": "Bearer token"},
            None,
            None,
            None,
            {"Authorization": "Bearer token"},
        ),
        # POST with form data
        (
            "POST",
            "https://api.example.com",
            None,
            None,
            {"key": "value"},
            None,
            b"key=value",
            {"Content-Type": "application/x-www-form-urlencoded"},
        ),
        # POST with JSON data
        (
            "POST",
            "https://api.example.com",
            None,
            None,
            None,
            {"key": "value"},
            b'{"key": "value"}',
            {"Content-Type": "application/json"},
        ),
        # POST with string data
        (
            "POST",
            "https://api.example.com",
            None,
            None,
            "raw data",
            None,
            b"raw data",
            {},
        ),
    ],
)
def test_make_request(
    method: str,
    url: str,
    params: Optional[dict[str, str]],
    headers: Optional[dict[str, str]],
    data: Optional[Union[dict[str, Any], str]],
    json_data: Optional[dict[str, Any]],
    expected_body: bytes,
    expected_headers: dict[str, str],
):
    """Verify _make_request encodes the body and headers per payload kind.

    Each case checks that the urllib.request.Request built internally
    carries the expected method, URL query, serialized body, and headers.
    """
    # Context-manager mock standing in for urllib's response object.
    mock_response = MagicMock()
    mock_response.getcode.return_value = 200
    mock_response.read.return_value = b'{"key": "value"}'
    mock_response.headers = {"Content-Type": "application/json"}
    mock_response.__enter__.return_value = mock_response
    mock_response.__exit__.return_value = None

    with patch(
        "urllib.request.urlopen", return_value=mock_response
    ) as mock_urlopen:
        response = _make_request(
            method,
            url,
            params=params,
            headers=headers,
            data=data,
            json_data=json_data,
        )

        # Verify request was made
        mock_urlopen.assert_called_once()
        request_arg = mock_urlopen.call_args[0][0]

        # Check method and URL
        assert request_arg.get_method() == method
        if params:
            assert "param=value" in request_arg.full_url

        # Check body
        assert request_arg.data == expected_body

        # Check headers
        for key, value in expected_headers.items():
            # urllib.request.Request normalizes header names to Title-case
            # We need to check the actual header values as they're stored
            found_header = False
            for header_name, header_value in request_arg.headers.items():
                if header_name.lower() == key.lower():
                    assert header_value == value
                    found_header = True
                    break
            assert found_header, f"Header {key} not found in request headers"

        # Verify response
        assert response.status_code == 200
        assert response.content == b'{"key": "value"}'
        assert response.headers == {"Content-Type": "application/json"}
def test_invalid_url():
    """A non-string URL is rejected up front with an assertion error."""
    with pytest.raises(AssertionError, match="url must be a string"):
        _make_request("GET", 123)  # type: ignore
def test_conflicting_data_and_json():
    """Supplying both body styles simultaneously is a programming error."""
    payload = {"key": "value"}
    with pytest.raises(
        AssertionError, match="cannot pass both data and json_data"
    ):
        _make_request(
            "POST",
            "https://api.example.com",
            data=payload,
            json_data=payload,
        )
def test_http_methods():
    """Each public verb helper forwards its arguments to _make_request.

    Asserts the exact keyword set each wrapper passes through, so a
    signature drift in the helpers is caught here.
    """
    with patch("marimo._utils.requests._make_request") as mock_make_request:
        # Test GET
        get("https://api.example.com", params={"key": "value"})
        mock_make_request.assert_called_with(
            "GET",
            "https://api.example.com",
            params={"key": "value"},
            headers=None,
            timeout=None,
        )

        # Test POST
        post("https://api.example.com", json_data={"key": "value"})
        mock_make_request.assert_called_with(
            "POST",
            "https://api.example.com",
            data=None,
            json_data={"key": "value"},
            headers=None,
            timeout=None,
        )

        # Test PUT
        put("https://api.example.com", data={"key": "value"})
        mock_make_request.assert_called_with(
            "PUT",
            "https://api.example.com",
            data={"key": "value"},
            json_data=None,
            headers=None,
            timeout=None,
        )

        # Test DELETE
        delete("https://api.example.com")
        mock_make_request.assert_called_with(
            "DELETE", "https://api.example.com", headers=None, timeout=None
        )
def test_http_error_handling():
    """HTTP errors are surfaced as Response objects, not raised."""
    import urllib.error

    body = MagicMock()
    body.read.return_value = b'{"error": "not found"}'
    http_error = urllib.error.HTTPError(
        url="https://api.example.com",
        code=404,
        msg="Not Found",
        hdrs={"Content-Type": "application/json"},
        fp=body,
    )

    with patch("urllib.request.urlopen", side_effect=http_error):
        result = _make_request("GET", "https://api.example.com")

    assert result.status_code == 404
    assert result.content == b'{"error": "not found"}'
    assert result.headers == {"Content-Type": "application/json"}
def test_request_error_handling():
    """Non-HTTP failures (e.g. network errors) are wrapped in RequestError."""
    with patch(
        "urllib.request.urlopen", side_effect=Exception("Network error")
    ), pytest.raises(RequestError, match="Request failed: Network error"):
        _make_request("GET", "https://api.example.com")
@pytest.mark.parametrize(
    ("url", "params", "expected_url"),
    [
        # URL with no existing params, add new params
        (
            "https://api.example.com/path",
            {"new": "value"},
            "https://api.example.com/path?new=value",
        ),
        # URL with existing params, add new params (merge)
        (
            "https://api.example.com/path?existing=param",
            {"new": "value"},
            "https://api.example.com/path?existing=param&new=value",
        ),
        # URL with existing params, no new params (preserve existing)
        (
            "https://api.example.com/path?existing=param",
            None,
            "https://api.example.com/path?existing=param",
        ),
        # URL with no params at all
        ("https://api.example.com/path", None, "https://api.example.com/path"),
        # URL with existing params, new params override existing
        (
            "https://api.example.com/path?key=old",
            {"key": "new"},
            "https://api.example.com/path?key=new",
        ),
        # URL with multiple existing params, add new params
        (
            "https://api.example.com/path?a=1&b=2",
            {"c": "3"},
            "https://api.example.com/path?a=1&b=2&c=3",
        ),
        # URL with multiple existing params, override some
        (
            "https://api.example.com/path?a=1&b=2",
            {"b": "new", "c": "3"},
            "https://api.example.com/path?a=1&b=new&c=3",
        ),
    ],
)
def test_url_parameter_handling(
    url: str, params: Optional[dict[str, str]], expected_url: str
):
    """Verify query-string merging: new params are appended and override
    existing keys, while pre-existing params are otherwise preserved."""
    # Context-manager mock standing in for urllib's response object.
    mock_response = MagicMock()
    mock_response.getcode.return_value = 200
    mock_response.read.return_value = b'{"key": "value"}'
    mock_response.headers = {"Content-Type": "application/json"}
    mock_response.__enter__.return_value = mock_response
    mock_response.__exit__.return_value = None

    with patch(
        "urllib.request.urlopen", return_value=mock_response
    ) as mock_urlopen:
        _make_request("GET", url, params=params)

        # Verify the URL was constructed correctly
        request_arg = mock_urlopen.call_args[0][0]
        assert request_arg.full_url == expected_url
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_utils_request.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:scripts/generate_bash_focus.py | # /// script
# requires-python = ">=3.13,<3.14"
# dependencies = [
# "msgspec",
# ]
#
# [tool.uv]
# exclude-newer = "2025-06-27T12:38:25.742953-04:00"
# ///
"""Get GitHub PRs labeled with 'bash-focus' since the last release."""
from __future__ import annotations
import re
import subprocess
import sys
import msgspec
class Author(msgspec.Struct):
    """GitHub author/user information."""

    # GitHub username (the ``login`` field of the API's user object).
    login: str
class Label(msgspec.Struct):
    """GitHub label information."""

    name: str
    # Color and description are optional in the API payload.
    color: str | None = None
    description: str | None = None
class PullRequest(msgspec.Struct):
    """GitHub Pull Request information (fields requested via ``gh --json``)."""

    number: int
    title: str
    author: Author
    labels: list[Label]
    url: str
    # Merge timestamp as returned by the GitHub API; None if absent.
    mergedAt: str | None = None
def get_latest_tag() -> str:
    """Return the most recent git tag reachable from HEAD."""
    completed = subprocess.run(
        ["git", "describe", "--tags", "--abbrev=0"],
        capture_output=True,
        text=True,
        check=True,
    )
    tag = completed.stdout.strip()
    return tag
def get_commits_since_tag(since_tag: str) -> set[int]:
    """Return PR numbers referenced by first-parent commits after *since_tag*.

    Exits the process with an error listing recent tags when the range
    cannot be resolved (e.g. the tag does not exist).
    """
    git_log = [
        "git",
        "log",
        f"{since_tag}..HEAD",
        "--format=%s",
        "--first-parent",
        "main",
    ]
    try:
        completed = subprocess.run(
            git_log,
            capture_output=True,
            text=True,
            check=True,
        )
    except subprocess.CalledProcessError:
        print(f"Error: Tag '{since_tag}' not found. Available tags:")
        tags = subprocess.run(
            ["git", "tag", "--list", "--sort=-version:refname"],
            capture_output=True,
            text=True,
        )
        for tag in tags.stdout.strip().split('\n')[:10]:
            print(f" {tag}")
        sys.exit(1)

    numbers: set[int] = set()
    for subject in completed.stdout.strip().split('\n'):
        if not subject:
            continue
        # Squash-merge subjects embed the PR number, e.g. "feat: x (#1234)".
        match = re.search(r'#(\d+)', subject)
        if match is not None:
            numbers.add(int(match.group(1)))
    return numbers
def get_prs_with_label(label: str, pr_numbers: set[int]) -> list[PullRequest]:
    """Return merged PRs carrying *label* whose numbers appear in *pr_numbers*."""
    gh_cmd = [
        "gh",
        "pr",
        "list",
        "--base",
        "main",
        "--state",
        "merged",
        "--label",
        label,
        "--limit",
        "100",
        "--json",
        "number,title,author,labels,url,mergedAt",
    ]
    completed = subprocess.run(
        gh_cmd,
        check=True,
        capture_output=True,
        text=True,
    )
    candidates = msgspec.json.decode(completed.stdout, type=list[PullRequest])
    # Keep only PRs that actually landed in the commit range we care about.
    return [pr for pr in candidates if pr.number in pr_numbers]
def main() -> None:
    """Entry point: list merged 'bash-focus' PRs since a tag."""
    label = "bash-focus"

    # A tag may be given on the command line; default to the newest tag.
    if len(sys.argv) >= 2:
        since_tag = sys.argv[1]
    else:
        since_tag = get_latest_tag()
        print(f"Using latest tag: {since_tag}")

    print(f"Fetching PRs with label '{label}' since {since_tag}...\n")
    prs = get_prs_with_label(label, get_commits_since_tag(since_tag))

    if not prs:
        print(f"No PRs found with label '{label}' since {since_tag}")
        return

    print(f"Found {len(prs)} PR(s) with label '{label}':\n")
    for pr in prs:
        print(f"#{pr.number}: {pr.title}")
        print(f" Author: @{pr.author.login}")
        print(f" Link: {pr.url}")
        print(f" Merged: {pr.mergedAt}")
        print()
# Script entry point: `python generate_bash_focus.py [since-tag]`.
if __name__ == "__main__":
    main()
| {
"repo_id": "marimo-team/marimo",
"file_path": "scripts/generate_bash_focus.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:scripts/generate_release_notes.py | # /// script
# requires-python = ">=3.13,<3.14"
# dependencies = [
# "msgspec",
# ]
#
# [tool.uv]
# exclude-newer = "2025-06-27T12:38:25.742953-04:00"
# ///
"""Generate release notes from commits on main branch."""
from __future__ import annotations
import re
import subprocess
import sys
import msgspec
class Author(msgspec.Struct):
    """GitHub author/user information."""

    # GitHub username (the ``login`` field of the API's user object).
    login: str
class Label(msgspec.Struct):
    """GitHub label information."""

    name: str
    # Color and description are optional in the API payload.
    color: str | None = None
    description: str | None = None
class PullRequest(msgspec.Struct):
    """GitHub Pull Request information (fields requested via ``gh --json``)."""

    number: int
    title: str
    author: Author
    labels: list[Label]
    # Full PR description; scanned for embedded media.
    body: str | None
    mergedAt: str | None = None
class Commit(msgspec.Struct):
    """Git commit information."""

    sha: str
    # Commit subject line (first line of the message).
    message: str
    # PR number extracted from a squash-merge subject, if present.
    pr_number: int | None = None
class CategorizedEntry(msgspec.Struct):
    """A release note entry with its PR information."""

    commit: Commit
    # Matched PR, when the commit references one we could look up.
    pr: PullRequest | None = None
def get_commits_since_tag(since_tag: str) -> list[Commit]:
    """Return first-parent commits on main made after *since_tag*."""
    completed = subprocess.run(
        [
            "git",
            "log",
            f"{since_tag}..HEAD",
            "--format=%H %s",
            "--first-parent", # Only follow the first parent (main branch)
            "main",
        ],
        capture_output=True,
        text=True,
        check=True,
    )

    commits: list[Commit] = []
    for raw in completed.stdout.strip().split("\n"):
        if not raw:
            continue
        pieces = raw.split(" ", 1)
        if len(pieces) != 2:
            continue
        sha, subject = pieces
        # Squash merges embed the PR number in the subject, e.g. "(#1234)".
        found = re.search(r"#(\d+)", subject)
        commits.append(
            Commit(
                sha=sha,
                message=subject,
                pr_number=int(found.group(1)) if found else None,
            )
        )
    return commits
def get_merged_prs(limit: int = 100) -> dict[int, PullRequest]:
    """Fetch up to *limit* recently merged PRs, keyed by PR number."""
    completed = subprocess.run(
        [
            "gh",
            "pr",
            "list",
            "--base",
            "main",
            "--state",
            "merged",
            "--limit",
            str(limit),
            "--json",
            "number,title,author,labels,body,mergedAt",
        ],
        check=True,
        capture_output=True,
        text=True,
    )
    decoded = msgspec.json.decode(completed.stdout, type=list[PullRequest])
    by_number: dict[int, PullRequest] = {}
    for pr in decoded:
        by_number[pr.number] = pr
    return by_number
def extract_media_from_body(body: str | None) -> list[str]:
"""Extract media (images/links) from PR body."""
if not body:
return []
media = []
# Find markdown images: 
img_pattern = r"!\[.*?\]\((.*?)\)"
for match in re.finditer(img_pattern, body):
media.append(f'<img src="{match.group(1)}" alt="PR media">')
# Find HTML img tags
html_img_pattern = r'<img[^>]+src=["\']([^"\']+)["\'][^>]*>'
for match in re.finditer(html_img_pattern, body):
media.append(match.group(0))
# Find video links (common patterns)
video_patterns = [
r"https?://[^\s]+\.(?:mp4|webm|mov|gif)",
r"https?://(?:www\.)?(?:youtube\.com/watch\?v=|youtu\.be/)[^\s]+",
r"https?://(?:www\.)?vimeo\.com/[^\s]+",
]
for pattern in video_patterns:
for match in re.finditer(pattern, body):
media.append(f'<a href="{match.group(0)}">{match.group(0)}</a>')
return media
def categorize_entries(
    entries: list[CategorizedEntry],
) -> dict[str, list[CategorizedEntry]]:
    """Bucket entries by PR label.

    An entry may land in "highlights" *in addition to* exactly one of the
    other buckets; entries labeled "internal" are dropped entirely.
    """
    # TODO: Could add more or be more granular
    categories: dict[str, list[CategorizedEntry]] = {
        key: []
        for key in (
            "breaking",
            "bug",
            "enhancement",
            "documentation",
            "preview",
            "other",
            "highlights",
        )
    }

    for entry in entries:
        if entry.pr is None:
            categories["other"].append(entry)
            continue

        labels = {lbl.name for lbl in entry.pr.labels}

        # Skip entries labeled as "internal"
        if "internal" in labels:
            continue

        if "release-highlight" in labels:
            categories["highlights"].append(entry)

        # First matching label wins; order encodes priority.
        if "breaking" in labels:
            bucket = "breaking"
        elif "preview" in labels:
            bucket = "preview"
        elif "bug" in labels:
            bucket = "bug"
        elif "enhancement" in labels:
            bucket = "enhancement"
        elif "documentation" in labels:
            bucket = "documentation"
        else:
            bucket = "other"
        categories[bucket].append(entry)

    return categories
def strip_conventional_prefix(title: str) -> str:
    """Drop a leading conventional-commit prefix and capitalize the rest.

    ``"feat(ui): add button"`` becomes ``"Add button"``; titles without a
    prefix are returned unchanged.
    """
    m = re.match(r"^(\w+)(?:\([^)]+\))?:\s*(.+)", title)
    if m is None:
        return title
    rest = m.group(2)
    # Capitalize only the first character; leave the remainder untouched.
    return (rest[0].upper() + rest[1:]) if rest else rest
def format_entry(entry: CategorizedEntry) -> str:
    """Render one changelog bullet, linking the PR when one is known.

    Falls back to the prefix-stripped commit subject plus the short SHA
    when no PR is associated with the commit.
    """
    if entry.pr:
        title = strip_conventional_prefix(entry.pr.title)
        return f"* {title} ([#{entry.pr.number}](https://github.com/marimo-team/marimo/pull/{entry.pr.number}))"
    # Bug fix: the original assigned the raw message to `title` and then
    # immediately overwrote it (dead store); keep only the stripped form.
    title = strip_conventional_prefix(entry.commit.message)
    return f"* {title} ({entry.commit.sha[:7]})"
def get_contributors(entries: list[CategorizedEntry]) -> list[str]:
    """Return the case-insensitively sorted, de-duplicated PR author logins."""
    logins = {
        entry.pr.author.login
        for entry in entries
        if entry.pr and entry.pr.author
    }
    return sorted(logins, key=str.lower)
def generate_release_notes(since_tag: str) -> str:
    """Generate release notes since a specific tag.

    Builds a markdown document with per-category sections, a contributors
    list, and TODO placeholders a human fills in before publishing.
    """
    commits = get_commits_since_tag(since_tag)
    pr_map = get_merged_prs(limit=100)

    # Match commits with PRs
    entries = []
    for commit in commits:
        pr = pr_map.get(commit.pr_number) if commit.pr_number else None
        entries.append(CategorizedEntry(commit=commit, pr=pr))

    categories = categorize_entries(entries)

    notes = ["## What's Changed\n"]

    # Highlights come first and are emitted as TODO stubs for manual editing.
    if categories["highlights"]:
        for i, entry in enumerate(categories["highlights"]):
            if i > 0:
                notes.append("")
            if entry.pr:
                notes.append(f"**TODO: {entry.pr.title} #{entry.pr.number}**")
                notes.append("")
                notes.append("TODO: Description of the feature")
                # Check for media
                label_names = {label.name for label in entry.pr.labels}
                if "includes-media" in label_names:
                    media = extract_media_from_body(entry.pr.body)
                    if media:
                        notes.append("")
                        for item in media:
                            notes.append(item)
        notes.append("")

    # Fixed section order: breaking, enhancements, bugs, docs, preview, other.
    if categories["breaking"]:
        notes.append("## 🚨 Breaking changes")
        for entry in categories["breaking"]:
            notes.append(format_entry(entry))
        notes.append("")

    if categories["enhancement"]:
        notes.append("## ✨ Enhancements")
        for entry in categories["enhancement"]:
            notes.append(format_entry(entry))
        notes.append("")

    if categories["bug"]:
        notes.append("## 🐛 Bug fixes")
        for entry in categories["bug"]:
            notes.append(format_entry(entry))
        notes.append("")

    if categories["documentation"]:
        notes.append("## 📚 Documentation")
        for entry in categories["documentation"]:
            notes.append(format_entry(entry))
        notes.append("")

    if categories["preview"]:
        notes.append("## 🔬 Preview features")
        for entry in categories["preview"]:
            notes.append(format_entry(entry))
        notes.append("")

    if categories["other"]:
        notes.append("## 📝 Other changes")
        for entry in categories["other"]:
            notes.append(format_entry(entry))
        notes.append("")

    contributors = get_contributors(entries)
    notes.append("## Contributors")
    notes.append(
        f"Thanks to all our community and contributors who made this release possible: @{', @'.join(contributors)}"
    )
    notes.append("")
    notes.append("And especially to our new contributors:")
    notes.append("* TODO: Check for new contributors")
    notes.append("")

    # The new tag is not known at generation time; left for manual fill-in.
    current_tag = "TODO_CURRENT_VERSION"
    notes.append(
        f"\n**Full Changelog**: https://github.com/marimo-team/marimo/compare/{since_tag}...{current_tag}"
    )

    return "\n".join(notes)
def main() -> None:
    """CLI entry point: print release notes since the tag in argv[1]."""
    if len(sys.argv) < 2:
        print("Usage: generate_release_notes.py <since-tag>")
        print("Example: generate_release_notes.py 0.14.7")
        sys.exit(1)
    notes = generate_release_notes(sys.argv[1])
    print(notes)
# Script entry point: `python generate_release_notes.py <since-tag>`.
if __name__ == "__main__":
    main()
| {
"repo_id": "marimo-team/marimo",
"file_path": "scripts/generate_release_notes.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_server/api/dependency_tree.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._utils.uv_tree import DependencyTag, DependencyTreeNode
def parse_name_version(content: str) -> tuple[str, str | None]:
    """Split a uv-tree entry such as ``"pandas v2.3.0"`` into (name, version)."""
    if " v" not in content:
        return content.strip(), None
    name, _, rest = content.partition(" v")
    # ``rest`` may carry trailing text; the version is its first token.
    return name.strip(), rest.split()[0]
def parse_uv_tree(text: str) -> DependencyTreeNode:
    """Parse the text output of ``uv tree`` into a nested data structure.

    Nodes are attached to a synthetic ``<root>`` so multiple top-level
    packages can coexist. Nesting depth is inferred from the column of the
    box-drawing branch symbols.
    """
    lines = text.strip().split("\n")

    # Create a virtual root to hold all top-level dependencies
    tree = DependencyTreeNode(
        name="<root>", version=None, tags=[], dependencies=[]
    )
    # Stack of (node, level) — the top is the most recent shallower ancestor.
    stack = [(tree, -1)]  # (node, level)

    for line in lines:
        line = line.rstrip()
        # Skip blanks and uv's informational footer lines.
        if (
            not line
            or "Package tree already displayed" in line
            or "Package tree is a cycle" in line
        ):
            continue

        # Calculate indentation level by counting characters before tree symbols
        if not any(symbol in line for symbol in ["├──", "└──"]):
            level = 0  # Top-level package
        else:
            # Find the tree symbol position and divide by 4 (standard tree indentation)
            # NOTE(review): assumes uv indents 4 columns per depth — confirm
            # against uv's output if the format changes.
            for symbol in ["├──", "└──"]:
                pos = line.find(symbol)
                if pos != -1:
                    level = (pos // 4) + 1
                    break

        # content after tree symbols
        content = line.lstrip("│ ├└─").strip()

        # Check for cycle indicator
        is_cycle = content.endswith("(*)")
        if is_cycle:
            content = content[:-3].strip()

        # tags (extras/groups) — strip one trailing "(extra: x)"/"(group: x)"
        # annotation per iteration, right-to-left.
        tags: list[DependencyTag] = []
        while "(extra:" in content or "(group:" in content:
            start = (
                content.rfind("(extra:")
                if "(extra:" in content
                else content.rfind("(group:")
            )
            if start == -1:
                break
            end = content.find(")", start)
            if end == -1:
                break
            tag_text = content[start + 1 : end]
            kind, value = tag_text.split(":", 1)
            assert kind == "extra" or kind == "group"
            tags.append(DependencyTag(kind=kind, value=value.strip()))
            content = content[:start].strip()

        name, version = parse_name_version(content)

        # Add cycle indicator as a special tag
        if is_cycle:
            tags.append(DependencyTag(kind="cycle", value="true"))

        node = DependencyTreeNode(
            name=name,
            version=version,
            tags=tags,
            dependencies=[],
        )

        # Adjust stack to correct level: pop until the top is a strict ancestor.
        while len(stack) > 1 and stack[-1][1] >= level:
            stack.pop()

        # Add to parent and push to stack
        stack[-1][0].dependencies.append(node)
        stack.append((node, level))

    return tree
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/api/dependency_tree.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_utils/uv_tree.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Optional
import msgspec
class DependencyTag(msgspec.Struct, rename="camel"):
    """Annotation on a dependency edge: "extra", "group", or "cycle"."""

    kind: str
    value: str
class DependencyTreeNode(msgspec.Struct, rename="camel"):
    """One package node in a parsed ``uv tree`` output."""

    name: str
    # None for the synthetic root or when uv prints no version.
    version: Optional[str]
    # List of {"kind": "extra"|"group", "value": str}
    tags: list[DependencyTag]
    dependencies: list[DependencyTreeNode]
def parse_name_version(content: str) -> tuple[str, str | None]:
    """Parse package name and version from uv tree output.

    ``"pandas v2.3.0"`` yields ``("pandas", "2.3.0")``; a missing
    ``" v"`` marker yields ``(name, None)``.
    """
    if " v" not in content:
        return content.strip(), None
    name, _, rest = content.partition(" v")
    # Trailing annotations may follow the version; keep the first token.
    return name.strip(), rest.split()[0]
def parse_uv_tree(text: str) -> DependencyTreeNode:
    """Parse the text output of `uv tree` into a nested data structure.

    Nodes hang off a synthetic ``<root>`` node so multiple top-level
    packages can coexist; depth is inferred from the branch-symbol column.
    """
    lines = text.strip().split("\n")

    # Create a virtual root to hold all top-level dependencies
    tree = DependencyTreeNode(
        name="<root>", version=None, tags=[], dependencies=[]
    )
    # Stack of (node, level); the top is the nearest shallower ancestor.
    stack = [(tree, -1)]  # (node, level)

    for line in lines:
        line = line.rstrip()
        # Skip blanks and uv's informational footer lines.
        if (
            not line
            or "Package tree already displayed" in line
            or "Package tree is a cycle" in line
        ):
            continue

        # Calculate indentation level by counting characters before tree symbols
        if not any(symbol in line for symbol in ["├──", "└──"]):
            level = 0  # Top-level package
        else:
            # Find the tree symbol position and divide by 4 (standard tree indentation)
            # NOTE(review): assumes 4 columns per depth level — confirm if
            # uv's rendering ever changes.
            for symbol in ["├──", "└──"]:
                pos = line.find(symbol)
                if pos != -1:
                    level = (pos // 4) + 1
                    break

        # content after tree symbols
        content = line.lstrip("│ ├└─").strip()

        # Check for cycle indicator
        is_cycle = content.endswith("(*)")
        if is_cycle:
            content = content[:-3].strip()

        # tags (extras/groups) — strip one trailing annotation per pass.
        tags: list[DependencyTag] = []
        while "(extra:" in content or "(group:" in content:
            start = (
                content.rfind("(extra:")
                if "(extra:" in content
                else content.rfind("(group:")
            )
            if start == -1:
                break
            end = content.find(")", start)
            if end == -1:
                break
            tag_text = content[start + 1 : end]
            kind, value = tag_text.split(":", 1)
            assert kind == "extra" or kind == "group"
            tags.append(DependencyTag(kind=kind, value=value.strip()))
            content = content[:start].strip()

        name, version = parse_name_version(content)

        # Add cycle indicator as a special tag
        if is_cycle:
            tags.append(DependencyTag(kind="cycle", value="true"))

        node = DependencyTreeNode(
            name=name,
            version=version,
            tags=tags,
            dependencies=[],
        )

        # Adjust stack to correct level: pop until the top is a strict ancestor.
        while len(stack) > 1 and stack[-1][1] >= level:
            stack.pop()

        # Add to parent and push to stack
        stack[-1][0].dependencies.append(node)
        stack.append((node, level))

    return tree
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/uv_tree.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_utils/test_uv_tree.py | from __future__ import annotations
import json
import os
import pathlib
import subprocess
import sys
import pytest
from marimo._messaging.msgspec_encoder import asdict
from marimo._server.models.packages import DependencyTreeNode
from marimo._utils.uv_tree import parse_uv_tree
from tests.mocks import snapshotter
# uv resolution snapshots are only stable on newer interpreters.
skip_if_below_py312 = pytest.mark.skipif(
    sys.version_info < (3, 12),
    reason="uv resolution snapshots only run on Python 3.12+",
)

# Path to the uv executable, if the environment provides one; tests that
# shell out to uv are skipped when it is absent.
UV_BIN = os.environ.get("UV")
SELF_DIR = pathlib.Path(__file__).parent

# Snapshot helper bound to this test module's snapshot directory.
snapshot_test = snapshotter(__file__)
def serialize(tree: DependencyTreeNode) -> str:
    """Render a dependency tree as indented JSON for snapshot comparison."""
    payload = asdict(tree)
    return json.dumps(payload, indent=2)
def uv(cmd: list[str], cwd: str | None = None) -> str:
    """Run the uv executable with *cmd* and return its stdout.

    Pins the Python version and an exclude-newer timestamp so dependency
    resolution (and thus snapshot output) stays reproducible over time.
    """
    assert UV_BIN, "Must have uv installed to use."
    result = subprocess.run(
        [UV_BIN] + cmd,
        check=True,
        capture_output=True,
        text=True,
        cwd=cwd,
        env={
            "UV_PYTHON": "3.13",
            "UV_EXCLUDE_NEWER": "2025-06-19T00:00:00-02:00",
        },
    )
    return result.stdout
@pytest.mark.skipif(UV_BIN is None, reason="requires uv executable.")
def test_complex_project_tree(tmp_path: pathlib.Path) -> None:
    # Build a real uv project mixing regular, dev, and optional
    # dependencies, then snapshot the parsed tree.
    uv(["init", "blah"], cwd=str(tmp_path))
    project_dir = tmp_path / "blah"
    uv(["add", "anywidget", "marimo"], cwd=str(project_dir))
    uv(["add", "--dev", "pytest"], cwd=str(project_dir))
    uv(["add", "--optional", "bar", "pandas"], cwd=str(project_dir))
    raw = uv(["tree", "--no-dedupe"], cwd=str(project_dir))
    tree = parse_uv_tree(raw)
    assert tree is not None
    snapshot_test("complex_project_tree.json", serialize(tree))
@pytest.mark.skipif(UV_BIN is None, reason="requires uv executable.")
def test_empty_project_tree(tmp_path: pathlib.Path) -> None:
    # A freshly-initialized project with no dependencies still parses.
    uv(["init", "blah"], cwd=str(tmp_path))
    project_dir = tmp_path / "blah"
    raw = uv(["tree", "--no-dedupe"], cwd=str(project_dir))
    tree = parse_uv_tree(raw)
    snapshot_test("empty_project_tree.json", serialize(tree))
@pytest.mark.skipif(UV_BIN is None, reason="requires uv executable.")
def test_simple_project_tree(tmp_path: pathlib.Path) -> None:
    # Two top-level dependencies with shared transitive deps.
    uv(["init", "blah"], cwd=str(tmp_path))
    project_dir = tmp_path / "blah"
    uv(["add", "polars", "pandas"], cwd=str(project_dir))
    raw = uv(["tree", "--no-dedupe"], cwd=str(project_dir))
    tree = parse_uv_tree(raw)
    snapshot_test("simple_project_tree.json", serialize(tree))
@pytest.mark.skipif(UV_BIN is None, reason="requires uv executable.")
@skip_if_below_py312
def test_script_tree(tmp_path: pathlib.Path) -> None:
    # PEP 723 inline-script dependencies are listed via `uv tree --script`.
    script_path = tmp_path / "blah.py"
    uv(["init", "--script", str(script_path)])
    uv(["add", "--script", str(script_path), "polars", "pandas", "anywidget"])
    raw = uv(["tree", "--no-dedupe", "--script", str(script_path)])
    tree = parse_uv_tree(raw)
    snapshot_test("script_tree.json", serialize(tree))
@pytest.mark.skipif(UV_BIN is None, reason="requires uv executable.")
def test_empty_script_tree_stable_output(tmp_path: pathlib.Path) -> None:
    # A script with no dependencies should parse to a stable (empty) tree.
    script_path = tmp_path / "blah.py"
    uv(["init", "--script", str(script_path)])
    raw = uv(["tree", "--no-dedupe", "--script", str(script_path)])
    tree = parse_uv_tree(raw)
    snapshot_test("empty_script_tree.json", serialize(tree))
@pytest.mark.xfail(reason="TODO: fix this. fails in CI.")
def test_complex_project_tree_raw_snapshot() -> None:
    # Canned `uv tree` output (no uv binary required) covering nesting,
    # extras, and dependency groups.
    raw = """blah v0.1.0
├── anywidget v0.9.18
│ ├── ipywidgets v8.1.7
│ │ ├── comm v0.2.2
│ │ │ └── traitlets v5.14.3
│ │ ├── ipython v9.3.0
│ │ │ ├── decorator v5.2.1
│ │ │ ├── ipython-pygments-lexers v1.1.1
│ │ │ │ └── pygments v2.19.1
│ │ │ ├── jedi v0.19.2
│ │ │ │ └── parso v0.8.4
│ │ │ ├── matplotlib-inline v0.1.7
│ │ │ │ └── traitlets v5.14.3
│ │ │ ├── pexpect v4.9.0
│ │ │ │ └── ptyprocess v0.7.0
│ │ │ ├── prompt-toolkit v3.0.51
│ │ │ │ └── wcwidth v0.2.13
│ │ │ ├── pygments v2.19.1
│ │ │ ├── stack-data v0.6.3
│ │ │ │ ├── asttokens v3.0.0
│ │ │ │ ├── executing v2.2.0
│ │ │ │ └── pure-eval v0.2.3
│ │ │ └── traitlets v5.14.3
│ │ ├── jupyterlab-widgets v3.0.15
│ │ ├── traitlets v5.14.3
│ │ └── widgetsnbextension v4.0.14
│ ├── psygnal v0.13.0
│ └── typing-extensions v4.14.0
├── marimo v0.0.0
│ ├── click v8.2.1
│ ├── docutils v0.21.2
│ ├── itsdangerous v2.2.0
│ ├── jedi v0.19.2
│ │ └── parso v0.8.4
│ ├── loro v1.5.1
│ ├── markdown v3.8.1
│ ├── narwhals v1.42.1
│ ├── packaging v25.0
│ ├── psutil v7.0.0
│ ├── pygments v2.19.1
│ ├── pymdown-extensions v10.15
│ │ ├── markdown v3.8.1
│ │ └── pyyaml v6.0.2
│ ├── pyyaml v6.0.2
│ ├── starlette v0.47.0
│ │ └── anyio v4.9.0
│ │ ├── idna v3.10
│ │ └── sniffio v1.3.1
│ ├── tomlkit v0.13.3
│ ├── uvicorn v0.34.3
│ │ ├── click v8.2.1
│ │ └── h11 v0.16.0
│ └── websockets v15.0.1
├── pandas v2.3.0 (extra: bar)
│ ├── numpy v2.3.0
│ ├── python-dateutil v2.9.0.post0
│ │ └── six v1.17.0
│ ├── pytz v2025.2
│ └── tzdata v2025.2
└── pytest v8.4.1 (group: dev)
    ├── iniconfig v2.1.0
    ├── packaging v25.0
    ├── pluggy v1.6.0
    └── pygments v2.19.1"""
    tree = parse_uv_tree(raw)
    snapshot_test("complex_project_tree_from_raw.json", serialize(tree))
def test_script_tree_raw_snapshot() -> None:
    # Canned `uv tree --script` output with several top-level packages.
    raw = """polars v1.31.0
pandas v2.3.0
├── numpy v2.3.0
├── python-dateutil v2.9.0.post0
│ └── six v1.17.0
├── pytz v2025.2
└── tzdata v2025.2
anywidget v0.9.18
├── ipywidgets v8.1.7
│ ├── comm v0.2.2
│ │ └── traitlets v5.14.3
│ ├── ipython v9.3.0
│ │ ├── decorator v5.2.1
│ │ ├── ipython-pygments-lexers v1.1.1
│ │ │ └── pygments v2.19.1
│ │ ├── jedi v0.19.2
│ │ │ └── parso v0.8.4
│ │ ├── matplotlib-inline v0.1.7
│ │ │ └── traitlets v5.14.3
│ │ ├── pexpect v4.9.0
│ │ │ └── ptyprocess v0.7.0
│ │ ├── prompt-toolkit v3.0.51
│ │ │ └── wcwidth v0.2.13
│ │ ├── pygments v2.19.1
│ │ ├── stack-data v0.6.3
│ │ │ ├── asttokens v3.0.0
│ │ │ ├── executing v2.2.0
│ │ │ └── pure-eval v0.2.3
│ │ └── traitlets v5.14.3
│ ├── jupyterlab-widgets v3.0.15
│ ├── traitlets v5.14.3
│ └── widgetsnbextension v4.0.14
├── psygnal v0.13.0
└── typing-extensions v4.14.0"""
    tree = parse_uv_tree(raw)
    # Use keep_version=True to avoid jedi v0.19.2 being incorrectly
    # sanitized when marimo version is 0.19.2
    snapshot_test(
        "script_tree_from_raw.json", serialize(tree), keep_version=True
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_uv_tree.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_utils/async_path.py | # Copyright 2026 Marimo. All rights reserved.
"""
Async version of pathlib.Path that uses asyncio.to_thread for filesystem operations.
"""
from __future__ import annotations
import asyncio
import os
from pathlib import Path, PurePath, PurePosixPath, PureWindowsPath
from typing import IO, TYPE_CHECKING, Any, Optional, Union
StrPath = Union[str, os.PathLike[str]]
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Iterator
class AsyncPath(PurePath):
"""
An async version of pathlib.Path that uses asyncio.to_thread for filesystem operations.
This class inherits from PurePath for path manipulation and adds async filesystem methods.
"""
def __new__(cls, *args: Any, **kwargs: Any) -> AsyncPath:
    # Create the path using the same logic as PurePath.
    # Mirror pathlib's behavior: instantiating the generic AsyncPath
    # resolves to the platform-specific flavour at runtime.
    if cls is AsyncPath:
        cls = AsyncWindowsPath if os.name == "nt" else AsyncPosixPath
    return super().__new__(cls, *args, **kwargs)  # type: ignore
def __truediv__(self, other: StrPath) -> AsyncPath:
    # Override to return AsyncPath instance
    # (PurePath.__truediv__ would otherwise hand back a plain PurePath).
    result = super().__truediv__(other)
    return self.__class__(result)
def __rtruediv__(self, other: StrPath) -> AsyncPath:
    # Override to return AsyncPath instance
    # (covers `str / AsyncPath`-style joins).
    result = super().__rtruediv__(other)
    return self.__class__(result)
@property
def _path(self) -> Path:
    """Get the synchronous Path equivalent used for the actual I/O calls."""
    return Path(self)
# Async filesystem operations
async def exists(self) -> bool:
    """Return True if the path exists (check runs in a worker thread)."""
    return await asyncio.to_thread(self._path.exists)
async def is_file(self) -> bool:
    """Return True if the path is a regular file (runs in a worker thread)."""
    return await asyncio.to_thread(self._path.is_file)
async def is_dir(self) -> bool:
    """Return True if the path is a directory (runs in a worker thread)."""
    return await asyncio.to_thread(self._path.is_dir)
async def is_symlink(self) -> bool:
    """Return True if the path is a symbolic link (runs in a worker thread)."""
    return await asyncio.to_thread(self._path.is_symlink)
async def stat(self) -> os.stat_result:
    """Return stat info for the path (follows symlinks; worker thread)."""
    return await asyncio.to_thread(self._path.stat)
async def lstat(self) -> os.stat_result:
    """Return lstat info for the path (doesn't follow symlinks)."""
    return await asyncio.to_thread(self._path.lstat)
async def chmod(self, mode: int) -> None:
    """Change file mode and permissions (runs in a worker thread)."""
    return await asyncio.to_thread(self._path.chmod, mode)
async def unlink(self, missing_ok: bool = False) -> None:
    """Remove the file; with missing_ok, a missing file is not an error."""
    return await asyncio.to_thread(self._path.unlink, missing_ok)
async def rmdir(self) -> None:
"""Remove the directory."""
return await asyncio.to_thread(self._path.rmdir)
async def mkdir(
self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
) -> None:
"""Create directory."""
return await asyncio.to_thread(
self._path.mkdir, mode, parents, exist_ok
)
async def rename(self, target: Union[str, AsyncPath, Path]) -> AsyncPath:
"""Rename the path to target."""
result = await asyncio.to_thread(self._path.rename, target)
return self.__class__(result)
async def replace(self, target: Union[str, AsyncPath, Path]) -> AsyncPath:
"""Replace the path with target."""
result = await asyncio.to_thread(self._path.replace, target)
return self.__class__(result)
async def symlink_to(
self, target: Union[str, Path], target_is_directory: bool = False
) -> None:
"""Create a symbolic link to target."""
return await asyncio.to_thread(
self._path.symlink_to, target, target_is_directory
)
async def hardlink_to(self, target: Union[str, Path]) -> None:
"""Create a hard link to target."""
return await asyncio.to_thread(self._path.hardlink_to, target)
async def readlink(self) -> AsyncPath:
"""Return the path the symbolic link points to."""
result = await asyncio.to_thread(self._path.readlink)
return self.__class__(result)
# File I/O operations
async def read_text(
self, encoding: Optional[str] = None, errors: Optional[str] = None
) -> str:
"""Read and return the file contents as text."""
return await asyncio.to_thread(self._path.read_text, encoding, errors)
async def read_bytes(self) -> bytes:
"""Read and return the file contents as bytes."""
return await asyncio.to_thread(self._path.read_bytes)
async def write_text(
self,
data: str,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
) -> int:
"""Write text data to the file."""
return await asyncio.to_thread(
self._path.write_text, data, encoding, errors, newline
)
async def write_bytes(self, data: bytes) -> int:
"""Write bytes data to the file."""
return await asyncio.to_thread(self._path.write_bytes, data)
# Directory operations
async def iterdir(self) -> AsyncGenerator[AsyncPath, None]:
"""Iterate over directory contents asynchronously."""
def _iterdir() -> Iterator[Path]:
return self._path.iterdir()
paths = await asyncio.to_thread(list, _iterdir())
for path in paths:
yield self.__class__(path)
async def glob(self, pattern: str) -> AsyncGenerator[AsyncPath, None]:
"""Glob for paths matching pattern asynchronously."""
def _glob() -> Iterator[Path]:
return self._path.glob(pattern)
paths = await asyncio.to_thread(list, _glob())
for path in paths:
yield self.__class__(path)
async def rglob(self, pattern: str) -> AsyncGenerator[AsyncPath, None]:
"""Recursively glob for paths matching pattern asynchronously."""
def _rglob() -> Iterator[Path]:
return self._path.rglob(pattern)
paths = await asyncio.to_thread(list, _rglob())
for path in paths:
yield self.__class__(path)
# Utility methods
async def resolve(self, strict: bool = False) -> AsyncPath:
"""Resolve the path to an absolute path."""
result = await asyncio.to_thread(self._path.resolve, strict)
return self.__class__(result)
async def expanduser(self) -> AsyncPath:
"""Expand ~ and ~user constructs."""
result = await asyncio.to_thread(self._path.expanduser)
return self.__class__(result)
async def absolute(self) -> AsyncPath:
"""Return an absolute version of this path."""
result = await asyncio.to_thread(self._path.absolute)
return self.__class__(result)
# Context manager support for opening files
def open(
self,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
) -> IO[Any]:
"""
Open the file.
This returns the same file object as the built-in open() function.
Note: This is not async - use aiofiles or similar for truly async file I/O.
"""
return self._path.open(mode, buffering, encoding, errors, newline)
class AsyncPosixPath(AsyncPath, PurePosixPath):
    """AsyncPath implementation for POSIX systems."""

    __slots__ = ()

    def __getattr__(self, name: str) -> Any:
        # __getattr__ only runs after normal attribute lookup fails, so any
        # name reaching here is genuinely missing. The previous delegation to
        # super().__getattr__ raised a confusing "'super' object has no
        # attribute '__getattr__'" (no base class defines __getattr__); raise
        # the conventional AttributeError for the requested name instead.
        raise AttributeError(
            f"{type(self).__name__!r} object has no attribute {name!r}"
        )
class AsyncWindowsPath(AsyncPath, PureWindowsPath):
    """AsyncPath implementation for Windows systems."""

    __slots__ = ()

    def __getattr__(self, name: str) -> Any:
        # __getattr__ only runs after normal attribute lookup fails, so any
        # name reaching here is genuinely missing. Delegating to
        # super().__getattr__ (which no base class defines) produced a
        # misleading "'super' object has no attribute '__getattr__'" error;
        # raise the conventional AttributeError for the requested name.
        raise AttributeError(
            f"{type(self).__name__!r} object has no attribute {name!r}"
        )
# Module-level convenience functions for compatibility with aio_path
async def exists(path: StrPath) -> bool:
    """Asynchronously report whether *path* exists on the filesystem."""
    target = AsyncPath(path)
    return await target.exists()
async def isfile(path: StrPath) -> bool:
    """Asynchronously report whether *path* refers to a regular file."""
    target = AsyncPath(path)
    return await target.is_file()
async def isdir(path: StrPath) -> bool:
    """Asynchronously report whether *path* refers to a directory."""
    target = AsyncPath(path)
    return await target.is_dir()
async def abspath(path: StrPath) -> str:
    """Asynchronously return the absolute form of *path* as a string."""
    absolute = await AsyncPath(path).absolute()
    return str(absolute)
async def normpath(path: str) -> str:
    """Asynchronously normalize *path* (collapse separators and up-level refs)."""
    normalized: str = await asyncio.to_thread(os.path.normpath, path)
    return normalized
async def mkdir(
    path: StrPath,
    mode: int = 0o777,
    parents: bool = False,
    exist_ok: bool = False,
) -> None:
    """Asynchronously create the directory *path*.

    Mirrors ``pathlib.Path.mkdir``: ``parents`` creates intermediate
    directories; ``exist_ok`` suppresses ``FileExistsError``.
    """
    target = AsyncPath(path)
    await target.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/async_path.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_utils/test_async_path.py | import asyncio
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
from marimo._utils.async_path import (
AsyncPath,
AsyncPosixPath,
AsyncWindowsPath,
)
class TestAsyncPathConstruction:
    """Construction and pure-path behavior of AsyncPath (no filesystem I/O)."""
    def test_new_creates_correct_platform_type(self):
        """Test that AsyncPath creates the correct platform-specific type."""
        path = AsyncPath("test")
        if os.name == "nt":
            assert isinstance(path, AsyncWindowsPath)
        else:
            assert isinstance(path, AsyncPosixPath)
    def test_new_with_multiple_args(self):
        """Test creating AsyncPath with multiple path components."""
        path = AsyncPath("home", "user", "file.txt")
        assert str(path) == os.path.join("home", "user", "file.txt")
    def test_truediv_returns_async_path(self):
        """Test that / operator returns AsyncPath instance."""
        path = AsyncPath("home")
        result = path / "user"
        assert isinstance(result, AsyncPath)
        assert str(result) == os.path.join("home", "user")
    def test_rtruediv_returns_async_path(self):
        """Test that reverse / operator returns AsyncPath instance."""
        path = AsyncPath("user")
        # str / AsyncPath exercises __rtruediv__.
        result = "home" / path
        assert isinstance(result, AsyncPath)
        assert str(result) == os.path.join("home", "user")
    def test_path_property(self):
        """Test that _path property returns synchronous Path."""
        async_path = AsyncPath("test")
        sync_path = async_path._path
        assert isinstance(sync_path, Path)
        assert str(sync_path) == str(async_path)
class TestAsyncPathFileSystemOperations:
    """Filesystem predicates and mutations (exists/stat/mkdir/unlink/rename).

    NOTE(review): these async tests carry no @pytest.mark.asyncio —
    presumably the project's pytest config enables async auto-mode; confirm.
    """
    async def test_exists_true(self):
        """Test exists returns True for existing file."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            assert await path.exists() is True
    async def test_exists_false(self):
        """Test exists returns False for non-existing file."""
        path = AsyncPath("/nonexistent/file")
        assert await path.exists() is False
    async def test_is_file_true(self):
        """Test is_file returns True for regular file."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            assert await path.is_file() is True
    async def test_is_file_false(self):
        """Test is_file returns False for directory."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp)
            assert await path.is_file() is False
    async def test_is_dir_true(self):
        """Test is_dir returns True for directory."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp)
            assert await path.is_dir() is True
    async def test_is_dir_false(self):
        """Test is_dir returns False for file."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            assert await path.is_dir() is False
    async def test_stat(self):
        """Test stat returns stat_result object."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            stat_result = await path.stat()
            assert hasattr(stat_result, "st_size")
            assert hasattr(stat_result, "st_mtime")
    async def test_lstat(self):
        """Test lstat returns stat_result object."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            lstat_result = await path.lstat()
            assert hasattr(lstat_result, "st_size")
            assert hasattr(lstat_result, "st_mtime")
    async def test_chmod(self):
        """Test chmod changes file permissions."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            await path.chmod(0o644)
            stat_result = await path.stat()
            # Check that some permission bits are set
            assert stat_result.st_mode & 0o644
    async def test_mkdir(self):
        """Test mkdir creates directory."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "new_dir"
            await path.mkdir()
            assert await path.exists()
            assert await path.is_dir()
    async def test_mkdir_with_parents(self):
        """Test mkdir with parents creates intermediate directories."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "parent" / "child"
            await path.mkdir(parents=True)
            assert await path.exists()
            assert await path.is_dir()
    async def test_mkdir_exist_ok(self):
        """Test mkdir with exist_ok doesn't raise if directory exists."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "existing"
            await path.mkdir()
            # Should not raise
            await path.mkdir(exist_ok=True)
    # NOTE(review): the skip reason below mentions symlinks, but this test
    # exercises unlink on a NamedTemporaryFile; likely copy-pasted from the
    # symlink tests — the actual Windows issue is open-handle delete
    # semantics. Confirm and reword.
    @pytest.mark.skipif(
        os.name == "nt",
        reason="Symlinks require special permissions on Windows",
    )
    async def test_unlink(self):
        """Test unlink removes file."""
        # delete=False so the test (not the context manager) removes the file.
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            path = AsyncPath(tmp.name)
            assert await path.exists()
            await path.unlink()
            assert not await path.exists()
    async def test_unlink_missing_ok(self):
        """Test unlink with missing_ok doesn't raise for non-existing file."""
        path = AsyncPath("/nonexistent/file")
        # Should not raise
        await path.unlink(missing_ok=True)
    async def test_rmdir(self):
        """Test rmdir removes empty directory."""
        with tempfile.TemporaryDirectory() as tmp:
            subdir = AsyncPath(tmp) / "subdir"
            await subdir.mkdir()
            assert await subdir.exists()
            await subdir.rmdir()
            assert not await subdir.exists()
    async def test_rename(self):
        """Test rename moves file to new location."""
        with tempfile.TemporaryDirectory() as tmp:
            source = AsyncPath(tmp) / "source.txt"
            target = AsyncPath(tmp) / "target.txt"
            await source.write_text("test content")
            result = await source.rename(target)
            assert isinstance(result, AsyncPath)
            assert not await source.exists()
            assert await target.exists()
            assert await target.read_text() == "test content"
    async def test_replace(self):
        """Test replace overwrites target file."""
        with tempfile.TemporaryDirectory() as tmp:
            source = AsyncPath(tmp) / "source.txt"
            target = AsyncPath(tmp) / "target.txt"
            await source.write_text("source content")
            await target.write_text("target content")
            result = await source.replace(target)
            assert isinstance(result, AsyncPath)
            assert not await source.exists()
            assert await target.exists()
            assert await target.read_text() == "source content"
class TestAsyncPathFileIO:
    """read_text/read_bytes/write_text/write_bytes round-trips.

    Read tests use delete=False and unlink manually because reopening a
    still-open NamedTemporaryFile is not portable to Windows.
    """
    async def test_read_text(self):
        """Test read_text returns file contents as string."""
        with tempfile.NamedTemporaryFile(mode="w", delete=False) as tmp:
            tmp.write("test content")
            tmp.flush()
            path = AsyncPath(tmp.name)
            content = await path.read_text()
            assert content == "test content"
        os.unlink(tmp.name)
    async def test_read_text_with_encoding(self):
        """Test read_text with specific encoding."""
        with tempfile.NamedTemporaryFile(
            mode="w", encoding="utf-8", delete=False
        ) as tmp:
            tmp.write("test content with unicode: ñ")
            tmp.flush()
            path = AsyncPath(tmp.name)
            content = await path.read_text(encoding="utf-8")
            assert "ñ" in content
        os.unlink(tmp.name)
    async def test_read_bytes(self):
        """Test read_bytes returns file contents as bytes."""
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp.write(b"test bytes")
            tmp.flush()
            path = AsyncPath(tmp.name)
            content = await path.read_bytes()
            assert content == b"test bytes"
        os.unlink(tmp.name)
    async def test_write_text(self):
        """Test write_text writes string to file."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "test.txt"
            bytes_written = await path.write_text("test content")
            assert bytes_written > 0
            content = await path.read_text()
            assert content == "test content"
    async def test_write_text_with_encoding(self):
        """Test write_text with specific encoding."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "test.txt"
            content = "test content with unicode: ñ"
            await path.write_text(content, encoding="utf-8")
            read_content = await path.read_text(encoding="utf-8")
            assert read_content == content
    async def test_write_bytes(self):
        """Test write_bytes writes bytes to file."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "test.bin"
            data = b"test bytes"
            bytes_written = await path.write_bytes(data)
            assert bytes_written == len(data)
            content = await path.read_bytes()
            assert content == data
class TestAsyncPathDirectoryOperations:
    """iterdir/glob/rglob: async iteration over directory listings."""
    async def test_iterdir(self):
        """Test iterdir yields directory contents."""
        with tempfile.TemporaryDirectory() as tmp:
            # Create some files
            (Path(tmp) / "file1.txt").touch()
            (Path(tmp) / "file2.txt").touch()
            path = AsyncPath(tmp)
            files = []
            async for item in path.iterdir():
                files.append(item)
            assert len(files) == 2
            assert all(isinstance(f, AsyncPath) for f in files)
            filenames = {f.name for f in files}
            assert filenames == {"file1.txt", "file2.txt"}
    async def test_glob(self):
        """Test glob returns matching paths."""
        with tempfile.TemporaryDirectory() as tmp:
            # Create some files
            (Path(tmp) / "test1.txt").touch()
            (Path(tmp) / "test2.txt").touch()
            (Path(tmp) / "other.py").touch()
            path = AsyncPath(tmp)
            txt_files = []
            async for item in path.glob("*.txt"):
                txt_files.append(item)
            assert len(txt_files) == 2
            assert all(isinstance(f, AsyncPath) for f in txt_files)
            assert all(f.suffix == ".txt" for f in txt_files)
    async def test_rglob(self):
        """Test rglob returns matching paths recursively."""
        with tempfile.TemporaryDirectory() as tmp:
            # Create nested structure
            subdir = Path(tmp) / "subdir"
            subdir.mkdir()
            (Path(tmp) / "test1.txt").touch()
            (subdir / "test2.txt").touch()
            path = AsyncPath(tmp)
            txt_files = []
            async for item in path.rglob("*.txt"):
                txt_files.append(item)
            assert len(txt_files) == 2
            assert all(isinstance(f, AsyncPath) for f in txt_files)
            assert all(f.suffix == ".txt" for f in txt_files)
class TestAsyncPathUtilityMethods:
    """resolve/expanduser/open utility behavior."""
    async def test_resolve(self):
        """Test resolve returns absolute path."""
        path = AsyncPath(".")
        resolved = await path.resolve()
        assert isinstance(resolved, AsyncPath)
        assert resolved.is_absolute()
    async def test_expanduser(self):
        """Test expanduser expands ~ to home directory."""
        path = AsyncPath("~")
        expanded = await path.expanduser()
        assert isinstance(expanded, AsyncPath)
        # Should expand to actual home directory
        assert str(expanded) != "~"
    @pytest.mark.skipif(
        os.name == "nt",
        reason="Temp file permissions are different on Windows",
    )
    def test_open(self):
        """Test open returns file object."""
        # Reopening a NamedTemporaryFile while it is still open is
        # POSIX-only, hence the skip above.
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            with path.open("r") as f:
                assert hasattr(f, "read")
                assert hasattr(f, "write")
class TestAsyncPathSymlinks:
    """Symbolic-link creation, detection, and readlink."""
    async def test_is_symlink_false(self):
        """Test is_symlink returns False for regular file."""
        with tempfile.NamedTemporaryFile() as tmp:
            path = AsyncPath(tmp.name)
            assert await path.is_symlink() is False
    @pytest.mark.skipif(
        os.name == "nt",
        reason="Symlinks require special permissions on Windows",
    )
    async def test_symlink_operations(self):
        """Test symlink creation and detection."""
        with tempfile.TemporaryDirectory() as tmp:
            target = AsyncPath(tmp) / "target.txt"
            link = AsyncPath(tmp) / "link.txt"
            await target.write_text("target content")
            await link.symlink_to(target)
            assert await link.is_symlink()
            assert await link.exists()
            # Test readlink
            resolved_target = await link.readlink()
            assert isinstance(resolved_target, AsyncPath)
class TestAsyncPathErrorHandling:
    """Operations on missing paths propagate the usual OSError subclasses."""
    async def test_stat_nonexistent_file_raises(self):
        """Test stat raises FileNotFoundError for non-existent file."""
        path = AsyncPath("/nonexistent/file")
        with pytest.raises(FileNotFoundError):
            await path.stat()
    async def test_unlink_nonexistent_file_raises(self):
        """Test unlink raises FileNotFoundError for non-existent file."""
        path = AsyncPath("/nonexistent/file")
        with pytest.raises(FileNotFoundError):
            await path.unlink()
    async def test_mkdir_existing_directory_raises(self):
        """Test mkdir raises FileExistsError if directory exists and exist_ok=False."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "existing"
            await path.mkdir()
            with pytest.raises(FileExistsError):
                await path.mkdir(exist_ok=False)
    async def test_rmdir_nonexistent_raises(self):
        """Test rmdir raises FileNotFoundError for non-existent directory."""
        path = AsyncPath("/nonexistent/dir")
        with pytest.raises(FileNotFoundError):
            await path.rmdir()
    async def test_read_text_nonexistent_raises(self):
        """Test read_text raises FileNotFoundError for non-existent file."""
        path = AsyncPath("/nonexistent/file")
        with pytest.raises(FileNotFoundError):
            await path.read_text()
class TestAsyncPathThreading:
    """Verify AsyncPath delegates filesystem work to asyncio.to_thread."""

    async def test_operations_use_asyncio_to_thread(self):
        """Test that operations actually use asyncio.to_thread."""
        with patch("asyncio.to_thread") as mock_to_thread:
            # Create a future that resolves to True.
            # get_running_loop() replaces asyncio.get_event_loop(): the
            # latter is deprecated when called from within a coroutine
            # (DeprecationWarning since Python 3.10).
            future = asyncio.get_running_loop().create_future()
            future.set_result(True)
            mock_to_thread.return_value = future
            path = AsyncPath("test")
            await path.exists()
            mock_to_thread.assert_called_once()
            # Verify it was called with the path's sync method
            args = mock_to_thread.call_args[0]
            assert callable(args[0])  # First arg should be the sync method
class TestAsyncPathEdgeCases:
    """Edge cases: hard links, newline handling, empty results, odd names."""

    async def test_hardlink_to(self):
        """Test hardlink_to creates hard link."""
        with tempfile.TemporaryDirectory() as tmp:
            source = AsyncPath(tmp) / "source.txt"
            target = AsyncPath(tmp) / "target.txt"
            await source.write_text("test content")
            await target.hardlink_to(source)
            assert await target.exists()
            assert await target.read_text() == "test content"
            # Both should have same inode (hard link)
            source_stat = await source.stat()
            target_stat = await target.stat()
            assert source_stat.st_ino == target_stat.st_ino

    async def test_write_text_with_newline(self):
        """Test write_text with custom newline parameter."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp) / "test.txt"
            content = "line1\nline2"
            await path.write_text(content, newline="\r\n")
            # Read the raw bytes to verify newline conversion
            raw_content = await path.read_bytes()
            assert b"\r\n" in raw_content

    async def test_resolve_with_strict(self):
        """Test resolve with strict parameter."""
        path = AsyncPath(".")
        resolved = await path.resolve(strict=True)
        assert isinstance(resolved, AsyncPath)
        assert resolved.is_absolute()

    async def test_iterdir_empty_directory(self):
        """Test iterdir on empty directory."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp)
            files = []
            async for item in path.iterdir():
                files.append(item)
            assert len(files) == 0

    async def test_glob_no_matches(self):
        """Test glob with pattern that matches nothing."""
        with tempfile.TemporaryDirectory() as tmp:
            path = AsyncPath(tmp)
            matches = []
            async for item in path.glob("*.nonexistent"):
                matches.append(item)
            assert len(matches) == 0

    async def test_path_with_spaces_and_special_chars(self):
        """Test AsyncPath handles paths with spaces and special characters."""
        with tempfile.TemporaryDirectory() as tmp:
            # Create path with spaces and special chars
            special_path = (
                AsyncPath(tmp) / "file with spaces & special chars.txt"
            )
            await special_path.write_text("test content")
            assert await special_path.exists()
            assert await special_path.is_file()
            content = await special_path.read_text()
            assert content == "test content"

    async def test_multiple_path_operations(self):
        """Test chaining multiple path operations."""
        base = AsyncPath("home")
        result = base / "user" / "documents" / "file.txt"
        assert isinstance(result, AsyncPath)
        assert str(result) == os.path.join(
            "home", "user", "documents", "file.txt"
        )

    async def test_path_equality_and_hashing(self):
        """Test path equality and hashing."""
        path1 = AsyncPath("test", "file.txt")
        path2 = AsyncPath("test") / "file.txt"
        assert str(path1) == str(path2)
        # PurePath defines __eq__/__hash__: equal paths must compare equal
        # and hash identically. (The original test only compared str()
        # twice and never exercised == or hash despite its name.)
        assert path1 == path2
        assert hash(path1) == hash(path2)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_async_path.py",
"license": "Apache License 2.0",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_utils/files.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import fnmatch
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING, Union
from marimo._utils import async_path
if TYPE_CHECKING:
from collections.abc import AsyncGenerator, Generator
_SPLIT_NUMBERS = re.compile(r"([0-9]+)").split


def natural_sort(filename: str) -> list[Union[int, str]]:
    """Build a natural-sort key for *filename*.

    Digit runs compare numerically and the remaining text compares
    case-insensitively, so e.g. ``f2`` sorts before ``f10``.
    """
    key: list[Union[int, str]] = []
    for chunk in _SPLIT_NUMBERS(filename):
        key.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return key
def get_files(folder: str) -> Generator[Path, None, None]:
    """Recursively yield every file under *folder*.

    Directories whose name starts with a dot (e.g. ``.git``) are skipped.
    """
    with os.scandir(folder) as entries:
        for entry in entries:
            if entry.is_file():
                yield Path(entry.path)
                continue
            if entry.is_dir() and not entry.name.startswith("."):
                yield from get_files(entry.path)
async def async_get_files(folder: str) -> AsyncGenerator[Path, None]:
    """Recursively yield every file under *folder* as an async generator.

    Dot-directories are skipped. NOTE: os.scandir itself still runs on the
    calling thread; the async form lets consumers interleave, it does not
    offload the directory walk.
    """
    with os.scandir(folder) as entries:
        for entry in entries:
            if entry.is_file():
                yield Path(entry.path)
            elif entry.is_dir() and not entry.name.startswith("."):
                async for nested in async_get_files(entry.path):
                    yield nested
def _get_root(pattern: str) -> str:
sep = os.sep
root = "."
parts = pattern.split(sep)
for i, part in enumerate(parts):
if "*" in part or "?" in part:
root = sep.join(parts[:i]) if i > 0 else "."
break
elif os.path.isdir(sep.join(parts[: i + 1])):
root = sep.join(parts[: i + 1])
return root
def expand_file_patterns(file_patterns: tuple[str, ...]) -> list[Path]:
    """Expand file patterns to actual file paths.

    Args:
        file_patterns: Tuple of file patterns (files, directories, or glob-like patterns)

    Returns:
        Sorted, de-duplicated list of Path objects for all matching files
    """
    collected: list[Path] = []
    for pattern in file_patterns:
        if os.path.isfile(pattern):
            collected.append(Path(pattern))
            continue
        if os.path.isdir(pattern):
            collected.extend(get_files(pattern))
            continue
        if "**" in pattern or "*" in pattern or "?" in pattern:
            # Glob: walk from the deepest literal directory prefix and keep
            # only files whose full path matches the pattern.
            root = _get_root(pattern)
            if os.path.isdir(root):
                collected.extend(
                    candidate
                    for candidate in get_files(root)
                    if fnmatch.fnmatch(str(candidate), pattern)
                )
        # Non-glob paths that don't exist are silently skipped.
    return sorted(set(collected))
async def async_expand_file_patterns(
    file_patterns: tuple[str, ...],
) -> AsyncGenerator[Path, None]:
    """Asynchronously expand file patterns to file paths, yielding as discovered.

    Args:
        file_patterns: Tuple of file patterns (files, directories, or glob-like patterns)

    Yields:
        Path objects for matching files (including non-existent explicit files)
    """
    emitted: set[Path] = set()

    def _first_seen(candidate: Path) -> bool:
        # De-duplicate across patterns: only the first occurrence is yielded.
        if candidate in emitted:
            return False
        emitted.add(candidate)
        return True

    for pattern in file_patterns:
        if await async_path.isfile(pattern):
            explicit = Path(pattern)
            if _first_seen(explicit):
                yield explicit
        elif await async_path.isdir(pattern):
            async for discovered in async_get_files(pattern):
                if _first_seen(discovered):
                    yield discovered
        elif "**" in pattern or "*" in pattern or "?" in pattern:
            # Glob: walk from the deepest literal directory prefix and keep
            # only files whose full path matches the pattern.
            root = _get_root(pattern)
            if await async_path.isdir(root):
                async for discovered in async_get_files(root):
                    if fnmatch.fnmatch(
                        str(discovered), pattern
                    ) and _first_seen(discovered):
                        yield discovered
        else:
            # Non-glob path that doesn't exist: yield it anyway so callers
            # can surface a useful error for the explicit input.
            explicit = Path(pattern)
            if _first_seen(explicit):
                yield explicit
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/files.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_utils/code.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
def hash_code(code: str) -> str:
    """Return the MD5 hex digest of *code* (a cache key, not a security hash)."""
    import hashlib

    # usedforsecurity=False keeps MD5 usable on FIPS-restricted builds.
    digest = hashlib.md5(code.encode("utf-8"), usedforsecurity=False)
    return digest.hexdigest()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/code.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/theme.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._config.config import Theme
from marimo._config.manager import get_default_config_manager
def get_current_theme() -> Theme:
    """Resolve the active display theme from the default user configuration."""
    return get_default_config_manager(current_path=None).theme
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/theme.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_runtime/packages/import_error_extractors.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import re
def extract_missing_module_from_cause_chain(
    error: ImportError,
) -> str | None:
    """Walk an ``ImportError``'s ``__cause__`` chain for a missing module name.

    Handles cases where a ``ModuleNotFoundError`` was raised and then wrapped,
    e.g. via ``raise ImportError("helpful message") from err``. Returns the
    first non-empty module name found, or ``None``.
    """
    node: BaseException | None = error
    while node is not None:
        module_name = getattr(node, "name", None)
        if isinstance(node, ModuleNotFoundError) and module_name:
            return module_name
        node = node.__cause__
    return None
def strip_quotes(s: str) -> str:
    """Remove one pair of matching single or double quotes wrapping *s*."""
    if len(s) < 2:
        return s
    first, last = s[0], s[-1]
    if first == last and first in ("'", '"'):
        return s[1:-1]
    return s
def extract_packages_from_pip_install_suggestion(
    message: str,
) -> list[str] | None:
    """Extract package names from pip install commands in error messages."""
    # Stage 1: a complete command fenced in backticks or quotes — take every
    # non-flag argument, de-duplicated, in order of appearance.
    for fence in (
        r"`pip install\s+([^`]+)`",  # backticks
        r'"pip install\s+([^"]+)"',  # double quotes
        r"'pip install\s+([^']+)'",  # single quotes
    ):
        found = re.search(fence, message, re.IGNORECASE)
        if not found:
            continue
        packages: list[str] = []
        for token in found.group(1).split():
            # Arguments may themselves be quoted, e.g. `pip install "foo[x]"`.
            if (
                len(token) >= 2
                and token[0] == token[-1]
                and token[0] in {"'", '"'}
            ):
                token = token[1:-1]
            # Skip flags and duplicates.
            if not token.startswith("-") and token not in packages:
                packages.append(token)
        if packages:
            return packages
    # Stage 2: an unfenced command with a single double-quoted package.
    found = re.search(r'pip install\s+"([^"]+)"', message, re.IGNORECASE)
    if found:
        return [found.group(1)]
    # Stage 3: fully unquoted — conservatively take only the first
    # positional argument (optionally with extras like pkg[a,b]).
    found = re.search(
        r"pip install\s+([a-zA-Z0-9_.-]+(?:\[[a-zA-Z0-9_,.-]+\])?)",
        message,
        re.IGNORECASE,
    )
    if found:
        return [found.group(1)]
    return None
def extract_packages_special_cases(message: str) -> list[str] | None:
    """Extract package names based on special case substrings in error messages."""
    special_cases = {
        # pd.DataFrame.to_parquet()
        "Unable to find a usable engine; tried using: 'pyarrow', 'fastparquet'.": [
            "pyarrow"
        ],
    }
    matched = [
        name
        for needle, names in special_cases.items()
        if needle in message
        for name in names
    ]
    return matched or None
def try_extract_packages_from_import_error_message(
    import_error_message: str,
) -> list[str] | None:
    """Try to extract package names from an `ImportError` message.

    Strategies are attempted in order: explicit ``pip install`` suggestions
    first, then known special-case substrings.

    Args:
        import_error_message: The error message

    Returns:
        List of package names if found, None otherwise
    """
    suggested = extract_packages_from_pip_install_suggestion(
        import_error_message
    )
    if suggested is not None:
        return suggested
    return extract_packages_special_cases(import_error_message)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/packages/import_error_extractors.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_runtime/packages/test_import_error_extractors.py | # Copyright 2026 Marimo. All rights reserved.
import pytest
from marimo._runtime.packages.import_error_extractors import (
extract_missing_module_from_cause_chain,
extract_packages_from_pip_install_suggestion,
extract_packages_special_cases,
)
def test_extract_missing_module_from_cause_chain_direct():
    """Test direct ModuleNotFoundError (no cause chain)."""
    err = ModuleNotFoundError("No module named 'numpy'")
    err.name = "numpy"
    assert extract_missing_module_from_cause_chain(err) == "numpy"
def test_extract_missing_module_from_cause_chain_with_cause():
    """Test ImportError with ModuleNotFoundError cause."""
    cause = ModuleNotFoundError("No module named 'numpy'")
    cause.name = "numpy"
    wrapper = ImportError("Custom message")
    wrapper.__cause__ = cause
    assert extract_missing_module_from_cause_chain(wrapper) == "numpy"
def test_extract_missing_module_from_cause_chain_nested():
    """Test nested cause chain."""
    innermost = ModuleNotFoundError("No module named 'pandas'")
    innermost.name = "pandas"
    intermediate = ImportError("Middle error")
    intermediate.__cause__ = innermost
    outermost = ImportError("Top error")
    outermost.__cause__ = intermediate
    assert extract_missing_module_from_cause_chain(outermost) == "pandas"
def test_extract_missing_module_from_cause_chain_no_module():
    """Test error with no module in chain."""
    plain_error = ImportError("No useful cause")
    assert extract_missing_module_from_cause_chain(plain_error) is None
# Table-driven coverage of the pip-install extractor: fenced commands,
# quoted single packages, unquoted commands (first positional arg only),
# extras syntax, and no-match inputs.
@pytest.mark.parametrize(
    ("message", "expected"),
    [
        # Simple cases
        ("Try: pip install requests", ["requests"]),
        ("Run: pip install pandas[all]", ["pandas[all]"]),
        # Quoted commands (multiple packages)
        ("Try `pip install -U polars anywidget`", ["polars", "anywidget"]),
        (
            'Run "pip install --upgrade requests pandas[all]"',
            ["requests", "pandas[all]"],
        ),
        ("Execute 'pip install -U numpy matplotlib'", ["numpy", "matplotlib"]),
        # Additional quoted edge cases
        ("Try: `pip install pandas`.", ["pandas"]),  # trailing punctuation
        ('Try running `"pip install seaborn"`', ["seaborn"]),  # nested quotes
        (
            "Use: 'pip install scipy matplotlib'",
            ["scipy", "matplotlib"],
        ),  # extra spaces
        (
            "Here's the command: `pip install jupyterlab` for notebooks",
            ["jupyterlab"],
        ),
        # Unquoted with surrounding text (conservative parsing)
        ("Try: pip install polars if you want to do something", ["polars"]),
        ("You can pip install requests pandas but maybe not", ["requests"]),
        # No match
        ("Some other error message", None),
        # Harder, https://github.com/flekschas/jupyter-scatter/blob/ecfd8c4e19a1ad202372c09939682e5fbe9e70ba/jscatter/dependencies.py#L33-L37
        (
            """Please install it with: pip install "jupyter-scatter[blah]" or pip install "jupyter-scatter[all]".""",
            ["jupyter-scatter[blah]"],
        ),
        ('Try: `pip install foo bar "baz[all]"`.', ["foo", "bar", "baz[all]"]),
        ("Try: `pip install foo bar 'baz[all]'`.", ["foo", "bar", "baz[all]"]),
    ],
)
def test_extract_packages_from_pip_install_suggestion(message, expected):
    """Test pip install suggestion extraction with various formats."""
    result = extract_packages_from_pip_install_suggestion(message)
    assert result == expected
def test_extract_packages_special_cases_pandas_parquet():
    """The pandas parquet-engine error message maps to pyarrow."""
    error_message = (
        "Unable to find a usable engine; tried using: 'pyarrow', 'fastparquet'."
    )
    assert extract_packages_special_cases(error_message) == ["pyarrow"]
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/packages/test_import_error_extractors.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_utils/uv.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
# Could be replaced with `find_uv_bin` from uv Python package in the future
def find_uv_bin() -> str:
    """Return the uv executable to invoke.

    Honors the ``UV`` environment variable as an override; otherwise
    falls back to resolving ``uv`` on the PATH.
    """
    return os.getenv("UV", "uv")
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/uv.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:scripts/generate_ipynb_fixtures.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "jupytext",
# "nbformat",
# ]
#
# [tool.uv]
# exclude-newer = "2025-06-03T16:30:20.082913-04:00"
# ///
"""
Generate `.ipynb` test fixtures for marimo's notebook conversion pipeline.
Each fixture corresponds to a named Jupyter notebook containing one or more code cells.
To add a new fixture, call `create_notebook_fixture(name, sources)`:
- `name`: the output filename (without `.ipynb`)
- `sources`: a list of code cell contents (as strings or `nbformat` cell dicts)
Run this script with `uv run scripts/generate_ipynb_fixtures.py` to regenerate all fixtures.
Output notebooks are written to `tests/_convert/fixtures/`.
"""
from __future__ import annotations
from pathlib import Path
import nbformat.v4.nbbase as nb
import jupytext
SELF_DIR = Path(__file__).parent
FIXTURES_DIR = SELF_DIR / "../tests/_convert/fixtures"
def create_notebook_fixture(name: str, sources: list[str | dict]) -> None:
    """Write the fixture notebook ``<name>.ipynb`` built from ``sources``.

    Strings become code cells; dict/cell objects are used as-is.
    """
    cells = [
        nb.new_code_cell(source) if isinstance(source, str) else source
        for source in sources
    ]
    notebook = nb.new_notebook(cells=cells)
    # Deterministic, sequential cell ids keep fixture files stable
    # across regeneration runs.
    for index, cell in enumerate(notebook.cells):
        cell.id = str(index)
    (FIXTURES_DIR / f"{name}.ipynb").write_text(
        jupytext.writes(notebook, fmt="ipynb")
    )
def main() -> None:
    """Regenerate every .ipynb fixture under tests/_convert/fixtures/."""
    FIXTURES_DIR.mkdir(exist_ok=True)
    # Same variable defined in two cells.
    create_notebook_fixture(
        "multiple_definitions",
        [
            "x = 1\nprint(x) # print",
            "x = 2\nprint(x) # print",
        ],
    )
    # Redefinition where the second cell spans multiple lines.
    create_notebook_fixture(
        "multiple_definitions_multiline",
        [
            "K = 2\nnearest_partition = np.argpartition(dist_sq_1, K + 1, axis=1)",
            """plt.scatter(X_1[:, 0], X_1[:, 1], s=100)
K = 2
for i_1 in range(X_1.shape[0]):
    for j in nearest_partition[i_1, :K + 1]:
        plt.plot(*zip(X_1[j], X_1[i_1]), color='black')\
""",
        ],
    )
    create_notebook_fixture(
        "duplicate_definitions_and_aug_assign",
        [
            "x = 1",
            "x",
            "x += 1",
            "x",
        ],
    )
    create_notebook_fixture(
        "duplicate_definitions_read_before_write",
        [
            "x = 1",
            "x",
            "x; x = 2; x",
            "x",
        ],
    )
    # Deliberately unparsable first cell.
    create_notebook_fixture(
        "duplicate_definitions_syntax_error",
        [
            "x ( b 2 d & !",
            "x",
        ],
    )
    # Cells carrying various metadata/tag combinations.
    create_notebook_fixture(
        "cell_metadata",
        [
            nb.new_code_cell(
                "print('Hello')", metadata={"tags": ["tag1", "tag2"]}
            ),
            nb.new_code_cell("print('World')", metadata={}),
            nb.new_code_cell(
                "print('Cell 1')",
                metadata={"tags": ["important", "data-processing"]},
            ),
            nb.new_code_cell("print('Cell 2')", metadata={"tags": []}),
            nb.new_code_cell(
                "print('Cell 3')",
                metadata={"tags": ["visualization"], "collapsed": True},
            ),
            nb.new_code_cell(
                "print('Complex metadata')",
                metadata={
                    "tags": ["tag1", "tag2"],
                    "collapsed": True,
                    "scrolled": False,
                    "custom": {"key": "value"},
                },
            ),
            nb.new_code_cell(
                "print('hidden cell')",
                metadata={
                    "tags": ["hide-cell"],
                },
            ),
            nb.new_code_cell(
                "print('hidden cell, with other tags')",
                metadata={
                    "tags": ["hide-cell", "remove-print"],
                },
            ),
        ],
    )
    # Markdown cells, with and without the hide-cell tag.
    create_notebook_fixture(
        "hides_markdown_cells",
        [
            nb.new_markdown_cell("A markdown cell."),
            nb.new_markdown_cell(
                "A markdown cell with tags: ['blah'].",
                metadata={"tags": ["blah"]},
            ),
            nb.new_markdown_cell(
                "A markdown cell with tags: ['blah', 'hide-cell'].",
                metadata={"tags": ["blah", "hide-cell"]},
            ),
        ],
    )
    # Shell/magic pip invocations that conversion must rewrite or keep.
    create_notebook_fixture(
        "pip_commands",
        [
            "!pip install transformers",
            "!pip install pandas numpy matplotlib",
            "# Mixed cell with pip and other commands\n!pip install scikit-learn\nimport numpy as np\n!pip install seaborn",
            "# Non-pip exclamation commands should remain unchanged\n!ls -la\n!echo 'Hello World'",
            "# Magic pip command should also be handled\n%pip install requests",
        ],
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "marimo-team/marimo",
"file_path": "scripts/generate_ipynb_fixtures.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_convert/converters.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._schemas.notebook import NotebookV1
from marimo._schemas.serialization import (
EMPTY_NOTEBOOK_SERIALIZATION,
NotebookSerialization,
)
class MarimoConverterIntermediate:
    """Intermediate representation that allows chaining conversions.

    Wraps the notebook IR (``NotebookSerialization``) — the single
    canonical form from which every target format is produced on demand.
    """

    def __init__(self, ir: NotebookSerialization):
        self.ir = ir

    def to_notebook_v1(self) -> NotebookV1:
        """Convert to NotebookV1 format."""
        # Lazy import: converter modules are only loaded when used.
        from marimo._convert.notebook import convert_from_ir_to_notebook_v1

        return convert_from_ir_to_notebook_v1(self.ir)

    def to_markdown(self, filename: str | None = None) -> str:
        """Convert to markdown format.

        Args:
            filename: Optional filename used by the markdown serializer.
        """
        from marimo._convert.markdown import convert_from_ir_to_markdown

        return convert_from_ir_to_markdown(self.ir, filename)

    def to_py(self) -> str:
        """Convert to python format."""
        from marimo._ast.codegen import generate_filecontents_from_ir

        return generate_filecontents_from_ir(self.ir)

    def to_ir(self) -> NotebookSerialization:
        """Convert to notebook IR (returns the wrapped IR unchanged)."""
        return self.ir
class MarimoConvert:
    """Converter utility for marimo notebooks.

    Each ``from_*`` constructor parses an input format into the notebook
    IR and returns a ``MarimoConverterIntermediate`` for chaining to a
    ``to_*`` output format.
    """

    @staticmethod
    def from_py(source: str) -> MarimoConverterIntermediate:
        """Convert from marimo Python source code.

        Args:
            source: Python source code string
        """
        from marimo._ast.parse import parse_notebook

        # Unparsable / empty input falls back to an empty notebook.
        ir = parse_notebook(source) or EMPTY_NOTEBOOK_SERIALIZATION
        return MarimoConverterIntermediate(ir)

    @staticmethod
    def from_non_marimo_python_script(
        source: str,
        aggressive: bool = False,
    ) -> MarimoConverterIntermediate:
        """Convert from a non-marimo Python script to marimo notebook.

        This should only be used when the .py file is not already a valid
        marimo notebook.

        Args:
            source: Unknown Python script source code string
            aggressive: If True, will attempt to convert aggressively,
                turning even invalid text into a notebook.
        """
        from marimo._convert.non_marimo_python_script import (
            convert_non_marimo_python_script_to_notebook_ir,
            convert_non_marimo_script_to_notebook_ir,
        )

        if aggressive:
            notebook_ir = convert_non_marimo_script_to_notebook_ir(source)
        else:
            notebook_ir = convert_non_marimo_python_script_to_notebook_ir(
                source
            )
        return MarimoConvert.from_ir(notebook_ir)

    @staticmethod
    def from_plain_text(
        source: str,
    ) -> MarimoConverterIntermediate:
        """Converts plain text into a single celled marimo notebook.

        Used for cases with syntax errors or unparsable code.

        Args:
            source: Unknown source code string
        """
        from marimo._convert.non_marimo_python_script import (
            convert_script_block_to_notebook_ir,
        )

        return MarimoConvert.from_ir(
            convert_script_block_to_notebook_ir(source)
        )

    @staticmethod
    def from_md(source: str) -> MarimoConverterIntermediate:
        """Convert from markdown source code.

        Args:
            source: Markdown source code string
        """
        from marimo._convert.markdown.to_ir import (
            convert_from_md_to_marimo_ir,
        )

        return MarimoConvert.from_ir(convert_from_md_to_marimo_ir(source))

    @staticmethod
    def from_ipynb(source: str) -> MarimoConverterIntermediate:
        """Convert from Jupyter notebook JSON.

        Args:
            source: Jupyter notebook JSON string
        """
        from marimo._convert.ipynb.to_ir import (
            convert_from_ipynb_to_notebook_ir,
        )

        return MarimoConvert.from_ir(convert_from_ipynb_to_notebook_ir(source))

    @staticmethod
    def from_notebook_v1(
        notebook_v1: NotebookV1,
    ) -> MarimoConverterIntermediate:
        """Convert from notebook v1.

        Args:
            notebook_v1: Notebook v1
        """
        from marimo._convert.notebook import convert_from_notebook_v1_to_ir

        return MarimoConverterIntermediate(
            convert_from_notebook_v1_to_ir(notebook_v1)
        )

    @staticmethod
    def from_ir(ir: NotebookSerialization) -> MarimoConverterIntermediate:
        """Convert from notebook IR.

        Args:
            ir: Notebook IR
        """
        return MarimoConverterIntermediate(ir)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_convert/converters.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_convert/notebook.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._schemas.notebook import (
NotebookCell,
NotebookCellConfig,
NotebookMetadata,
NotebookV1,
)
from marimo._schemas.serialization import (
AppInstantiation,
CellDef,
NotebookSerialization,
NotebookSerializationV1,
)
from marimo._utils.code import hash_code
from marimo._version import __version__
def convert_from_ir_to_notebook_v1(
    notebook_ir: NotebookSerialization,
) -> NotebookV1:
    """Convert the notebook IR to the NotebookV1.

    Args:
        notebook_ir: The notebook IR.

    Returns:
        NotebookV1: The notebook v1.
    """
    cells: list[NotebookCell] = []
    for data in notebook_ir.cells:
        cells.append(
            NotebookCell(
                # IR cells carry no id; v1 ids are left unset.
                id=None,
                code=data.code,
                # Empty code hashes to None rather than the hash of "".
                code_hash=hash_code(data.code) if data.code else None,
                name=data.name,
                config=NotebookCellConfig(
                    column=data.options.get("column", None),
                    disabled=data.options.get("disabled", False),
                    hide_code=data.options.get("hide_code", False),
                ),
            )
        )
    return NotebookV1(
        version="1",
        cells=cells,
        metadata=NotebookMetadata(marimo_version=__version__),
    )
def convert_from_notebook_v1_to_ir(
    notebook_v1: NotebookV1,
) -> NotebookSerialization:
    """Convert the notebook v1 to the notebook IR.

    Args:
        notebook_v1: The notebook v1.

    Returns:
        NotebookSerialization: The notebook IR.
    """
    cells: list[CellDef] = []
    for cell in notebook_v1.get("cells", []):
        # Hoist the config lookup: the original re-fetched the config
        # dict for every option. The `or {}` also tolerates an explicit
        # `"config": None` in the input document.
        config = cell.get("config", {}) or {}
        cells.append(
            CellDef(
                code=cell.get("code", "") or "",
                name=cell.get("name", "") or "",
                options={
                    "column": config.get("column", None),
                    "disabled": config.get("disabled", False),
                    "hide_code": config.get("hide_code", False),
                },
            )
        )
    return NotebookSerializationV1(
        app=AppInstantiation(options={}),
        header=None,
        version=None,
        cells=cells,
        violations=[],
        valid=True,
    )
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_convert/notebook.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/sql/redshift_example.py | # /// script
# requires-python = ">=3.12"
# dependencies = [
# "marimo",
# "pandas==2.2.3",
# "polars[pyarrow]==1.30.0",
# "redshift-connector[full]==2.1.7",
# "sqlglot==26.23.0",
# ]
# ///
import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
import marimo as mo
import redshift_connector
import os
return mo, os, redshift_connector
@app.cell
def _(mo):
mo.md(
r"""
# Connecting to Redshift via redshift_connector
Steps:
1. Make sure the instance is publicly accessible
2. Check VPC/subnets is public traffic is allowed
3. Navigate to redshift -> workgroup -> obtain connection details
"""
)
return
@app.cell
def _(os, redshift_connector):
# IAM Connection
_host = os.environ.get("HOST")
_aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID")
_aws_secret_access_key = os.environ.get("AWS_SECRET_KEY")
conn = redshift_connector.connect(
iam=True,
host=_host,
port=5439,
region="ap-southeast-1",
database="dev",
access_key_id=_aws_access_key_id,
secret_access_key=_aws_secret_access_key,
)
# Default connection
# _conn = redshift_connector.connect(
# host="testwg.549569150818.ap-southeast-1.redshift-serverless.amazonaws.com",
# database="dev",
# user="admin",
# password="****",
# port=5439,
# )
cursor = conn.cursor()
return conn, cursor
@app.cell
def _(cursor):
cursor.execute("DROP TABLE IF EXISTS users;")
cursor.execute("""
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER IDENTITY(1,1) PRIMARY KEY,
username VARCHAR(50) NOT NULL,
email VARCHAR(100) NOT NULL,
created_date DATE NOT NULL
)
DISTSTYLE AUTO
SORTKEY (user_id);
""")
cursor.execute("""
INSERT INTO users (username, email, created_date) VALUES
('john_doe', 'john.doe@email.com', '2024-01-15'),
('jane_smith', 'jane.smith@email.com', '2024-02-20'),
('mike_wilson', 'mike.wilson@email.com', '2024-03-10'),
('sarah_jones', 'sarah.jones@email.com', '2024-04-05'),
('alex_brown', 'alex.brown@email.com', '2024-05-12');
""")
return
@app.cell
def _(conn, mo):
_df = mo.sql(
f"""
SHOW COLUMNS FROM TABLE dev.public.users;
""",
engine=conn
)
return
@app.function
def close_prepared_statements_sample():
    """Demo: extract a prepared-statement name from a Redshift error dict."""
    import ast

    raw_error = """{'S': 'ERROR', 'C': '42P05', 'M': 'prepared statement "redshift_connector_statement_90026_2" already exists', 'F': '/opt/brazil-pkg-cache/packages/RedshiftPADB/RedshiftPADB-1.0.12895.0/AL2_aarch64/generic-flavor/src/src/pg/src/backend/commands/commands_prepare.c', 'L': '685', 'R': 'StorePreparedStatement'}"""
    fragments = ast.literal_eval(raw_error)["M"].split('"')
    # The statement name sits inside the first pair of double quotes,
    # i.e. at index 1 after splitting on the quote character.
    if len(fragments) > 1:
        extracted_message = fragments[1]
        print(f"Extracted message: {extracted_message}")
    else:
        print("Could not find content within double quotes.")
@app.cell
def _(mo):
mo.md(
r"""
## Introspection
get_tables() ->
[
"dev", "information_schema", "views", "VIEW", None, "", "", "","", ""
]
1. catalog
2. schema
3. table_name
4. table_type
5. unknown
6. unknown
7. unknown
8. unknown
9. unknown
10. unknown
get_schemas() -> ["information_schema", "dev"]
1. schema
2. catalog
get_columns() -> [
"dev",
"public",
"users",
"user_id",
4,
"int4",
10,
None,
0,
10,
0,
None,
"\"identity\"(110900, 0, '1,1'::text)",
4,
None,
10,
1,
"NO",
None,
None,
None,
None,
"YES",
"YES"
]
1. catalog
2. schema
3. table_name
4. column_name
5. unknown
6. data type
7. unknown
...
14. unknown
get_primary_keys() -> [
"dev",
"public",
"users",
"user_id",
1,
"users_pkey"
]
1. catalog
2. schema
3. table_name
4. column_name
5. key_sequence
6. pk_name
"""
)
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/sql/redshift_example.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_sql/engines/redshift.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import ast
from typing import TYPE_CHECKING, Any, Literal, Optional, Union
from marimo import _loggers
from marimo._data.models import (
Database,
DataTable,
DataTableColumn,
DataTableType,
DataType,
Schema,
)
from marimo._dependencies.dependencies import DependencyManager
from marimo._sql.engines.types import InferenceConfig, SQLConnection
from marimo._sql.sql_quoting import quote_qualified_name
from marimo._sql.utils import (
raise_df_import_error,
sql_type_to_data_type,
try_convert_to_polars,
)
from marimo._types.ids import VariableName
LOGGER = _loggers.marimo_logger()
if TYPE_CHECKING:
from redshift_connector import Connection # type: ignore
class RedshiftEngine(SQLConnection["Connection"]):
    """Redshift engine.

    Wraps a ``redshift_connector`` connection and implements marimo's
    SQL engine interface: query execution plus catalog/schema/table
    introspection.
    """

    def __init__(
        self,
        connection: Connection,
        engine_name: Optional[VariableName] = None,
    ):
        super().__init__(connection, engine_name)

    @property
    def source(self) -> str:
        """Identifier for the data source backing this engine."""
        return "redshift"

    @property
    def dialect(self) -> str:
        """SQL dialect name used for quoting and display."""
        return "redshift"

    @staticmethod
    def is_compatible(var: Any) -> bool:
        """Return True if ``var`` is a redshift_connector Connection."""
        # Only probe the type if the driver is already imported; avoids
        # importing redshift_connector just to answer "no".
        if not DependencyManager.redshift_connector.imported():
            return False

        from redshift_connector import Connection

        return isinstance(var, Connection)

    @property
    def inference_config(self) -> InferenceConfig:
        # Schemas are cheap to list; tables/columns are fetched lazily.
        return InferenceConfig(
            auto_discover_schemas=True,
            auto_discover_tables=False,
            auto_discover_columns=False,
        )

    def _try_commit(self) -> None:
        """Commit, swallowing (but logging) any failure."""
        try:
            self._connection.commit()
        except Exception as e:
            LOGGER.debug("Failed to commit. Reason: %s.", e)

    def _try_rollback(self) -> None:
        """
        Rollback the connection to avoid errors with the connection being in a bad state.
        For example, after a query failure
        """
        from redshift_connector.error import ProgrammingError  # type: ignore

        try:
            self._connection.rollback()
        except ProgrammingError as e:
            # Close prepared statements that are left open
            # They can prevent rollbacks
            LOGGER.debug(f"Programming error {e}")
            # The driver error stringifies as a dict literal whose "M" key
            # holds the human-readable message; the statement name is the
            # first double-quoted token inside it.
            error_message = ast.literal_eval(str(e))["M"]
            parts = error_message.split('"')
            if len(parts) > 1:
                prepared_statement_name = parts[1]
                statement_bin = self._connection.get_statement_name_bin(
                    prepared_statement_name
                )
                self._connection.close_prepared_statement(
                    statement_name_bin=statement_bin
                )
                LOGGER.debug(
                    f"Closing prepared statement {prepared_statement_name}"
                )
                # Retry the rollback now that the statement is closed.
                try:
                    self._connection.rollback()
                except Exception as e:
                    LOGGER.debug(
                        "Failed to rollback after closing prepared statement. Reason: %s.",
                        e,
                    )
        except Exception as e:
            LOGGER.debug("Failed to rollback. Reason: %s.", e)

    def execute(self, query: str) -> Any:
        """Execute ``query`` and return the result in the configured
        output format (polars, lazy-polars, pandas, or the native cursor).
        """
        sql_output_format = self.sql_output_format()
        with self._connection.cursor() as cursor:
            # Clear any failed-transaction state left by a prior query.
            self._try_rollback()

            if sql_output_format == "auto":
                # Prefer polars, then pandas; error if neither is present.
                if DependencyManager.polars.has():
                    sql_output_format = "polars"
                elif DependencyManager.pandas.has():
                    sql_output_format = "pandas"
                else:
                    raise_df_import_error("polars[pyarrow]")

            if sql_output_format in ("polars", "lazy-polars"):
                result, error = try_convert_to_polars(
                    query=query,
                    connection=cursor,
                    lazy=sql_output_format == "lazy-polars",
                )
                if error is None:
                    self._try_commit()
                    return result
                LOGGER.warning(
                    "Failed to convert to polars. Reason: %s.", error
                )
                DependencyManager.pandas.require("to convert this data")
                # Fall back to pandas
                sql_output_format = "pandas"

            cursor_result = cursor.execute(query)
            self._try_commit()
            if sql_output_format == "native":
                return cursor_result
            if sql_output_format == "pandas":
                return cursor_result.fetch_dataframe()
            return cursor_result

    def get_default_database(self) -> Optional[str]:
        """Return the current catalog name, or None on failure."""
        with self._connection.cursor() as cursor:
            try:
                return str(cursor.cur_catalog())
            except Exception as e:
                LOGGER.debug("Failed to get default database. Reason: %s.", e)
                return None

    def get_default_schema(self) -> Optional[str]:
        """Return the current schema name, or None on failure."""
        with self._connection.cursor() as cursor:
            try:
                result = cursor.execute("SELECT current_schema()")
                row = result.fetchone()
                if row is None or row[0] is None:
                    return None
                return str(row[0])
            except Exception as e:
                LOGGER.debug("Failed to get default schema. Reason: %s.", e)
                return None

    def get_databases(
        self,
        *,
        include_schemas: Union[bool, Literal["auto"]],
        include_tables: Union[bool, Literal["auto"]],
        include_table_details: Union[bool, Literal["auto"]],
    ) -> list[Database]:
        """Get catalogs from the engine. Redshift only supports one catalog per connection.

        Catalogs -> Schemas -> Tables
        """
        with self._connection.cursor() as cursor:
            try:
                # get_catalogs only returns current catalog
                catalog = cursor.get_catalogs()[0][0]
            except Exception as e:
                LOGGER.debug("Failed to get catalogs. Reason: %s.", e)
                return []

            databases: list[Database] = []
            # "auto" resolves to False (see _resolve_should_auto_discover).
            include_schemas = self._resolve_should_auto_discover(
                include_schemas
            )
            include_tables = self._resolve_should_auto_discover(include_tables)
            include_table_details = self._resolve_should_auto_discover(
                include_table_details
            )

            schemas: list[Schema] = []
            if include_schemas:
                schemas = self.get_schemas(
                    catalog=catalog,
                    include_tables=include_tables,
                    include_table_details=include_table_details,
                )
            databases.append(
                Database(
                    name=catalog,
                    dialect=self.dialect,
                    schemas=schemas,
                    engine=self._engine_name,
                )
            )
            return databases

    def get_schemas(
        self,
        *,
        catalog: str,
        include_tables: bool,
        include_table_details: bool,
    ) -> list[Schema]:
        """Get schemas from the engine."""
        output_schemas: list[Schema] = []
        with self._connection.cursor() as cursor:
            # get_schemas returns [["schema_name", "catalog"], ["schema_2", "catalog"]]
            schemas = cursor.get_schemas(catalog=catalog)
            for schema in schemas:
                schema_name = schema[0]
                if schema_name == "information_schema":  # Skip meta-schemas
                    continue
                tables = (
                    self.get_tables_in_schema(
                        schema=schema_name,
                        database=catalog,
                        include_table_details=include_table_details,
                    )
                    if include_tables
                    else []
                )
                output_schemas.append(Schema(name=schema_name, tables=tables))
        return output_schemas

    def get_tables_in_schema(
        self, *, schema: str, database: str, include_table_details: bool
    ) -> list[DataTable]:
        """Get tables from the engine. Databases are treated as catalogs."""
        output_tables: list[DataTable] = []
        with self._connection.cursor() as cursor:
            # get_tables returns [["catalog", "schema", "table_name", "table_type (VIEW / TABLE)", None, "", ...]]
            try:
                tables = cursor.get_tables(
                    catalog=database, schema_pattern=schema
                )
            except Exception as e:
                LOGGER.debug("Failed to get tables. Reason: %s.", e)
                return []

            for table in tables:
                table_name, table_type = table[2], table[3]
                table_type = self._resolve_table_type(table_type)

                # If we are satisfied with this info, we can return
                if not include_table_details:
                    output_tables.append(
                        DataTable(
                            source_type="connection",
                            source=self.dialect,
                            name=table_name,
                            num_rows=None,
                            num_columns=None,
                            variable_name=None,
                            engine=self._engine_name,
                            type=table_type,
                            columns=[],
                            primary_keys=[],
                            indexes=[],
                        )
                    )
                    continue

                detailed_table = self.get_table_details(
                    table_name=table_name,
                    schema_name=schema,
                    database_name=database,
                )
                if detailed_table is not None:
                    output_tables.append(detailed_table)
        return output_tables

    def _get_columns_api(
        self,
        *,
        catalog: str,
        schema_name: str,
        table_name: str,
    ) -> tuple[tuple[str, ...], ...]:
        """The API is unreliable hence this method is not preferred"""
        columns: tuple[tuple[str, ...], ...] = ()
        with self._connection.cursor() as cursor:
            try:
                # get_columns returns:
                # [[catalog, schema, table_name, column_name, unknown, data type, unknown, ...], ...]
                columns = cursor.get_columns(
                    catalog=catalog,
                    schema_pattern=schema_name,
                    tablename_pattern=table_name,
                )
            except Exception as e:
                LOGGER.debug(
                    f"Failed to get columns for {catalog}.{schema_name}.{table_name} Reason: {e}"
                )
        return columns

    def get_table_details(
        self, *, table_name: str, schema_name: str, database_name: str
    ) -> Optional[DataTable]:
        """Get detailed metadata for a given table in a database."""
        with self._connection.cursor() as cursor:
            try:
                table = cursor.get_tables(
                    catalog=database_name,
                    schema_pattern=schema_name,
                    table_name_pattern=table_name,
                )
            except Exception as e:
                LOGGER.debug("Failed to get table. Reason: %s.", e)
                return None

            # NOTE(review): assumes get_tables returned at least one row for
            # this exact table name — verify against driver behavior.
            table_type = self._resolve_table_type(table[0][3])
            quoted_name = quote_qualified_name(
                database_name, schema_name, table_name, dialect="redshift"
            )

            row_count = cursor.execute(f"SELECT COUNT(*) FROM {quoted_name}")
            row = row_count.fetchone()
            if row is None or row[0] is None:
                return None
            num_rows = row[0]

            try:
                # [[catalog, schema, table_name, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, numeric_precision, numeric_scale, remarks]]
                columns = cursor.execute(
                    f"SHOW COLUMNS FROM TABLE {quoted_name};"
                )
            except Exception as e:
                LOGGER.debug(
                    f"Failed to get columns for {database_name}.{schema_name}.{table_name}. Reason: {e}"
                )
                columns = []

            cols: list[DataTableColumn] = []
            for col in columns:
                # Column name at index 3, SQL data type at index 7 (see
                # the row layout comment above).
                col_name, col_type = col[3], col[7]
                data_type = self._get_data_type(
                    col_type
                ) or sql_type_to_data_type(col_type)
                cols.append(
                    DataTableColumn(
                        name=col_name,
                        type=data_type,
                        external_type=str(col_type),
                        sample_values=[],
                    )
                )

            # get_primary_keys returns:
            # [[catalog, schema, table_name, column_name, key_seq, pk_name], ...]
            primary_keys = cursor.get_primary_keys(
                catalog=database_name, schema=schema_name, table=table_name
            )
            primary_keys = [pk[3] for pk in primary_keys]

        return DataTable(
            source_type="connection",
            source=self.dialect,
            name=table_name,
            num_rows=num_rows,
            num_columns=len(cols),
            variable_name=None,
            engine=self._engine_name,
            type=table_type,
            columns=cols,
            primary_keys=primary_keys,
            indexes=[],
        )

    def _resolve_table_type(self, table_type: str) -> DataTableType:
        """Map the driver's table-type string to marimo's table/view."""
        return "view" if table_type == "VIEW" else "table"

    def _get_data_type(self, data_type: str) -> Optional[DataType]:
        """Best-effort mapping of Redshift meta types; None if unknown."""
        data_type = data_type.lower()
        if "cardinal_number" in data_type:
            return "number"
        elif "character_data" in data_type:
            return "string"
        return None

    def _resolve_should_auto_discover(
        self, value: Union[bool, Literal["auto"]]
    ) -> bool:
        # Opt to not auto-discover for now
        if value == "auto":
            return False
        return value
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_sql/engines/redshift.py",
"license": "Apache License 2.0",
"lines": 358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_sql/test_redshift.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from unittest import mock
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._sql.engines.redshift import RedshiftEngine
from marimo._sql.engines.types import EngineCatalog, QueryEngine
from marimo._types.ids import VariableName
HAS_REDSHIFT = DependencyManager.redshift_connector.has()
if TYPE_CHECKING:
from collections.abc import Generator
from redshift_connector import Connection
@pytest.fixture
def mock_connection() -> Generator[Connection, None, None]:
    """Create a mock Redshift connection for testing."""
    # Without the driver installed there is no Connection spec to mock
    # against; yield a plain MagicMock so skipped tests still collect.
    if not HAS_REDSHIFT:
        yield mock.MagicMock()
        return

    from redshift_connector import Connection

    # Create mock connection
    connection = mock.MagicMock(spec=Connection)

    # Mock cursor methods
    cursor = mock.MagicMock()
    connection.cursor.return_value = cursor

    yield connection
@pytest.mark.skipif(
    not HAS_REDSHIFT, reason="Redshift connector not installed"
)
def test_engine_compatibility(mock_connection: Connection) -> None:
    """RedshiftEngine accepts redshift connections and nothing else."""
    assert RedshiftEngine.is_compatible(mock_connection)
    assert not RedshiftEngine.is_compatible(object())

    redshift = RedshiftEngine(
        mock_connection, engine_name=VariableName("my_redshift")
    )
    for expected_type in (RedshiftEngine, EngineCatalog, QueryEngine):
        assert isinstance(redshift, expected_type)
@pytest.mark.skipif(
    not HAS_REDSHIFT, reason="Redshift connector not installed"
)
def test_engine_name_initialization(mock_connection: Connection) -> None:
    """Test engine name initialization."""
    engine = RedshiftEngine(
        mock_connection, engine_name=VariableName("my_redshift")
    )
    assert engine._engine_name == VariableName("my_redshift")

    # Test default name: omitting engine_name leaves it unset (None).
    engine = RedshiftEngine(mock_connection)
    assert engine._engine_name is None
@pytest.mark.skipif(
    not HAS_REDSHIFT, reason="Redshift connector not installed"
)
def test_redshift_engine_source_and_dialect(
    mock_connection: Connection,
) -> None:
    """Both source and dialect identify the engine as redshift."""
    engine = RedshiftEngine(mock_connection)
    for attribute in ("source", "dialect"):
        assert getattr(engine, attribute) == "redshift"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_redshift.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/ui/image_comparison_demo.py | #!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "marimo",
# ]
# ///
# Copyright 2026 Marimo. All rights reserved.
import marimo
__generated_with = "0.19.7"
app = marimo.App()
@app.cell(hide_code=True)
def _(mo):
mo.md("""
# Image Comparison Demo
This demo showcases the `mo.image_compare` feature, which allows for side-by-side comparison of images.
## Basic Usage - Horizontal Comparison
The default orientation is horizontal, where you can slide left and right to compare images:
""")
return
@app.cell
def _():
before_image_path = "https://picsum.photos/200/301.jpg"
after_image_path = "https://picsum.photos/200/300.jpg"
return after_image_path, before_image_path
@app.cell
def _(after_image_path, before_image_path, mo):
# Basic horizontal comparison with default settings
mo.image_compare(before_image=before_image_path, after_image=after_image_path)
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Custom Initial Position
You can set the initial position of the slider:
""")
return
@app.cell
def _(after_image_path, before_image_path, mo):
mo.image_compare(
before_image=before_image_path,
after_image=after_image_path,
direction="horizontal",
value=20, # Start at 25% position
)
return
@app.cell(hide_code=True)
def _(mo):
mo.md(r"""
## Vertical Comparison
You can also use a vertical comparison slider:
""")
return
@app.cell
def _(after_image_path, before_image_path, mo):
mo.image_compare(
before_image=before_image_path,
after_image=after_image_path,
direction="vertical",
value=75, # Start at 75% position
height=400,
)
return
@app.cell
def _():
import marimo as mo
return (mo,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/ui/image_comparison_demo.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_plugins/stateless/image_compare.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import io
import os
from pathlib import Path
from typing import Literal, Optional, Union
import marimo._output.data.data as mo_data
from marimo._output.hypertext import Html
from marimo._output.rich_help import mddoc
from marimo._output.utils import normalize_dimension
from marimo._plugins.core.media import io_to_data_url
from marimo._plugins.core.web_component import build_stateless_plugin
from marimo._plugins.stateless.image import ImageLike, _normalize_image
@mddoc
def image_compare(
    before_image: ImageLike,
    after_image: ImageLike,
    value: float = 50,
    direction: Literal["horizontal", "vertical"] = "horizontal",
    width: Optional[Union[int, str]] = None,
    height: Optional[Union[int, str]] = None,
) -> Html:
    """Render an image comparison slider to compare two images side by side.

    Examples:
        ```python3
        # Basic usage with two images
        mo.image_compare(before_image="before.jpg", after_image="after.jpg")
        ```

        ```python3
        # With custom settings
        mo.image_compare(
            before_image="original.png",
            after_image="processed.png",
            value=30,  # Initial slider position at 30%
            direction="vertical",
            width=500,
            height=400,
        )
        ```

    Args:
        before_image: The "before" image to show in the comparison slider.
            Can be a path, URL, or array-like object.
        after_image: The "after" image to show in the comparison slider.
            Can be a path, URL, or array-like object.
        value: Initial position of the slider (0-100), defaults to 50.
        direction: Orientation of the slider, either "horizontal" or "vertical".
            Defaults to "horizontal".
        width: Width of the component in pixels or CSS units.
        height: Height of the component in pixels or CSS units.

    Returns:
        `Html` object with the image comparison slider.
    """
    # Resolve both images into URLs embeddable in an <img> tag, and clamp
    # the initial slider position into [0, 100].
    plugin_args: dict = {
        "before-src": _process_image_to_url(before_image),
        "after-src": _process_image_to_url(after_image),
        "value": max(0, min(100, float(value))),
        "direction": direction,
    }

    # Dimensions are optional; only pass them through when provided.
    if width is not None:
        plugin_args["width"] = normalize_dimension(width)
    if height is not None:
        plugin_args["height"] = normalize_dimension(height)

    return Html(
        build_stateless_plugin(
            component_name="marimo-image-comparison",
            args=plugin_args,
        )
    )
def _process_image_to_url(src: ImageLike) -> str:
    """Convert an image-like object into a URL usable in an <img> tag.

    Args:
        src: An image-like object (stream, bytes, path, URL, or array-like).

    Returns:
        A string URL for an <img> tag. On failure, returns a
        ``data:text/plain`` URL carrying the error message instead of
        raising.
    """
    try:
        normalized = _normalize_image(src)
        # Streams: rewind (they may have been read already), then snapshot.
        if isinstance(normalized, (io.BufferedReader, io.BytesIO)):
            normalized.seek(0)
            return mo_data.image(normalized.read()).url
        # Raw bytes.
        if isinstance(normalized, bytes):
            return mo_data.image(normalized).url
        # Filesystem paths: keep the extension so the mime type is right.
        if isinstance(normalized, Path):
            return mo_data.image(
                normalized.read_bytes(), ext=normalized.suffix
            ).url
        if isinstance(normalized, str):
            expanded_path = os.path.expanduser(normalized)
            if os.path.isfile(expanded_path):
                local = Path(expanded_path)
                return mo_data.image(local.read_bytes(), ext=local.suffix).url
        # If it's a URL or other string, try to use it directly
        result = io_to_data_url(normalized, fallback_mime_type="image/png")
        if result is not None:
            return result
        return f"data:text/plain,Unable to process image: {normalized}"
    except Exception as e:
        # Degrade gracefully: surface the error as the image source itself.
        error_message = f"Error processing image: {str(e)}"
        return f"data:text/plain,{error_message}"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_plugins/stateless/image_compare.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_plugins/stateless/test_image_compare.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import sys
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._plugins.stateless.image_compare import image_compare
from marimo._runtime.context import get_context
from marimo._runtime.runtime import Kernel
from tests.conftest import ExecReqProvider
# Gates array-based image tests: building images from arrays needs numpy + pillow.
HAS_DEPS = DependencyManager.numpy.has() and DependencyManager.pillow.has()
async def test_image_compare_basic() -> None:
    """Default rendering should include the component tag and all core args."""
    html = image_compare(
        before_image="https://marimo.io/logo.png",
        after_image="https://marimo.io/logo.png",
    )
    for fragment in (
        "marimo-image-comparison",
        "before-src",
        "after-src",
        "value",
        "direction",
    ):
        assert fragment in html.text
async def test_image_compare_with_value() -> None:
    """An explicit initial position is serialized as a float data attribute."""
    html = image_compare(
        before_image="https://marimo.io/logo.png",
        after_image="https://marimo.io/logo.png",
        value=75,
    )
    assert "marimo-image-comparison" in html.text
    # int 75 is coerced through float(), hence '75.0' in the markup.
    assert "data-value='75.0'" in html.text
async def test_image_compare_vertical() -> None:
    """The direction option must be serialized into the data-direction attr."""
    result = image_compare(
        before_image="https://marimo.io/logo.png",
        after_image="https://marimo.io/logo.png",
        direction="vertical",
    )
    assert "marimo-image-comparison" in result.text
    # Attribute values are HTML-escaped, hence the &quot; entities.
    assert "data-direction='&quot;vertical&quot;'" in result.text
async def test_image_compare_with_dimensions() -> None:
    """Integer width/height are normalized to pixel CSS values ('500px')."""
    result = image_compare(
        before_image="https://marimo.io/logo.png",
        after_image="https://marimo.io/logo.png",
        width=500,
        height=400,
    )
    assert "marimo-image-comparison" in result.text
    assert "data-width='&quot;500px&quot;'" in result.text
    assert "data-height='&quot;400px&quot;'" in result.text
async def test_image_compare_with_string_dimensions() -> None:
    """String CSS units ('50%', '300px') must pass through unchanged."""
    result = image_compare(
        before_image="https://marimo.io/logo.png",
        after_image="https://marimo.io/logo.png",
        width="50%",
        height="300px",
    )
    assert "marimo-image-comparison" in result.text
    assert "data-width='&quot;50%&quot;'" in result.text
    assert "data-height='&quot;300px&quot;'" in result.text
async def test_image_compare_value_bounds() -> None:
    """Slider values outside [0, 100] are clamped before rendering."""
    clamped = {
        -10: "data-value='0'",  # below range clamps to 0
        150: "data-value='100'",  # above range clamps to 100
    }
    for initial, expected_attr in clamped.items():
        html = image_compare(
            before_image="https://marimo.io/logo.png",
            after_image="https://marimo.io/logo.png",
            value=initial,
        )
        assert expected_attr in html.text
async def test_image_compare_filename(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """Local file-path sources should be registered as .png virtual files."""
    await k.run(
        [
            exec_req.get(
                """
                import marimo as mo
                import os
                # Create test images
                with open("test_before.png", "wb") as f:
                    f.write(b"before_image_data")
                with open("test_after.png", "wb") as f:
                    f.write(b"after_image_data")
                comparison = mo.image_compare("test_before.png", "test_after.png")
                # Clean up
                os.remove("test_before.png")
                os.remove("test_after.png")
                """
            ),
        ]
    )
    # One virtual file per image, both keeping the .png extension.
    assert len(get_context().virtual_file_registry.registry) == 2
    for fname in get_context().virtual_file_registry.registry.keys():
        assert fname.endswith(".png")
async def test_image_compare_path(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """pathlib.Path sources should behave like string paths (virtual files)."""
    await k.run(
        [
            exec_req.get(
                """
                import marimo as mo
                from pathlib import Path
                import os
                # Create test images
                with open("test_before.png", "wb") as f:
                    f.write(b"before_image_data")
                with open("test_after.png", "wb") as f:
                    f.write(b"after_image_data")
                comparison = mo.image_compare(
                    Path("test_before.png"),
                    Path("test_after.png")
                )
                # Clean up
                os.remove("test_before.png")
                os.remove("test_after.png")
                """
            ),
        ]
    )
    assert len(get_context().virtual_file_registry.registry) == 2
    for fname in get_context().virtual_file_registry.registry.keys():
        assert fname.endswith(".png")
async def test_image_compare_bytes_io(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """BytesIO streams are read and registered as .png virtual files."""
    await k.run(
        [
            exec_req.get(
                """
                import io
                import marimo as mo
                before_stream = io.BytesIO(b"before_image_data")
                after_stream = io.BytesIO(b"after_image_data")
                comparison = mo.image_compare(before_stream, after_stream)
                """
            ),
        ]
    )
    assert len(get_context().virtual_file_registry.registry) == 2
    for fname in get_context().virtual_file_registry.registry.keys():
        assert fname.endswith(".png")
async def test_image_compare_mixed_sources(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """A file path and an in-memory stream can be mixed in one comparison."""
    await k.run(
        [
            exec_req.get(
                """
                import io
                import marimo as mo
                import os
                # Create one file
                with open("test_before.png", "wb") as f:
                    f.write(b"before_image_data")
                # Use file for before, BytesIO for after
                after_stream = io.BytesIO(b"after_image_data")
                comparison = mo.image_compare("test_before.png", after_stream)
                # Clean up
                os.remove("test_before.png")
                """
            ),
        ]
    )
    # Both sources still produce one virtual file each.
    assert len(get_context().virtual_file_registry.registry) == 2
async def test_image_compare_str(k: Kernel, exec_req: ExecReqProvider) -> None:
    """Remote URLs are passed through directly, creating no virtual files."""
    await k.run(
        [
            exec_req.get(
                """
                import marimo as mo
                comparison = mo.image_compare(
                    "https://marimo.io/logo.png",
                    "https://marimo.io/logo.png"
                )
                """
            ),
        ]
    )
    # URLs should not be registered
    assert len(get_context().virtual_file_registry.registry) == 0
@pytest.mark.skipif(not HAS_DEPS, reason="optional dependencies not installed")
async def test_image_compare_array(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """Nested-list pixel data is encoded into .png virtual files."""
    await k.run(
        [
            exec_req.get(
                """
                import marimo as mo
                before_data = [[[255, 0, 0], [0, 255, 0], [0, 0, 255]]]
                after_data = [[[0, 255, 0], [255, 0, 0], [0, 0, 255]]]
                comparison = mo.image_compare(before_data, after_data)
                """
            ),
        ]
    )
    assert len(get_context().virtual_file_registry.registry) == 2
    for fname in get_context().virtual_file_registry.registry.keys():
        assert fname.endswith(".png")
@pytest.mark.skipif(not HAS_DEPS, reason="optional dependencies not installed")
async def test_image_compare_numpy(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """numpy arrays are encoded into .png virtual files."""
    await k.run(
        [
            exec_req.get(
                """
                import marimo as mo
                import numpy as np
                before_data = np.random.rand(10, 10, 3)
                after_data = np.random.rand(10, 10, 3)
                comparison = mo.image_compare(before_data, after_data)
                """
            ),
        ]
    )
    assert len(get_context().virtual_file_registry.registry) == 2
    for fname in get_context().virtual_file_registry.registry.keys():
        assert fname.endswith(".png")
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows CI")
async def test_image_compare_local_file(
    k: Kernel, exec_req: ExecReqProvider
) -> None:
    """Any readable local file path is accepted and registered."""
    # Use this test file itself as a dummy image
    with open(__file__, encoding="utf-8") as f:  # noqa: ASYNC230
        await k.run(
            [
                exec_req.get(
                    f"""
                import marimo as mo
                comparison = mo.image_compare('{f.name}', '{f.name}')
                """
                ),
            ]
        )
    assert len(get_context().virtual_file_registry.registry) == 2
async def test_image_compare_error_handling() -> None:
    """Nonexistent image paths must degrade gracefully instead of raising."""
    # This should not raise an exception, but handle the error gracefully
    html = image_compare(
        before_image="invalid_path_that_does_not_exist.png",
        after_image="another_invalid_path.png",
    )
    # Should still generate HTML even with invalid images
    assert "marimo-image-comparison" in html.text
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/stateless/test_image_compare.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_schemas/common.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TypedDict
# Base types for extensibility
class BaseDict(TypedDict, total=False):
    """Base dictionary allowing additional fields.

    ``total=False`` makes every declared key optional, so schema types built
    on this base can be partially populated.
    """
    pass
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_schemas/common.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_schemas/notebook.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Literal, Optional, TypedDict
# This file contains the schema for the notebook.
# It may be externally used and must be kept backwards compatible.
#
# We generate the OpenAPI schema using a marimo notebook: `python scripts/generate_schemas.py`
# We generate frontend types using `make fe-codegen`
# We check for backwards compatibility using a GitHub action: `.github/workflows/test_schemas.yaml`
class NotebookCellConfig(TypedDict, total=False):
    """Configuration for a notebook cell"""
    # Column the cell is placed in (multi-column layouts).
    column: Optional[int]
    # Whether the cell is marked disabled.
    disabled: Optional[bool]
    # Whether the cell's code is hidden in the UI.
    hide_code: Optional[bool]
class NotebookCell(TypedDict):
    """Code cell specific structure"""
    # Stable cell identifier, if known.
    id: Optional[str]
    # The cell's source code.
    code: Optional[str]
    # Hash of the cell's code (presumably for change detection — confirm).
    code_hash: Optional[str]
    # The cell's name.
    name: Optional[str]
    # Per-cell configuration (column, disabled, hide_code).
    config: NotebookCellConfig
# Notebook metadata
class NotebookMetadata(TypedDict, total=False):
    """Metadata about the notebook"""
    # Version of marimo that produced the notebook, if recorded.
    marimo_version: Optional[str]
# Main notebook structure
class NotebookV1(TypedDict):
    """Main notebook structure (schema version 1).

    Externally consumed; changes must remain backwards compatible.
    """
    # The notebook format version
    version: Literal["1"]
    # Metadata about the notebook
    metadata: NotebookMetadata
    # The cells in the notebook
    cells: list[NotebookCell]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_schemas/notebook.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/markdown/highlight.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _(mo):
    # Smoke-test: fenced blocks with and without explicit language tags, to
    # exercise syntax-highlighting language inference (pycon vs python).
    mo.md(
        """
    ```
    >>> # pycon (omitted)
    >>> def foo():
    >>>     pass
    ```
    ```
    # python (omitted)
    def foo():
        return range(1, 100)
        return x + y
    ```
    ```pycon
    >>> def foo():
    >>>     pass
    ```
    ```python
    # python
    def foo():
        pass
    x + y
    ```
    """
    )
    return
@app.cell
def _(mo):
    # Same smoke-test for JavaScript: tagged vs untagged fences.
    mo.md(
        r"""
    ```js
    // js
    const myVar = "";
    ```
    ```
    import { foo } from "bar";
    // js omitted
    var myVar = "";
    ```
    """
    )
    return
@app.cell
def _():
    # Standard marimo import cell.
    import marimo as mo
    return (mo,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/markdown/highlight.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_smoke_tests/issues/4744-fixed-loading-indicator.py | import marimo
__generated_with = "0.15.5"
app = marimo.App()
@app.cell
def _():
    # Standard marimo import cell.
    import marimo as mo
    return (mo,)
@app.cell(hide_code=True)
def _(mo):
    # Instructions for the manual check this smoke test exists for.
    mo.md(r"""When viewing this notebook in run mode, the hourglass loading indicator should be stickied to the top left even when scrolling down.""")
    return
@app.cell
def _(mo):
    # Very long output forces the page to scroll.
    mo.md("hello world" * 10000)
    return
@app.cell
def _():
    # Long-running cell keeps the loading indicator visible for inspection.
    import time
    time.sleep(100)
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/4744-fixed-loading-indicator.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/hashable.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
def is_hashable(*values: Any) -> bool:
    """Return True when every positional argument is hashable.

    Hashing the ``values`` tuple hashes each element, so one ``hash``
    call checks all arguments at once.
    """
    try:
        hash(values)
    except TypeError:
        # At least one element is unhashable.
        return False
    return True
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/hashable.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/issues/4970-unique-transform.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="columns")
@app.cell(column=0)
def _(df_pandas, mo):
    # Transform UI over the pandas frame.
    mo.ui.dataframe(df_pandas)
    return
@app.cell
def _(df_polars, mo):
    # Transform UI over the polars frame.
    mo.ui.dataframe(df_polars)
    return
@app.cell
def _(df_ibis, mo):
    # Transform UI over the ibis table.
    mo.ui.dataframe(df_ibis)
    return
@app.cell
def _():
    # Imports for the three dataframe backends under comparison.
    import marimo as mo
    import pandas as pd
    import polars as pl
    import ibis as ib
    from vega_datasets import data
    return data, ib, mo, pd, pl
@app.cell
def _(data, ib, pd, pl):
    # Same iris dataset materialized in all three backends.
    iris_dataset = data.iris()
    df_pandas = pd.DataFrame(iris_dataset)
    df_polars = pl.DataFrame(iris_dataset)
    df_ibis = ib.memtable(iris_dataset)
    return df_ibis, df_pandas, df_polars
@app.cell(column=1)
def _(df_pandas):
    # Reference: pandas spelling of the "unique" transform.
    df_pandas_next = df_pandas
    df_pandas_next.drop_duplicates(["sepalLength"], keep="first")
    return
@app.cell
def _(df_polars):
    # Reference: polars spelling of the "unique" transform.
    df_polars_next = df_polars
    df_polars_next.unique(subset=["sepalLength"], keep="first")
    return
@app.cell
def _(df_ibis, mo):
    # Reference: ibis spelling of the "unique" transform.
    df_ibis_next = df_ibis
    df_ibis_next = df_ibis_next.distinct(on=["sepalLength"], keep="first")
    mo.ui.table(df_ibis_next)
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/issues/4970-unique-transform.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_ast/codegen_data/test_app_with_annotation_typing.py | import marimo
__generated_with = "0.0.0"
app = marimo.App(width="medium")
with app.setup:
CONSTANT: int = 42
@app.cell
def _():
z: "int" = 0
return (z,)
@app.cell
def _():
x: int = CONSTANT + 2
y: float = 2.0
return x, y
@app.cell
def _(x: int, y: float, z: "int"):
_ = x + y + z
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_ast/codegen_data/test_app_with_annotation_typing.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/logs.py | # /// script
# requires-python = ">=3.11"
# dependencies = [
# "marimo",
# ]
# ///
# Copyright 2026 Marimo. All rights reserved.
import marimo
__generated_with = "0.15.5"
app = marimo.App()
@app.cell
def _():
    # Standard marimo import cell.
    import marimo as mo
    return (mo,)
@app.cell
def _(mo):
    # Dropdown driving the log level used by the cells below.
    level_dropdown = mo.ui.dropdown(
        label="Log level",
        options=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        value="INFO",
    )
    level_dropdown
    return (level_dropdown,)
@app.cell
def _(level_dropdown):
    # Configure logging
    import logging
    logger = logging.getLogger(__name__)
    logger.setLevel(level_dropdown.value)
    # Test different log levels; which ones appear depends on the dropdown.
    logger.debug("This is a DEBUG message")
    logger.info("This is an INFO message")
    logger.warning("This is a WARNING message")
    logger.error("This is an ERROR message")
    logger.critical("This is a CRITICAL message")
    return logger, logging
@app.cell
def _(logger, mo):
    # Test logging in a cell that also produces markdown output.
    logger.info("Starting computation...")
    result = 42
    logger.debug(f"Result computed: {result}")
    mo.md(f"The result is {result}")
    return
@app.cell
def _(logger):
    # Test logging with exception tracebacks (exc_info=True).
    try:
        x = 1 / 0
    except ZeroDivisionError as e:
        logger.error("Division by zero!", exc_info=True)
    return
@app.cell
def _(level_dropdown, logging):
    # Test logging with custom formatting
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    _logger = logging.getLogger("custom_logger")
    _logger.addHandler(handler)
    _logger.setLevel(level_dropdown.value)
    _logger.info("Custom formatted log message")
    # Test logging with extra context
    extra_logger = logging.getLogger("context_logger")
    extra = {"user": "john", "ip": "192.168.1.1"}
    extra_logger.info("User action", extra=extra)
    # Test logging with different string formatting styles
    # (%-args, f-string, and mapping-based).
    template_logger = logging.getLogger("template_logger")
    name = "Alice"
    age = 30
    template_logger.info("User %s is %d years old", name, age)
    template_logger.info(f"User {name} is {age} years old")
    template_logger.info(
        "User %(name)s is %(age)d years old", {"name": name, "age": age}
    )
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/logs.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/test_loggers.py | from __future__ import annotations
import logging
import os
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path
from unittest.mock import patch
import pytest
from marimo._loggers import (
_LOG_LEVEL,
_LOGGERS,
WindowsSafeRotatingFileHandler,
get_log_directory,
get_logger,
make_log_directory,
marimo_logger,
set_level,
)
def test_set_level():
    """set_level accepts int and string levels and rejects invalid ones."""
    original_level = _LOG_LEVEL
    # Test with integer levels
    set_level(logging.DEBUG)
    logger = get_logger("test1")
    assert logger.level == logging.DEBUG
    assert logger.handlers[0].level == logging.DEBUG
    set_level(logging.INFO)
    assert logger.level == logging.INFO
    assert logger.handlers[0].level == logging.INFO
    # Test with string levels; the logger itself never rises above INFO,
    # only the handler tracks the requested level.
    for level in ["WARNING", "WARN", "DEBUG", "INFO", "ERROR", "CRITICAL"]:
        set_level(level)
        assert logger.level == min(logging._nameToLevel[level], logging.INFO)
        assert logger.handlers[0].level == logging._nameToLevel[level]
    # Test invalid levels
    with pytest.raises(ValueError):
        set_level("INVALID")
    with pytest.raises(ValueError):
        set_level(999)
    # Reset the log level
    set_level(original_level)
def test_get_logger():
    """get_logger creates, configures, and caches named loggers."""
    # Test basic logger creation
    logger1 = get_logger("test2")
    assert logger1.name == "test2"
    assert not logger1.propagate
    assert len(logger1.handlers) == 1
    # Test logger caching
    logger2 = get_logger("test2")
    assert logger1 is logger2  # Same logger instance
    # Test custom level
    logger3 = get_logger("test3", level=logging.DEBUG)
    assert logger3.level == logging.DEBUG
    # Test handlers
    handler = logger3.handlers[0]
    assert isinstance(handler, logging.StreamHandler)
def test_marimo_logger():
    """The marimo logger logs to both a stream and a rotating log file."""
    logger = marimo_logger()
    assert logger.name == "marimo"
    first_handler = logger.handlers[0]
    assert isinstance(first_handler, logging.StreamHandler)
    second_handler = logger.handlers[1]
    assert isinstance(second_handler, logging.FileHandler)
    # Test file handler
    file_handler = next(
        h for h in logger.handlers if isinstance(h, logging.FileHandler)
    )
    assert isinstance(file_handler, TimedRotatingFileHandler)
def test_log_directory():
    """Logs default to ~/.cache/marimo/logs."""
    # Test default directory
    default_dir = Path.home() / ".cache" / "marimo" / "logs"
    assert get_log_directory() == default_dir
def test_make_log_directory(tmp_path: Path):
    """make_log_directory creates the directory under XDG_CACHE_HOME."""
    test_dir = tmp_path / "marimo_test_logs"
    with patch.dict(os.environ, {"XDG_CACHE_HOME": str(test_dir)}):
        make_log_directory()
        assert test_dir.exists()
        assert test_dir.is_dir()
def test_handler_levels():
    """set_level adjusts the stream handler; the file handler floors at INFO."""
    logger = marimo_logger()
    stream_handler = logger.handlers[0]
    file_handler = logger.handlers[1]
    assert isinstance(stream_handler, logging.StreamHandler)
    assert isinstance(file_handler, logging.FileHandler)
    # Test level changes affect stream handler but not file handler
    set_level(logging.WARNING)
    assert stream_handler.level == logging.WARNING
    assert file_handler.level == logging.INFO
    # ...except DEBUG, which lowers the file handler too.
    set_level(logging.DEBUG)
    assert stream_handler.level == logging.DEBUG
    assert file_handler.level == logging.DEBUG
def test_windows_safe_handler(tmp_path: Path):
    """Test that WindowsSafeRotatingFileHandler handles rotation gracefully."""
    log_file = tmp_path / "test.log"
    # Create handler
    handler = WindowsSafeRotatingFileHandler(
        log_file,
        when="S",  # Rotate every second for testing
        interval=1,
        backupCount=3,
        encoding="utf-8",
    )
    # Write some logs
    logger = logging.getLogger("test_windows")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("Test message 1")
    logger.info("Test message 2")
    # Test that doRollover doesn't crash even with PermissionError
    # (on Windows an open log file cannot be renamed).
    try:
        handler.doRollover()
        # Should succeed or gracefully handle errors
        assert True
    except PermissionError:
        # Should not raise PermissionError
        pytest.fail("WindowsSafeRotatingFileHandler raised PermissionError")
    finally:
        handler.close()
        logger.removeHandler(handler)
def test_file_handler_uses_windows_safe_on_windows(tmp_path: Path):
    """Test that _file_handler uses WindowsSafeRotatingFileHandler on Windows."""
    from marimo._loggers import _file_handler
    # Test on Windows
    with patch("sys.platform", "win32"):
        with patch("marimo._loggers.get_log_directory", return_value=tmp_path):
            handler = _file_handler()
            assert isinstance(handler, WindowsSafeRotatingFileHandler)
            handler.close()
    # Test on non-Windows
    with patch("sys.platform", "linux"):
        with patch("marimo._loggers.get_log_directory", return_value=tmp_path):
            handler = _file_handler()
            assert isinstance(handler, TimedRotatingFileHandler)
            # Should not be the Windows-safe subclass on non-Windows
            assert not isinstance(handler, WindowsSafeRotatingFileHandler)
            handler.close()
@pytest.fixture(autouse=True)
def clear_loggers():
    """Reset the module-level logger cache so tests don't leak state."""
    # Clear the logger cache before each test
    _LOGGERS.clear()
    return
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/test_loggers.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/packages/is_in_uv.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
    # Heuristic for "is marimo running inside a uv project?":
    # all four signals below should be truthy in that case.
    import os
    from pathlib import Path
    # Check we have a virtual environment
    venv_path = os.environ.get("VIRTUAL_ENV", None)
    # Check that the `UV` environment variable is set
    # This tells us that marimo was run by uv
    uv_env_exists = os.environ.get("UV", None)
    # Check that the uv.lock and pyproject.toml files exist
    uv_lock_path = Path(venv_path).parent / "uv.lock"
    pyproject_path = Path(venv_path).parent / "pyproject.toml"
    # If all these are True or defined, then we are running in a uv project
    {
        "venv_path": venv_path,
        "uv_env_exists": uv_env_exists,
        "uv_lock_path": uv_lock_path.exists(),
        "pyproject_path": pyproject_path.exists(),
    }
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/packages/is_in_uv.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/sql/dbapi_sqlite.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
    # Standard marimo import cell.
    import marimo as mo
    return (mo,)
@app.cell
def _():
    # sqlite3 is the stdlib DB-API 2.0 driver exercised by this smoke test.
    import sqlite3
    return (sqlite3,)
@app.cell
def _(sqlite3):
    # In-memory database with a small seeded table for the SQL cell below.
    conn = sqlite3.connect(":memory:")
    conn.execute("""
        CREATE TABLE test (
            id INTEGER PRIMARY KEY,
            name TEXT,
            value REAL
        )
    """)
    conn.execute("""
        INSERT INTO test (name, value) VALUES
        ('a', 1.0),
        ('b', 2.0),
        ('c', 3.0)
    """)
    return (conn,)
@app.cell
def _(conn, mo):
    # mo.sql against a raw DB-API connection (exercises DBAPIEngine).
    _df = mo.sql(
        f"""
        select * FROM test
        """,
        engine=conn
    )
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/sql/dbapi_sqlite.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_sql/engines/dbapi.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from types import ModuleType
from typing import TYPE_CHECKING, Any, Optional, Protocol
from marimo import _loggers
from marimo._sql.engines.types import QueryEngine
from marimo._sql.utils import convert_to_output
LOGGER = _loggers.marimo_logger()
if TYPE_CHECKING:
from collections.abc import Sequence
import pandas as pd
import polars as pl
class DBAPIConnection(Protocol):
    """Minimal structural (duck-typed) view of a PEP 249 connection."""
    def cursor(self) -> Any: ...
    def commit(self) -> None: ...
class DBAPIEngine(QueryEngine[DBAPIConnection]):
    """DB-API 2.0 (PEP 249) engine.

    Wraps any connection object exposing the PEP 249 surface
    (cursor/commit/rollback/close) so marimo can run SQL against it.
    """
    @property
    def source(self) -> str:
        # Engine-family identifier used by marimo.
        return "dbapi"
    @property
    def dialect(self) -> str:
        """Best-effort SQL dialect name; generic "sql" when unknown."""
        # Try to get dialect from connection
        try:
            return str(self._connection.dialect)  # type: ignore[attr-defined]
        except AttributeError:
            return "sql"
    def execute(
        self, query: str, parameters: Optional[Sequence[Any]] = None
    ) -> Any:
        """Execute ``query`` and return results in the configured output format.

        Args:
            query: SQL text to execute.
            parameters: Optional query parameters (driver-style placeholders).

        Returns:
            The open cursor when the output format is "native" (the caller
            then owns closing it); otherwise a pandas/polars frame built from
            the fetched rows, or ``None`` for statements with no result set.
        """
        sql_output_format = self.sql_output_format()
        cursor = self._connection.cursor()
        should_close = True
        try:
            cursor.execute(query, parameters or ())
            if sql_output_format == "native":
                # Hand the live cursor back; skip the finally-close below.
                should_close = False
                return cursor
            # description is None for statements without a result set (DML/DDL).
            rows = cursor.fetchall() if cursor.description else None
            try:
                self._connection.commit()
            except Exception:
                # Best-effort: some drivers/modes disallow commit here.
                LOGGER.info("Unable to commit transaction", exc_info=True)
            if rows is None:
                return None
            # Get column names from cursor description
            if cursor.description:
                columns = [col[0] for col in cursor.description]
            else:
                columns = []
            def convert_to_polars() -> pl.DataFrame:
                import polars as pl
                # Pivot row tuples into per-column lists for pl.DataFrame.
                data: dict[str, list[Any]] = {col: [] for col in columns}
                for row in rows:
                    for i, col in enumerate(columns):
                        data[col].append(row[i])
                return pl.DataFrame(data)
            def convert_to_pandas() -> pd.DataFrame:
                import pandas as pd
                return pd.DataFrame(rows, columns=columns)
            return convert_to_output(
                sql_output_format=sql_output_format,
                to_polars=convert_to_polars,
                to_pandas=convert_to_pandas,
            )
        finally:
            if should_close:
                cursor.close()
    @staticmethod
    def is_compatible(var: Any) -> bool:
        """Check if a variable is a DB-API 2.0 compatible connection.

        A DB-API 2.0 connection must have:
        - cursor() method
        - commit() method
        - rollback() method
        - close() method
        """
        # Imports like duckdb should not be treated as DB-API connections
        if isinstance(var, ModuleType):
            return False
        # Ibis Deferred expression object should not be handled as datasource #7791
        var_type = type(var)
        var_type_name = f"{var_type.__module__}.{var_type.__qualname__}"
        if var_type_name == "ibis.common.deferred.Deferred":
            return False
        try:
            required_methods = ["cursor", "commit", "rollback", "close"]
            has_required_methods = all(
                callable(getattr(var, method, None))
                for method in required_methods
            )
            if not has_required_methods:
                return False
            # Opening a cursor verifies the duck-typed surface actually works.
            cursor = var.cursor()
            cursor_methods = ["execute", "fetchall"]
            has_cursor_methods = all(
                callable(getattr(cursor, method, None))
                for method in cursor_methods
            )
            return has_required_methods and has_cursor_methods
        except Exception:
            return False
    @staticmethod
    def is_dbapi_cursor(obj: Any) -> bool:
        """
        Return True if obj looks like a DB-API 2.0 cursor.
        """
        try:
            # Required methods
            has_execute = callable(getattr(obj, "execute", None))
            # has_executemany = callable(getattr(obj, "executemany", None))
            # At least one fetch method
            fetch_methods = ("fetchone", "fetchmany", "fetchall")
            has_fetch = any(
                callable(getattr(obj, m, None)) for m in fetch_methods
            )
            # Required attributes (description may be None after DML, but must exist)
            has_description_attr = hasattr(obj, "description")
            has_rowcount = hasattr(obj, "rowcount")
            return (
                has_execute
                and has_fetch
                and has_description_attr
                and has_rowcount
            )
        except Exception:
            return False
    @staticmethod
    def get_cursor_metadata(cursor: Any) -> dict[str, Any]:
        """
        Extract standard DB-API 2.0 cursor metadata.

        Never raises; on failure returns a dict with an "error" key.
        """
        try:
            meta: dict[str, Any] = {
                "result_type": f"{type(cursor)}",
            }
            # Column info
            desc = getattr(cursor, "description", None)
            if desc:
                cols: list[dict[str, Optional[Any]]] = []
                for col in desc:
                    # description tuple: (name, type_code, display_size, internal_size, precision, scale, null_ok)
                    name = col[0]
                    type_code = col[1] if len(col) > 1 else None
                    display_size = col[2] if len(col) > 2 else None
                    internal_size = col[3] if len(col) > 3 else None
                    precision = col[4] if len(col) > 4 else None
                    scale = col[5] if len(col) > 5 else None
                    null_ok = col[6] if len(col) > 6 else None
                    cols.append(
                        {
                            "name": name,
                            "type_code": type_code,
                            "display_size": display_size,
                            "internal_size": internal_size,
                            "precision": precision,
                            "scale": scale,
                            "null_ok": null_ok,
                        }
                    )
                meta["columns"] = cols
            else:
                meta["columns"] = None
            if hasattr(cursor, "rowcount"):
                meta["rowcount"] = cursor.rowcount
            # lastrowid (optional in many drivers)
            if hasattr(cursor, "lastrowid"):
                meta["lastrowid"] = cursor.lastrowid
            # SQL type guess
            # rowcount == -1 usually means SELECT (or DDL), >=0 means DML or SELECT (after execute)
            rc = getattr(cursor, "rowcount", None)
            if rc is None:
                sql_type = "Unknown"
            elif rc == -1:
                sql_type = "Query/DDL"
            else:
                sql_type = "Query/DML"
            meta["sql_statement_type"] = sql_type
            return meta
        except Exception:
            LOGGER.warning("Failed to extract cursor metadata", exc_info=True)
            return {
                "result_type": f"{type(cursor)}",
                "error": "Failed to extract metadata",
            }
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_sql/engines/dbapi.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_sql/test_dbapi.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import sqlite3
from unittest.mock import patch
import pytest
from marimo._sql.engines.dbapi import DBAPIEngine
from marimo._sql.engines.types import EngineCatalog, QueryEngine
@pytest.fixture
def sqlite_connection() -> sqlite3.Connection:
    """In-memory SQLite connection seeded with a small three-row table."""
    conn = sqlite3.connect(":memory:")
    conn.execute("""
        CREATE TABLE test (
            id INTEGER PRIMARY KEY,
            name TEXT,
            value REAL
        )
    """)
    conn.execute("""
        INSERT INTO test (name, value) VALUES
        ('a', 1.0),
        ('b', 2.0),
        ('c', 3.0)
    """)
    return conn
@pytest.fixture
def dbapi_engine(sqlite_connection: sqlite3.Connection) -> DBAPIEngine:
    """DBAPIEngine wrapping the seeded sqlite connection."""
    return DBAPIEngine(connection=sqlite_connection)
def test_source(dbapi_engine: DBAPIEngine) -> None:
    """The engine family identifier is always "dbapi"."""
    assert dbapi_engine.source == "dbapi"
def test_dialect(dbapi_engine: DBAPIEngine) -> None:
    """sqlite3 connections expose no dialect attribute; fall back to "sql"."""
    assert dbapi_engine.dialect == "sql"
def test_is_compatible() -> None:
    """is_compatible accepts sqlite3 connections, rejects non-connections."""
    conn = sqlite3.connect(":memory:")
    assert DBAPIEngine.is_compatible(conn)
    conn.close()
    # Objects without the DB-API surface are rejected.
    for not_a_connection in ("not a connection", None):
        assert not DBAPIEngine.is_compatible(not_a_connection)
    # The wrapper is a QueryEngine but intentionally not an EngineCatalog.
    engine = DBAPIEngine(conn)
    assert isinstance(engine, DBAPIEngine)
    assert isinstance(engine, QueryEngine)
    assert not isinstance(engine, EngineCatalog)
def test_execute_native(dbapi_engine: DBAPIEngine) -> None:
    """With "native" output the raw, unfetched sqlite3 cursor is returned."""
    with patch.object(
        dbapi_engine, "sql_output_format", return_value="native"
    ):
        result = dbapi_engine.execute("SELECT * FROM test")
        assert isinstance(result, sqlite3.Cursor)
        assert result.fetchall() == [
            (1, "a", 1.0),
            (2, "b", 2.0),
            (3, "c", 3.0),
        ]
def test_execute_pandas(dbapi_engine: DBAPIEngine) -> None:
pd = pytest.importorskip("pandas")
with patch.object(
dbapi_engine, "sql_output_format", return_value="pandas"
):
result = dbapi_engine.execute("SELECT * FROM test")
assert isinstance(result, pd.DataFrame)
assert list(result.columns) == ["id", "name", "value"]
assert len(result) == 3
assert result.iloc[0].to_dict() == {"id": 1, "name": "a", "value": 1.0}
def test_execute_polars(dbapi_engine: DBAPIEngine) -> None:
pl = pytest.importorskip("polars")
with patch.object(
dbapi_engine, "sql_output_format", return_value="polars"
):
result = dbapi_engine.execute("SELECT * FROM test")
assert isinstance(result, pl.DataFrame)
assert result.columns == ["id", "name", "value"]
assert len(result) == 3
assert result.row(0) == (1, "a", 1.0)
def test_execute_lazy_polars(dbapi_engine: DBAPIEngine) -> None:
pl = pytest.importorskip("polars")
with patch.object(
dbapi_engine, "sql_output_format", return_value="lazy-polars"
):
result = dbapi_engine.execute("SELECT * FROM test")
assert isinstance(result, pl.LazyFrame)
result = result.collect()
assert result.columns == ["id", "name", "value"]
assert result.row(0) == (1, "a", 1.0)
def test_execute_no_results(dbapi_engine: DBAPIEngine) -> None:
    """Statements producing no result set (DDL) return None."""
    result = dbapi_engine.execute("CREATE TABLE empty (id INTEGER)")
    assert result is None


def test_execute_error(dbapi_engine: DBAPIEngine) -> None:
    """Driver errors propagate to the caller unchanged."""
    with pytest.raises(sqlite3.OperationalError):
        dbapi_engine.execute("SELECT * FROM nonexistent")


def test_execute_transaction(dbapi_engine: DBAPIEngine) -> None:
    """Writes are committed and visible to subsequent queries."""
    pytest.importorskip("pandas")
    # Test that transaction is committed
    with patch.object(
        dbapi_engine, "sql_output_format", return_value="pandas"
    ):
        dbapi_engine.execute(
            "INSERT INTO test (name, value) VALUES ('d', 4.0)"
        )
        result = dbapi_engine.execute("SELECT * FROM test")
        assert len(result) == 4
        assert result.iloc[3].to_dict() == {"id": 4, "name": "d", "value": 4.0}


def test_execute_with_parameters(dbapi_engine: DBAPIEngine) -> None:
    """Positional query parameters are forwarded to the driver."""
    pytest.importorskip("pandas")
    with patch.object(
        dbapi_engine, "sql_output_format", return_value="pandas"
    ):
        result = dbapi_engine.execute(
            "SELECT * FROM test WHERE name = ?", ["a"]
        )
        assert len(result) == 1
        assert result.iloc[0].to_dict() == {"id": 1, "name": "a", "value": 1.0}
def test_is_dbapi_cursor() -> None:
    """is_dbapi_cursor accepts DBAPI cursors and rejects everything else.

    Holds an explicit reference to the connection and closes it, instead
    of leaking an anonymous ``sqlite3.connect(...)`` until GC.
    """
    conn = sqlite3.connect(":memory:")
    try:
        cursor = conn.cursor()
        assert DBAPIEngine.is_dbapi_cursor(cursor)
    finally:
        conn.close()
    # Test with non-DBAPI object
    assert not DBAPIEngine.is_dbapi_cursor("not a cursor")
    assert not DBAPIEngine.is_dbapi_cursor(None)


def test_get_cursor_metadata(dbapi_engine: DBAPIEngine) -> None:
    """get_cursor_metadata extracts column info and statement type."""
    with patch.object(
        dbapi_engine, "sql_output_format", return_value="native"
    ):
        result = dbapi_engine.execute("SELECT * FROM test")
        result = DBAPIEngine.get_cursor_metadata(result)
        assert result is not None
        assert len(result["columns"]) == 3
        assert result["sql_statement_type"] == "Query/DDL"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_sql/test_dbapi.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:examples/ai/chat/bedrock_example.py | # /// script
# requires-python = ">=3.11"
# dependencies = [
# "boto3",
# "litellm",
# "marimo",
# ]
# ///
import marimo
__generated_with = "0.19.7"
app = marimo.App(width="medium")
@app.cell(hide_code=True)
def _():
    # Import marimo once and hand it to downstream cells.
    import marimo as mo
    return (mo,)


@app.cell(hide_code=True)
def _(mo):
    # Notebook intro text.
    mo.md(r"""
    # AWS Bedrock Chat Example
    This example demonstrates using AWS Bedrock with marimo's chat interface.
    AWS Bedrock provides access to foundation models from leading AI companies like Anthropic, Meta, and others.
    ⚠️ **Note:** You'll need an AWS account with access to the AWS Bedrock service and the specific model you want to use.
    """)
    return
@app.cell(hide_code=True)
def _():
    import boto3

    # For this example, let's add a helper to check AWS configuration.
    def check_aws_config():
        """Return {"has_credentials": bool} for the default AWS session.

        Deliberately best-effort: any failure while resolving credentials
        is treated as "no credentials" rather than crashing the notebook.
        """
        # Check for credentials
        has_creds = False
        try:
            session = boto3.Session()
            credentials = session.get_credentials()
            if credentials:
                has_creds = True
        # Catch Exception rather than a bare ``except:`` so
        # KeyboardInterrupt / SystemExit still propagate.
        except Exception:
            pass
        return {"has_credentials": has_creds}

    # Run the check once; downstream cells consume the result.
    aws_config = check_aws_config()
    return (aws_config,)
@app.cell
def _(aws_config, mo):
    # Display AWS configuration status.
    # mo.stop halts this cell (and its dependents) when credentials are
    # missing, rendering the remediation guidance below instead.
    mo.stop(
        not aws_config["has_credentials"],
        mo.md("""
        ### ⚠️ AWS Credentials Not Found
        To use AWS Bedrock, you need AWS credentials configured.
        Options:
        1. Set environment variables:
        ```
        export AWS_ACCESS_KEY_ID=your_key
        export AWS_SECRET_ACCESS_KEY=your_secret
        ```
        2. Configure AWS CLI:
        ```
        aws configure
        ```
        3. Use an AWS profile in ~/.aws/credentials
        """),
    )
    return
@app.cell(hide_code=True)
def _(mo):
    # UI for model configuration
    # Predefined model options
    model_options = [
        "bedrock/us.amazon.nova-pro-v1:0",
        "bedrock/anthropic.claude-3-sonnet-20240229",
        "bedrock/anthropic.claude-3-haiku-20240307",
        "bedrock/meta.llama3-8b-instruct-v1:0",
        "bedrock/amazon.titan-text-express-v1",
        "bedrock/cohere.command-r-plus-v1",
    ]
    # Region options
    region_options = [
        "us-east-1",
        "us-west-2",
        "eu-central-1",
        "ap-northeast-1",
        "ap-southeast-1",
    ]
    # Model selection
    model = mo.ui.dropdown(
        options=model_options, value=model_options[0], label="AWS Bedrock Model"
    )
    # Region selection
    region = mo.ui.dropdown(
        options=region_options, value="us-east-1", label="AWS Region"
    )
    # Optional profile name
    profile = mo.ui.text(
        value="",
        label="AWS Profile (optional)",
        placeholder="Leave empty to use default credentials",
    )
    # System message
    system_message = mo.ui.text_area(
        value="You are a helpful, harmless assistant. Provide clear, concise answers.",
        label="System Message",
        rows=2,
    )
    # Create a form to wrap all inputs.
    # batch() binds the widgets into one value dict; form() defers
    # downstream updates until the submit button is clicked.
    config_form = (
        mo.md("""
        AWS Bedrock Chat Configuration:
        {model}
        {region}
        {profile}
        {system_message}
        """)
        .batch(
            model=model,
            region=region,
            profile=profile,
            system_message=system_message,
        )
        .form(
            submit_button_label="Update Chat Configuration",
        )
    )
    config_form
    return (config_form,)


@app.cell
def _(mo):
    # Section header for the chat UI below.
    mo.md(r"""
    ## AWS Bedrock Chat
    """)
    return
@app.cell
def _(config_form, max_tokens, mo, temperature):
    # Create a refreshable chat component that updates when the form is submitted
    # NOTE(review): `temperature` and `max_tokens` are cell parameters, but
    # no cell visible in this file defines them — confirm they are provided
    # elsewhere or re-add the corresponding inputs to the config form.
    def create_chat(config_form):
        # temperature = config_form.value["temperature"]
        # max_tokens = config_form.value["max_tokens"]
        model = config_form.value["model"]
        region = config_form.value["region"]
        system_message = config_form.value["system_message"]
        profile = config_form.value["profile"]
        try:
            # Create chat config
            chat_config = mo.ai.ChatModelConfig(
                temperature=temperature,
                max_tokens=max_tokens,
                # top_k=1,
                # top_p=1.0,
                # frequency_penalty=1,
                # presence_penalty=1,
            )
            # Create model with optional profile
            model_kwargs = {
                "model": model,
                "region_name": region,
                "system_message": system_message,
            }
            # Add profile if specified
            if profile.strip():
                model_kwargs["profile_name"] = profile.strip()
            # Create chat interface
            chatbot = mo.ui.chat(
                mo.ai.llm.bedrock(**model_kwargs),
                allow_attachments=[
                    "image/png",
                    "image/jpeg",
                ],
                prompts=[
                    "Hello",
                    "How are you?",
                    "I'm doing great, how about you?",
                ],
                max_height=400,
                config=chat_config,
            )
            return chatbot
        except Exception as e:
            # Surface initialization errors inline instead of crashing the cell.
            mo.md(f"**Error initializing chat**: {str(e)}")
            return None

    # Display the chat interface
    chatbot = create_chat(config_form)
    chatbot
    return


@app.cell
def _(mo):
    # Usage notes rendered at the bottom of the notebook.
    mo.md(r"""
    ## Notes on AWS Bedrock Usage
    1. **Model Access**: You need to request access to the specific models you want to use in the AWS Bedrock console.
    2. **Pricing**: Using AWS Bedrock incurs usage costs based on the number of input and output tokens. Check the [AWS Bedrock pricing](https://aws.amazon.com/bedrock/pricing/) for details.
    3. **Regions**: AWS Bedrock is not available in all AWS regions. Make sure to choose a region where Bedrock is available.
    4. **Authentication**: This example uses the standard AWS credential chain (environment variables, AWS config files, or instance profiles). You can also provide explicit credentials when creating the model.
    5. **Troubleshooting**: If you encounter issues, check:
    - That your AWS credentials are configured correctly
    - That you have requested model access in the AWS Bedrock console
    - That you're using a region where the selected model is available
    """)
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/ai/chat/bedrock_example.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_server/ai/test_providers.py | """Tests for the LLM providers in marimo._server.ai.providers."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from marimo._config.config import AiConfig
from marimo._dependencies.dependencies import Dependency, DependencyManager
from marimo._server.ai.config import AnyProviderConfig
from marimo._server.ai.providers import (
AnthropicProvider,
AzureOpenAIProvider,
BedrockProvider,
CustomProvider,
GoogleProvider,
OpenAIProvider,
get_completion_provider,
)
@pytest.mark.parametrize(
    ("model_name", "provider_name"),
    [
        pytest.param("gpt-4", "openai", id="openai"),
        pytest.param("claude-3-opus-20240229", "anthropic", id="anthropic"),
        pytest.param("gemini-1.5-flash", "google", id="google"),
        pytest.param(
            "bedrock/anthropic.claude-3-sonnet-20240229",
            "bedrock",
            id="bedrock",
        ),
        pytest.param("openrouter/gpt-4", "openrouter", id="openrouter"),
    ],
)
def test_anyprovider_for_model(model_name: str, provider_name: str) -> None:
    """Test that the correct config is returned for a given model."""
    # Every provider gets a distinguishable key so the assertion below
    # proves the right config section was selected.
    ai_config = AiConfig(
        open_ai={
            "model": model_name,
            "api_key": "openai-key",
        },
        anthropic={
            "api_key": "anthropic-key",
        },
        google={
            "api_key": "google-key",
        },
        bedrock={
            "profile_name": "aws-profile",
        },
        openrouter={
            "api_key": "openrouter-key",
        },
    )
    config = AnyProviderConfig.for_model(model_name, ai_config)
    if provider_name != "bedrock":
        assert config.api_key == f"{provider_name}-key"
    else:
        # Bedrock encodes the AWS profile name into the api_key field.
        assert config.api_key == "profile:aws-profile"
@pytest.mark.parametrize(
    ("model_name", "provider_type", "dependency"),
    [
        pytest.param("gpt-4", OpenAIProvider, None, id="openai"),
        pytest.param(
            "claude-3-opus-20240229",
            AnthropicProvider,
            DependencyManager.anthropic,
            id="anthropic",
        ),
        pytest.param(
            "gemini-1.5-flash",
            GoogleProvider,
            DependencyManager.google_ai,
            id="google",
        ),
        pytest.param(
            "bedrock/anthropic.claude-3-sonnet-20240229",
            BedrockProvider,
            DependencyManager.boto3,
            id="bedrock",
        ),
        pytest.param(
            "openrouter/gpt-4", CustomProvider, None, id="openrouter"
        ),
    ],
)
def test_get_completion_provider(
    model_name: str, provider_type: type, dependency: Dependency | None
) -> None:
    """Test that the correct provider is returned for a given model."""
    # Skip when the optional SDK backing a provider is not installed.
    if not DependencyManager.pydantic_ai.has():
        pytest.skip("requires pydantic_ai")
    if dependency and not dependency.has():
        pytest.skip(f"{dependency.pkg} is not installed")
    if provider_type == BedrockProvider:
        # For Bedrock, we pass bedrock-required details through the config
        config = AnyProviderConfig(
            api_key="aws_access_key_id:aws_secret_access_key",  # credentials
            base_url="us-east-1",  # region name
        )
    else:
        config = AnyProviderConfig(
            api_key="test-key", base_url="http://test-url"
        )
    provider = get_completion_provider(config, model_name)
    assert isinstance(provider, provider_type)
@pytest.mark.requires("pydantic_ai")
async def test_azure_openai_provider() -> None:
    """Test that Azure OpenAI provider uses correct parameters."""
    config = AnyProviderConfig(
        api_key="test-key",
        base_url="https://test.openai.azure.com/openai/deployments/gpt-4-1?api-version=2023-05-15",
    )
    provider = AzureOpenAIProvider("gpt-4", config)
    # The deployment URL is parsed into (api_version, deployment, endpoint).
    api_version, deployment_name, endpoint = provider._handle_azure_openai(
        "https://test.openai.azure.com/openai/deployments/gpt-4-1?api-version=2023-05-15"
    )
    assert api_version == "2023-05-15"
    assert deployment_name == "gpt-4-1"
    assert endpoint == "https://test.openai.azure.com"
    # Hosts outside *.openai.azure.com are parsed the same way.
    api_version, deployment_name, endpoint = provider._handle_azure_openai(
        "https://unknown_domain.openai/openai/deployments/gpt-4-1?api-version=2023-05-15"
    )
    assert api_version == "2023-05-15"
    assert deployment_name == "gpt-4-1"
    assert endpoint == "https://unknown_domain.openai"


@pytest.mark.skipif(
    not DependencyManager.anthropic.has()
    or not DependencyManager.pydantic_ai.has(),
    reason="anthropic or pydantic_ai not installed",
)
def test_anthropic_process_part_text_file() -> None:
    """Test Anthropic converts text file parts to text parts."""
    from pydantic_ai.ui.vercel_ai.request_types import FileUIPart, TextUIPart

    config = AnyProviderConfig(api_key="test-key", base_url="http://test")
    provider = AnthropicProvider("claude-3-opus-20240229", config)
    # Test text file conversion - base64 encoded "Hello, World!"
    text_file_part = FileUIPart(
        type="file",
        media_type="text/plain",
        url="data:text/plain;base64,SGVsbG8sIFdvcmxkIQ==",
        filename="test.txt",
    )
    result = provider.process_part(text_file_part)
    assert isinstance(result, TextUIPart)
    assert result.text == "Hello, World!"
    # Test image file is not converted
    image_file_part = FileUIPart(
        type="file",
        media_type="image/png",
        url="data:image/png;base64,iVBORw0KGgo=",
        filename="test.png",
    )
    result = provider.process_part(image_file_part)
    assert isinstance(result, FileUIPart)
    assert result.media_type == "image/png"
@pytest.mark.parametrize(
    ("model_name", "base_url", "expected"),
    [
        pytest.param(
            "o1-mini",
            None,
            True,
            id="o1_mini_no_base_url",
        ),
        pytest.param(
            "o1-preview",
            None,
            True,
            id="o1_preview_no_base_url",
        ),
        pytest.param(
            "o1",
            None,
            True,
            id="o1_no_base_url",
        ),
        pytest.param(
            "o1-2024-12-17",
            "https://api.openai.com/v1",
            True,
            id="o1_dated_openai_base_url",
        ),
        pytest.param(
            "o3-mini",
            None,
            True,
            id="o3_mini_no_base_url",
        ),
        pytest.param(
            "gpt-5-turbo",
            None,
            True,
            id="gpt5_turbo_no_base_url",
        ),
        pytest.param(
            "gpt-5-preview",
            None,
            True,
            id="gpt5_preview_no_base_url",
        ),
        pytest.param(
            "openai/o1-mini",
            None,
            True,
            id="openai_prefix_o1_mini_no_base_url",
        ),
        pytest.param(
            "openai/o1-preview",
            None,
            True,
            id="openai_prefix_o1_preview_no_base_url",
        ),
        pytest.param(
            "openai/gpt-5-turbo",
            None,
            True,
            id="openai_prefix_gpt5_no_base_url",
        ),
        # A custom base_url (e.g. a litellm proxy) disables the
        # reasoning-model treatment even for o1-style names.
        pytest.param(
            "o1-mini",
            "https://custom.api.com/v1",
            False,
            id="o1_custom_base_url",
        ),
        pytest.param(
            "o1-preview",
            "https://litellm.proxy.com/api/v1",
            False,
            id="o1_litellm_proxy",
        ),
        pytest.param(
            "gpt-4",
            None,
            False,
            id="gpt4_no_base_url",
        ),
        pytest.param(
            "gpt-4o",
            None,
            False,
            id="gpt4o_no_base_url",
        ),
        pytest.param(
            "gpt-4",
            "https://custom.api.com/v1",
            False,
            id="gpt4_custom_base_url",
        ),
        pytest.param(
            "olive-model",
            None,
            False,
            id="model_starting_with_o_but_not_reasoning",
        ),
        pytest.param(
            "openrouter/o1-mini",
            None,
            False,
            id="openrouter_prefix_not_openai",
        ),
    ],
)
@pytest.mark.requires("pydantic_ai")
def test_is_reasoning_model(
    model_name: str, base_url: str | None, expected: bool
) -> None:
    """Test that _is_reasoning_model correctly identifies reasoning models."""
    config = AnyProviderConfig(api_key="test-key", base_url=base_url)
    provider = OpenAIProvider(model_name, config)
    assert provider._is_reasoning_model(model_name) == expected


@pytest.mark.parametrize(
    ("model_name", "expected"),
    [
        pytest.param(
            "claude-opus-4-20250514",
            True,
            id="claude_opus_4",
        ),
        pytest.param(
            "claude-sonnet-4-20250514",
            True,
            id="claude_sonnet_4",
        ),
        pytest.param(
            "claude-haiku-4-5-20250514",
            True,
            id="claude_haiku_4_5",
        ),
        pytest.param(
            "claude-3-7-sonnet-20250219",
            True,
            id="claude_3_7_sonnet",
        ),
        # Pre-3.7 Claude models do not support extended thinking.
        pytest.param(
            "claude-3-5-sonnet-20241022",
            False,
            id="claude_3_5_sonnet_not_thinking",
        ),
        pytest.param(
            "claude-3-opus-20240229",
            False,
            id="claude_3_opus_not_thinking",
        ),
    ],
)
@pytest.mark.skipif(
    not DependencyManager.anthropic.has()
    or not DependencyManager.pydantic_ai.has(),
    reason="anthropic or pydantic_ai not installed",
)
def test_anthropic_is_extended_thinking_model(
    model_name: str, expected: bool
) -> None:
    """Test that is_extended_thinking_model correctly identifies thinking models."""
    config = AnyProviderConfig(api_key="test-key", base_url=None)
    provider = AnthropicProvider(model_name, config)
    assert provider.is_extended_thinking_model(model_name) == expected
@pytest.mark.requires("pydantic_ai")
async def test_completion_does_not_pass_redundant_instructions() -> None:
    """Regression test: the system prompt must appear exactly once."""
    from pydantic_ai.messages import ModelResponse, TextPart
    from pydantic_ai.models.openai import OpenAIResponsesModel

    config = AnyProviderConfig(api_key="test-key", base_url="http://test-url")
    provider = OpenAIProvider("gpt-4", config)
    # Mock the tool manager and the model request so no network is needed.
    with (
        patch("marimo._server.ai.providers.get_tool_manager") as mock_get_tm,
        patch.object(
            OpenAIResponsesModel, "request", new_callable=AsyncMock
        ) as mock_request,
    ):
        mock_get_tm.return_value = MagicMock()
        mock_request.return_value = ModelResponse(
            parts=[TextPart(content="test")]
        )
        await provider.completion(
            messages=[],
            system_prompt="Test prompt",
            max_tokens=100,
            additional_tools=[],
        )
        mock_request.assert_called_once()
        request_messages = mock_request.call_args.args[0]
        assert len(request_messages) == 1
        # The bug caused instructions to be "Test prompt\nTest prompt"
        instructions = request_messages[0].instructions
        # This asserts the duplication is gone
        assert instructions == "Test prompt"
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/ai/test_providers.py",
"license": "Apache License 2.0",
"lines": 342,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/primitives/_decimal.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    return (mo,)


@app.cell
def _(alt, df):
    # Binned bar chart over the decimal column; exercises altair's
    # handling of the DECIMAL dtype produced by the SQL cell below.
    _chart = (
        alt.Chart(df)
        .mark_bar()
        .encode(
            x=alt.X("CAST(10 AS DECIMAL(18,3))", type="quantitative", bin=True),
            y=alt.Y("count()", type="quantitative"),
        )
        .properties(width="container")
    )
    _chart
    return


@app.cell
def _():
    import altair as alt
    return (alt,)


@app.cell
def _(mo):
    # SQL cell producing a single NUMERIC (decimal) value.
    df = mo.sql(
        f"""
        SELECT
        10::numeric
        """
    )
    return (df,)
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/primitives/_decimal.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_plugins/ui/_impl/anywidget/types.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any, NewType, Union
# AnyWidget model id
WidgetModelId = NewType("WidgetModelId", str)
# Buffer paths
BufferPaths = list[list[Union[str, int]]]
# Widget model state
WidgetModelState = dict[str, Any]
# Widget model state without buffers
WidgetModelStateWithoutBuffers = dict[str, Any]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_plugins/ui/_impl/anywidget/types.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_plugins/ui/_impl/anywidget/utils.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._dependencies.dependencies import DependencyManager
from marimo._plugins.ui._impl.anywidget.types import (
BufferPaths,
WidgetModelState,
WidgetModelStateWithoutBuffers,
)
def extract_buffer_paths(
    message: WidgetModelState,
) -> tuple[WidgetModelStateWithoutBuffers, BufferPaths, list[bytes]]:
    """Split binary buffers out of a widget state dict.

    Delegates to ipywidgets' ``_remove_buffers`` helper and returns
    ``(state_without_buffers, buffer_paths, buffers)``.
    """
    # ipywidgets is an optional dependency; fail with a helpful message.
    DependencyManager.ipywidgets.require("for anywidget support.")
    import ipywidgets  # type: ignore

    remove_buffers = ipywidgets.widgets.widget._remove_buffers  # type: ignore
    stripped_state, paths, raw_buffers = remove_buffers(message)  # type: ignore
    return stripped_state, paths, raw_buffers  # type: ignore
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_plugins/ui/_impl/anywidget/utils.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/anywidget_examples/lonboard_example.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "geopandas==1.0.1",
# "lonboard==0.10.4",
# "marimo",
# "matplotlib==3.10.3",
# "palettable==3.3.3",
# "pandas==2.2.3",
# "pyarrow==20.0.0",
# "shapely==2.1.1",
# ]
# ///
import marimo
__generated_with = "0.19.7"
app = marimo.App(width="medium")
@app.cell
def _():
    # Pull in marimo, geo/data libraries, and lonboard's map primitives.
    import marimo as mo
    from pathlib import Path
    import geopandas as gpd
    import pandas as pd
    import shapely
    from palettable.colorbrewer.diverging import BrBG_10
    from lonboard import Map, ScatterplotLayer
    from lonboard.colormap import apply_continuous_cmap
    return (
        BrBG_10,
        Map,
        Path,
        ScatterplotLayer,
        apply_continuous_cmap,
        gpd,
        mo,
        pd,
        shapely,
    )


@app.cell
def _(Path, gpd, pd, shapely):
    # Download Ookla mobile speed-test tiles (or reuse the local cache),
    # reduce each tile polygon to its centroid, and cache as parquet.
    _url = "https://ookla-open-data.s3.us-west-2.amazonaws.com/parquet/performance/type=mobile/year=2019/quarter=1/2019-01-01_performance_mobile_tiles.parquet"
    _local_path = Path("internet-speeds.parquet")
    if _local_path.exists():
        gdf = gpd.read_parquet(_local_path)
    else:
        _columns = ["avg_d_kbps", "tile"]
        _df = pd.read_parquet(_url, columns=_columns)
        _tile_geometries = shapely.from_wkt(_df["tile"])
        _tile_centroids = shapely.centroid(_tile_geometries)
        gdf = gpd.GeoDataFrame(
            _df[["avg_d_kbps"]], geometry=_tile_centroids, crs="EPSG:4326"
        )
        gdf.to_parquet(_local_path)
    gdf
    return (gdf,)


@app.cell
def _(Map, ScatterplotLayer, gdf, mo):
    # Scatter layer over the centroids plus sliders that set the
    # colormap normalization bounds used in the next cell.
    layer = ScatterplotLayer.from_geopandas(gdf)
    min_bound = mo.ui.slider(
        0, 100_000, value=5000, show_value=True, label="min bound"
    )
    max_bound = mo.ui.slider(
        0, 100_000, value=50_000, show_value=True, label="max bound"
    )
    mo.vstack([min_bound, max_bound, Map(layer)])
    return layer, max_bound, min_bound


@app.cell
def _(BrBG_10, apply_continuous_cmap, gdf, layer, max_bound, min_bound):
    # Recolor points when the sliders move: normalize download speed by
    # the chosen bounds and map through the BrBG diverging palette.
    normalized_download_speed = (gdf["avg_d_kbps"] - min_bound.value) / (
        max_bound.value - min_bound.value
    )
    layer.get_fill_color = apply_continuous_cmap(
        normalized_download_speed, BrBG_10, alpha=0.7
    )
    return


@app.cell
def _(layer):
    # Inspect the first computed fill color.
    layer.get_fill_color[0]
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/anywidget_examples/lonboard_example.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_smoke_tests/anywidget_smoke_tests/multiple_models.py | import marimo
__generated_with = "0.19.7"
app = marimo.App(width="medium")
@app.cell
def _():
    import marimo as mo
    import anywidget
    import ipywidgets
    import traitlets

    # State-only widget: it has a synced trait but no frontend view.
    class ModelOnlyWidget(ipywidgets.Widget):
        value = traitlets.Int(42).tag(sync=True)

    # Rendered widget whose JS resolves the ModelOnlyWidget's model via
    # the widget manager and wires a counter button to its `value` trait.
    class Widget(anywidget.AnyWidget):
        _esm = """
        async function render({ model, el }) {
        let fooModel = await model.widget_manager.get_model(
        model.get("foo").slice("IPY_MODEL_".length)
        )
        console.log(fooModel)
        let button = document.createElement("button");
        el.appendChild(button);
        button.innerText = "count is " + fooModel.get("value");
        button.onclick = async () => {
        fooModel.set("value", fooModel.get("value") + 1);
        fooModel.save_changes();
        }
        fooModel.on("change:value", () => {
        button.innerText = "count is " + fooModel.get("value");
        });
        }
        export default { render }
        """
        # Reference to another widget, serialized as "IPY_MODEL_<id>".
        foo = traitlets.Instance(ModelOnlyWidget).tag(
            sync=True, **ipywidgets.widget_serialization
        )

    m = ModelOnlyWidget()
    mo.ui.anywidget(Widget(foo=m))
    return (m,)


@app.cell
def _(m):
    # Reading the trait server-side reflects frontend button clicks.
    m.value
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/anywidget_smoke_tests/multiple_models.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_smoke_tests/third_party/lonboard_example.py | # /// script
# requires-python = ">=3.11"
# dependencies = [
# "duckdb==1.2.0",
# "lonboard==0.10.4",
# "marimo",
# "polars==1.23.0",
# "pyarrow==19.0.1",
# "shapely==2.0.7",
# "sqlglot==26.6.0",
# ]
# ///
import marimo
__generated_with = "0.15.5"
app = marimo.App(width="full")
@app.cell
def _():
    import duckdb
    from lonboard import viz

    # Initialize DuckDB connection
    con = duckdb.connect()
    # Load spatial extension
    duckdb.install_extension("spatial", connection=con)
    duckdb.load_extension("spatial", connection=con)
    return con, viz


@app.cell
def _(con):
    # Five hard-coded WKT polygons built with UNION ALL, so no table
    # needs to exist before querying.
    sql = """
    SELECT 'Polygon 1' AS polygon_name, 'POLYGON((-48.5 -25.4, -48.4 -25.4, -48.4 -25.3, -48.5 -25.3, -48.5 -25.4))'::geometry AS geometry
    UNION ALL
    SELECT 'Polygon 2' AS polygon_name, 'POLYGON((-48.3 -25.4, -48.2 -25.4, -48.2 -25.3, -48.3 -25.3, -48.3 -25.4))'::geometry AS geometry
    UNION ALL
    SELECT 'Polygon 3' AS polygon_name, 'POLYGON((-48.1 -25.4, -48.0 -25.4, -48.0 -25.3, -48.1 -25.3, -48.1 -25.4))'::geometry AS geometry
    UNION ALL
    SELECT 'Polygon 4' AS polygon_name, 'POLYGON((-48.55 -25.35, -48.45 -25.35, -48.45 -25.25, -48.55 -25.25, -48.55 -25.35))'::geometry AS geometry
    UNION ALL
    SELECT 'Polygon 5' AS polygon_name, 'POLYGON((-48.35 -25.35, -48.25 -25.35, -48.25 -25.25, -48.35 -25.25, -48.35 -25.35))'::geometry AS geometry
    """
    polygons = con.sql(sql)
    return (polygons,)


@app.cell
def _(con, polygons, viz):
    # Render the DuckDB relation directly with lonboard.
    viz(polygons, con=con)
    return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/third_party/lonboard_example.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_plugins/ui/_impl/anywidget/test_anywidget_utils.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import pytest
from marimo._dependencies.dependencies import DependencyManager
from marimo._plugins.ui._impl.anywidget.utils import extract_buffer_paths
HAS_DEPS = DependencyManager.ipywidgets.has()
@pytest.mark.skipif(not HAS_DEPS, reason="ipywidgets is not installed")
def test_extract_buffer_paths():
    """extract_buffer_paths moves bytes values out of the state dict."""
    # Test with a simple message containing buffers
    message = {
        "method": "update",
        "state": {"value": b"test", "other": "not a buffer"},
        "buffer_paths": [["state", "value"]],
    }
    state, buffer_paths, buffers = extract_buffer_paths(message["state"])
    assert isinstance(state, dict)
    assert isinstance(buffer_paths, list)
    assert isinstance(buffers, list)
    # The bytes value is extracted into `buffers`...
    assert len(buffers) == 1
    assert buffers[0] == b"test"
    # ...while non-buffer entries stay in the stripped state.
    assert state["other"] == "not a buffer"
    assert "value" not in state
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/ui/_impl/anywidget/test_anywidget_utils.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_plugins/ui/_impl/test_comm.py | from __future__ import annotations
import sys
from unittest.mock import MagicMock, patch
import pytest
from marimo._plugins.ui._impl.anywidget.init import CommLifecycleItem
from marimo._plugins.ui._impl.comm import (
MarimoComm,
MarimoCommManager,
)
from marimo._runtime.commands import (
ModelCommand,
ModelCustomMessage,
ModelUpdateMessage,
)
from marimo._types.ids import WidgetModelId
@pytest.fixture
def comm_manager():
    """Fresh comm manager per test."""
    return MarimoCommManager()


@pytest.fixture
def comm(comm_manager: MarimoCommManager) -> MarimoComm:
    """A MarimoComm registered with `comm_manager`; marked closed on teardown."""
    comm_id = WidgetModelId("test-comm")
    # Patch the broadcast so constructing the comm doesn't emit messages.
    with patch("marimo._plugins.ui._impl.comm.broadcast_notification"):
        c = MarimoComm(
            comm_id=comm_id,
            comm_manager=comm_manager,
            target_name="test_target",
        )
    yield c  # type: ignore[misc]
    # Ensure the comm is closed so __del__ doesn't fire broadcast_notification
    # during garbage collection in a later test's patch scope.
    with patch("marimo._plugins.ui._impl.comm.broadcast_notification"):
        c._closed = True
def test_comm_manager_register_unregister(
    comm_manager: MarimoCommManager, comm: MarimoComm
):
    """Comms self-register on construction and can be unregistered."""
    # comm is already registered during __init__
    comm_id = comm.comm_id
    assert comm_id in comm_manager.comms
    assert comm_manager.comms[comm_id] == comm
    # Test unregistration
    unregistered_comm = comm_manager.unregister_comm(comm)
    assert unregistered_comm == comm
    assert comm_id not in comm_manager.comms


def test_comm_manager_receive_unknown_message(
    comm_manager: MarimoCommManager,
):
    """Messages for unknown model ids are logged, not raised."""
    with patch("marimo._plugins.ui._impl.comm.LOGGER") as mock_logger:
        command = ModelCommand(
            model_id=WidgetModelId("unknown"),
            message=ModelUpdateMessage(state={}, buffer_paths=[]),
            buffers=[],
        )
        comm_manager.receive_comm_message(command)
        mock_logger.warning.assert_called_once()


def test_comm_initialization(comm: MarimoComm):
    """A new comm starts open with no callbacks attached."""
    assert comm.comm_id == WidgetModelId("test-comm")
    assert comm.target_name == "test_target"
    assert comm.kernel == "marimo"
    assert not comm._closed
    assert comm._msg_callback is None
    assert comm._close_callback is None


def test_comm_open(comm: MarimoComm):
    """open() broadcasts the payload and leaves the comm open."""
    with patch.object(comm, "_broadcast") as mock_broadcast:
        comm.open(data={"test": "data"})
        mock_broadcast.assert_called_once_with({"test": "data"}, [])
        assert not comm._closed
def test_comm_send(comm: MarimoComm):
    """send() forwards data through _broadcast."""
    with patch.object(comm, "_broadcast") as mock_broadcast:
        comm.send(data={"test": "data"})
        mock_broadcast.assert_called_once_with({"test": "data"}, [])


def test_comm_close(comm: MarimoComm):
    """close() broadcasts a final notification and marks the comm closed."""
    with patch(
        "marimo._plugins.ui._impl.comm.broadcast_notification"
    ) as mock_broadcast:
        comm.close(data={"test": "data"})
        mock_broadcast.assert_called_once()
        assert comm._closed


def test_comm_close_already_closed(comm: MarimoComm):
    """Closing an already-closed comm is a no-op (no broadcast)."""
    comm._closed = True
    with patch(
        "marimo._plugins.ui._impl.comm.broadcast_notification"
    ) as mock_broadcast:
        comm.close()
        mock_broadcast.assert_not_called()


def test_comm_on_msg(comm: MarimoComm):
    """on_msg registers the message callback."""
    callback = MagicMock()
    comm.on_msg(callback)
    assert comm._msg_callback == callback


def test_comm_on_close(comm: MarimoComm):
    """on_close registers the close callback."""
    callback = MagicMock()
    comm.on_close(callback)
    assert comm._close_callback == callback


def test_comm_handle_msg(comm: MarimoComm):
    """handle_msg dispatches to the registered message callback."""
    callback = MagicMock()
    comm.on_msg(callback)
    msg = {"test": "message"}
    comm.handle_msg(msg)
    callback.assert_called_once_with(msg)


def test_comm_handle_msg_no_callback(comm: MarimoComm):
    """handle_msg without a registered callback warns instead of raising."""
    with patch("marimo._plugins.ui._impl.comm.LOGGER") as mock_logger:
        msg = {"test": "message"}
        comm.handle_msg(msg)
        mock_logger.warning.assert_called_once()


def test_comm_handle_close(comm: MarimoComm):
    """handle_close dispatches to the registered close callback."""
    callback = MagicMock()
    comm.on_close(callback)
    msg = {"test": "message"}
    comm.handle_close(msg)
    callback.assert_called_once_with(msg)


def test_comm_handle_close_no_callback(comm: MarimoComm):
    """handle_close without a registered callback only logs at debug level."""
    with patch("marimo._plugins.ui._impl.comm.LOGGER") as mock_logger:
        msg = {"test": "message"}
        comm.handle_close(msg)
        mock_logger.debug.assert_called()
def test_comm_broadcast(comm: MarimoComm):
"""Test that _broadcast sends a ModelLifecycleNotification."""
with patch(
"marimo._plugins.ui._impl.comm.broadcast_notification"
) as mock_broadcast:
comm._broadcast({"method": "update", "state": {"key": "value"}}, [])
mock_broadcast.assert_called_once()
notification = mock_broadcast.call_args[0][0]
assert notification.model_id == comm.comm_id
def test_comm_manager_receive_update_message(
comm_manager: MarimoCommManager, comm: MarimoComm
):
"""Test receiving an update message through the comm manager."""
callback = MagicMock()
comm.on_msg(callback)
comm.ui_element_id = "test-element"
command = ModelCommand(
model_id=comm.comm_id,
message=ModelUpdateMessage(
state={"key": "value"},
buffer_paths=[],
),
buffers=[],
)
result = comm_manager.receive_comm_message(command)
assert result == ("test-element", {"key": "value"})
callback.assert_called_once()
def test_comm_manager_receive_custom_message(
    comm_manager: MarimoCommManager, comm: MarimoComm
):
    """A custom message invokes the callback but maps to no UI element."""
    handler = MagicMock()
    comm.on_msg(handler)
    command = ModelCommand(
        model_id=comm.comm_id,
        message=ModelCustomMessage(content={"custom": "data"}),
        buffers=[],
    )
    assert comm_manager.receive_comm_message(command) == (None, None)
    handler.assert_called_once()
def test_comm_lifecycle_item_dispose_closes_comm(
    comm_manager: MarimoCommManager, comm: MarimoComm
):
    """CommLifecycleItem.dispose() closes and deregisters the comm."""
    item = CommLifecycleItem(comm)
    assert not comm._closed
    assert comm.comm_id in comm_manager.comms
    with patch(
        "marimo._plugins.ui._impl.comm.broadcast_notification"
    ) as fake_broadcast:
        disposed = item.dispose(context=MagicMock(), deletion=False)
        assert disposed is True
        assert comm._closed
        assert comm.comm_id not in comm_manager.comms
        fake_broadcast.assert_called_once()
class _CustomBytes:
"""Simulates obstore.Bytes — implements buffer protocol but not a
subclass of bytes/memoryview/bytearray."""
def __init__(self, data: bytes):
self._data = data
def __buffer__(self, flags: int = 0) -> memoryview:
return memoryview(self._data)
class TestBroadcastBufferTypes:
    """Broadcast through the comm with various buffer types and verify the
    notification carries the right data after a serialize/deserialize
    roundtrip."""

    # Binary payload containing non-UTF-8 bytes, to catch encoding bugs in
    # the serialization path.
    PAYLOAD = b"RIFF\x00\x01binary\xff\xfe"

    def _roundtrip_buffer(self, comm, buf):
        """Broadcast a buffer through the comm and return the deserialized
        notification."""
        from marimo._messaging.notification import ModelLifecycleNotification
        from marimo._messaging.serde import (
            deserialize_kernel_message,
            serialize_kernel_message,
        )

        with patch(
            "marimo._plugins.ui._impl.comm.broadcast_notification"
        ) as mock_broadcast:
            comm._broadcast(
                {"method": "update", "state": {}, "buffer_paths": []},
                [buf],
            )
            notification = mock_broadcast.call_args[0][0]
            assert isinstance(notification, ModelLifecycleNotification)
            # Full roundtrip through JSON serialization
            raw = serialize_kernel_message(notification)
            return deserialize_kernel_message(raw)

    def test_bytes_buffer(self, comm):
        # Plain bytes: the common case.
        result = self._roundtrip_buffer(comm, self.PAYLOAD)
        assert result.message.buffers == [self.PAYLOAD]

    def test_memoryview_buffer(self, comm):
        # Zero-copy views must be converted, not rejected.
        result = self._roundtrip_buffer(comm, memoryview(self.PAYLOAD))
        assert result.message.buffers == [self.PAYLOAD]

    def test_bytearray_buffer(self, comm):
        result = self._roundtrip_buffer(comm, bytearray(self.PAYLOAD))
        assert result.message.buffers == [self.PAYLOAD]

    @pytest.mark.skipif(
        sys.version_info < (3, 12),
        reason="__buffer__ dunder requires Python 3.12+",
    )
    def test_custom_buffer_protocol(self, comm):
        """Custom buffer-protocol objects (like obstore.Bytes) survive the
        roundtrip."""
        result = self._roundtrip_buffer(comm, _CustomBytes(self.PAYLOAD))
        assert result.message.buffers == [self.PAYLOAD]

    def test_unsupported_type_raises(self):
        # str is not a buffer; coercion must fail loudly, not silently encode.
        from marimo._plugins.ui._impl.comm import _ensure_bytes

        with pytest.raises(TypeError):
            _ensure_bytes("not bytes")
def test_comm_lifecycle_item_dispose_idempotent(comm: MarimoComm):
    """Disposing twice is safe because comm.close() is idempotent."""
    item = CommLifecycleItem(comm)
    with patch("marimo._plugins.ui._impl.comm.broadcast_notification"):
        item.dispose(context=MagicMock(), deletion=False)
        # The comm is already closed, so a second dispose is a no-op.
        disposed_again = item.dispose(context=MagicMock(), deletion=True)
    assert disposed_again is True
    assert comm._closed
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_plugins/ui/_impl/test_comm.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/scripts/logging_in_scripts.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def _():
import marimo as mo
import sys
import time
return mo, sys, time
@app.cell
def _(mo, sys, time):
for i in mo.status.progress_bar(range(10)):
if i % 2 == 0:
print(f"Step {i}", file=sys.stderr)
else:
print(f"Step {i}")
time.sleep(1)
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/scripts/logging_in_scripts.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:examples/running_as_a_script/textual_app.py | # /// script
# requires-python = ">=3.13"
# dependencies = [
# "marimo",
# "textual==3.2.0",
# ]
# ///
"""
Marimo wrapper for the base Textual app example.
Provides a basic clock widget to show the current time.
run with `python textual_app.py`
"""
import marimo
__generated_with = "0.15.5"
app = marimo.App()
with app.setup:
import marimo as mo
from datetime import datetime
from textual.app import App, ComposeResult
from textual.widgets import Digits
@app.class_definition
class ClockApp(App):
CSS = """
Screen { align: center middle; }
Digits { width: auto; }
"""
def compose(self) -> ComposeResult:
yield Digits("")
def on_ready(self) -> None:
self.update_clock()
self.set_interval(1, self.update_clock)
def update_clock(self) -> None:
clock = datetime.now().time()
self.query_one(Digits).update(f"{clock:%T}")
@app.cell
def _():
app = ClockApp()
if mo.app_meta().mode == "script":
app.run()
# App gives back a basic repr
app
return
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "examples/running_as_a_script/textual_app.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_save/stores/tiered.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Optional
from marimo import _loggers
from marimo._save.stores.store import Store
LOGGER = _loggers.marimo_logger()
class TieredStore(Store):
    """A composite store that consults multiple stores in priority order.

    Reads check each store in turn until the data is found; writes fan out
    to every store. Failures in individual stores are logged, never raised.
    """

    def __init__(self, stores: list[Store]) -> None:
        """Initialize the tiered store.

        Args:
            stores: List of stores to use, highest priority first.
        """
        if not stores:
            raise ValueError("At least one store is required")
        self.stores = stores

    def get(self, key: str) -> Optional[bytes]:
        """Return the value from the highest-priority store that has it."""
        for index, backend in enumerate(self.stores):
            try:
                data = backend.get(key)
            except Exception as e:
                LOGGER.error(f"Error getting from store {index}: {e}")
                continue
            if data is not None:
                # Backfill faster (preceding) stores so the next read hits
                # earlier in the chain.
                self._update_preceding_stores(key, data, index)
                return data
        return None

    def put(self, key: str, value: bytes) -> bool:
        """Write the value to every store; True if any write succeeded."""
        wrote_any = False
        for index, backend in enumerate(self.stores):
            try:
                if backend.put(key, value):
                    wrote_any = True
            except Exception as e:
                LOGGER.error(f"Error putting to store {index}: {e}")
        return wrote_any

    def hit(self, key: str) -> bool:
        """Return True if any store holds the key."""
        for index, backend in enumerate(self.stores):
            try:
                if backend.hit(key):
                    return True
            except Exception as e:
                LOGGER.error(f"Error checking hit on store {index}: {e}")
        return False

    def _update_preceding_stores(
        self, key: str, value: bytes, found_index: int
    ) -> None:
        """Copy the found value into every store ahead of ``found_index``."""
        for index in range(found_index):
            try:
                self.stores[index].put(key, value)
            except Exception as e:
                LOGGER.error(f"Error updating preceding store {index}: {e}")
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_save/stores/tiered.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_save/stores/test_store_config.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from marimo._save.stores import (
DEFAULT_STORE,
FileStore,
TieredStore,
_get_store_from_config,
)
class TestGetStoreFromConfig:
    """Unit tests for _get_store_from_config's fallback and dispatch logic."""

    def test_none_config(self) -> None:
        """A None config falls back to the default store."""
        assert isinstance(_get_store_from_config(None), DEFAULT_STORE)

    def test_empty_list_config(self) -> None:
        """An empty list config falls back to the default store."""
        assert isinstance(_get_store_from_config([]), DEFAULT_STORE)

    def test_list_with_none_config(self) -> None:
        """A list containing only None falls back to the default store."""
        assert isinstance(_get_store_from_config([None]), DEFAULT_STORE)

    def test_single_item_list(self) -> None:
        """A one-item list yields that item's store directly (no tiering)."""
        result = _get_store_from_config(
            [{"type": "file", "args": {"save_path": "/tmp/test"}}]
        )
        assert isinstance(result, FileStore)
        assert result.save_path.as_posix() == "/tmp/test"

    def test_multi_item_list(self) -> None:
        """Multiple items are wrapped in a TieredStore, order preserved."""
        result = _get_store_from_config(
            [
                {"type": "file", "args": {"save_path": "/tmp/test1"}},
                {"type": "file", "args": {"save_path": "/tmp/test2"}},
            ]
        )
        assert isinstance(result, TieredStore)
        assert len(result.stores) == 2
        assert all(isinstance(s, FileStore) for s in result.stores)
        assert result.stores[0].save_path.as_posix() == "/tmp/test1"
        assert result.stores[1].save_path.as_posix() == "/tmp/test2"

    def test_dict_config(self) -> None:
        """A plain dict config yields the matching store."""
        result = _get_store_from_config(
            {"type": "file", "args": {"save_path": "/tmp/test"}}
        )
        assert isinstance(result, FileStore)
        assert result.save_path.as_posix() == "/tmp/test"

    def test_invalid_store_type(self) -> None:
        """An unknown store type falls back to the default store."""
        result = _get_store_from_config({"type": "invalid", "args": {}})
        assert isinstance(result, DEFAULT_STORE)

    def test_store_creation_error(self) -> None:
        """A store-construction failure falls back to the default store."""
        result = _get_store_from_config(
            {"type": "file", "args": {"invalid_arg": "value"}}
        )
        assert isinstance(result, DEFAULT_STORE)

    def test_missing_store_type_uses_default(self) -> None:
        """A config without a type uses the default store type."""
        assert isinstance(_get_store_from_config({"args": {}}), DEFAULT_STORE)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/stores/test_store_config.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_save/stores/test_tiered.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Any
from unittest.mock import patch
import pytest
from marimo._save.stores.tiered import TieredStore
from tests._save.store.mocks import MockStore
def throw_exception(*args: Any, **kwargs: Any) -> None:
    """Stand-in callable that accepts any signature and always fails."""
    _ = (args, kwargs)  # arguments are irrelevant; swallow them
    raise ValueError("Test exception")
class TestTieredStore:
    """Unit tests for TieredStore's read-through/write-through semantics
    and its swallow-and-log error handling."""

    def test_init_with_empty_stores_raises_error(self) -> None:
        """Test that initializing with empty stores raises an error."""
        with pytest.raises(ValueError, match="At least one store is required"):
            TieredStore([])

    def test_init_with_multiple_stores(self) -> None:
        """Test that initializing with multiple stores works."""
        stores = [MockStore(), MockStore()]
        tiered_store = TieredStore(stores)
        assert tiered_store.stores == stores

    def test_get_from_first_store(self) -> None:
        """Test getting a value that exists in the first store."""
        store1 = MockStore()
        store2 = MockStore()
        key = "test_key"
        value = b"test_value"
        store1._cache[key] = value
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.get(key)
        assert result == value
        # Second store should remain empty: a first-tier hit must not
        # touch lower tiers.
        assert key not in store2._cache

    def test_get_from_second_store_updates_first(self) -> None:
        """Test getting a value from the second store updates the first store."""
        store1 = MockStore()
        store2 = MockStore()
        key = "test_key"
        value = b"test_value"
        store2._cache[key] = value
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.get(key)
        assert result == value
        # First store should be updated (read-through backfill)
        assert key in store1._cache
        assert store1._cache[key] == value

    def test_get_not_found(self) -> None:
        """Test that get returns None when the key is not found in any store."""
        store1 = MockStore()
        store2 = MockStore()
        key = "test_key"
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.get(key)
        assert result is None

    def test_put_updates_all_stores(self) -> None:
        """Test that put updates all stores."""
        store1 = MockStore()
        store2 = MockStore()
        key = "test_key"
        value = b"test_value"
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.put(key, value)
        assert result is True
        assert key in store1._cache
        assert store1._cache[key] == value
        assert key in store2._cache
        assert store2._cache[key] == value

    def test_hit_returns_true_if_any_store_has_key(self) -> None:
        """Test that hit returns True if any store has the key."""
        store1 = MockStore()
        store2 = MockStore()
        key = "test_key"
        value = b"test_value"
        store2._cache[key] = value
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.hit(key)
        assert result is True

    def test_hit_returns_false_if_no_store_has_key(self) -> None:
        """Test that hit returns False if no store has the key."""
        store1 = MockStore()
        store2 = MockStore()
        key = "test_key"
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.hit(key)
        assert result is False

    @patch("marimo._save.stores.tiered.LOGGER")
    def test_get_with_exception(self, mock_logger) -> None:
        """Test handling exceptions during get operation."""
        store1 = MockStore()
        # Create a store that will raise an exception
        store2 = MockStore()
        store2.get = throw_exception
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.get("test_key")
        # Failure is logged, not raised; lookup reports a miss.
        assert result is None
        mock_logger.error.assert_called_once()
        assert "Test exception" in mock_logger.error.call_args[0][0]

    @patch("marimo._save.stores.tiered.LOGGER")
    def test_put_with_exception(self, mock_logger) -> None:
        """Test handling exceptions during put operation."""
        store1 = MockStore()
        # Create a store that will raise an exception
        store2 = MockStore()
        store2.put = throw_exception
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.put("test_key", b"test_value")
        # Should still return True because store1 succeeded
        assert result is True
        mock_logger.error.assert_called_once()
        assert "Test exception" in mock_logger.error.call_args[0][0]

    @patch("marimo._save.stores.tiered.LOGGER")
    def test_hit_with_exception(self, mock_logger) -> None:
        """Test handling exceptions during hit operation."""
        store1 = MockStore()
        # Create a store that will raise an exception
        store2 = MockStore()
        store2.hit = throw_exception
        tiered_store = TieredStore([store1, store2])
        result = tiered_store.hit("test_key")
        assert result is False
        mock_logger.error.assert_called_once()
        assert "Test exception" in mock_logger.error.call_args[0][0]

    @patch("marimo._save.stores.tiered.LOGGER")
    def test_update_preceding_stores_with_exception(self, mock_logger) -> None:
        """Test handling exceptions during update_preceding_stores operation."""
        store1 = MockStore()
        store1.put = throw_exception
        store2 = MockStore()
        key = "test_key"
        value = b"test_value"
        store2._cache[key] = value
        tiered_store = TieredStore([store1, store2])
        # This should trigger _update_preceding_stores; the backfill into
        # store1 fails, but the value from store2 is still returned.
        result = tiered_store.get(key)
        assert result == value
        mock_logger.error.assert_called_once()
        assert "Test exception" in mock_logger.error.call_args[0][0]
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_save/stores/test_tiered.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/ipython/progress.py | import marimo
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell
def __1():
import time
from transformers.utils.notebook import NotebookProgressCallback
from transformers.training_args import IntervalStrategy
return IntervalStrategy, NotebookProgressCallback, time
@app.cell
def _(
MockTrainingArgs,
MockTrainingControl,
MockTrainingState,
NotebookProgressCallback,
time,
):
def simulate_transformers_training():
# Training parameters
num_train_epochs = 5
steps_per_epoch = 100
total_steps = num_train_epochs * steps_per_epoch
# Initialize state, args, and control
state = MockTrainingState(total_steps, num_train_epochs)
args = MockTrainingArgs()
control = MockTrainingControl()
# Initialize the callback
callback = NotebookProgressCallback()
# Start training
callback.on_train_begin(args, state, control)
# Simulate epochs
for epoch in range(1, num_train_epochs + 1):
state.epoch = epoch
# Simulate steps within epoch
for step in range(1, steps_per_epoch + 1):
state.global_step = (epoch - 1) * steps_per_epoch + step
# Simulate work
time.sleep(0.01) # Reduced sleep time for faster simulation
# Update progress
callback.on_step_end(args, state, control)
# Call on_evaluate
callback.on_evaluate(args, state, control, metrics={})
# End training
callback.on_train_end(args, state, control)
simulate_transformers_training()
return
@app.cell
def _(IntervalStrategy):
# Create a mock training state and args for the callback
class MockTrainingState:
def __init__(self, max_steps, num_train_epochs):
self.max_steps = max_steps
self.num_train_epochs = num_train_epochs
self.global_step = 0
self.epoch = 0
self.log_history = []
class MockTrainingArgs:
def __init__(self):
self.eval_strategy = IntervalStrategy.EPOCH
class MockTrainingControl:
def __init__(self):
pass
return MockTrainingArgs, MockTrainingControl, MockTrainingState
if __name__ == "__main__":
app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/ipython/progress.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_runtime/watch/_directory.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import hashlib
import os
import sys
import time
from pathlib import Path
from typing import TYPE_CHECKING, Callable, cast
from marimo._output.rich_help import mddoc
from marimo._runtime.watch._path import (
WATCHER_SLEEP_INTERVAL,
PathState,
write_side_effect,
)
if TYPE_CHECKING:
import threading
from collections.abc import Iterable
# For testing only - do not use in production
_TEST_SLEEP_INTERVAL: float | None = None
def walk(path: Path) -> Iterable[tuple[Path, list[str], list[str]]]:
    """Yield (root, dirnames, filenames) triples for the tree at ``path``.

    Uses Path.walk() on Python 3.12+, falling back to os.walk() (whose
    roots are str rather than Path) on older interpreters.
    """
    if sys.version_info < (3, 12):
        return os.walk(path)
    return path.walk()
def _hashable_walk(
walked: Iterable[tuple[Path, list[str], list[str]]],
) -> set[tuple[Path, tuple[str], tuple[str]]]:
return cast(
set[tuple[Path, tuple[str], tuple[str]]],
set((p, *map(tuple, r)) for p, *r in walked),
)
def hashable_walk(path: Path) -> set[tuple[Path, tuple[str], tuple[str]]]:
    """Take a hashable snapshot of the directory tree rooted at ``path``."""
    snapshot = walk(path)
    return _hashable_walk(snapshot)
def watch_directory(
    path: Path, state: DirectoryState, should_exit: threading.Event
) -> None:
    """Watch a directory for changes and update the state.

    Polls the directory structure (names only, not file contents) and
    pushes ``path`` into ``state`` whenever the structure differs from the
    last snapshot. Runs until ``should_exit`` is set.
    """
    last_structure = hashable_walk(path)
    current_structure = last_structure
    # _TEST_SLEEP_INTERVAL lets tests shrink the polling period.
    sleep_interval = _TEST_SLEEP_INTERVAL or WATCHER_SLEEP_INTERVAL
    while not should_exit.is_set():
        time.sleep(sleep_interval)
        try:
            current_structure = hashable_walk(path)
        except FileNotFoundError:
            # Directory has been deleted, trigger a change
            current_structure = set()
        except Exception as e:
            # Handle other exceptions (e.g., permission denied)
            sys.stderr.write(f"Error watching directory {path}: {e}\n")
            continue
        if current_structure != last_structure:
            last_structure = current_structure
            state._set_value(path)
class DirectoryState(PathState):
    """Reactive wrapper around a directory path.

    Exposes read-only directory operations; each one records a side effect
    so the runtime can track what was observed.
    """

    # Mutating and file-content Path methods are not proxied on directories.
    _forbidden_attributes = {
        "open",
        "rename",
        "replace",
        "remove",
        "unlink",
        "write_text",
        "write_bytes",
        "read_text",
        "read_bytes",
        "mkdir",
        "touch",
    }
    _target: Callable[[Path, DirectoryState, threading.Event], None] = (
        staticmethod(watch_directory)
    )

    def walk(self) -> Iterable[tuple[Path, list[str], list[str]]]:
        """Walk the directory."""
        # BUGFIX: walk() returns a one-shot generator; hashing it for the
        # side effect used to exhaust it, so callers always received an
        # empty iterator. Materialize once, then both hash and return it.
        items = list(walk(self._value))
        as_list = list(_hashable_walk(items))
        write_side_effect(f"walk:{sorted(as_list)}")
        return iter(items)

    def iterdir(self) -> Iterable[Path]:
        """Iterate over the directory."""
        items = list(self._value.iterdir())
        write_side_effect(f"iterdir:{items}")
        return iter(items)

    def glob(self, pattern: str) -> Iterable[Path]:
        """Glob the directory."""
        items = list(self._value.glob(pattern))
        write_side_effect(f"glob:{items}")
        return iter(items)

    def rglob(self, pattern: str) -> Iterable[Path]:
        """Recursive glob the directory."""
        items = list(self._value.rglob(pattern))
        write_side_effect(f"rglob:{items}")
        return iter(items)

    def __repr__(self) -> str:
        """Return a string representation of the file state."""
        _walk = self.walk()  # Call to issue side effect
        _hash = hashlib.sha256(f"{list(_walk)}".encode()).hexdigest()
        return f"DirectoryState({self._value}: {_hash})"
@mddoc
def directory(path: Path | str) -> DirectoryState:
    """
    A reactive wrapper for directory paths.

    This function takes a directory path to watch and returns a wrapper to
    reactively list the contents of the directory.

    This object will trigger dependent cells to re-evaluate when the directory
    structure is changed (i.e., files are added or removed).

    Note:
        This function does NOT react to file content changes, only to changes in
        the directory structure. Utilize `mo.watch.file()` to watch for changes
        in specific files. Additional note: this will not follow symlinks.

    Args:
        path: Path to watch.

    Returns:
        A reactive wrapper for watching the directory.
    """
    resolved = Path(path) if isinstance(path, str) else path
    if not resolved.is_dir():
        raise ValueError("Path must be a directory, use mo.file()")
    return DirectoryState(resolved, allow_self_loops=True)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/watch/_directory.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_runtime/watch/_file.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import hashlib
import sys
import threading
import time
from pathlib import Path
from typing import Any, Callable
from marimo._output.rich_help import mddoc
from marimo._runtime.watch._path import (
WATCHER_SLEEP_INTERVAL,
PathState,
write_side_effect,
)
# For testing only - do not use in production
_TEST_SLEEP_INTERVAL: float | None = None
def watch_file(
    path: Path, state: FileState, should_exit: threading.Event
) -> None:
    """Watch a file for changes and update the state.

    Polls the file's mtime and pushes ``path`` into ``state`` whenever it
    changes, unless the change was made through the state itself (the
    debounce flag). Runs until ``should_exit`` is set.
    """
    last_mtime: float = 0
    current_mtime = last_mtime
    # _TEST_SLEEP_INTERVAL lets tests shrink the polling period.
    sleep_interval = _TEST_SLEEP_INTERVAL or WATCHER_SLEEP_INTERVAL
    while not should_exit.is_set():
        time.sleep(sleep_interval)
        try:
            current_mtime = path.stat().st_mtime
        except FileNotFoundError:
            # File has been deleted, trigger a change
            current_mtime = 0
        except Exception as e:
            # Handle other exceptions (e.g., permission denied)
            sys.stderr.write(f"Error watching file {path}: {e}\n")
            continue
        if current_mtime != last_mtime:
            last_mtime = current_mtime
            # Skip exactly one update if the write came through
            # FileState.write_text/write_bytes (which already re-ran
            # dependents); the lock keeps flag check-and-clear atomic.
            with state._debounce_lock:
                if not state._debounced:
                    state._set_value(path)
                state._debounced = False
class FileState(PathState):
    """Reactive wrapper around a single file path.

    Reads record side effects; writes additionally set a debounce flag so
    the polling watcher does not double-fire for a change the state itself
    made.
    """

    # Directory-oriented and raw-handle Path methods are not proxied.
    _forbidden_attributes = {
        "open",
        "iterdir",
        "glob",
        "rglob",
        "mkdir",
        "rename",
        "replace",
        "walk",
    }
    _target: Callable[[Path, FileState, threading.Event], None] = staticmethod(
        watch_file
    )

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # _debounced is read/cleared by the watcher thread; guard with a lock.
        self._debounced = False
        self._debounce_lock = threading.Lock()

    def read_text(self) -> str:
        """Read the file as a string."""
        text = self._value.read_text()
        # Side effect carries the content so reactivity keys off what was read.
        write_side_effect(f"read_text:{text}")
        return text

    def write_text(self, value: str) -> int:
        """Write the file as a string."""
        response = self._value.write_text(value)
        # Read back so the recorded side effect reflects on-disk content.
        text = self._value.read_text()
        write_side_effect(f"write_text:{text}")
        # Mark debounced, then re-run dependents ourselves; the watcher will
        # skip the mtime change this write caused.
        with self._debounce_lock:
            self._debounced = True
        self._set_value(self._value)
        return response

    def read_bytes(self) -> bytes:
        """Read the file as bytes."""
        data = self._value.read_bytes()
        write_side_effect(f"read_bytes:{data!r}")
        return data

    def write_bytes(self, value: bytes) -> int:
        """Write the file as bytes."""
        response = self._value.write_bytes(value)
        data = self._value.read_bytes()
        write_side_effect(f"write_bytes:{data!r}")
        with self._debounce_lock:
            self._debounced = True
        self._set_value(self._value)
        return response

    def __repr__(self) -> str:
        """Return a string representation of the file state."""
        if not self._value.exists():
            return f"FileState({self._value}: File not found)"
        _hash = hashlib.sha256(self._value.read_bytes()).hexdigest()
        return f"FileState({self._value}: {_hash})"
@mddoc
def file(path: Path | str) -> FileState:
    """
    A reactive wrapper for file paths.

    This function takes a file path to watch and returns a wrapper to reactively
    read and write from the file.

    The "wrapped" file Path object exposes most of the same methods as the
    [pathlib.Path object](https://docs.python.org/3/library/pathlib.html#pathlib.Path),
    with a few exceptions. The following methods are not available:

    - `open()`
    - `rename()`
    - `replace()`

    This object will trigger dependent cells to re-evaluate when the file is
    changed.

    Warning:
        It is possible to misuse this API in similar ways to `state()`. Consider
        reading the warning and caveats in the
        [`state()` documentation](state.md), and using this function only when
        reading file paths, and not when writing them.

    Args:
        path: Path to watch.

    Returns:
        A reactive wrapper for watching the file path.
    """
    resolved = Path(path) if isinstance(path, str) else path
    if resolved.is_dir():
        raise ValueError(
            "Path must be a file, not a directory, use mo.directory() instead"
        )
    return FileState(resolved, allow_self_loops=True)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/watch/_file.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_runtime/watch/_path.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import sys
import threading
from pathlib import Path
from typing import Any, Callable, TypeVar
from marimo import _loggers
from marimo._runtime.context import (
ContextNotInitializedError,
get_context,
runtime_context_installed,
)
from marimo._runtime.side_effect import SideEffect
from marimo._runtime.state import State
from marimo._runtime.threads import Thread
from marimo._utils.platform import is_pyodide
LOGGER = _loggers.marimo_logger()
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
T = TypeVar("T")
WATCHER_SLEEP_INTERVAL = 1.0
def write_side_effect(data: str | bytes) -> None:
    """Record a side effect on the current runtime context, if one exists."""
    try:
        ctx = get_context()
    except ContextNotInitializedError:
        # No runtime context (e.g. script mode): nothing to record.
        return
    ctx.cell_lifecycle_registry.add(SideEffect(data))
class PathState(State[Path]):
    """Base class for reactive path watchers.

    Args:
        path: The filesystem path to watch for changes.
        allow_self_loops: Whether to allow self-referential reactivity.
    """

    # Path attributes subclasses refuse to proxy (mutating / wrong-kind ops).
    _forbidden_attributes: set[str]
    # Polling function run on a daemon thread; supplied by subclasses.
    _target: Callable[[Path, Self, threading.Event], None]

    def __init__(
        self,
        path: Path,
        *args: Any,
        allow_self_loops: bool = True,
        **kwargs: Any,
    ) -> None:
        # _context is fixed to "file" by this class; reject overrides.
        if kwargs.pop("_context", None) is not None:
            raise ValueError(
                "The '_context' argument is not supported for this class."
            )
        # NOTE(review): allow_self_loops is a declared keyword parameter
        # above, so it can never land in **kwargs and this branch looks
        # unreachable — confirm intent.
        if kwargs.pop("allow_self_loops", None) is not None:
            raise ValueError(
                "The 'allow_self_loops' argument is not supported for this class."
            )
        # Mypy seems to think we could provide multiple kwargs definitions here
        # but we can't.
        super().__init__(
            path,
            *args,
            allow_self_loops=allow_self_loops,
            _context="file",
            **kwargs,
        )  # type: ignore[misc]
        self._should_exit = threading.Event()
        # Only bother with the watcher if the context is installed
        # State is not enabled in script mode
        if runtime_context_installed():
            # File watching with threads is not supported in Pyodide/WASM
            # The file can still be read/written, but won't trigger reactive
            # updates when changed externally
            if is_pyodide():
                LOGGER.warning(
                    "Reactive file watching is not supported in "
                    "Pyodide/WebAssembly. The file can still be read/written, "
                    "but external changes won't trigger reactive updates."
                )
            else:
                Thread(
                    target=self._target,
                    args=(path, self, self._should_exit),
                    daemon=True,
                ).start()

    def __getattr__(self, name: str) -> Any:
        """Get an attribute from the file path."""
        # Disable some attributes
        if name in self._forbidden_attributes:
            raise AttributeError(
                f"'{self.__class__.__name__}' does not "
                f"expose attribute '{name}'"
            )
        # Everything else is delegated to the wrapped Path.
        if hasattr(self._value, name):
            return getattr(self._value, name)
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{name}'"
        )

    def __del__(self) -> None:
        # Stop the watcher thread when this state is garbage-collected.
        self._should_exit.set()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._value})"

    def __fspath__(self) -> str:
        # Lets a PathState be used wherever os.PathLike is accepted.
        return self._value.__fspath__()

    def __str__(self) -> str:
        return str(self._value)

    def exists(self) -> bool:
        """Check if the path exists."""
        exists = self._value.exists()
        if not exists:
            write_side_effect(f"doesn't exists:{self._value}")
        else:
            # NOTE(review): relies on the subclass exposing read_text();
            # DirectoryState lists read_text in _forbidden_attributes, so
            # exists() on a directory would raise AttributeError — confirm.
            _ = self.read_text()
        return exists
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/watch/_path.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_runtime/watch/test_watch.py | # Copyright 2026 Marimo. All rights reserved.
import asyncio
from pathlib import Path
import pytest
from marimo._runtime.runtime import Kernel
from tests.conftest import ExecReqProvider
@pytest.mark.xfail(
    True, reason="Flaky in CI, can't repro locally", strict=False
)
async def test_read_and_write_path(
    execution_kernel: Kernel, exec_req: ExecReqProvider, tmp_path: Path
) -> None:
    """A write through mo.watch.file re-runs cells that read the file."""
    k = execution_kernel
    await k.run(
        [
            exec_req.get("from pathlib import Path"),
            exec_req.get(
                f'tmp = Path("{tmp_path.as_posix()}") / "test.txt"; tmp.write_text("0")'
            ),
            # Shrink the watcher poll interval so the test finishes quickly.
            exec_req.get(
                """
                import time
                import marimo as mo
                mo.watch._file._TEST_SLEEP_INTERVAL = 0.05
                """
            ),
            exec_req.get("path = mo.watch.file(tmp)"),
            exec_req.get("x = path.read_text()"),
            exec_req.get(
                """
                x
                if x == "0":
                    path.write_text("1")
                """
            ),
        ]
    )
    # Give the watcher/reactivity a chance to propagate the write.
    await asyncio.sleep(0.1)
    assert not k.stdout.messages, k.stdout
    assert not k.stderr.messages, k.stderr
    assert k.globals["x"] == "1"
@pytest.mark.xfail(
    True, reason="Flaky in CI, can't repro locally", strict=False
)
async def test_read_and_write_iteration(
    execution_kernel: Kernel, exec_req: ExecReqProvider, tmp_path: Path
) -> None:
    """Repeated self-triggering writes converge (0 -> 00 -> 000, then stop)."""
    k = execution_kernel
    await k.run(
        [
            exec_req.get("from pathlib import Path"),
            exec_req.get(
                f'tmp = Path("{tmp_path.as_posix()}") / "test.txt"; tmp.touch()'
            ),
            exec_req.get(
                """
                import asyncio
                import marimo as mo
                mo.watch._file._TEST_SLEEP_INTERVAL = 0.01
                """
            ),
            exec_req.get("path = mo.watch.file(tmp)"),
            exec_req.get("x = path.read_bytes()"),
            exec_req.get(
                """
                x
                if len(x) < 3:
                    print("Writing 0")
                    path.write_bytes(b"0" * (len(x) + 1))
                """
            ),
        ]
    )
    await asyncio.sleep(0.1)
    assert not k.stderr.messages, k.stderr
    assert k.globals["x"] == b"000"
@pytest.mark.xfail(
    True, reason="Flaky in CI, can't repro locally", strict=False
)
async def test_allow_self_loops(
    execution_kernel: Kernel, exec_req: ExecReqProvider, tmp_path: Path
) -> None:
    """A cell that both reads and writes the watched file may re-trigger
    itself (allow_self_loops) until the fixed point is reached."""
    k = execution_kernel
    await k.run(
        [
            exec_req.get("from pathlib import Path"),
            exec_req.get(
                """
                import asyncio
                import marimo as mo
                mo.watch._file._TEST_SLEEP_INTERVAL = 0.01
                """
            ),
            exec_req.get(
                f'tmp = Path("{tmp_path.as_posix()}") / "test.txt"; tmp.touch()'
            ),
            exec_req.get("path = mo.watch.file(tmp)"),
            exec_req.get(
                """
                path()  # Just returns the path
                x = len(path.read_bytes())
                if x < 3:
                    path.write_bytes(b"0" * (x + 1))
                """
            ),
        ]
    )
    await asyncio.sleep(0.1)
    # Flakey in CI 3.13
    await k.run([])
    assert not k.stderr.messages, k.stderr
    assert k.globals["x"] == 3
@pytest.mark.xfail(
True, reason="Flaky in CI, can't repro locally", strict=False
)
async def test_directory_watch(
execution_kernel: Kernel, exec_req: ExecReqProvider, tmp_path: Path
) -> None:
k = execution_kernel
await k.run(
[
exec_req.get("from pathlib import Path"),
exec_req.get(f'tmp = Path("{tmp_path.as_posix()}")'),
exec_req.get(
"""
import time
import marimo as mo
mo.watch._directory._TEST_SLEEP_INTERVAL = 0.05
"""
),
exec_req.get("path = mo.watch.directory(tmp)"),
exec_req.get("x = len(list(path.glob('*')))"),
exec_req.get(
"""
x
if x == 0:
(tmp / "test.txt").write_text("1")
"""
),
]
)
await asyncio.sleep(0.25)
await k.run([])
assert not k.stderr.messages, k.stderr
assert k.globals["x"] == 1
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_runtime/watch/test_watch.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/_server/rtc/test_rtc_doc.py | from __future__ import annotations
import asyncio
import sys
from typing import TYPE_CHECKING, cast
import pytest
from marimo._server.file_router import MarimoFileKey
from marimo._server.rtc.doc import LoroDocManager
from marimo._types.ids import CellId_t
# The loro bindings are only imported on Python 3.11–3.13; every test below
# carries a matching skipif guard for the same version range.
if sys.version_info >= (3, 11) and sys.version_info < (3, 14):
    from loro import LoroDoc, LoroText
# Module-level manager shared by all tests; the setup_doc_manager fixture
# clears its state before and after each test.
doc_manager = LoroDocManager()
if TYPE_CHECKING:
    from collections.abc import AsyncGenerator
# NOTE(review): all tests mutate the shared module-level doc_manager; this
# fixture resets its three dicts so tests stay independent.
@pytest.fixture  # type: ignore
async def setup_doc_manager() -> AsyncGenerator[None, None]:
    """Setup and teardown for loro_docs tests"""
    # Clear any existing loro docs
    doc_manager.loro_docs.clear()
    doc_manager.loro_docs_clients.clear()
    doc_manager.loro_docs_cleaners.clear()
    yield
    # Cleanup after test
    doc_manager.loro_docs.clear()
    doc_manager.loro_docs_clients.clear()
    doc_manager.loro_docs_cleaners.clear()
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_quick_reconnection(setup_doc_manager: None) -> None:
    """Test that quick reconnection properly handles cleanup task cancellation"""
    del setup_doc_manager
    # Setup
    file_key = MarimoFileKey("test_file")
    # Create initial loro_doc
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    # Setup client queue
    update_queue = asyncio.Queue[bytes]()
    doc_manager.loro_docs_clients[file_key] = {update_queue}
    # Start cleanup task
    cleanup_task = asyncio.create_task(doc_manager._clean_loro_doc(file_key))
    # Simulate quick reconnection by creating a new client before cleanup finishes
    new_queue = asyncio.Queue[bytes]()
    doc_manager.loro_docs_clients[file_key].add(new_queue)
    # Cancel cleanup task
    cleanup_task.cancel()
    try:
        await cleanup_task
    except asyncio.CancelledError:
        pass
    # Verify state
    assert len(doc_manager.loro_docs) == 1
    assert (
        len(doc_manager.loro_docs_clients[file_key]) == 2
    )  # Original client + reconnected client
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_two_users_sync(setup_doc_manager: None) -> None:
    """Test that two users can connect and sync text properly without duplicates"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    cell_id = str(CellId_t("test_cell"))  # Convert CellId to string for loro
    # First user connects
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    # Setup client queues for both users
    queue1 = asyncio.Queue[bytes]()
    queue2 = asyncio.Queue[bytes]()
    doc_manager.loro_docs_clients[file_key] = {queue1, queue2}
    # Get maps from doc
    doc_codes = doc.get_map("codes")
    doc_languages = doc.get_map("languages")
    # Add text to the doc using get_or_create_container
    code_text = doc_codes.get_or_create_container(cell_id, LoroText())
    code_text_typed = cast(LoroText, code_text)
    code_text_typed.insert(0, "print('hello')")
    lang_text = doc_languages.get_or_create_container(cell_id, LoroText())
    lang_text_typed = cast(LoroText, lang_text)
    lang_text_typed.insert(0, "python")
    # Verify state
    assert len(doc_manager.loro_docs) == 1
    assert len(doc_manager.loro_docs_clients[file_key]) == 2
    # Make sure we can get the text content
    assert code_text_typed.to_string() == "print('hello')"
    # Second user makes changes - no need to retrieve the text again
    code_text_typed.insert(
        len(code_text_typed.to_string()), "\nprint('world')"
    )
    # Verify changes propagate
    assert code_text_typed.to_string() == "print('hello')\nprint('world')"
    assert lang_text_typed.to_string() == "python"
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_concurrent_doc_creation(setup_doc_manager: None) -> None:
    """Test concurrent doc creation doesn't cause issues"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    cell_ids = (CellId_t("cell1"), CellId_t("cell2"))
    codes = ("print('hello')", "print('world')")
    # Create multiple tasks that try to create the same doc
    tasks = [
        doc_manager.create_doc(file_key, cell_ids, codes) for _ in range(5)
    ]
    docs = await asyncio.gather(*tasks)
    # All tasks should return the same doc instance
    assert all(doc is docs[0] for doc in docs)
    assert len(doc_manager.loro_docs) == 1
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_concurrent_client_operations(
    setup_doc_manager: None,
) -> None:
    """Test concurrent client operations don't cause deadlocks"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    # Create multiple client queues
    queues = [asyncio.Queue[bytes]() for _ in range(5)]
    doc_manager.loro_docs_clients[file_key] = set(queues)
    # Concurrently add and remove clients
    async def client_operation(queue: asyncio.Queue[bytes]) -> None:
        doc_manager.add_client_to_doc(file_key, queue)
        await asyncio.sleep(0.1)  # Simulate some work
        await doc_manager.remove_client(file_key, queue)
    tasks = [client_operation(queue) for queue in queues]
    await asyncio.gather(*tasks)
    # Verify final state
    assert len(doc_manager.loro_docs_clients[file_key]) == 0
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_cleanup_task_management(setup_doc_manager: None) -> None:
    """Test cleanup task management and cancellation"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    # Add and remove a client to trigger cleanup
    queue = asyncio.Queue[bytes]()
    doc_manager.add_client_to_doc(file_key, queue)
    await doc_manager.remove_client(file_key, queue)
    # Verify cleanup task was created
    assert file_key in doc_manager.loro_docs_cleaners
    assert doc_manager.loro_docs_cleaners[file_key] is not None
    # Add a new client before cleanup finishes
    new_queue = asyncio.Queue[bytes]()
    doc_manager.add_client_to_doc(file_key, new_queue)
    # Wait for the task to be cancelled
    await asyncio.sleep(0.1)
    # Verify cleanup task was cancelled and removed
    # TODO: not sure why this is still here.
    # assert doc_manager.loro_docs_cleaners[file_key] is None
    # Clean up
    await doc_manager.remove_client(file_key, new_queue)
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_broadcast_update(setup_doc_manager: None) -> None:
    """Test broadcast update functionality"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    # Create multiple client queues
    queues = [asyncio.Queue[bytes]() for _ in range(3)]
    doc_manager.loro_docs_clients[file_key] = set(queues)
    # Broadcast a message
    message = b"test message"
    await doc_manager.broadcast_update(
        file_key, message, exclude_queue=queues[0]
    )
    # Verify all queues except excluded one received the message
    for i, queue in enumerate(queues):
        if i == 0:
            assert queue.empty()
        else:
            assert await queue.get() == message
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_remove_nonexistent_doc(setup_doc_manager: None) -> None:
    """Test removing a doc that doesn't exist"""
    del setup_doc_manager
    file_key = MarimoFileKey("nonexistent")
    await doc_manager.remove_doc(file_key)
    assert file_key not in doc_manager.loro_docs
    assert file_key not in doc_manager.loro_docs_clients
    assert file_key not in doc_manager.loro_docs_cleaners
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_remove_nonexistent_client(setup_doc_manager: None) -> None:
    """Test removing a client that doesn't exist"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    queue = asyncio.Queue[bytes]()
    await doc_manager.remove_client(file_key, queue)
    assert file_key not in doc_manager.loro_docs_clients
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_concurrent_doc_removal(setup_doc_manager: None) -> None:
    """Test concurrent doc removal doesn't cause issues"""
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    # Create multiple tasks that try to remove the same doc
    tasks = [doc_manager.remove_doc(file_key) for _ in range(5)]
    await asyncio.gather(*tasks)
    # Verify doc was removed
    assert file_key not in doc_manager.loro_docs
    assert file_key not in doc_manager.loro_docs_clients
    assert file_key not in doc_manager.loro_docs_cleaners
@pytest.mark.skipif(
    "sys.version_info < (3, 11) or sys.version_info >= (3, 14)"
)
async def test_prevent_lock_deadlock(setup_doc_manager: None) -> None:
    """Test that our deadlock prevention measures work correctly.
    This test simulates the scenario that could cause a deadlock:
    1. A client disconnects, starting the cleanup process
    2. Another operation acquires the lock before cleanup timer finishes
    3. Cleanup timer expires and tries to acquire the lock
    The fixed implementation should handle this without deadlocking.
    """
    del setup_doc_manager
    file_key = MarimoFileKey("test_file")
    # Create a doc and add a client
    doc = LoroDoc()
    doc_manager.loro_docs[file_key] = doc
    queue = asyncio.Queue[bytes]()
    doc_manager.add_client_to_doc(file_key, queue)
    # Set a very short cleanup timeout for testing
    original_timeout = 60.0
    cleanup_timeout = 0.1  # 100ms
    # Create a barrier to coordinate tasks
    barrier = asyncio.Barrier(2)
    long_operation_done = asyncio.Event()
    # Task 1: Remove client, which will schedule cleanup with short timeout
    async def remove_client_task() -> None:
        await doc_manager.remove_client(file_key, queue)
        # Wait at barrier to synchronize with the long operation
        await barrier.wait()
        # Wait for long operation to complete
        await long_operation_done.wait()
    # Task 2: Simulate a long operation that holds the lock
    async def long_lock_operation() -> None:
        # Wait for remove_client to schedule the cleanup
        await barrier.wait()
        # Acquire the lock and hold it for longer than the cleanup timeout
        async with doc_manager.loro_docs_lock:
            # Sleep while holding the lock (longer than cleanup timeout)
            await asyncio.sleep(cleanup_timeout * 2)
        # Signal that we're done holding the lock
        long_operation_done.set()
    # Modified test version of _clean_loro_doc with shorter timeout
    original_clean_loro_doc = doc_manager._clean_loro_doc
    async def test_clean_loro_doc(
        file_key: MarimoFileKey, timeout: float = original_timeout
    ) -> None:
        del timeout
        # Override timeout with our test value
        await original_clean_loro_doc(file_key, cleanup_timeout)
    # Override the method for this test
    doc_manager._clean_loro_doc = test_clean_loro_doc
    try:
        # Run both tasks simultaneously
        task1 = asyncio.create_task(remove_client_task())
        task2 = asyncio.create_task(long_lock_operation())
        # This should complete without deadlocking
        await asyncio.gather(task1, task2)
        # Verify the doc was properly cleaned up
        assert file_key not in doc_manager.loro_docs
        assert file_key not in doc_manager.loro_docs_clients
        assert file_key not in doc_manager.loro_docs_cleaners
    finally:
        # Restore the original method
        doc_manager._clean_loro_doc = original_clean_loro_doc
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_server/rtc/test_rtc_doc.py",
"license": "Apache License 2.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_smoke_tests/async/background_tasks.py | import marimo
# Smoke-test notebook: exercises asyncio background tasks inside marimo cells.
# Version of marimo that generated this notebook file.
__generated_with = "0.15.5"
app = marimo.App(width="medium")
@app.cell(hide_code=True)
def _():
    import time
    import asyncio
    async def background_task(name, seconds):
        """A simple task that prints messages at intervals."""
        print(f"Task {name} started")
        for i in range(seconds):
            print(f"Task {name}: working... ({i + 1}/{seconds})")
            await asyncio.sleep(1)
        print(f"Task {name} completed")
        return f"{name} result"
    return asyncio, background_task
@app.cell
def _(asyncio, background_task):
    # Run this cell for a new background task
    a = asyncio.create_task(background_task("A", 2))
    return
@app.cell
def _(asyncio, refresh):
    # This list should have at least one task (the kernel). When creating tasks above, they should be added and then removed.
    refresh
    list(asyncio.all_tasks())
    return
@app.cell(hide_code=True)
def _():
    import marimo as mo
    # Timer element that reruns dependent cells every second.
    refresh = mo.ui.refresh(options=["1s"], default_interval="1s")
    refresh
    return (refresh,)
if __name__ == "__main__":
    app.run()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_smoke_tests/async/background_tasks.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_runtime/side_effect.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import hashlib
from typing import TYPE_CHECKING
from marimo._runtime.cell_lifecycle_item import CellLifecycleItem
if TYPE_CHECKING:
from marimo._runtime.context.types import RuntimeContext
class SideEffect(CellLifecycleItem):
    """Cell lifecycle marker recording that some side effect occurred."""

    def __init__(self, key: str | bytes) -> None:
        self._key = key

    @property
    def key(self) -> bytes:
        """The lookup key, normalized to bytes."""
        assert self._key is not None
        raw = self._key
        return raw if isinstance(raw, bytes) else raw.encode("utf-8")

    @property
    def hash(self) -> bytes:
        """Hash the lookup to a consistent size."""
        digest = hashlib.sha256(self.key)
        return digest.digest()

    def create(self, context: RuntimeContext | None) -> None:
        """NoOp for side effect.

        Typically a hook to expose the object to the context, but the mere
        existence of the object is enough for side effect tracking.
        """
        del context

    def dispose(self, context: RuntimeContext, deletion: bool) -> bool:  # noqa: ARG002
        """Clean up and mark the object for deletion."""
        # A side effect is just a cell-level marker that an event happened,
        # so it can always be disposed.
        del context
        del deletion
        return True
class CellHash(CellLifecycleItem):
    """Execution as a side effect to prevent the recomputation of a cell for
    recursive or repeated calls.
    """

    def __init__(self, key: str | bytes) -> None:
        self._key = key

    @property
    def key(self) -> bytes:
        """The lookup key, normalized to bytes."""
        assert self._key is not None
        raw = self._key
        if isinstance(raw, bytes):
            return raw
        return raw.encode("utf-8")

    @property
    def hash(self) -> bytes:
        """Hash the lookup to a consistent size."""
        digest = hashlib.sha256(self.key)
        return digest.digest()

    def create(self, context: RuntimeContext | None) -> None:
        """NoOp for side effect.

        Typically a hook to expose the object to the context, but the mere
        existence of the object is enough for side effect tracking.
        """

    def dispose(self, context: RuntimeContext, deletion: bool) -> bool:  # noqa: ARG002
        """Clean up and mark the object for deletion."""
        # Cell hashes are plain markers, so disposal is always permitted.
        return True
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_runtime/side_effect.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_server/rtc/doc.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Optional
from marimo import _loggers
from marimo._server.file_router import MarimoFileKey
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from loro import LoroDoc
LOGGER = _loggers.marimo_logger()
class LoroDocManager:
    """Owns the CRDT state for real-time collaboration (RTC).

    For each file key this tracks the ``LoroDoc``, the set of connected
    client update queues, and an optional deferred cleanup task that drops
    the doc once it has had no clients for a while. ``loro_docs_lock``
    guards all three mappings.
    """

    def __init__(self) -> None:
        # Active CRDT documents, keyed by file.
        self.loro_docs: dict[MarimoFileKey, LoroDoc] = {}
        # Guards the three dicts on this instance.
        self.loro_docs_lock = asyncio.Lock()
        # Per-file set of connected client update queues.
        self.loro_docs_clients: dict[
            MarimoFileKey, set[asyncio.Queue[bytes]]
        ] = {}
        # Per-file deferred cleanup tasks (None once cancelled).
        self.loro_docs_cleaners: dict[
            MarimoFileKey, Optional[asyncio.Task[None]]
        ] = {}

    async def _clean_loro_doc(
        self, file_key: MarimoFileKey, timeout: float = 60
    ) -> None:
        """Remove the doc for ``file_key`` after ``timeout`` seconds if it
        still has no clients.

        Cancellation is expected and benign: a reconnecting client cancels
        this task via ``get_or_create_doc``/``remove_client``.
        """
        try:
            await asyncio.sleep(timeout)
            async with self.loro_docs_lock:
                if (
                    file_key in self.loro_docs_clients
                    and len(self.loro_docs_clients[file_key]) == 0
                ):
                    LOGGER.debug(
                        f"RTC: Removing loro doc for file {file_key} as it has no clients"
                    )
                    # Clean up the document
                    await self._do_remove_doc(file_key)
        except asyncio.CancelledError:
            # Task was cancelled due to client reconnection
            LOGGER.debug(
                f"RTC: clean_loro_doc task cancelled for file {file_key} - likely due to reconnection"
            )

    async def create_doc(
        self,
        file_key: MarimoFileKey,
        cell_ids: tuple[CellId_t, ...],
        codes: tuple[str, ...],
    ) -> LoroDoc:
        """Create the loro doc for ``file_key``, seeding one text container
        per cell; returns the existing doc if one was created concurrently.

        Raises:
            AssertionError: if ``cell_ids`` and ``codes`` differ in length.
        """
        from loro import LoroDoc, LoroText

        assert len(cell_ids) == len(codes), (
            "cell_ids and codes must be the same length"
        )

        async with self.loro_docs_lock:
            # Another coroutine may have created the doc while we waited
            # on the lock.
            if file_key in self.loro_docs:
                return self.loro_docs[file_key]

            LOGGER.debug(f"RTC: Initializing LoroDoc for file {file_key}")
            doc = LoroDoc()  # type: ignore[no-untyped-call]
            self.loro_docs[file_key] = doc

            # Add all cell code to the doc.
            doc_codes = doc.get_map("codes")
            doc.get_map("languages")
            for cell_id, code in zip(cell_ids, codes):
                cell_text = LoroText()  # type: ignore[no-untyped-call]
                cell_text.insert(0, code)
                doc_codes.insert_container(cell_id, cell_text)
            # We don't set the language here because it will be set
            # when the client connects for the first time.
            return doc

    async def get_or_create_doc(self, file_key: MarimoFileKey) -> LoroDoc:
        """Get the doc for ``file_key``, cancelling any pending cleanup;
        falls back to creating an empty doc (unexpected path)."""
        from loro import LoroDoc

        async with self.loro_docs_lock:
            if file_key in self.loro_docs:
                doc = self.loro_docs[file_key]
                # A returning client cancels the scheduled cleanup.
                cleaner = self.loro_docs_cleaners.get(file_key, None)
                if cleaner is not None:
                    LOGGER.debug(
                        f"RTC: Cancelling existing cleaner for file {file_key}"
                    )
                    cleaner.cancel()
                    self.loro_docs_cleaners[file_key] = None
            else:
                LOGGER.warning(f"RTC: Expected loro doc for file {file_key}")
                doc = LoroDoc()  # type: ignore[no-untyped-call]
                self.loro_docs[file_key] = doc
            return doc

    def add_client_to_doc(
        self, file_key: MarimoFileKey, update_queue: asyncio.Queue[bytes]
    ) -> None:
        """Register a client's update queue for ``file_key``."""
        self.loro_docs_clients.setdefault(file_key, set()).add(update_queue)

    async def broadcast_update(
        self,
        file_key: MarimoFileKey,
        message: bytes,
        exclude_queue: Optional[asyncio.Queue[bytes]] = None,
    ) -> None:
        """Send ``message`` to every client of ``file_key`` except
        ``exclude_queue`` (typically the sender).

        A file key with no registered clients is a no-op (previously this
        raised ``KeyError``).
        """
        clients = self.loro_docs_clients.get(file_key)
        if not clients:
            return
        for client in clients:
            if client is exclude_queue:
                continue
            client.put_nowait(message)

    async def remove_client(
        self,
        file_key: MarimoFileKey,
        update_queue: asyncio.Queue[bytes],
    ) -> None:
        """Remove a client queue; if it was the last one, schedule deferred
        cleanup of the doc."""
        should_create_cleaner = False
        async with self.loro_docs_lock:
            if file_key not in self.loro_docs_clients:
                return
            self.loro_docs_clients[file_key].remove(update_queue)
            # If no clients are connected, set up a cleaner task.
            if len(self.loro_docs_clients[file_key]) == 0:
                # Replace any existing cleaner.
                cleaner = self.loro_docs_cleaners.get(file_key, None)
                if cleaner is not None:
                    cleaner.cancel()
                    self.loro_docs_cleaners[file_key] = None
                should_create_cleaner = True

        # Create the cleaner task outside the lock to avoid deadlocks:
        # the cleaner itself acquires loro_docs_lock.
        if should_create_cleaner:
            self.loro_docs_cleaners[file_key] = asyncio.create_task(
                self._clean_loro_doc(file_key, 60.0)
            )

    async def _do_remove_doc(self, file_key: MarimoFileKey) -> None:
        """Drop all state for ``file_key``; caller must hold
        ``loro_docs_lock``."""
        self.loro_docs.pop(file_key, None)
        self.loro_docs_clients.pop(file_key, None)
        self.loro_docs_cleaners.pop(file_key, None)

    async def remove_doc(self, file_key: MarimoFileKey) -> None:
        """Remove a loro doc and all associated clients."""
        async with self.loro_docs_lock:
            await self._do_remove_doc(file_key)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/rtc/doc.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:marimo/_entrypoints/ids.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import Literal
# Internal entrypoints. Not user-facing as the API is not stable.
# Closed set of supported entry point group names.
KnownEntryPoint = Literal[
    "marimo.cell.executor",
    "marimo.cache.store",
    "marimo.kernel.lifespan",
    "marimo.server.asgi.lifespan",
    "marimo.server.asgi.middleware",
]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_entrypoints/ids.py",
"license": "Apache License 2.0",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_entrypoints/registry.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import os
from importlib.metadata import entry_points
from typing import TYPE_CHECKING, Generic, Optional, TypeVar, cast
from marimo import _loggers
from marimo._entrypoints.ids import KnownEntryPoint
if TYPE_CHECKING:
from importlib.metadata import EntryPoints
T = TypeVar("T")
LOGGER = _loggers.marimo_logger()
class EntryPointRegistry(Generic[T]):
    """A registry of plugins for one entry point group.

    Plugins become available two ways:

    1. An explicit call to ``register(name, value)``.
    2. Python packages that advertise a setuptools entry point in this
       registry's group.

    Allow/deny lists are read from environment variables derived from the
    group name (dots replaced by underscores, upper-cased):

    - MARIMO_{GROUP}_ALLOWLIST: comma-separated list of allowed extensions
    - MARIMO_{GROUP}_DENYLIST: comma-separated list of denied extensions

    Example:
        MARIMO_CELL_EXECUTOR_ALLOWLIST=my-executor,another-executor
        MARIMO_CELL_EXECUTOR_DENYLIST=denied-executor

    Usage:
        registry = EntryPointRegistry[MyType]("my_entrypoint_group")
    """

    def __init__(self, entry_point_group: KnownEntryPoint) -> None:
        """Create an EntryPointRegistry for a named entry point group.

        Args:
            entry_point_group: The name of the entry point group.
        """
        self.entry_point_group: KnownEntryPoint = entry_point_group
        self._plugins: dict[str, T] = {}
        # e.g. marimo.cell.executor -> MARIMO_CELL_EXECUTOR
        self._env_prefix = entry_point_group.replace(".", "_").upper()

    def _is_allowed(self, name: str) -> bool:
        """Return True when ``name`` passes the denylist/allowlist checks."""
        lowered = name.lower()

        # Denylist wins over everything else.
        deny_raw = os.environ.get(f"{self._env_prefix}_DENYLIST")
        if deny_raw is not None:
            denied = {entry.strip().lower() for entry in deny_raw.split(",")}
            if lowered in denied:
                return False

        # When an allowlist exists, only its members are permitted.
        allow_raw = os.environ.get(f"{self._env_prefix}_ALLOWLIST")
        if allow_raw is not None:
            allowed = {entry.strip().lower() for entry in allow_raw.split(",")}
            return lowered in allowed
        return True

    def register(self, name: str, value: T) -> None:
        """Register a plugin under ``name``, unless it is denied.

        Args:
            name: The name of the plugin.
            value: The actual plugin object to register.
        """
        if self._is_allowed(name):
            self._plugins[name] = value
        else:
            LOGGER.debug("Extension ignored %s", name)

    def unregister(self, name: str) -> Optional[T]:
        """Remove and return the plugin registered under ``name``, if any."""
        return self._plugins.pop(name, None)

    def names(self) -> list[str]:
        """Sorted names of registered plugins plus discoverable entry points,
        filtered through the allow/deny lists."""
        candidates = set(self._plugins)
        candidates.update(
            ep.name for ep in get_entry_points(self.entry_point_group)
        )
        return sorted(filter(self._is_allowed, candidates))

    def get(self, name: str) -> T:
        """Return the plugin named ``name``, loading its entry point if needed.

        Raises:
            ValueError: If the plugin is not allowed by allowlist/denylist.
            KeyError: If the plugin cannot be found.
        """
        if not self._is_allowed(name):
            LOGGER.debug("Extension ignored %s", name)
            raise ValueError(f"Extension '{name}' is not allowed")

        try:
            return self._plugins[name]
        except KeyError:
            pass

        for ep in get_entry_points(self.entry_point_group):
            if ep.name == name:
                loaded = ep.load()
                self.register(name, loaded)
                return cast(T, loaded)

        raise KeyError(
            f"No entry point named '{name}' found in group '{self.entry_point_group}'"
        )

    def get_all(self) -> list[T]:
        """All allowed plugins, in name order."""
        return [self.get(name) for name in self.names()]

    def __repr__(self) -> str:
        return f"{type(self).__name__}(group={self.entry_point_group!r}, registered={self.names()!r})"
def get_entry_points(group: KnownEntryPoint) -> EntryPoints:
    """Return the entry points advertised for ``group``.

    Supports both the modern ``EntryPoints.select`` API and the legacy
    dict-like return value of ``entry_points()``.
    """
    eps = entry_points()
    if not hasattr(eps, "select"):
        # Legacy API: a mapping of group name -> list of entry points.
        return eps.get(group, [])  # type: ignore
    return eps.select(group=group)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_entrypoints/registry.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
marimo-team/marimo:marimo/_server/registry.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from typing import TYPE_CHECKING
from marimo._entrypoints.registry import EntryPointRegistry
if TYPE_CHECKING:
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.types import Lifespan
# ASGI middleware contributed by third-party packages via the
# "marimo.server.asgi.middleware" entry point group.
MIDDLEWARE_REGISTRY: EntryPointRegistry[Middleware] = EntryPointRegistry(
    entry_point_group="marimo.server.asgi.middleware"
)
# ASGI lifespans contributed via the "marimo.server.asgi.lifespan" group.
LIFESPAN_REGISTRY: EntryPointRegistry[Lifespan[Starlette]] = (
    EntryPointRegistry(entry_point_group="marimo.server.asgi.lifespan")
)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_server/registry.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_types/lifespan.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
from collections.abc import (
Callable,
Mapping,
)
from contextlib import AbstractAsyncContextManager
from typing import Any, TypeVar, Union
# The application type a lifespan wraps (e.g. an ASGI app).
AppType = TypeVar("AppType")
# A lifespan whose context manager yields nothing.
StatelessLifespan = Callable[[AppType], AbstractAsyncContextManager[None]]
# A lifespan whose context manager yields a Mapping[str, Any] of state.
StatefulLifespan = Callable[
    [AppType], AbstractAsyncContextManager[Mapping[str, Any]]
]
# Either flavor of lifespan.
Lifespan = Union[StatelessLifespan[AppType], StatefulLifespan[AppType]]
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_types/lifespan.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/lifespans.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
import contextlib
from collections.abc import AsyncIterator, Sequence
from contextlib import AbstractAsyncContextManager
from typing import Any, Generic, TypeAlias, TypeVar
from marimo import _loggers
from marimo._types.lifespan import Lifespan
T = TypeVar("T", bound=Any)
LifespanList: TypeAlias = Sequence[Lifespan[T]]
LOGGER = _loggers.marimo_logger()
class Lifespans(Generic[T]):
    """
    A compound lifespan that runs a list of lifespans in order.

    Setup runs in list order; teardown runs in reverse order (LIFO), as
    guaranteed by AsyncExitStack.
    """

    def __init__(
        self,
        lifespans: LifespanList[T],
    ) -> None:
        self._lifespans = lifespans

    def has_lifespans(self) -> bool:
        """Whether any lifespans are registered."""
        return bool(self._lifespans)

    @contextlib.asynccontextmanager
    async def _manager(
        self,
        app: T,
        lifespans: LifespanList[T],
    ) -> AsyncIterator[None]:
        """Enter each lifespan in order; the exit stack unwinds them all."""
        exit_stack = contextlib.AsyncExitStack()
        try:
            async with exit_stack:
                for lifespan in lifespans:
                    # Not every callable has __name__ (e.g. functools.partial
                    # or a callable instance), so fall back to repr().
                    LOGGER.debug(
                        f"Setup: {getattr(lifespan, '__name__', repr(lifespan))}"
                    )
                    await exit_stack.enter_async_context(lifespan(app))
                yield
        except asyncio.CancelledError:
            # Swallow cancellation so shutdown is not surfaced as an error;
            # the exit stack has already run the teardowns by this point.
            pass

    def __call__(self, app: T) -> AbstractAsyncContextManager[None]:
        """Return an async context manager running all lifespans around app."""
        return self._manager(app, lifespans=self._lifespans)

    def __repr__(self) -> str:
        return f"Lifespans({self._lifespans})"
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/lifespans.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:tests/_utils/test_lifespans.py | import asyncio
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from typing import Callable
import pytest
from marimo._types.lifespan import Lifespan
from marimo._utils.lifespans import Lifespans
class MockApp:
    """Records the order in which lifespan setup/teardown hooks run."""

    def __init__(self) -> None:
        # Names of lifespans whose setup ran, in order.
        self.setup_calls: list[str] = []
        # Names of lifespans whose teardown ran, in order.
        self.teardown_calls: list[str] = []
def create_mock_lifespan() -> Callable[[str], Lifespan[MockApp]]:
    """Build a factory producing named lifespans that log to the app."""

    def factory(name: str) -> Lifespan[MockApp]:
        @asynccontextmanager
        async def _lifespan(app: MockApp) -> AsyncIterator[None]:
            # Record setup before handing control back...
            app.setup_calls.append(name)
            try:
                yield
            finally:
                # ...and teardown even if the body raised.
                app.teardown_calls.append(name)

        return _lifespan

    return factory
async def test_empty_lifespans() -> None:
app = MockApp()
lifespans: Lifespans[MockApp] = Lifespans([])
assert not lifespans.has_lifespans()
async with lifespans(app):
assert len(app.setup_calls) == 0
assert len(app.teardown_calls) == 0
async def test_single_lifespan() -> None:
app = MockApp()
mock_lifespan = create_mock_lifespan()
lifespans: Lifespans[MockApp] = Lifespans([mock_lifespan("test1")])
assert lifespans.has_lifespans()
async with lifespans(app):
assert app.setup_calls == ["test1"]
assert len(app.teardown_calls) == 0
assert app.setup_calls == ["test1"]
assert app.teardown_calls == ["test1"]
async def test_multiple_lifespans() -> None:
    """Setup runs in registration order; teardown runs in reverse order."""
    app = MockApp()
    make = create_mock_lifespan()
    names = ["test1", "test2", "test3"]
    stack: Lifespans[MockApp] = Lifespans([make(name) for name in names])
    assert stack.has_lifespans()
    async with stack(app):
        assert app.setup_calls == names
        assert app.teardown_calls == []
    assert app.setup_calls == names
    # Teardown in reverse order, like nested context managers.
    assert app.teardown_calls == list(reversed(names))
async def test_lifespan_error_handling() -> None:
    """A teardown error propagates, yet every lifespan still sets up and
    tears down in the correct (reverse) order."""
    app = MockApp()
    mock_lifespan = create_mock_lifespan()

    def failing_lifespan(name: str) -> Lifespan[MockApp]:
        # Like the standard mock lifespan, but raises after recording its
        # teardown -- simulating a cleanup failure in the middle of the stack.
        @asynccontextmanager
        async def lifespan(app: MockApp) -> AsyncIterator[None]:
            app.setup_calls.append(name)
            try:
                yield
            finally:
                app.teardown_calls.append(name)
                raise ValueError(f"Error in {name}")

        return lifespan

    lifespans: Lifespans[MockApp] = Lifespans(
        [
            mock_lifespan("test1"),
            failing_lifespan("test2"),
            mock_lifespan("test3"),
        ]
    )
    with pytest.raises(ValueError, match="Error in test2"):
        async with lifespans(app):
            pass
    # Even with errors, setup and teardown should be called in correct order
    assert app.setup_calls == ["test1", "test2", "test3"]
    assert app.teardown_calls == ["test3", "test2", "test1"]
async def test_lifespan_cancellation() -> None:
    """Teardown still runs (in reverse order) when the body is cancelled.

    Fix: the original raised CancelledError without catching it, so the
    exception escaped the test function (erroring it) and the final
    assertion was unreachable. Wrapping the block in pytest.raises keeps
    the test green while still verifying teardown-on-cancellation.
    """
    app = MockApp()
    mock_lifespan = create_mock_lifespan()
    lifespans: Lifespans[MockApp] = Lifespans(
        [mock_lifespan("test1"), mock_lifespan("test2")]
    )
    with pytest.raises(asyncio.CancelledError):
        async with lifespans(app):
            assert app.setup_calls == ["test1", "test2"]
            # Simulate cancellation
            raise asyncio.CancelledError()
    # Teardown should still be called even on cancellation
    assert app.teardown_calls == ["test2", "test1"]
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_utils/test_lifespans.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:tests/test_entrypoints.py | import os
from typing import cast
from unittest.mock import MagicMock, patch
import pytest
from marimo._entrypoints.ids import KnownEntryPoint
from marimo._entrypoints.registry import EntryPointRegistry, get_entry_points
from marimo._runtime.executor import ExecutionConfig, Executor, get_executor
class TestEntryPointRegistry:
    """Tests for EntryPointRegistry: basic register/lookup plus the
    MARIMO_<GROUP>_ALLOWLIST / MARIMO_<GROUP>_DENYLIST env-var filtering."""

    @pytest.fixture
    def registry(self) -> EntryPointRegistry[str]:
        # Fresh registry per test. The group "marimo.test.group" maps to the
        # MARIMO_TEST_GROUP_* environment variables patched below.
        return EntryPointRegistry[str](
            cast(KnownEntryPoint, "marimo.test.group")
        )

    def test_register_and_get(self, registry: EntryPointRegistry[str]) -> None:
        """register() stores a value retrievable via get()."""
        registry.register("test", "value")
        assert registry.get("test") == "value"

    def test_unregister(self, registry: EntryPointRegistry[str]) -> None:
        """unregister() returns the stored value and removes the name."""
        registry.register("test", "value")
        assert registry.unregister("test") == "value"
        with pytest.raises(KeyError):
            registry.get("test")

    def test_names(self, registry: EntryPointRegistry[str]) -> None:
        """names() lists every registered plugin name."""
        registry.register("test1", "value1")
        registry.register("test2", "value2")
        assert set(registry.names()) == {"test1", "test2"}

    def test_get_nonexistent(self, registry: EntryPointRegistry[str]) -> None:
        """get() raises KeyError for unknown names."""
        with pytest.raises(KeyError):
            registry.get("nonexistent")

    def test_repr(self, registry: EntryPointRegistry[str]) -> None:
        """repr() mentions the class and the registered names."""
        registry.register("test", "value")
        assert "EntryPointRegistry" in repr(registry)
        assert "test" in repr(registry)

    def test_allowlist(self, registry: EntryPointRegistry[str]) -> None:
        """With an allowlist set, only listed names register and resolve."""
        with patch.dict(
            os.environ, {"MARIMO_TEST_GROUP_ALLOWLIST": "test1,test2"}
        ):
            # Allowed extension
            registry.register("test1", "value1")
            assert registry.get("test1") == "value1"
            # Not allowed extension - should be silently ignored
            registry.register("test3", "value3")
            assert "test3" not in registry.names()
            # Not allowed extension - should raise on get
            with pytest.raises(ValueError, match="not allowed"):
                registry.get("test3")

    def test_denylist(self, registry: EntryPointRegistry[str]) -> None:
        """With a denylist set, listed names are rejected."""
        with patch.dict(
            os.environ, {"MARIMO_TEST_GROUP_DENYLIST": "test2,test3"}
        ):
            # Allowed extension
            registry.register("test1", "value1")
            assert registry.get("test1") == "value1"
            # Denied extension - should be silently ignored
            registry.register("test2", "value2")
            assert "test2" not in registry.names()
            # Denied extension - should raise on get
            with pytest.raises(ValueError, match="not allowed"):
                registry.get("test2")

    def test_allowlist_and_denylist(
        self, registry: EntryPointRegistry[str]
    ) -> None:
        """When both lists are set, the denylist wins over the allowlist."""
        with patch.dict(
            os.environ,
            {
                "MARIMO_TEST_GROUP_ALLOWLIST": "test1,test2",
                "MARIMO_TEST_GROUP_DENYLIST": "test2,test3",
            },
        ):
            # Allowed extension
            registry.register("test1", "value1")
            assert registry.get("test1") == "value1"
            # Denied extension - should be silently ignored even if in allowlist
            registry.register("test2", "value2")
            assert "test2" not in registry.names()
            # Not in allowlist - should be silently ignored
            registry.register("test4", "value4")
            assert "test4" not in registry.names()

    def test_case_insensitive(self, registry: EntryPointRegistry[str]) -> None:
        """Allow/deny matching ignores case; names keep their original case."""
        with patch.dict(
            os.environ,
            {
                "MARIMO_TEST_GROUP_ALLOWLIST": "Test1,TEST2",
                "MARIMO_TEST_GROUP_DENYLIST": "TEST3,test4",
            },
        ):
            # Case-insensitive allowlist match
            registry.register("test1", "value1")
            registry.register("TEST2", "value2")
            assert set(registry.names()) == {"test1", "TEST2"}
            # Case-insensitive denylist match
            registry.register("Test3", "value3")
            registry.register("TEST4", "value4")
            assert "Test3" not in registry.names()
            assert "TEST4" not in registry.names()

    @patch("marimo._entrypoints.registry.entry_points")
    def test_get_entry_points_modern(
        self, mock_entry_points: MagicMock
    ) -> None:
        """get_entry_points() uses the modern importlib select(group=...) API."""
        mock_eps = MagicMock()
        mock_eps.select.return_value = ["ep1", "ep2"]
        mock_entry_points.return_value = mock_eps
        result = get_entry_points(cast(KnownEntryPoint, "plugins"))
        assert result == ["ep1", "ep2"]
        mock_eps.select.assert_called_once_with(
            group=cast(KnownEntryPoint, "plugins")
        )

    def test_get_all(self, registry: EntryPointRegistry[str]) -> None:
        """get_all() returns directly-registered values when no installed
        entry points exist for the group."""
        registry.register("test1", "value1")
        registry.register("test2", "value2")
        with patch(
            "marimo._entrypoints.registry.get_entry_points"
        ) as mock_get_entry_points:
            mock_get_entry_points.return_value = []
            result = registry.get_all()
            assert set(result) == {"value1", "value2"}

    @patch("marimo._entrypoints.registry.get_entry_points")
    def test_get_all_with_entry_points(
        self, mock_get_entry_points: MagicMock
    ) -> None:
        """get_all() merges registered plugins with loaded entry points."""
        registry = EntryPointRegistry[str](cast(KnownEntryPoint, "test_group"))
        # Create mock entry points
        ep1 = MagicMock()
        ep1.name = "ep1"
        ep1.load.return_value = "ep_value1"
        ep2 = MagicMock()
        ep2.name = "ep2"
        ep2.load.return_value = "ep_value2"
        mock_get_entry_points.return_value = [ep1, ep2]
        # Register one plugin directly
        registry.register("test1", "value1")
        # Get all plugins
        result = registry.get_all()
        # Should include both registered and entry point plugins
        assert set(result) == {"value1", "ep_value1", "ep_value2"}
class CustomExecutor(Executor):
    """Executor stub whose results encode exactly what it was called with."""

    @staticmethod
    def _describe(cell: str, glbls: dict[str, str], graph: str) -> str:
        # Single formatting point shared by the sync and async entry points.
        return f"Executed {cell} with {glbls} in {graph}"

    def execute_cell(
        self,
        cell: str,
        glbls: dict[str, str],
        graph: str,
    ) -> str:
        return self._describe(cell, glbls, graph)

    async def execute_cell_async(
        self,
        cell: str,
        glbls: dict[str, str],
        graph: str,
    ) -> str:
        return self._describe(cell, glbls, graph)
class TestExecutorEntryPoint:
    """get_executor() should honor executors registered via the registry."""

    @pytest.fixture
    def registry(self) -> EntryPointRegistry[Executor]:
        """A registry pre-populated with the custom executor."""
        executor_registry = EntryPointRegistry[Executor]("marimo.cell.executor")
        executor_registry.register("custom", CustomExecutor)
        return executor_registry

    def test_get_entry_points_modern(
        self, registry: EntryPointRegistry[Executor]
    ) -> None:
        config = ExecutionConfig(is_strict=False)
        executor = get_executor(config, registry=registry)
        assert isinstance(executor, CustomExecutor)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/test_entrypoints.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
marimo-team/marimo:marimo/_pyodide/restartable_task.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import asyncio
from typing import Any, Callable, Optional
class RestartableTask:
    """Runs an async callable in a task that can be cancelled and restarted.

    ``start()`` loops forever, (re)creating a task from ``coro`` each time
    the current one is cancelled, until ``stop()`` is called.

    Fix: ``stop()`` and ``restart()`` previously ``assert``-ed that a task
    existed, so calling either before ``start()`` had run (or under ``-O``,
    silently misbehaving) crashed with AssertionError. They now guard with
    an ``if`` instead; a pre-start ``stop()`` simply makes ``start()``
    return immediately, and a pre-start ``restart()`` is a no-op.
    """

    def __init__(self, coro: Callable[[], Any]):
        # Factory producing the awaitable to run; called once per (re)start.
        self.coro = coro
        # The currently running task, or None before the first start.
        self.task: Optional[asyncio.Task[Any]] = None
        # Once True, start()'s loop exits instead of restarting.
        self.stopped = False

    async def start(self) -> None:
        """Run ``coro`` in a task, restarting it whenever it is cancelled,
        until ``stop()`` is called."""
        while True:
            if self.stopped:
                break
            try:
                self.task = asyncio.create_task(self.coro())
                await self.task
            except asyncio.CancelledError:
                # Cancellation comes from stop()/restart(); loop around to
                # re-check the stopped flag.
                pass

    def stop(self) -> None:
        """Permanently stop the restart loop, cancelling any running task."""
        self.stopped = True
        # Guard: stop() may legitimately be called before start() ran.
        if self.task is not None:
            self.task.cancel()

    def restart(self) -> None:
        """Cancel the current run; start()'s loop launches a fresh one."""
        # No-op before start() has created a task.
        if self.task is not None:
            self.task.cancel()
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_pyodide/restartable_task.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
marimo-team/marimo:marimo/_utils/inline_script_metadata.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
import re
from pathlib import Path
from typing import Any, cast
from marimo import _loggers
from marimo._cli.files.file_path import FileContentReader
from marimo._utils.code import hash_code
from marimo._utils.paths import normalize_path
from marimo._utils.scripts import read_pyproject_from_script
LOGGER = _loggers.marimo_logger()
class PyProjectReader:
    """Read-only view over parsed pyproject/inline-script metadata, exposing
    the fields marimo cares about (dependencies, uv indexes, python version).
    """

    def __init__(
        self,
        project: dict[str, Any],
        *,
        config_path: str | None,
        name: str | None = None,
    ):
        self.project = project
        # Where the metadata was read from; used to resolve relative paths.
        self.config_path = config_path
        self.name = name

    @staticmethod
    def from_filename(name: str) -> PyProjectReader:
        """Build a reader by parsing the given file's script metadata."""
        parsed = _get_pyproject_from_filename(name) or {}
        return PyProjectReader(parsed, config_path=name, name=name)

    @staticmethod
    def from_script(script: str) -> PyProjectReader:
        """Build a reader from PEP 723 inline metadata in script text."""
        parsed = read_pyproject_from_script(script) or {}
        return PyProjectReader(parsed, config_path=None, name=None)

    def _uv_table(self) -> dict[str, Any]:
        # Shared accessor for the [tool.uv] table (empty dict when absent).
        return self.project.get("tool", {}).get("uv", {})  # type: ignore[no-any-return]

    @property
    def extra_index_urls(self) -> list[str]:
        # See https://docs.astral.sh/uv/reference/settings/#pip_extra-index-url
        return self._uv_table().get("extra-index-url", [])  # type: ignore[no-any-return]

    @property
    def index_configs(self) -> list[dict[str, str]]:
        # See https://docs.astral.sh/uv/reference/settings/#index
        return self._uv_table().get("index", [])  # type: ignore[no-any-return]

    @property
    def index_url(self) -> str | None:
        # See https://docs.astral.sh/uv/reference/settings/#pip_index-url
        return self._uv_table().get("index-url", None)  # type: ignore[no-any-return]

    @property
    def python_version(self) -> str | None:
        """The ``requires-python`` constraint, or None if absent/non-string."""
        try:
            version = self.project.get("requires-python")
            # Only return string version requirements
            if isinstance(version, str):
                return version
            return None
        except Exception as e:
            LOGGER.warning(f"Failed to parse Python version requirement: {e}")
            return None

    @property
    def dependencies(self) -> list[str]:
        """The raw ``dependencies`` list (empty when absent)."""
        return self.project.get("dependencies", [])  # type: ignore[no-any-return]

    @property
    def requirements_txt_lines(self) -> list[str]:
        """Dependencies as requirements.txt lines, with uv sources resolved."""
        try:
            return _pyproject_toml_to_requirements_txt(
                self.project, self.config_path
            )
        except Exception as e:
            LOGGER.warning(f"Failed to parse dependencies: {e}")
            return []
def _get_pyproject_from_filename(name: str) -> dict[str, Any] | None:
    """Parse script metadata from a ``.py``, ``.md``, or ``.qmd`` file.

    Returns the parsed pyproject dict, or None when the file is missing or
    unreadable; read/parse failures are logged rather than raised.
    """
    try:
        contents, _ = FileContentReader().read_file(name)
        if name.endswith(".py"):
            # Python files carry PEP 723 inline script metadata.
            return read_pyproject_from_script(contents)
        if not (name.endswith(".md") or name.endswith(".qmd")):
            raise ValueError(
                f"Unsupported file type: {name}. Only .py and .md files are supported."
            )
        # Markdown: metadata may live under a "pyproject" frontmatter key or
        # a legacy "header" block; pyproject takes precedence when both exist.
        headers = get_headers_from_markdown(contents)
        header = headers["pyproject"]
        if not header:
            # No pyproject block: fall back to the header block.
            header = headers["header"]
        elif headers["header"]:
            # Both present: warn if the header also declares dependencies,
            # since those will be ignored in favor of pyproject.
            pyproject = PyProjectReader.from_script(headers["header"])
            if pyproject.dependencies or pyproject.python_version:
                LOGGER.warning(
                    "Both header and pyproject provide dependencies. "
                    "Preferring pyproject."
                )
        return read_pyproject_from_script(header)
    except FileNotFoundError:
        # Missing file is treated as "no metadata", not an error.
        return None
    except Exception:
        LOGGER.warning(f"Failed to read pyproject.toml from {name}")
        return None
def _pyproject_toml_to_requirements_txt(
pyproject: dict[str, Any],
config_path: str | None = None,
) -> list[str]:
"""
Convert a pyproject.toml file to a requirements.txt file.
If there is a `[tool.uv.sources]` section, we resolve the dependencies
to their corresponding source.
# dependencies = [
# "python-gcode",
# ]
#
# [tool.uv.sources]
# python-gcode = { git = "https://github.com/fetlab/python_gcode", rev = "new" }
Args:
pyproject: A dict containing the pyproject.toml contents.
config_path: The path to the pyproject.toml or inline script metadata. This
is used to resolve relative paths used in the dependencies.
""" # noqa: E501
dependencies = cast(list[str], pyproject.get("dependencies", []))
if not dependencies:
return []
uv_sources = pyproject.get("tool", {}).get("uv", {}).get("sources", {})
for dependency, source in uv_sources.items():
# Find the index of the dependency. This may have a version
# attached, so we cannot do .index()
dep_index: int | None = None
for i, dep in enumerate(dependencies):
if (
dep == dependency
or dep.startswith(f"{dependency}==")
or dep.startswith(f"{dependency}<")
or dep.startswith(f"{dependency}>")
or dep.startswith(f"{dependency}~")
):
dep_index = i
break
if dep_index is None:
continue
new_dependency = None
# Handle git dependencies
if "git" in source:
git_url = f"git+{source['git']}"
ref = (
source.get("rev") or source.get("branch") or source.get("tag")
)
new_dependency = (
f"{dependency} @ {git_url}@{ref}"
if ref
else f"{dependency} @ {git_url}"
)
# Handle local paths
elif "path" in source:
source_path = Path(source["path"])
# If path is relative and we have a config path, resolve it relative to the config path
if not source_path.is_absolute() and config_path:
config_dir = Path(config_path).parent
source_path = normalize_path(config_dir / source_path)
new_dependency = f"{dependency} @ {str(source_path)}"
# Handle URLs
elif "url" in source:
new_dependency = f"{dependency} @ {source['url']}"
if new_dependency:
if source.get("marker"):
new_dependency += f"; {source['marker']}"
dependencies[dep_index] = new_dependency
return dependencies
def is_marimo_dependency(dependency: str) -> bool:
    """Return True if *dependency* is the ``marimo`` package itself.

    Matches ``marimo`` and ``marimo[extras]`` (with any version specifier),
    but not other distributions like ``marimo-snippets``.

    Fix: the split pattern previously covered only ``=<>~``, so specifiers
    using ``!=``, whitespace (``marimo >=1``), or an environment marker
    (``marimo; ...``) were misclassified as non-marimo.
    """
    # Strip everything from the first whitespace or specifier character on.
    without_version = re.split(r"[\s=<>~!;]+", dependency.strip())[0]
    # Match marimo and marimo[extras], but not marimo-<something-else>
    return without_version == "marimo" or without_version.startswith("marimo[")
def get_headers_from_markdown(contents: str) -> dict[str, str]:
    """Extract the pyproject/header metadata blocks from markdown text."""
    # Imported lazily -- presumably to avoid an import cycle or startup
    # cost; confirm before hoisting to module level.
    from marimo._convert.markdown.to_ir import extract_frontmatter

    frontmatter = extract_frontmatter(contents)[0]
    return get_headers_from_frontmatter(frontmatter)
def get_headers_from_frontmatter(
    frontmatter: dict[str, Any],
) -> dict[str, str]:
    """Normalize frontmatter into ``{"pyproject": ..., "header": ...}``."""
    from marimo._utils.scripts import wrap_script_metadata

    raw = frontmatter.get("pyproject", "")
    if raw:
        # Wrap raw TOML content in PEP 723 format; content already starting
        # with "#" is assumed to be wrapped.
        pyproject = raw if raw.startswith("#") else wrap_script_metadata(raw)
    else:
        pyproject = ""
    return {
        "pyproject": pyproject,
        "header": frontmatter.get("header", ""),
    }
def has_marimo_in_script_metadata(filepath: str) -> bool | None:
    """Check whether ``marimo`` appears in the file's PEP 723 dependencies.

    Returns:
        True if marimo is listed, False if script metadata exists without
        marimo, and None if the file carries no script metadata at all.
    """
    project = _get_pyproject_from_filename(filepath)
    if project is None:
        return None
    for dep in project.get("dependencies", []):
        if is_marimo_dependency(dep):
            return True
    return False
def script_metadata_hash_from_filename(name: str) -> str | None:
    """Stable hash of the file's script metadata, or None if there is none."""
    project = _get_pyproject_from_filename(name)
    if project is None:
        return None
    # Canonical JSON (sorted keys, compact separators, raw unicode) so the
    # hash does not depend on key ordering in the source metadata.
    canonical = json.dumps(
        project,
        sort_keys=True,
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hash_code(canonical)
| {
"repo_id": "marimo-team/marimo",
"file_path": "marimo/_utils/inline_script_metadata.py",
"license": "Apache License 2.0",
"lines": 217,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
marimo-team/marimo:tests/_pyodide/test_bootstrap.py | # Copyright 2026 Marimo. All rights reserved.
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Callable
import pytest
from marimo._config.config import DEFAULT_CONFIG
from marimo._messaging.msgspec_encoder import encode_json_str
from marimo._pyodide.bootstrap import create_session, save_file
from marimo._pyodide.pyodide_session import PyodideSession
from marimo._server.models.models import SaveNotebookRequest
from marimo._session.model import SessionMode
from marimo._types.ids import CellId_t
if TYPE_CHECKING:
from pathlib import Path
@pytest.fixture
def mock_message_callback() -> Callable[[str], None]:
    """A no-op message callback for sessions under test."""

    def _ignore(message: str) -> None:
        # Messages are discarded; tests that need them supply their own.
        pass

    return _ignore
FILE_CONTENTS = """
import marimo
app = marimo.App()
@app.cell
def _():
"Hello"
return
"""
@pytest.fixture
def mock_app_file(tmp_path: Path) -> Path:
    """Write FILE_CONTENTS to a temp notebook file and return its path."""
    app_path = tmp_path / "test.py"
    app_path.write_text(FILE_CONTENTS)
    return app_path
@pytest.fixture
def mock_app_file_with_script_config(tmp_path: Path) -> Path:
    """Notebook fixture whose file embeds PEP 723 [tool.marimo.*] config."""
    filename = tmp_path / "test.py"
    # Inline script metadata overriding runtime and display defaults; the
    # session tests assert that theme = "dark" wins over the user config.
    content = f"""# /// script
# [tool.marimo.runtime]
# auto_instantiate = false
# on_cell_change = "lazy"
# [tool.marimo.display]
# theme = "dark"
# ///
{FILE_CONTENTS}
"""
    filename.write_text(content)
    return filename
async def test_create_session_with_default_config(
    mock_message_callback: Callable[[str], None],
    mock_app_file: Path,
) -> None:
    """Sessions created from a plain notebook run in EDIT mode."""
    session, _unused = create_session(
        filename=str(mock_app_file),
        query_params={},
        message_callback=mock_message_callback,
        user_config=DEFAULT_CONFIG,
    )
    assert isinstance(session, PyodideSession)
    assert session.mode == SessionMode.EDIT
async def test_create_session_with_script_config(
    mock_message_callback: Callable[[str], None],
    mock_app_file_with_script_config: Path,
) -> None:
    """Inline [tool.marimo.display] config overrides the user config."""
    session, _unused = create_session(
        filename=str(mock_app_file_with_script_config),
        query_params={},
        message_callback=mock_message_callback,
        user_config=DEFAULT_CONFIG,
    )
    # The fixture's script metadata sets theme = "dark"; it must win over
    # the default user config.
    assert session._initial_user_config["display"]["theme"] == "dark"
async def test_create_session_with_invalid_script_config(
    mock_message_callback: Callable[[str], None],
    tmp_path: Path,
) -> None:
    """Unparseable inline config is ignored in favor of the defaults."""
    # Create a file with invalid config
    filename = tmp_path / "test.py"
    content = f"""# ---
# marimo:
# invalid: true
# ---
{FILE_CONTENTS}
"""
    filename.write_text(content)
    session, _ = create_session(
        filename=str(filename),
        query_params={},
        message_callback=mock_message_callback,
        user_config=DEFAULT_CONFIG,
    )
    # Invalid config should be ignored and default config should be used
    assert (
        session._initial_user_config["display"]["theme"]
        == DEFAULT_CONFIG["display"]["theme"]
    )
async def test_instantiate(
    mock_message_callback: Callable[[str], None],
    mock_app_file: Path,
) -> None:
    """instantiate() enqueues one auto-run execution request per cell."""
    session, _ = create_session(
        filename=str(mock_app_file),
        query_params={},
        message_callback=mock_message_callback,
        user_config=DEFAULT_CONFIG,
    )
    # Mock the put_control_request method to capture the request
    captured_request = None
    original_put_control_request = session.put_control_request

    def mock_put_control_request(request: Any) -> None:
        # Record the request, then forward it so session behavior is intact.
        nonlocal captured_request
        captured_request = request
        original_put_control_request(request)

    session.put_control_request = mock_put_control_request
    # Call instantiate
    from marimo._pyodide.bootstrap import instantiate

    instantiate(session)
    # Verify the request was created correctly: one request for the single
    # cell in FILE_CONTENTS, carrying its code, with auto_run enabled.
    assert captured_request is not None
    assert len(captured_request.execution_requests) == 1
    assert captured_request.execution_requests[0].code == '"Hello"'
    assert captured_request.auto_run is True
def test_save_file(
    mock_app_file: Path,
) -> None:
    """save_file() writes the requested cell code back to disk."""
    save_request = SaveNotebookRequest(
        codes=["print('hello')"],
        names=["cell-1"],
        configs=[{}],
        cell_ids=[CellId_t("cell-1")],
        layout=None,
        filename=str(mock_app_file),
    )
    # save_file accepts the JSON-encoded request, as sent from the frontend.
    save_file(
        request=encode_json_str(save_request),
        filename=str(mock_app_file),
    )
    assert "print('hello')" in mock_app_file.read_text()
async def test_message_callback_format(
    mock_app_file: Path,
) -> None:
    """Test that message_callback receives properly formatted JSON."""
    received: list[str] = []
    session, _unused = create_session(
        filename=str(mock_app_file),
        query_params={},
        message_callback=received.append,
        user_config=DEFAULT_CONFIG,
    )
    assert received
    first = json.loads(received[0])
    assert first["op"] == "kernel-ready"
    assert isinstance(first["data"], dict)
| {
"repo_id": "marimo-team/marimo",
"file_path": "tests/_pyodide/test_bootstrap.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.